Kernel
[PATCH v1 openEuler-26.09] Add copy to/from/in user with vectorization support
by Nikita Panov 29 Jan '26
From: Artem Kuzin <artem.kuzin(a)huawei.com>
kunpeng inclusion
category: feature
bugzilla: https://atomgit.com/openeuler/kernel/issues/8445
-------------------------------------------------
1. This implementation uses st1/ld1 4-vector instructions, which allow copying 64 bytes at once (modeled in C just below)
2. The vectorized copy code is used only if the size of the data block to copy is more than 128 bytes
3. To use this functionality you need to set the configuration switch CONFIG_USE_VECTORIZED_COPY=y
4. The code can be used on any ARMv8 variant
5. In-kernel copy functions such as memcpy() are not supported yet, but can be enabled in the future
6. For now we use a lightweight version of register context saving/restoration (4 registers)
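As a rough illustration (ours, not part of the patch), the 64-byte inner step can be modeled in plain C. The real implementation uses a single ld1/st1 pair on the v20-v23 vector registers; copy64() is a hypothetical name used only for this sketch:

/* Rough C model of one 64-byte iteration of the vectorized copy loop.
 * The patch does this with ld1/st1 on four vector registers (v20-v23);
 * copy64() is a hypothetical helper, not part of the patch. */
struct fpsimd_state { __uint128_t v[4]; };	/* as added to processor.h */

static void copy64(void *dst, const void *src)
{
	struct fpsimd_state tmp;

	__builtin_memcpy(&tmp, src, sizeof(tmp));	/* ld1 {v20.16b-v23.16b}, [src] */
	__builtin_memcpy(dst, &tmp, sizeof(tmp));	/* st1 {v20.16b-v23.16b}, [dst] */
}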
We introduce vectorization support for the copy_from/to/in_user functions. It currently
works in parallel with the original FPSIMD/SVE vectorization and does not affect it in any way.
We add a special flag to the task struct - TIF_KERNEL_FPSIMD - which is set while the
lightweight vectorization is in use in the kernel. The thread struct has been extended with two
fields: the user-space FPSIMD state and the kernel FPSIMD state. The user-space FPSIMD state is
used by the kernel_fpsimd_begin() and kernel_fpsimd_end() functions that wrap lightweight
FPSIMD context usage in kernel space. The kernel FPSIMD state is used to manage thread switches.
Nested calls of kernel_neon_begin()/kernel_fpsimd_begin() are not supported, and there
are no plans to support them in the future; this is not necessary.
We save the lightweight FPSIMD context in kernel_fpsimd_begin() and restore it in
kernel_fpsimd_end(). On a thread switch we preserve the kernel FPSIMD context and restore
the user-space one, if any. This prevents corruption of the user-space FPSIMD state. Before
switching to the next thread we restore its kernel FPSIMD context, if any.
It is allowed to use FPSIMD in bottom halves, because in case of BH preemption we check the
TIF_KERNEL_FPSIMD flag and save/restore the contexts.
Context management is quite lightweight and is executed only when the TIF_KERNEL_FPSIMD
flag is set.
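For clarity, here is a minimal sketch (ours, not part of the diff) of the call pattern these helpers are designed for; fpsimd_copy_sketch() is hypothetical, and the pointer masking and TTBR0 handling done by the real raw_copy_from_user() below are elided:

/* Minimal sketch of the kernel_fpsimd_begin()/kernel_fpsimd_end() usage
 * pattern; the real dispatch logic is in raw_copy_from_user() below. */
static unsigned long fpsimd_copy_sketch(void *to, const void __user *from,
					unsigned long n)
{
	unsigned long ret;

	if (!kernel_fpsimd_begin())	/* FPSIMD not usable in this context */
		return __arch_copy_from_user(to, from, n);

	uaccess_enable_privileged();
	ret = __arch_copy_from_user_fpsimd(to, from, n);
	uaccess_disable_privileged();

	kernel_fpsimd_end();		/* restore the saved user-space state */
	return ret;
}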
To enable this feature, you need to manually modify the
appropriate entries:
/proc/sys/vm/copy_from_user_threshold
/proc/sys/vm/copy_in_user_threshold
/proc/sys/vm/copy_to_user_threshold
The allowed values are the following:
-1 - feature disabled (the default)
0 - feature always enabled
n (n > 0) - feature enabled if the copied size is at least n bytes.
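The resulting per-copy decision can be summarized by the following predicate (our illustration; the actual check is inlined in raw_copy_from_user()/raw_copy_to_user() below):

/* Illustration of the threshold semantics documented above;
 * use_vectorized_copy() is a hypothetical helper mirroring the
 * inline check in raw_copy_*_user(). */
static bool use_vectorized_copy(int threshold, unsigned long n)
{
	if (threshold == -1)			/* feature disabled */
		return false;
	return n >= (unsigned long)threshold;	/* 0 means always enabled */
}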
P.S.:
What I personally don't like about the current approach:
1. The additional fields and flag in the task struct look quite ugly
2. There is no way to configure the size of the chunk copied using FPSIMD from user space
3. FPSIMD-based memory movement is not generic; it needs to be enabled for memmove(),
memcpy() and friends in the future.
Co-developed-by: Alexander Kozhevnikov <alexander.kozhevnikov(a)huawei-partners.com>
Signed-off-by: Alexander Kozhevnikov <alexander.kozhevnikov(a)huawei-partners.com>
Co-developed-by: Nikita Panov <panov.nikita(a)huawei.com>
Signed-off-by: Nikita Panov <panov.nikita(a)huawei.com>
Signed-off-by: Artem Kuzin <artem.kuzin(a)huawei.com>
---
arch/arm64/Kconfig | 15 ++
arch/arm64/configs/openeuler_defconfig | 2 +
arch/arm64/include/asm/fpsimd.h | 15 ++
arch/arm64/include/asm/fpsimdmacros.h | 14 ++
arch/arm64/include/asm/neon.h | 28 ++++
arch/arm64/include/asm/processor.h | 10 ++
arch/arm64/include/asm/thread_info.h | 5 +
arch/arm64/include/asm/uaccess.h | 218 ++++++++++++++++++++++++-
arch/arm64/kernel/entry-fpsimd.S | 22 +++
arch/arm64/kernel/fpsimd.c | 102 +++++++++++-
arch/arm64/kernel/process.c | 2 +-
arch/arm64/lib/copy_from_user.S | 30 ++++
arch/arm64/lib/copy_template_fpsimd.S | 180 ++++++++++++++++++++
arch/arm64/lib/copy_to_user.S | 30 ++++
kernel/softirq.c | 34 ++++
kernel/sysctl.c | 34 ++++
16 files changed, 734 insertions(+), 7 deletions(-)
create mode 100644 arch/arm64/lib/copy_template_fpsimd.S
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c3b38c890b45..8904e6476e3b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1870,6 +1870,21 @@ config ARM64_ILP32
is an ABI where long and pointers are 32bits but it uses the AARCH64
instruction set.
+config USE_VECTORIZED_COPY
+ bool "Use vectorized instructions in copy_to/from user"
+ depends on KERNEL_MODE_NEON
+ default y
+ help
+ This option turns on vectorization to speed up copy_to/from_user routines.
+
+config VECTORIZED_COPY_VALIDATE
+ bool "Validate result of vectorized copy using regular implementation"
+ depends on KERNEL_MODE_NEON
+ depends on USE_VECTORIZED_COPY
+ default n
+ help
+ This option validates the result of the vectorized copy against the regular copy implementation.
+
menuconfig AARCH32_EL0
bool "Kernel support for 32-bit EL0"
depends on ARM64_4K_PAGES || EXPERT
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 9e7bc82cba3a..9843dec071bf 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -527,6 +527,8 @@ CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y
# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set
# CONFIG_ARM64_SW_TTBR0_PAN is not set
CONFIG_ARM64_TAGGED_ADDR_ABI=y
+CONFIG_USE_VECTORIZED_COPY=y
+# CONFIG_VECTORIZED_COPY_VALIDATE is not set
CONFIG_AARCH32_EL0=y
# CONFIG_KUSER_HELPERS is not set
# CONFIG_COMPAT_ALIGNMENT_FIXUPS is not set
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index b6c6949984d8..1fc9089b4a47 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -46,6 +46,21 @@
struct task_struct;
+#ifdef CONFIG_USE_VECTORIZED_COPY
+extern void fpsimd_save_state_light(struct fpsimd_state *state);
+extern void fpsimd_load_state_light(struct fpsimd_state *state);
+#else
+static inline void fpsimd_save_state_light(struct fpsimd_state *state)
+{
+ (void) state;
+}
+
+static inline void fpsimd_load_state_light(struct fpsimd_state *state)
+{
+ (void) state;
+}
+#endif
+
extern void fpsimd_save_state(struct user_fpsimd_state *state);
extern void fpsimd_load_state(struct user_fpsimd_state *state);
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index cdf6a35e3994..df9d3ed91931 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -8,6 +8,20 @@
#include <asm/assembler.h>
+#ifdef CONFIG_USE_VECTORIZED_COPY
+/* Lightweight fpsimd context saving/restoration.
+ * Necessary for vectorized kernel memory movement
+ * implementation
+ */
+.macro fpsimd_save_light state
+ st1 {v20.16b, v21.16b, v22.16b, v23.16b}, [\state]
+.endm
+
+.macro fpsimd_restore_light state
+ ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [\state]
+.endm
+#endif
+
.macro fpsimd_save state, tmpnr
stp q0, q1, [\state, #16 * 0]
stp q2, q3, [\state, #16 * 2]
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
index d4b1d172a79b..ab84b194d7b3 100644
--- a/arch/arm64/include/asm/neon.h
+++ b/arch/arm64/include/asm/neon.h
@@ -16,4 +16,32 @@
void kernel_neon_begin(void);
void kernel_neon_end(void);
+#ifdef CONFIG_USE_VECTORIZED_COPY
+bool kernel_fpsimd_begin(void);
+void kernel_fpsimd_end(void);
+/* Functions to use in non-preemptible context */
+void _kernel_fpsimd_save(struct fpsimd_state *state);
+void _kernel_fpsimd_load(struct fpsimd_state *state);
+#else
+static inline bool kernel_fpsimd_begin(void)
+{
+ return false;
+}
+
+static inline void kernel_fpsimd_end(void)
+{
+}
+
+/* Functions to use in non-preemptible context */
+static inline void _kernel_fpsimd_save(struct fpsimd_state *state)
+{
+ (void) state;
+}
+
+static inline void _kernel_fpsimd_load(struct fpsimd_state *state)
+{
+ (void) state;
+}
+#endif
+
#endif /* ! __ASM_NEON_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 9e688b1b13d4..9b81dbcd2126 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -153,6 +153,10 @@ struct cpu_context {
unsigned long pc;
};
+struct fpsimd_state {
+ __uint128_t v[4];
+};
+
struct thread_struct {
struct cpu_context cpu_context; /* cpu context */
@@ -196,6 +200,12 @@ struct thread_struct {
KABI_RESERVE(6)
KABI_RESERVE(7)
KABI_RESERVE(8)
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ KABI_EXTEND(
+ struct fpsimd_state ustate;
+ struct fpsimd_state kstate;
+ )
+#endif
};
static inline unsigned int thread_get_vl(struct thread_struct *thread,
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 379d24059f5b..60d0be8a2d58 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -89,6 +89,9 @@ void arch_setup_new_exec(void);
#define TIF_SME 27 /* SME in use */
#define TIF_SME_VL_INHERIT 28 /* Inherit SME vl_onexec across exec */
#define TIF_32BIT_AARCH64 29 /* 32 bit process on AArch64(ILP32) */
+#define TIF_KERNEL_FPSIMD 31 /* Use FPSIMD in kernel */
+#define TIF_PRIV_UACC_ENABLED 32 /* Whether privileged uaccess was manually enabled */
+
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -107,6 +110,8 @@ void arch_setup_new_exec(void);
#define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT)
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_32BIT_AARCH64 (1 << TIF_32BIT_AARCH64)
+#define _TIF_KERNEL_FPSIMD (1UL << TIF_KERNEL_FPSIMD)
+#define _TIF_PRIV_UACC_ENABLED (1UL << TIF_PRIV_UACC_ENABLED)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index dd0877a75922..fc9f1a40624d 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -26,6 +26,10 @@
#include <asm/memory.h>
#include <asm/extable.h>
+#ifndef __GENKSYMS__
+#include <asm/neon.h>
+#endif
+
static inline int __access_ok(const void __user *ptr, unsigned long size);
/*
@@ -134,7 +138,7 @@ static inline void __uaccess_enable_hw_pan(void)
CONFIG_ARM64_PAN));
}
-static inline void uaccess_disable_privileged(void)
+static inline void __uaccess_disable_privileged(void)
{
mte_disable_tco();
@@ -144,7 +148,22 @@ static inline void uaccess_disable_privileged(void)
__uaccess_enable_hw_pan();
}
-static inline void uaccess_enable_privileged(void)
+static inline void uaccess_disable_privileged(void)
+{
+ preempt_disable();
+
+ if (!test_and_clear_thread_flag(TIF_PRIV_UACC_ENABLED)) {
+ WARN_ON(1);
+ preempt_enable();
+ return;
+ }
+
+ __uaccess_disable_privileged();
+
+ preempt_enable();
+}
+
+static inline void __uaccess_enable_privileged(void)
{
mte_enable_tco();
@@ -154,6 +173,47 @@ static inline void uaccess_enable_privileged(void)
__uaccess_disable_hw_pan();
}
+static inline void uaccess_enable_privileged(void)
+{
+ preempt_disable();
+
+ if (test_and_set_thread_flag(TIF_PRIV_UACC_ENABLED)) {
+ WARN_ON(1);
+ preempt_enable();
+ return;
+ }
+
+ __uaccess_enable_privileged();
+
+ preempt_enable();
+}
+
+static inline void uaccess_priviliged_context_switch(struct task_struct *next)
+{
+ bool curr_enabled = !!test_thread_flag(TIF_PRIV_UACC_ENABLED);
+ bool next_enabled = !!test_ti_thread_flag(&next->thread_info, TIF_PRIV_UACC_ENABLED);
+
+ if (curr_enabled == next_enabled)
+ return;
+
+ if (curr_enabled)
+ __uaccess_disable_privileged();
+ else
+ __uaccess_enable_privileged();
+}
+
+static inline void uaccess_priviliged_state_save(void)
+{
+ if (test_thread_flag(TIF_PRIV_UACC_ENABLED))
+ __uaccess_disable_privileged();
+}
+
+static inline void uaccess_priviliged_state_restore(void)
+{
+ if (test_thread_flag(TIF_PRIV_UACC_ENABLED))
+ __uaccess_enable_privileged();
+}
+
/*
* Sanitize a uaccess pointer such that it cannot reach any kernel address.
*
@@ -391,7 +451,97 @@ do { \
} while (0); \
} while(0)
-extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+#define USER_COPY_CHUNK_SIZE 4096
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+
+extern int sysctl_copy_from_user_threshold;
+
+#define verify_fpsimd_copy(to, from, n, ret) \
+({ \
+ unsigned long __verify_ret = 0; \
+ __verify_ret = memcmp(to, from, ret ? n - ret : n); \
+ if (__verify_ret) \
+ pr_err("FPSIMD:%s inconsistent state\n", __func__); \
+ if (ret) \
+ pr_err("FPSIMD:%s failed to copy data, expected=%lu, copied=%lu\n", __func__, n, n - ret); \
+ __verify_ret |= ret; \
+ __verify_ret; \
+})
+
+#define compare_fpsimd_copy(to, from, n, ret_fpsimd, ret) \
+({ \
+ unsigned long __verify_ret = 0; \
+ __verify_ret = memcmp(to, from, ret ? n - ret : n); \
+ if (__verify_ret) \
+ pr_err("FIXUP:%s inconsistent state\n", __func__); \
+ if (ret) \
+ pr_err("FIXUP:%s failed to copy data, expected=%lu, copied=%lu\n", __func__, n, n - ret); \
+ __verify_ret |= ret; \
+ if (ret_fpsimd != ret) { \
+ pr_err("FIXUP:%s difference between FPSIMD %lu and regular %lu\n", __func__, n - ret_fpsimd, n - ret); \
+ __verify_ret |= 1; \
+ } else { \
+ __verify_ret = 0; \
+ } \
+ __verify_ret; \
+})
+
+extern unsigned long __must_check
+__arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+extern unsigned long __must_check
+__arch_copy_from_user_fpsimd(void *to, const void __user *from, unsigned long n);
+
+static __always_inline unsigned long __must_check
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long __acfu_ret;
+
+ if (sysctl_copy_from_user_threshold == -1 || n < sysctl_copy_from_user_threshold) {
+ uaccess_ttbr0_enable();
+ __acfu_ret = __arch_copy_from_user(to,
+ __uaccess_mask_ptr(from), n);
+ uaccess_ttbr0_disable();
+ } else {
+ if (kernel_fpsimd_begin()) {
+ unsigned long __acfu_ret_fpsimd;
+
+ uaccess_enable_privileged();
+ __acfu_ret_fpsimd = __arch_copy_from_user_fpsimd((to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_disable_privileged();
+
+ __acfu_ret = __acfu_ret_fpsimd;
+ kernel_fpsimd_end();
+#ifdef CONFIG_VECTORIZED_COPY_VALIDATE
+ if (verify_fpsimd_copy(to, __uaccess_mask_ptr(from), n,
+ __acfu_ret)) {
+
+ uaccess_ttbr0_enable();
+ __acfu_ret = __arch_copy_from_user((to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_ttbr0_disable();
+
+ compare_fpsimd_copy(to, __uaccess_mask_ptr(from), n,
+ __acfu_ret_fpsimd, __acfu_ret);
+ }
+#endif
+ } else {
+ uaccess_ttbr0_enable();
+ __acfu_ret = __arch_copy_from_user((to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_ttbr0_disable();
+ }
+ }
+
+
+ return __acfu_ret;
+}
+#else
+extern unsigned long __must_check
+__arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+
#define raw_copy_from_user(to, from, n) \
({ \
unsigned long __acfu_ret; \
@@ -402,7 +552,66 @@ extern unsigned long __must_check __arch_copy_from_user(void *to, const void __u
__acfu_ret; \
})
-extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
+#endif
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+
+extern int sysctl_copy_to_user_threshold;
+
+extern unsigned long __must_check
+__arch_copy_to_user(void __user *to, const void *from, unsigned long n);
+
+extern unsigned long __must_check
+__arch_copy_to_user_fpsimd(void __user *to, const void *from, unsigned long n);
+
+static __always_inline unsigned long __must_check
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ unsigned long __actu_ret;
+
+
+ if (sysctl_copy_to_user_threshold == -1 || n < sysctl_copy_to_user_threshold) {
+ uaccess_ttbr0_enable();
+ __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_ttbr0_disable();
+ } else {
+ if (kernel_fpsimd_begin()) {
+ unsigned long __actu_ret_fpsimd;
+
+ uaccess_enable_privileged();
+ __actu_ret_fpsimd = __arch_copy_to_user_fpsimd(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_disable_privileged();
+
+ kernel_fpsimd_end();
+ __actu_ret = __actu_ret_fpsimd;
+#ifdef CONFIG_VECTORIZED_COPY_VALIDATE
+ if (verify_fpsimd_copy(__uaccess_mask_ptr(to), from, n,
+ __actu_ret)) {
+ uaccess_ttbr0_enable();
+ __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_ttbr0_disable();
+
+ compare_fpsimd_copy(__uaccess_mask_ptr(to), from, n,
+ __actu_ret_fpsimd, __actu_ret);
+ }
+#endif
+ } else {
+ uaccess_ttbr0_enable();
+ __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_ttbr0_disable();
+ }
+ }
+
+ return __actu_ret;
+}
+#else
+extern unsigned long __must_check
+__arch_copy_to_user(void __user *to, const void *from, unsigned long n);
+
#define raw_copy_to_user(to, from, n) \
({ \
unsigned long __actu_ret; \
@@ -412,6 +621,7 @@ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const voi
uaccess_ttbr0_disable(); \
__actu_ret; \
})
+#endif
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 6325db1a2179..6660465f1b7c 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -11,6 +11,28 @@
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
+#ifdef CONFIG_USE_VECTORIZED_COPY
+/*
+ * Save the FP registers.
+ *
+ * x0 - pointer to struct fpsimd_state
+ */
+SYM_FUNC_START(fpsimd_save_state_light)
+ fpsimd_save_light x0
+ ret
+SYM_FUNC_END(fpsimd_save_state_light)
+
+/*
+ * Load the FP registers.
+ *
+ * x0 - pointer to struct fpsimd_state
+ */
+SYM_FUNC_START(fpsimd_load_state_light)
+ fpsimd_restore_light x0
+ ret
+SYM_FUNC_END(fpsimd_load_state_light)
+#endif
+
/*
* Save the FP registers.
*
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 998906b75075..1b6b1accfbbc 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1579,6 +1579,11 @@ void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs)
current);
}
+#ifdef CONFIG_USE_VECTORIZED_COPY
+static void kernel_fpsimd_rollback_changes(void);
+static void kernel_fpsimd_restore_changes(struct task_struct *tsk);
+#endif
+
void fpsimd_thread_switch(struct task_struct *next)
{
bool wrong_task, wrong_cpu;
@@ -1587,10 +1592,11 @@ void fpsimd_thread_switch(struct task_struct *next)
return;
__get_cpu_fpsimd_context();
-
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ kernel_fpsimd_rollback_changes();
+#endif
/* Save unsaved fpsimd state, if any: */
fpsimd_save();
-
/*
* Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
* state. For kernel threads, FPSIMD registers are never loaded
@@ -1603,6 +1609,9 @@ void fpsimd_thread_switch(struct task_struct *next)
update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
wrong_task || wrong_cpu);
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ kernel_fpsimd_restore_changes(next);
+#endif
__put_cpu_fpsimd_context();
}
@@ -1933,6 +1942,95 @@ void kernel_neon_end(void)
}
EXPORT_SYMBOL_GPL(kernel_neon_end);
+#ifdef CONFIG_USE_VECTORIZED_COPY
+bool kernel_fpsimd_begin(void)
+{
+ if (WARN_ON(!system_capabilities_finalized()) ||
+ !system_supports_fpsimd() ||
+ in_irq() || irqs_disabled() || in_nmi())
+ return false;
+
+ preempt_disable();
+ if (test_and_set_thread_flag(TIF_KERNEL_FPSIMD)) {
+ preempt_enable();
+
+ WARN_ON(1);
+ return false;
+ }
+
+ /*
+ * Leaving streaming mode enabled will cause issues for any kernel
+ * NEON and leaving streaming mode or ZA enabled may increase power
+ * consumption.
+ */
+ if (system_supports_sme())
+ sme_smstop();
+
+ fpsimd_save_state_light(¤t->thread.ustate);
+ preempt_enable();
+
+ return true;
+}
+EXPORT_SYMBOL(kernel_fpsimd_begin);
+
+void kernel_fpsimd_end(void)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ preempt_disable();
+ if (test_and_clear_thread_flag(TIF_KERNEL_FPSIMD))
+ fpsimd_load_state_light(¤t->thread.ustate);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(kernel_fpsimd_end);
+
+void _kernel_fpsimd_save(struct fpsimd_state *state)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_thread_flag(TIF_KERNEL_FPSIMD))
+ fpsimd_save_state_light(state);
+}
+
+void _kernel_fpsimd_load(struct fpsimd_state *state)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_thread_flag(TIF_KERNEL_FPSIMD))
+ fpsimd_load_state_light(state);
+}
+
+static void kernel_fpsimd_rollback_changes(void)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_thread_flag(TIF_KERNEL_FPSIMD)) {
+ fpsimd_save_state_light(¤t->thread.kstate);
+ fpsimd_load_state_light(¤t->thread.ustate);
+ }
+}
+
+static void kernel_fpsimd_restore_changes(struct task_struct *tsk)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_ti_thread_flag(task_thread_info(tsk), TIF_KERNEL_FPSIMD)) {
+ fpsimd_save_state_light(&tsk->thread.ustate);
+ fpsimd_load_state_light(&tsk->thread.kstate);
+ }
+}
+#endif
+
#ifdef CONFIG_EFI
static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index e9e5ce956f15..fd895189cb7e 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -529,7 +529,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *next)
{
struct task_struct *last;
-
+ uaccess_priviliged_context_switch(next);
fpsimd_thread_switch(next);
tls_thread_switch(next);
hw_breakpoint_thread_switch(next);
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 34e317907524..60dc63e10233 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -71,3 +71,33 @@ USER(9998f, ldtrb tmp1w, [srcin])
ret
SYM_FUNC_END(__arch_copy_from_user)
EXPORT_SYMBOL(__arch_copy_from_user)
+
+
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ .macro ldsve reg1, reg2, reg3, reg4, ptr
+ USER(9997f, ld1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+ .macro stsve reg1, reg2, reg3, reg4, ptr
+ KERNEL_ME_SAFE(9998f, st1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+SYM_FUNC_START(__arch_copy_from_user_fpsimd)
+ add end, x0, x2
+ mov srcin, x1
+#include "copy_template_fpsimd.S"
+ mov x0, #0 // Nothing to copy
+ ret
+
+ // Exception fixups
+9997: cmp dst, dstin
+ b.ne 9998f
+ // Before being absolutely sure we couldn't copy anything, try harder
+USER(9998f, ldtrb tmp1w, [srcin])
+ strb tmp1w, [dst], #1
+9998: sub x0, end, dst // bytes not copied
+ ret
+SYM_FUNC_END(__arch_copy_from_user_fpsimd)
+EXPORT_SYMBOL(__arch_copy_from_user_fpsimd)
+#endif
\ No newline at end of file
diff --git a/arch/arm64/lib/copy_template_fpsimd.S b/arch/arm64/lib/copy_template_fpsimd.S
new file mode 100644
index 000000000000..9b2e7ce1e4d2
--- /dev/null
+++ b/arch/arm64/lib/copy_template_fpsimd.S
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
+ */
+
+/*
+ * Copy a buffer from src to dest (alignment handled by the hardware)
+ *
+ * Parameters:
+ * x0 - dest
+ * x1 - src
+ * x2 - n
+ * Returns:
+ * x0 - dest
+ */
+dstin .req x0
+src .req x1
+count .req x2
+tmp1 .req x3
+tmp1w .req w3
+tmp2 .req x4
+tmp2w .req w4
+dst .req x6
+
+A_l .req x7
+A_h .req x8
+B_l .req x9
+B_h .req x10
+C_l .req x11
+C_h .req x12
+D_l .req x13
+D_h .req x14
+
+V_a .req v20
+V_b .req v21
+V_c .req v22
+V_d .req v23
+
+ mov dst, dstin
+ cmp count, #16
+ /* When the copy length is less than 16 bytes, the accesses are not aligned. */
+ b.lo .Ltiny15_fpsimd
+
+ neg tmp2, src
+ ands tmp2, tmp2, #15/* Bytes to reach alignment. */
+ b.eq .LSrcAligned_fpsimd
+ sub count, count, tmp2
+ /*
+ * Copy the leading memory data from src to dst in increasing
+ * address order. This way, the risk of overwriting the source
+ * memory data is eliminated when the distance between src and
+ * dst is less than 16. The memory accesses here are aligned.
+ */
+ tbz tmp2, #0, 1f
+ ldrb1 tmp1w, src, #1
+ strb1 tmp1w, dst, #1
+1:
+ tbz tmp2, #1, 2f
+ ldrh1 tmp1w, src, #2
+ strh1 tmp1w, dst, #2
+2:
+ tbz tmp2, #2, 3f
+ ldr1 tmp1w, src, #4
+ str1 tmp1w, dst, #4
+3:
+ tbz tmp2, #3, .LSrcAligned_fpsimd
+ ldr1 tmp1, src, #8
+ str1 tmp1, dst, #8
+
+.LSrcAligned_fpsimd:
+ cmp count, #64
+ b.ge .Lcpy_over64_fpsimd
+ /*
+ * Deal with small copies quickly by dropping straight into the
+ * exit block.
+ */
+.Ltail63_fpsimd:
+ /*
+ * Copy up to 48 bytes of data. At this point we only need the
+ * bottom 6 bits of count to be accurate.
+ */
+ ands tmp1, count, #0x30
+ b.eq .Ltiny15_fpsimd
+ cmp tmp1w, #0x20
+ b.eq 1f
+ b.lt 2f
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+1:
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+2:
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+.Ltiny15_fpsimd:
+ /*
+ * Prefer to break one ldp/stp into several loads/stores that access
+ * memory in increasing address order, rather than load/store 16 bytes
+ * from (src-16) to (dst-16) and move src back to an aligned address,
+ * as the original cortex memcpy does. If the original memcpy process
+ * were kept here, memmove would need the precondition that src is at
+ * least 16 bytes above dst, otherwise some source data would be
+ * overwritten when memmove calls memcpy directly. To keep memmove
+ * simple and decouple memcpy's dependency on memmove, the original
+ * process was withdrawn.
+ */
+ tbz count, #3, 1f
+ ldr1 tmp1, src, #8
+ str1 tmp1, dst, #8
+1:
+ tbz count, #2, 2f
+ ldr1 tmp1w, src, #4
+ str1 tmp1w, dst, #4
+2:
+ tbz count, #1, 3f
+ ldrh1 tmp1w, src, #2
+ strh1 tmp1w, dst, #2
+3:
+ tbz count, #0, .Lexitfunc_fpsimd
+ ldrb1 tmp1w, src, #1
+ strb1 tmp1w, dst, #1
+
+ b .Lexitfunc_fpsimd
+
+.Lcpy_over64_fpsimd:
+ subs count, count, #128
+ b.ge .Lcpy_body_large_fpsimd
+ /*
+ * Less than 128 bytes to copy, so handle 64 here and then jump
+ * to the tail.
+ */
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+ ldp1 B_l, B_h, src, #16
+ ldp1 C_l, C_h, src, #16
+ stp1 B_l, B_h, dst, #16
+ stp1 C_l, C_h, dst, #16
+ ldp1 D_l, D_h, src, #16
+ stp1 D_l, D_h, dst, #16
+
+ tst count, #0x3f
+ b.ne .Ltail63_fpsimd
+ b .Lexitfunc_fpsimd
+
+ /*
+ * Critical loop. Start at a new cache line boundary. Assuming
+ * 64 bytes per line this ensures the entire loop is in one line.
+ */
+ .p2align L1_CACHE_SHIFT
+.Lcpy_body_large_fpsimd:
+ /* pre-get 64 bytes data. */
+ ldsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, src
+ add src, src, #64
+
+1:
+ /*
+ * interlace the load of next 64 bytes data block with store of the last
+ * loaded 64 bytes data.
+ */
+ stsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, dst
+ ldsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, src
+ add dst, dst, #64
+ add src, src, #64
+
+ subs count, count, #64
+ b.ge 1b
+
+ stsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, dst
+ add dst, dst, #64
+
+ tst count, #0x3f
+ b.ne .Ltail63_fpsimd
+.Lexitfunc_fpsimd:
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 2ac716c0d6d8..c190e5f8a989 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -71,3 +71,33 @@ USER(9998f, sttrb tmp1w, [dst])
ret
SYM_FUNC_END(__arch_copy_to_user)
EXPORT_SYMBOL(__arch_copy_to_user)
+
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ .macro stsve reg1, reg2, reg3, reg4, ptr
+ USER(9997f, st1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+ .macro ldsve reg1, reg2, reg3, reg4, ptr
+ KERNEL_ME_SAFE(9998f, ld1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+SYM_FUNC_START(__arch_copy_to_user_fpsimd)
+ add end, x0, x2
+ mov srcin, x1
+#include "copy_template_fpsimd.S"
+ mov x0, #0
+ ret
+
+ // Exception fixups
+9997: cmp dst, dstin
+ b.ne 9998f
+ // Before being absolutely sure we couldn't copy anything, try harder
+KERNEL_ME_SAFE(9998f, ldrb tmp1w, [srcin])
+USER(9998f, sttrb tmp1w, [dst])
+ add dst, dst, #1
+9998: sub x0, end, dst // bytes not copied
+ ret
+SYM_FUNC_END(__arch_copy_to_user_fpsimd)
+EXPORT_SYMBOL(__arch_copy_to_user_fpsimd)
+#endif
diff --git a/kernel/softirq.c b/kernel/softirq.c
index f8cf88cc46c6..39b84ffbf4e5 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -30,6 +30,10 @@
#include <asm/softirq_stack.h>
+#ifdef CONFIG_USE_VECTORIZED_COPY
+#include <asm/fpsimd.h>
+#endif
+
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
@@ -524,6 +528,9 @@ static void handle_softirqs(bool ksirqd)
__u32 pending;
int softirq_bit;
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ struct fpsimd_state state;
+#endif
/*
* Mask out PF_MEMALLOC as the current task context is borrowed for the
* softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
@@ -533,10 +540,16 @@ static void handle_softirqs(bool ksirqd)
pending = local_softirq_pending();
+
softirq_handle_begin();
in_hardirq = lockdep_softirq_start();
account_softirq_enter(current);
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ _kernel_fpsimd_save(&state);
+ uaccess_priviliged_state_save();
+#endif
+
restart:
/* Reset the pending bitmask before enabling irqs */
set_softirq_pending(0);
@@ -585,7 +598,14 @@ static void handle_softirqs(bool ksirqd)
account_softirq_exit(current);
lockdep_softirq_end(in_hardirq);
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ uaccess_priviliged_state_restore();
+ _kernel_fpsimd_load(&state);
+#endif
+
softirq_handle_end();
+
current_restore_flags(old_flags, PF_MEMALLOC);
}
@@ -819,12 +839,21 @@ static void tasklet_action_common(struct softirq_action *a,
{
struct tasklet_struct *list;
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ struct fpsimd_state state;
+#endif
+
local_irq_disable();
list = tl_head->head;
tl_head->head = NULL;
tl_head->tail = &tl_head->head;
local_irq_enable();
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ _kernel_fpsimd_save(&state);
+ uaccess_priviliged_state_save();
+#endif
+
while (list) {
struct tasklet_struct *t = list;
@@ -856,6 +885,11 @@ static void tasklet_action_common(struct softirq_action *a,
__raise_softirq_irqoff(softirq_nr);
local_irq_enable();
}
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ uaccess_priviliged_state_restore();
+ _kernel_fpsimd_load(&state);
+#endif
}
static __latent_entropy void tasklet_action(struct softirq_action *a)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index e84df0818517..6f8e22102bdc 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -137,6 +137,17 @@ int sysctl_legacy_va_layout;
#endif /* CONFIG_SYSCTL */
+#ifdef CONFIG_USE_VECTORIZED_COPY
+int sysctl_copy_to_user_threshold = -1;
+EXPORT_SYMBOL(sysctl_copy_to_user_threshold);
+
+int sysctl_copy_from_user_threshold = -1;
+EXPORT_SYMBOL(sysctl_copy_from_user_threshold);
+
+int sysctl_copy_in_user_threshold = -1;
+EXPORT_SYMBOL(sysctl_copy_in_user_threshold);
+#endif
+
/*
* /proc/sys support
*/
@@ -2250,6 +2261,29 @@ static struct ctl_table vm_table[] = {
.extra1 = (void *)&mmap_rnd_compat_bits_min,
.extra2 = (void *)&mmap_rnd_compat_bits_max,
},
+#endif
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ {
+ .procname = "copy_to_user_threshold",
+ .data = &sysctl_copy_to_user_threshold,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "copy_from_user_threshold",
+ .data = &sysctl_copy_from_user_threshold,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "copy_in_user_threshold",
+ .data = &sysctl_copy_in_user_threshold,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
#endif
{ }
};
--
2.34.1
[PATCH v1 openEuler-25.03] Add copy to/from/in user with vectorization support
by Nikita Panov 28 Jan '26
From: Artem Kuzin <artem.kuzin(a)huawei.com>
kunpeng inclusion
category: feature
bugzilla: https://atomgit.com/openeuler/kernel/issues/8445
-------------------------------------------------
1. This implementation uses st1/ld1 4-vector instructions, which allow copying 64 bytes at once
2. The vectorized copy code is used only if the size of the data block to copy is more than 128 bytes
3. To use this functionality you need to set the configuration switch CONFIG_USE_VECTORIZED_COPY=y
4. The code can be used on any ARMv8 variant
5. In-kernel copy functions such as memcpy() are not supported yet, but can be enabled in the future
6. For now we use a lightweight version of register context saving/restoration (4 registers)
We introduce vectorization support for the copy_from/to/in_user functions. It currently
works in parallel with the original FPSIMD/SVE vectorization and does not affect it in any way.
We add a special flag to the task struct - TIF_KERNEL_FPSIMD - which is set while the
lightweight vectorization is in use in the kernel. The thread struct has been extended with two
fields: the user-space FPSIMD state and the kernel FPSIMD state. The user-space FPSIMD state is
used by the kernel_fpsimd_begin() and kernel_fpsimd_end() functions that wrap lightweight
FPSIMD context usage in kernel space. The kernel FPSIMD state is used to manage thread switches.
Nested calls of kernel_neon_begin()/kernel_fpsimd_begin() are not supported, and there
are no plans to support them in the future; this is not necessary.
We save the lightweight FPSIMD context in kernel_fpsimd_begin() and restore it in
kernel_fpsimd_end(). On a thread switch we preserve the kernel FPSIMD context and restore
the user-space one, if any. This prevents corruption of the user-space FPSIMD state. Before
switching to the next thread we restore its kernel FPSIMD context, if any.
It is allowed to use FPSIMD in bottom halves, because in case of BH preemption we check the
TIF_KERNEL_FPSIMD flag and save/restore the contexts.
Context management is quite lightweight and is executed only when the TIF_KERNEL_FPSIMD
flag is set.
To enable this feature, you need to manually modify the
appropriate entries:
/proc/sys/vm/copy_from_user_threshold
/proc/sys/vm/copy_in_user_threshold
/proc/sys/vm/copy_to_user_threshold
The allowed values are the following:
-1 - feature disabled (the default)
0 - feature always enabled
n (n > 0) - feature enabled if the copied size is at least n bytes.
P.S.:
What I personally don't like about the current approach:
1. The additional fields and flag in the task struct look quite ugly
2. There is no way to configure the size of the chunk copied using FPSIMD from user space
3. FPSIMD-based memory movement is not generic; it needs to be enabled for memmove(),
memcpy() and friends in the future.
Co-developed-by: Alexander Kozhevnikov <alexander.kozhevnikov(a)huawei-partners.com>
Signed-off-by: Alexander Kozhevnikov <alexander.kozhevnikov(a)huawei-partners.com>
Co-developed-by: Nikita Panov <panov.nikita(a)huawei.com>
Signed-off-by: Nikita Panov <panov.nikita(a)huawei.com>
Signed-off-by: Artem Kuzin <artem.kuzin(a)huawei.com>
---
arch/arm64/Kconfig | 15 ++
arch/arm64/configs/openeuler_defconfig | 2 +
arch/arm64/include/asm/fpsimd.h | 15 ++
arch/arm64/include/asm/fpsimdmacros.h | 14 ++
arch/arm64/include/asm/neon.h | 28 ++++
arch/arm64/include/asm/processor.h | 10 ++
arch/arm64/include/asm/thread_info.h | 5 +
arch/arm64/include/asm/uaccess.h | 218 ++++++++++++++++++++++++-
arch/arm64/kernel/entry-fpsimd.S | 22 +++
arch/arm64/kernel/fpsimd.c | 102 +++++++++++-
arch/arm64/kernel/process.c | 2 +-
arch/arm64/lib/copy_from_user.S | 30 ++++
arch/arm64/lib/copy_template_fpsimd.S | 180 ++++++++++++++++++++
arch/arm64/lib/copy_to_user.S | 30 ++++
kernel/softirq.c | 34 ++++
kernel/sysctl.c | 34 ++++
16 files changed, 734 insertions(+), 7 deletions(-)
create mode 100644 arch/arm64/lib/copy_template_fpsimd.S
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index d3ce44c166ce..0cf5ab2d7574 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1828,6 +1828,21 @@ config ARM64_ILP32
is an ABI where long and pointers are 32bits but it uses the AARCH64
instruction set.
+config USE_VECTORIZED_COPY
+ bool "Use vectorized instructions in copy_to/from user"
+ depends on KERNEL_MODE_NEON
+ default y
+ help
+ This option turns on vectorization to speed up copy_to/from_user routines.
+
+config VECTORIZED_COPY_VALIDATE
+ bool "Validate result of vectorized copy using regular implementation"
+ depends on KERNEL_MODE_NEON
+ depends on USE_VECTORIZED_COPY
+ default n
+ help
+ This option validates the result of the vectorized copy against the regular copy implementation.
+
menuconfig AARCH32_EL0
bool "Kernel support for 32-bit EL0"
depends on ARM64_4K_PAGES || EXPERT
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 8f97574813ca..dbad22bcbd57 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -499,6 +499,8 @@ CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y
# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set
# CONFIG_ARM64_SW_TTBR0_PAN is not set
CONFIG_ARM64_TAGGED_ADDR_ABI=y
+CONFIG_USE_VECTORIZED_COPY=y
+# CONFIG_VECTORIZED_COPY_VALIDATE is not set
CONFIG_AARCH32_EL0=y
# CONFIG_KUSER_HELPERS is not set
# CONFIG_COMPAT_ALIGNMENT_FIXUPS is not set
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 40a99d8607fe..f71b3ac578c1 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -46,6 +46,21 @@
struct task_struct;
+#ifdef CONFIG_USE_VECTORIZED_COPY
+extern void fpsimd_save_state_light(struct fpsimd_state *state);
+extern void fpsimd_load_state_light(struct fpsimd_state *state);
+#else
+static inline void fpsimd_save_state_light(struct fpsimd_state *state)
+{
+ (void) state;
+}
+
+static inline void fpsimd_load_state_light(struct fpsimd_state *state)
+{
+ (void) state;
+}
+#endif
+
extern void fpsimd_save_state(struct user_fpsimd_state *state);
extern void fpsimd_load_state(struct user_fpsimd_state *state);
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index cdf6a35e3994..df9d3ed91931 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -8,6 +8,20 @@
#include <asm/assembler.h>
+#ifdef CONFIG_USE_VECTORIZED_COPY
+/* Lightweight fpsimd context saving/restoration.
+ * Necessary for vectorized kernel memory movement
+ * implementation
+ */
+.macro fpsimd_save_light state
+ st1 {v20.16b, v21.16b, v22.16b, v23.16b}, [\state]
+.endm
+
+.macro fpsimd_restore_light state
+ ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [\state]
+.endm
+#endif
+
.macro fpsimd_save state, tmpnr
stp q0, q1, [\state, #16 * 0]
stp q2, q3, [\state, #16 * 2]
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
index d4b1d172a79b..ab84b194d7b3 100644
--- a/arch/arm64/include/asm/neon.h
+++ b/arch/arm64/include/asm/neon.h
@@ -16,4 +16,32 @@
void kernel_neon_begin(void);
void kernel_neon_end(void);
+#ifdef CONFIG_USE_VECTORIZED_COPY
+bool kernel_fpsimd_begin(void);
+void kernel_fpsimd_end(void);
+/* Functions to use in non-preemptible context */
+void _kernel_fpsimd_save(struct fpsimd_state *state);
+void _kernel_fpsimd_load(struct fpsimd_state *state);
+#else
+static inline bool kernel_fpsimd_begin(void)
+{
+ return false;
+}
+
+static inline void kernel_fpsimd_end(void)
+{
+}
+
+/* Functions to use in non-preemptible context */
+static inline void _kernel_fpsimd_save(struct fpsimd_state *state)
+{
+ (void) state;
+}
+
+static inline void _kernel_fpsimd_load(struct fpsimd_state *state)
+{
+ (void) state;
+}
+#endif
+
#endif /* ! __ASM_NEON_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 9e688b1b13d4..9b81dbcd2126 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -153,6 +153,10 @@ struct cpu_context {
unsigned long pc;
};
+struct fpsimd_state {
+ __uint128_t v[4];
+};
+
struct thread_struct {
struct cpu_context cpu_context; /* cpu context */
@@ -196,6 +200,12 @@ struct thread_struct {
KABI_RESERVE(6)
KABI_RESERVE(7)
KABI_RESERVE(8)
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ KABI_EXTEND(
+ struct fpsimd_state ustate;
+ struct fpsimd_state kstate;
+ )
+#endif
};
static inline unsigned int thread_get_vl(struct thread_struct *thread,
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 379d24059f5b..60d0be8a2d58 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -89,6 +89,9 @@ void arch_setup_new_exec(void);
#define TIF_SME 27 /* SME in use */
#define TIF_SME_VL_INHERIT 28 /* Inherit SME vl_onexec across exec */
#define TIF_32BIT_AARCH64 29 /* 32 bit process on AArch64(ILP32) */
+#define TIF_KERNEL_FPSIMD 31 /* Use FPSIMD in kernel */
+#define TIF_PRIV_UACC_ENABLED 32 /* Whether privileged uaccess was manually enabled */
+
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -107,6 +110,8 @@ void arch_setup_new_exec(void);
#define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT)
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_32BIT_AARCH64 (1 << TIF_32BIT_AARCH64)
+#define _TIF_KERNEL_FPSIMD (1UL << TIF_KERNEL_FPSIMD)
+#define _TIF_PRIV_UACC_ENABLED (1UL << TIF_PRIV_UACC_ENABLED)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index dd0877a75922..fc9f1a40624d 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -26,6 +26,10 @@
#include <asm/memory.h>
#include <asm/extable.h>
+#ifndef __GENKSYMS__
+#include <asm/neon.h>
+#endif
+
static inline int __access_ok(const void __user *ptr, unsigned long size);
/*
@@ -134,7 +138,7 @@ static inline void __uaccess_enable_hw_pan(void)
CONFIG_ARM64_PAN));
}
-static inline void uaccess_disable_privileged(void)
+static inline void __uaccess_disable_privileged(void)
{
mte_disable_tco();
@@ -144,7 +148,22 @@ static inline void uaccess_disable_privileged(void)
__uaccess_enable_hw_pan();
}
-static inline void uaccess_enable_privileged(void)
+static inline void uaccess_disable_privileged(void)
+{
+ preempt_disable();
+
+ if (!test_and_clear_thread_flag(TIF_PRIV_UACC_ENABLED)) {
+ WARN_ON(1);
+ preempt_enable();
+ return;
+ }
+
+ __uaccess_disable_privileged();
+
+ preempt_enable();
+}
+
+static inline void __uaccess_enable_privileged(void)
{
mte_enable_tco();
@@ -154,6 +173,47 @@ static inline void uaccess_enable_privileged(void)
__uaccess_disable_hw_pan();
}
+static inline void uaccess_enable_privileged(void)
+{
+ preempt_disable();
+
+ if (test_and_set_thread_flag(TIF_PRIV_UACC_ENABLED)) {
+ WARN_ON(1);
+ preempt_enable();
+ return;
+ }
+
+ __uaccess_enable_privileged();
+
+ preempt_enable();
+}
+
+static inline void uaccess_priviliged_context_switch(struct task_struct *next)
+{
+ bool curr_enabled = !!test_thread_flag(TIF_PRIV_UACC_ENABLED);
+ bool next_enabled = !!test_ti_thread_flag(&next->thread_info, TIF_PRIV_UACC_ENABLED);
+
+ if (curr_enabled == next_enabled)
+ return;
+
+ if (curr_enabled)
+ __uaccess_disable_privileged();
+ else
+ __uaccess_enable_privileged();
+}
+
+static inline void uaccess_priviliged_state_save(void)
+{
+ if (test_thread_flag(TIF_PRIV_UACC_ENABLED))
+ __uaccess_disable_privileged();
+}
+
+static inline void uaccess_priviliged_state_restore(void)
+{
+ if (test_thread_flag(TIF_PRIV_UACC_ENABLED))
+ __uaccess_enable_privileged();
+}
+
/*
* Sanitize a uaccess pointer such that it cannot reach any kernel address.
*
@@ -391,7 +451,97 @@ do { \
} while (0); \
} while(0)
-extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+#define USER_COPY_CHUNK_SIZE 4096
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+
+extern int sysctl_copy_from_user_threshold;
+
+#define verify_fpsimd_copy(to, from, n, ret) \
+({ \
+ unsigned long __verify_ret = 0; \
+ __verify_ret = memcmp(to, from, ret ? n - ret : n); \
+ if (__verify_ret) \
+ pr_err("FPSIMD:%s inconsistent state\n", __func__); \
+ if (ret) \
+ pr_err("FPSIMD:%s failed to copy data, expected=%lu, copied=%lu\n", __func__, n, n - ret); \
+ __verify_ret |= ret; \
+ __verify_ret; \
+})
+
+#define compare_fpsimd_copy(to, from, n, ret_fpsimd, ret) \
+({ \
+ unsigned long __verify_ret = 0; \
+ __verify_ret = memcmp(to, from, ret ? n - ret : n); \
+ if (__verify_ret) \
+ pr_err("FIXUP:%s inconsistent state\n", __func__); \
+ if (ret) \
+ pr_err("FIXUP:%s failed to copy data, expected=%lu, copied=%lu\n", __func__, n, n - ret); \
+ __verify_ret |= ret; \
+ if (ret_fpsimd != ret) { \
+ pr_err("FIXUP:%s difference between FPSIMD %lu and regular %lu\n", __func__, n - ret_fpsimd, n - ret); \
+ __verify_ret |= 1; \
+ } else { \
+ __verify_ret = 0; \
+ } \
+ __verify_ret; \
+})
+
+extern unsigned long __must_check
+__arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+extern unsigned long __must_check
+__arch_copy_from_user_fpsimd(void *to, const void __user *from, unsigned long n);
+
+static __always_inline unsigned long __must_check
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long __acfu_ret;
+
+ if (sysctl_copy_from_user_threshold == -1 || n < sysctl_copy_from_user_threshold) {
+ uaccess_ttbr0_enable();
+ __acfu_ret = __arch_copy_from_user(to,
+ __uaccess_mask_ptr(from), n);
+ uaccess_ttbr0_disable();
+ } else {
+ if (kernel_fpsimd_begin()) {
+ unsigned long __acfu_ret_fpsimd;
+
+ uaccess_enable_privileged();
+ __acfu_ret_fpsimd = __arch_copy_from_user_fpsimd((to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_disable_privileged();
+
+ __acfu_ret = __acfu_ret_fpsimd;
+ kernel_fpsimd_end();
+#ifdef CONFIG_VECTORIZED_COPY_VALIDATE
+ if (verify_fpsimd_copy(to, __uaccess_mask_ptr(from), n,
+ __acfu_ret)) {
+
+ uaccess_ttbr0_enable();
+ __acfu_ret = __arch_copy_from_user((to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_ttbr0_disable();
+
+ compare_fpsimd_copy(to, __uaccess_mask_ptr(from), n,
+ __acfu_ret_fpsimd, __acfu_ret);
+ }
+#endif
+ } else {
+ uaccess_ttbr0_enable();
+ __acfu_ret = __arch_copy_from_user((to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_ttbr0_disable();
+ }
+ }
+
+
+ return __acfu_ret;
+}
+#else
+extern unsigned long __must_check
+__arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+
#define raw_copy_from_user(to, from, n) \
({ \
unsigned long __acfu_ret; \
@@ -402,7 +552,66 @@ extern unsigned long __must_check __arch_copy_from_user(void *to, const void __u
__acfu_ret; \
})
-extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
+#endif
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+
+extern int sysctl_copy_to_user_threshold;
+
+extern unsigned long __must_check
+__arch_copy_to_user(void __user *to, const void *from, unsigned long n);
+
+extern unsigned long __must_check
+__arch_copy_to_user_fpsimd(void __user *to, const void *from, unsigned long n);
+
+static __always_inline unsigned long __must_check
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ unsigned long __actu_ret;
+
+
+ if (sysctl_copy_to_user_threshold == -1 || n < sysctl_copy_to_user_threshold) {
+ uaccess_ttbr0_enable();
+ __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_ttbr0_disable();
+ } else {
+ if (kernel_fpsimd_begin()) {
+ unsigned long __actu_ret_fpsimd;
+
+ uaccess_enable_privileged();
+ __actu_ret_fpsimd = __arch_copy_to_user_fpsimd(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_disable_privileged();
+
+ kernel_fpsimd_end();
+ __actu_ret = __actu_ret_fpsimd;
+#ifdef CONFIG_VECTORIZED_COPY_VALIDATE
+ if (verify_fpsimd_copy(__uaccess_mask_ptr(to), from, n,
+ __actu_ret)) {
+ uaccess_ttbr0_enable();
+ __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_ttbr0_disable();
+
+ compare_fpsimd_copy(__uaccess_mask_ptr(to), from, n,
+ __actu_ret_fpsimd, __actu_ret);
+ }
+#endif
+ } else {
+ uaccess_ttbr0_enable();
+ __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_ttbr0_disable();
+ }
+ }
+
+ return __actu_ret;
+}
+#else
+extern unsigned long __must_check
+__arch_copy_to_user(void __user *to, const void *from, unsigned long n);
+
#define raw_copy_to_user(to, from, n) \
({ \
unsigned long __actu_ret; \
@@ -412,6 +621,7 @@ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const voi
uaccess_ttbr0_disable(); \
__actu_ret; \
})
+#endif
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 6325db1a2179..6660465f1b7c 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -11,6 +11,28 @@
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
+#ifdef CONFIG_USE_VECTORIZED_COPY
+/*
+ * Save the FP registers.
+ *
+ * x0 - pointer to struct fpsimd_state
+ */
+SYM_FUNC_START(fpsimd_save_state_light)
+ fpsimd_save_light x0
+ ret
+SYM_FUNC_END(fpsimd_save_state_light)
+
+/*
+ * Load the FP registers.
+ *
+ * x0 - pointer to struct fpsimd_state
+ */
+SYM_FUNC_START(fpsimd_load_state_light)
+ fpsimd_restore_light x0
+ ret
+SYM_FUNC_END(fpsimd_load_state_light)
+#endif
+
/*
* Save the FP registers.
*
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 0137d987631e..19fcf3a3ac66 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1577,6 +1577,11 @@ void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs)
current);
}
+#ifdef CONFIG_USE_VECTORIZED_COPY
+static void kernel_fpsimd_rollback_changes(void);
+static void kernel_fpsimd_restore_changes(struct task_struct *tsk);
+#endif
+
void fpsimd_thread_switch(struct task_struct *next)
{
bool wrong_task, wrong_cpu;
@@ -1585,10 +1590,11 @@ void fpsimd_thread_switch(struct task_struct *next)
return;
__get_cpu_fpsimd_context();
-
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ kernel_fpsimd_rollback_changes();
+#endif
/* Save unsaved fpsimd state, if any: */
fpsimd_save();
-
/*
* Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
* state. For kernel threads, FPSIMD registers are never loaded
@@ -1601,6 +1607,9 @@ void fpsimd_thread_switch(struct task_struct *next)
update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
wrong_task || wrong_cpu);
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ kernel_fpsimd_restore_changes(next);
+#endif
__put_cpu_fpsimd_context();
}
@@ -1956,6 +1965,95 @@ void kernel_neon_end(void)
}
EXPORT_SYMBOL_GPL(kernel_neon_end);
+#ifdef CONFIG_USE_VECTORIZED_COPY
+bool kernel_fpsimd_begin(void)
+{
+ if (WARN_ON(!system_capabilities_finalized()) ||
+ !system_supports_fpsimd() ||
+ in_irq() || irqs_disabled() || in_nmi())
+ return false;
+
+ preempt_disable();
+ if (test_and_set_thread_flag(TIF_KERNEL_FPSIMD)) {
+ preempt_enable();
+
+ WARN_ON(1);
+ return false;
+ }
+
+ /*
+ * Leaving streaming mode enabled will cause issues for any kernel
+ * NEON and leaving streaming mode or ZA enabled may increase power
+ * consumption.
+ */
+ if (system_supports_sme())
+ sme_smstop();
+
+ fpsimd_save_state_light(¤t->thread.ustate);
+ preempt_enable();
+
+ return true;
+}
+EXPORT_SYMBOL(kernel_fpsimd_begin);
+
+void kernel_fpsimd_end(void)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ preempt_disable();
+ if (test_and_clear_thread_flag(TIF_KERNEL_FPSIMD))
+ fpsimd_load_state_light(¤t->thread.ustate);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(kernel_fpsimd_end);
+
+void _kernel_fpsimd_save(struct fpsimd_state *state)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_thread_flag(TIF_KERNEL_FPSIMD))
+ fpsimd_save_state_light(state);
+}
+
+void _kernel_fpsimd_load(struct fpsimd_state *state)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_thread_flag(TIF_KERNEL_FPSIMD))
+ fpsimd_load_state_light(state);
+}
+
+static void kernel_fpsimd_rollback_changes(void)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_thread_flag(TIF_KERNEL_FPSIMD)) {
+ fpsimd_save_state_light(¤t->thread.kstate);
+ fpsimd_load_state_light(¤t->thread.ustate);
+ }
+}
+
+static void kernel_fpsimd_restore_changes(struct task_struct *tsk)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_ti_thread_flag(task_thread_info(tsk), TIF_KERNEL_FPSIMD)) {
+ fpsimd_save_state_light(&tsk->thread.ustate);
+ fpsimd_load_state_light(&tsk->thread.kstate);
+ }
+}
+#endif
+
#ifdef CONFIG_EFI
static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 068e5bb2661b..bbeb36e671de 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -524,7 +524,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *next)
{
struct task_struct *last;
-
+ uaccess_priviliged_context_switch(next);
fpsimd_thread_switch(next);
tls_thread_switch(next);
hw_breakpoint_thread_switch(next);
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 34e317907524..60dc63e10233 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -71,3 +71,33 @@ USER(9998f, ldtrb tmp1w, [srcin])
ret
SYM_FUNC_END(__arch_copy_from_user)
EXPORT_SYMBOL(__arch_copy_from_user)
+
+
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ .macro ldsve reg1, reg2, reg3, reg4, ptr
+ USER(9997f, ld1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+ .macro stsve reg1, reg2, reg3, reg4, ptr
+ KERNEL_ME_SAFE(9998f, st1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+SYM_FUNC_START(__arch_copy_from_user_fpsimd)
+ add end, x0, x2
+ mov srcin, x1
+#include "copy_template_fpsimd.S"
+ mov x0, #0 // Nothing to copy
+ ret
+
+ // Exception fixups
+9997: cmp dst, dstin
+ b.ne 9998f
+ // Before being absolutely sure we couldn't copy anything, try harder
+USER(9998f, ldtrb tmp1w, [srcin])
+ strb tmp1w, [dst], #1
+9998: sub x0, end, dst // bytes not copied
+ ret
+SYM_FUNC_END(__arch_copy_from_user_fpsimd)
+EXPORT_SYMBOL(__arch_copy_from_user_fpsimd)
+#endif
\ No newline at end of file
diff --git a/arch/arm64/lib/copy_template_fpsimd.S b/arch/arm64/lib/copy_template_fpsimd.S
new file mode 100644
index 000000000000..9b2e7ce1e4d2
--- /dev/null
+++ b/arch/arm64/lib/copy_template_fpsimd.S
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
+ */
+
+/*
+ * Copy a buffer from src to dest (alignment handled by the hardware)
+ *
+ * Parameters:
+ * x0 - dest
+ * x1 - src
+ * x2 - n
+ * Returns:
+ * x0 - dest
+ */
+dstin .req x0
+src .req x1
+count .req x2
+tmp1 .req x3
+tmp1w .req w3
+tmp2 .req x4
+tmp2w .req w4
+dst .req x6
+
+A_l .req x7
+A_h .req x8
+B_l .req x9
+B_h .req x10
+C_l .req x11
+C_h .req x12
+D_l .req x13
+D_h .req x14
+
+V_a .req v20
+V_b .req v21
+V_c .req v22
+V_d .req v23
+
+ mov dst, dstin
+ cmp count, #16
+ /* When the copy length is less than 16 bytes, the accesses are not aligned. */
+ b.lo .Ltiny15_fpsimd
+
+ neg tmp2, src
+ ands tmp2, tmp2, #15/* Bytes to reach alignment. */
+ b.eq .LSrcAligned_fpsimd
+ sub count, count, tmp2
+ /*
+ * Copy the leading memory data from src to dst in increasing
+ * address order. This way, the risk of overwriting the source
+ * memory data is eliminated when the distance between src and
+ * dst is less than 16. The memory accesses here are aligned.
+ */
+ tbz tmp2, #0, 1f
+ ldrb1 tmp1w, src, #1
+ strb1 tmp1w, dst, #1
+1:
+ tbz tmp2, #1, 2f
+ ldrh1 tmp1w, src, #2
+ strh1 tmp1w, dst, #2
+2:
+ tbz tmp2, #2, 3f
+ ldr1 tmp1w, src, #4
+ str1 tmp1w, dst, #4
+3:
+ tbz tmp2, #3, .LSrcAligned_fpsimd
+ ldr1 tmp1, src, #8
+ str1 tmp1, dst, #8
+
+.LSrcAligned_fpsimd:
+ cmp count, #64
+ b.ge .Lcpy_over64_fpsimd
+ /*
+ * Deal with small copies quickly by dropping straight into the
+ * exit block.
+ */
+.Ltail63_fpsimd:
+ /*
+ * Copy up to 48 bytes of data. At this point we only need the
+ * bottom 6 bits of count to be accurate.
+ */
+ ands tmp1, count, #0x30
+ b.eq .Ltiny15_fpsimd
+ cmp tmp1w, #0x20
+ b.eq 1f
+ b.lt 2f
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+1:
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+2:
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+.Ltiny15_fpsimd:
+ /*
+ * Prefer to break one ldp/stp into several loads/stores that access
+ * memory in increasing address order, rather than load/store 16 bytes
+ * from (src-16) to (dst-16) and move src back to an aligned address,
+ * as the original cortex memcpy does. If the original memcpy process
+ * were kept here, memmove would need the precondition that src is at
+ * least 16 bytes above dst, otherwise some source data would be
+ * overwritten when memmove calls memcpy directly. To keep memmove
+ * simple and decouple memcpy's dependency on memmove, the original
+ * process was withdrawn.
+ */
+ tbz count, #3, 1f
+ ldr1 tmp1, src, #8
+ str1 tmp1, dst, #8
+1:
+ tbz count, #2, 2f
+ ldr1 tmp1w, src, #4
+ str1 tmp1w, dst, #4
+2:
+ tbz count, #1, 3f
+ ldrh1 tmp1w, src, #2
+ strh1 tmp1w, dst, #2
+3:
+ tbz count, #0, .Lexitfunc_fpsimd
+ ldrb1 tmp1w, src, #1
+ strb1 tmp1w, dst, #1
+
+ b .Lexitfunc_fpsimd
+
+.Lcpy_over64_fpsimd:
+ subs count, count, #128
+ b.ge .Lcpy_body_large_fpsimd
+ /*
+ * Less than 128 bytes to copy, so handle 64 here and then jump
+ * to the tail.
+ */
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+ ldp1 B_l, B_h, src, #16
+ ldp1 C_l, C_h, src, #16
+ stp1 B_l, B_h, dst, #16
+ stp1 C_l, C_h, dst, #16
+ ldp1 D_l, D_h, src, #16
+ stp1 D_l, D_h, dst, #16
+
+ tst count, #0x3f
+ b.ne .Ltail63_fpsimd
+ b .Lexitfunc_fpsimd
+
+ /*
+ * Critical loop. Start at a new cache line boundary. Assuming
+ * 64 bytes per line this ensures the entire loop is in one line.
+ */
+ .p2align L1_CACHE_SHIFT
+.Lcpy_body_large_fpsimd:
+	/* Preload the first 64 bytes of data. */
+ ldsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, src
+ add src, src, #64
+
+1:
+	/*
+	 * Interleave the load of the next 64-byte block with the store of
+	 * the previously loaded 64 bytes.
+	 */
+ stsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, dst
+ ldsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, src
+ add dst, dst, #64
+ add src, src, #64
+
+ subs count, count, #64
+ b.ge 1b
+
+ stsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, dst
+ add dst, dst, #64
+
+ tst count, #0x3f
+ b.ne .Ltail63_fpsimd
+.Lexitfunc_fpsimd:
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 2ac716c0d6d8..c190e5f8a989 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -71,3 +71,33 @@ USER(9998f, sttrb tmp1w, [dst])
ret
SYM_FUNC_END(__arch_copy_to_user)
EXPORT_SYMBOL(__arch_copy_to_user)
+
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ .macro stsve reg1, reg2, reg3, reg4, ptr
+ USER(9997f, st1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+ .macro ldsve reg1, reg2, reg3, reg4, ptr
+ KERNEL_ME_SAFE(9998f, ld1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+SYM_FUNC_START(__arch_copy_to_user_fpsimd)
+ add end, x0, x2
+ mov srcin, x1
+#include "copy_template_fpsimd.S"
+ mov x0, #0
+ ret
+
+ // Exception fixups
+9997: cmp dst, dstin
+ b.ne 9998f
+ // Before being absolutely sure we couldn't copy anything, try harder
+KERNEL_ME_SAFE(9998f, ldrb tmp1w, [srcin])
+USER(9998f, sttrb tmp1w, [dst])
+ add dst, dst, #1
+9998: sub x0, end, dst // bytes not copied
+ ret
+SYM_FUNC_END(__arch_copy_to_user_fpsimd)
+EXPORT_SYMBOL(__arch_copy_to_user_fpsimd)
+#endif
diff --git a/kernel/softirq.c b/kernel/softirq.c
index cd8770b2f76c..e8ce3275a099 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -30,6 +30,10 @@
#include <asm/softirq_stack.h>
+#ifdef CONFIG_USE_VECTORIZED_COPY
+#include <asm/fpsimd.h>
+#endif
+
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
@@ -517,6 +521,9 @@ static void handle_softirqs(bool ksirqd)
__u32 pending;
int softirq_bit;
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ struct fpsimd_state state;
+#endif
/*
* Mask out PF_MEMALLOC as the current task context is borrowed for the
* softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
@@ -526,10 +533,16 @@ static void handle_softirqs(bool ksirqd)
pending = local_softirq_pending();
+
softirq_handle_begin();
in_hardirq = lockdep_softirq_start();
account_softirq_enter(current);
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ _kernel_fpsimd_save(&state);
+ uaccess_priviliged_state_save();
+#endif
+
restart:
/* Reset the pending bitmask before enabling irqs */
set_softirq_pending(0);
@@ -578,7 +591,14 @@ static void handle_softirqs(bool ksirqd)
account_softirq_exit(current);
lockdep_softirq_end(in_hardirq);
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ uaccess_priviliged_state_restore();
+ _kernel_fpsimd_load(&state);
+#endif
+
softirq_handle_end();
+
current_restore_flags(old_flags, PF_MEMALLOC);
}
@@ -812,12 +832,21 @@ static void tasklet_action_common(struct softirq_action *a,
{
struct tasklet_struct *list;
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ struct fpsimd_state state;
+#endif
+
local_irq_disable();
list = tl_head->head;
tl_head->head = NULL;
tl_head->tail = &tl_head->head;
local_irq_enable();
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ _kernel_fpsimd_save(&state);
+ uaccess_priviliged_state_save();
+#endif
+
while (list) {
struct tasklet_struct *t = list;
@@ -849,6 +878,11 @@ static void tasklet_action_common(struct softirq_action *a,
__raise_softirq_irqoff(softirq_nr);
local_irq_enable();
}
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ uaccess_priviliged_state_restore();
+ _kernel_fpsimd_load(&state);
+#endif
}
static __latent_entropy void tasklet_action(struct softirq_action *a)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index e84df0818517..6f8e22102bdc 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -137,6 +137,17 @@ int sysctl_legacy_va_layout;
#endif /* CONFIG_SYSCTL */
+#ifdef CONFIG_USE_VECTORIZED_COPY
+int sysctl_copy_to_user_threshold = -1;
+EXPORT_SYMBOL(sysctl_copy_to_user_threshold);
+
+int sysctl_copy_from_user_threshold = -1;
+EXPORT_SYMBOL(sysctl_copy_from_user_threshold);
+
+int sysctl_copy_in_user_threshold = -1;
+EXPORT_SYMBOL(sysctl_copy_in_user_threshold);
+#endif
+
/*
* /proc/sys support
*/
@@ -2250,6 +2261,29 @@ static struct ctl_table vm_table[] = {
.extra1 = (void *)&mmap_rnd_compat_bits_min,
.extra2 = (void *)&mmap_rnd_compat_bits_max,
},
+#endif
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ {
+ .procname = "copy_to_user_threshold",
+ .data = &sysctl_copy_to_user_threshold,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "copy_from_user_threshold",
+ .data = &sysctl_copy_from_user_threshold,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "copy_in_user_threshold",
+ .data = &sysctl_copy_in_user_threshold,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
#endif
{ }
};
--
2.34.1
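For reference, a minimal user-space sketch (not part of the patch) for tuning one of the thresholds added above; the 4096-byte value is only an example:

#include <stdio.h>

/* Illustrative only: route copies of 4 KiB and larger through the
 * FPSIMD path by writing to the sysctl added by this patch. */
int main(void)
{
	FILE *f = fopen("/proc/sys/vm/copy_from_user_threshold", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%d\n", 4096);
	return fclose(f) == 0 ? 0 : 1;
}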
28 Jan '26
From: Artem Kuzin <artem.kuzin(a)huawei.com>
kunpeng inclusion
category: feature
bugzilla: https://atomgit.com/openeuler/kernel/issues/8445
-------------------------------------------------
1. This implementation uses st1/ld1 4-vector instructions, which allow copying 64 bytes at once
2. The vectorized copy code is used only if the block to copy is larger than 128 bytes
3. To use this functionality you need to set the configuration switch CONFIG_USE_VECTORIZED_COPY=y
4. The code can be used on any ARMv8 variant
5. In-kernel copy functions such as memcpy() are not supported yet, but can be enabled in the future
6. For now we use a lightweight version of register context saving/restoration (4 registers)
We introduce vectorization support for the copy_from/to/in_user functions. It currently
works alongside the original FPSIMD/SVE vectorization and does not affect it in any way.
A special flag in the task struct, TIF_KERNEL_FPSIMD, is set while the kernel is using
lightweight vectorization. The task struct gains two fields: a user-space fpsimd state
and a kernel fpsimd state. The user-space fpsimd state is used by the
kernel_fpsimd_begin() and kernel_fpsimd_end() functions that wrap lightweight FPSIMD
context usage in kernel space. The kernel fpsimd state is used to manage thread switches.
Nested calls of kernel_neon_begin()/kernel_fpsimd_begin() are not supported, and there
are no plans to support them in the future; it is not necessary.
We save the lightweight FPSIMD context in kernel_fpsimd_begin() and restore it in
kernel_fpsimd_end(). On a thread switch we preserve the kernel FPSIMD context and
restore the user-space one, if any. This prevents corruption of the user-space FPSIMD
state. Before switching to the next thread we restore its kernel FPSIMD context, if any.
FPSIMD may be used in bottom halves: when a BH preempts a task, we check the
TIF_KERNEL_FPSIMD flag and save/restore the contexts accordingly.
Context management is quite lightweight and is executed only when the
TIF_KERNEL_FPSIMD flag is set.
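For illustration, here is a minimal sketch (not from the patch; the caller name is hypothetical) of how a kernel user is expected to bracket a lightweight FPSIMD section with these helpers. The fallback branch is required because kernel_fpsimd_begin() can legitimately fail, e.g. in IRQ context or when already nested:

#include <linux/string.h>
#include <asm/neon.h>

static void copy_with_optional_fpsimd(void *dst, const void *src, size_t n)
{
	if (kernel_fpsimd_begin()) {	/* saves v20-v23 into current->thread.ustate */
		/* ... code that clobbers v20-v23, e.g. the vectorized copy ... */
		kernel_fpsimd_end();	/* restores v20-v23 from current->thread.ustate */
	} else {
		/* FPSIMD unavailable (IRQ/NMI, irqs disabled, nesting): plain copy */
		memcpy(dst, src, n);
	}
}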
To enable this feature, you need to manually modify one of the
appropriate entries:
/proc/sys/vm/copy_from_user_threshold
/proc/sys/vm/copy_in_user_threshold
/proc/sys/vm/copy_to_user_threshold
Allowed values are as follows (see the sketch below):
-1 - feature disabled (this is the default)
0 - feature always enabled
n (n > 0) - feature enabled when the copy size is at least n bytes
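A sketch of the resulting dispatch, condensed from the raw_copy_from_user() implementation in this patch (the to/in variants behave identically with their own thresholds):

/* Condensed from raw_copy_from_user() below; not the literal code. */
static inline bool want_fpsimd_copy(unsigned long n, int threshold)
{
	if (threshold == -1)	/* -1: never take the FPSIMD path */
		return false;
	return n >= threshold;	/* 0: always; n > 0: only copies of >= n bytes */
}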
P.S.:
What I personally don't like about the current approach:
1. The additional fields and flag in the task struct look quite ugly
2. There is no way to configure the FPSIMD copy chunk size from user space
3. FPSIMD-based memory movement is not generic; it needs to be enabled for
memmove(), memcpy() and friends in the future.
Co-developed-by: Alexander Kozhevnikov <alexander.kozhevnikov(a)huawei-partners.com>
Signed-off-by: Alexander Kozhevnikov <alexander.kozhevnikov(a)huawei-partners.com>
Co-developed-by: Nikita Panov <panov.nikita(a)huawei.com>
Signed-off-by: Nikita Panov <panov.nikita(a)huawei.com>
Signed-off-by: Artem Kuzin <artem.kuzin(a)huawei.com>
---
arch/arm64/Kconfig | 15 ++
arch/arm64/configs/openeuler_defconfig | 2 +
arch/arm64/include/asm/fpsimd.h | 15 ++
arch/arm64/include/asm/fpsimdmacros.h | 14 ++
arch/arm64/include/asm/neon.h | 28 ++++
arch/arm64/include/asm/processor.h | 10 ++
arch/arm64/include/asm/thread_info.h | 5 +
arch/arm64/include/asm/uaccess.h | 218 ++++++++++++++++++++++++-
arch/arm64/kernel/entry-fpsimd.S | 22 +++
arch/arm64/kernel/fpsimd.c | 102 +++++++++++-
arch/arm64/kernel/process.c | 2 +-
arch/arm64/lib/copy_from_user.S | 30 ++++
arch/arm64/lib/copy_template_fpsimd.S | 180 ++++++++++++++++++++
arch/arm64/lib/copy_to_user.S | 30 ++++
kernel/softirq.c | 34 ++++
kernel/sysctl.c | 34 ++++
16 files changed, 734 insertions(+), 7 deletions(-)
create mode 100644 arch/arm64/lib/copy_template_fpsimd.S
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index ef8c524a296d..15ec2232994a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1870,6 +1870,21 @@ config ARM64_ILP32
is an ABI where long and pointers are 32bits but it uses the AARCH64
instruction set.
+config USE_VECTORIZED_COPY
+ bool "Use vectorized instructions in copy_to/from user"
+ depends on KERNEL_MODE_NEON
+ default y
+ help
+ This option turns on vectorization to speed up copy_to/from_user routines.
+
+config VECTORIZED_COPY_VALIDATE
+ bool "Validate result of vectorized copy using regular implementation"
+ depends on KERNEL_MODE_NEON
+ depends on USE_VECTORIZED_COPY
+ default n
+ help
+	  This option re-checks the result of each vectorized copy against the regular implementation and falls back to it on mismatch.
+
menuconfig AARCH32_EL0
bool "Kernel support for 32-bit EL0"
depends on ARM64_4K_PAGES || EXPERT
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 425616aa8422..331077d556ca 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -525,6 +525,8 @@ CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y
# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set
# CONFIG_ARM64_SW_TTBR0_PAN is not set
CONFIG_ARM64_TAGGED_ADDR_ABI=y
+CONFIG_USE_VECTORIZED_COPY=y
+# CONFIG_VECTORIZED_COPY_VALIDATE is not set
CONFIG_AARCH32_EL0=y
# CONFIG_KUSER_HELPERS is not set
# CONFIG_COMPAT_ALIGNMENT_FIXUPS is not set
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index b6c6949984d8..1fc9089b4a47 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -46,6 +46,21 @@
struct task_struct;
+#ifdef CONFIG_USE_VECTORIZED_COPY
+extern void fpsimd_save_state_light(struct fpsimd_state *state);
+extern void fpsimd_load_state_light(struct fpsimd_state *state);
+#else
+static inline void fpsimd_save_state_light(struct fpsimd_state *state)
+{
+ (void) state;
+}
+
+static inline void fpsimd_load_state_light(struct fpsimd_state *state)
+{
+ (void) state;
+}
+#endif
+
extern void fpsimd_save_state(struct user_fpsimd_state *state);
extern void fpsimd_load_state(struct user_fpsimd_state *state);
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index cdf6a35e3994..df9d3ed91931 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -8,6 +8,20 @@
#include <asm/assembler.h>
+#ifdef CONFIG_USE_VECTORIZED_COPY
+/*
+ * Lightweight FPSIMD context save/restore, needed by the
+ * vectorized kernel memory movement implementation.
+ */
+.macro fpsimd_save_light state
+ st1 {v20.16b, v21.16b, v22.16b, v23.16b}, [\state]
+.endm
+
+.macro fpsimd_restore_light state
+ ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [\state]
+.endm
+#endif
+
.macro fpsimd_save state, tmpnr
stp q0, q1, [\state, #16 * 0]
stp q2, q3, [\state, #16 * 2]
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
index d4b1d172a79b..ab84b194d7b3 100644
--- a/arch/arm64/include/asm/neon.h
+++ b/arch/arm64/include/asm/neon.h
@@ -16,4 +16,32 @@
void kernel_neon_begin(void);
void kernel_neon_end(void);
+#ifdef CONFIG_USE_VECTORIZED_COPY
+bool kernel_fpsimd_begin(void);
+void kernel_fpsimd_end(void);
+/* Functions to use in non-preemptible context */
+void _kernel_fpsimd_save(struct fpsimd_state *state);
+void _kernel_fpsimd_load(struct fpsimd_state *state);
+#else
+static inline bool kernel_fpsimd_begin(void)
+{
+ return false;
+}
+
+static inline void kernel_fpsimd_end(void)
+{
+}
+
+/* Functions to use in non-preemptible context */
+static inline void _kernel_fpsimd_save(struct fpsimd_state *state)
+{
+ (void) state;
+}
+
+static inline void _kernel_fpsimd_load(struct fpsimd_state *state)
+{
+ (void) state;
+}
+#endif
+
#endif /* ! __ASM_NEON_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 9e688b1b13d4..9b81dbcd2126 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -153,6 +153,10 @@ struct cpu_context {
unsigned long pc;
};
+struct fpsimd_state {
+ __uint128_t v[4];
+};
+
struct thread_struct {
struct cpu_context cpu_context; /* cpu context */
@@ -196,6 +200,12 @@ struct thread_struct {
KABI_RESERVE(6)
KABI_RESERVE(7)
KABI_RESERVE(8)
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ KABI_EXTEND(
+ struct fpsimd_state ustate;
+ struct fpsimd_state kstate;
+ )
+#endif
};
static inline unsigned int thread_get_vl(struct thread_struct *thread,
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 379d24059f5b..60d0be8a2d58 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -89,6 +89,9 @@ void arch_setup_new_exec(void);
#define TIF_SME 27 /* SME in use */
#define TIF_SME_VL_INHERIT 28 /* Inherit SME vl_onexec across exec */
#define TIF_32BIT_AARCH64 29 /* 32 bit process on AArch64(ILP32) */
+#define TIF_KERNEL_FPSIMD 31 /* Use FPSIMD in kernel */
+#define TIF_PRIV_UACC_ENABLED	32	/* Whether privileged uaccess was manually enabled */
+
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -107,6 +110,8 @@ void arch_setup_new_exec(void);
#define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT)
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_32BIT_AARCH64 (1 << TIF_32BIT_AARCH64)
+#define _TIF_KERNEL_FPSIMD (1 << TIF_KERNEL_FPSIMD)
+#define _TIF_PRIV_UACC_ENABLED	(1UL << TIF_PRIV_UACC_ENABLED)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index dd0877a75922..fc9f1a40624d 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -26,6 +26,10 @@
#include <asm/memory.h>
#include <asm/extable.h>
+#ifndef __GENKSYMS__
+#include <asm/neon.h>
+#endif
+
static inline int __access_ok(const void __user *ptr, unsigned long size);
/*
@@ -134,7 +138,7 @@ static inline void __uaccess_enable_hw_pan(void)
CONFIG_ARM64_PAN));
}
-static inline void uaccess_disable_privileged(void)
+static inline void __uaccess_disable_privileged(void)
{
mte_disable_tco();
@@ -144,7 +148,22 @@ static inline void uaccess_disable_privileged(void)
__uaccess_enable_hw_pan();
}
-static inline void uaccess_enable_privileged(void)
+static inline void uaccess_disable_privileged(void)
+{
+ preempt_disable();
+
+ if (!test_and_clear_thread_flag(TIF_PRIV_UACC_ENABLED)) {
+ WARN_ON(1);
+ preempt_enable();
+ return;
+ }
+
+ __uaccess_disable_privileged();
+
+ preempt_enable();
+}
+
+static inline void __uaccess_enable_privileged(void)
{
mte_enable_tco();
@@ -154,6 +173,47 @@ static inline void uaccess_enable_privileged(void)
__uaccess_disable_hw_pan();
}
+static inline void uaccess_enable_privileged(void)
+{
+ preempt_disable();
+
+ if (test_and_set_thread_flag(TIF_PRIV_UACC_ENABLED)) {
+ WARN_ON(1);
+ preempt_enable();
+ return;
+ }
+
+ __uaccess_enable_privileged();
+
+ preempt_enable();
+}
+
+static inline void uaccess_priviliged_context_switch(struct task_struct *next)
+{
+ bool curr_enabled = !!test_thread_flag(TIF_PRIV_UACC_ENABLED);
+ bool next_enabled = !!test_ti_thread_flag(&next->thread_info, TIF_PRIV_UACC_ENABLED);
+
+ if (curr_enabled == next_enabled)
+ return;
+
+ if (curr_enabled)
+ __uaccess_disable_privileged();
+ else
+ __uaccess_enable_privileged();
+}
+
+static inline void uaccess_priviliged_state_save(void)
+{
+ if (test_thread_flag(TIF_PRIV_UACC_ENABLED))
+ __uaccess_disable_privileged();
+}
+
+static inline void uaccess_priviliged_state_restore(void)
+{
+ if (test_thread_flag(TIF_PRIV_UACC_ENABLED))
+ __uaccess_enable_privileged();
+}
+
/*
* Sanitize a uaccess pointer such that it cannot reach any kernel address.
*
@@ -391,7 +451,97 @@ do { \
} while (0); \
} while(0)
-extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+#define USER_COPY_CHUNK_SIZE 4096
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+
+extern int sysctl_copy_from_user_threshold;
+
+#define verify_fpsimd_copy(to, from, n, ret) \
+({ \
+ unsigned long __verify_ret = 0; \
+ __verify_ret = memcmp(to, from, ret ? n - ret : n); \
+ if (__verify_ret) \
+ pr_err("FPSIMD:%s inconsistent state\n", __func__); \
+ if (ret) \
+ pr_err("FPSIMD:%s failed to copy data, expected=%lu, copied=%lu\n", __func__, n, n - ret); \
+ __verify_ret |= ret; \
+ __verify_ret; \
+})
+
+#define compare_fpsimd_copy(to, from, n, ret_fpsimd, ret) \
+({ \
+ unsigned long __verify_ret = 0; \
+ __verify_ret = memcmp(to, from, ret ? n - ret : n); \
+ if (__verify_ret) \
+ pr_err("FIXUP:%s inconsistent state\n", __func__); \
+ if (ret) \
+ pr_err("FIXUP:%s failed to copy data, expected=%lu, copied=%lu\n", __func__, n, n - ret); \
+ __verify_ret |= ret; \
+ if (ret_fpsimd != ret) { \
+ pr_err("FIXUP:%s difference between FPSIMD %lu and regular %lu\n", __func__, n - ret_fpsimd, n - ret); \
+ __verify_ret |= 1; \
+ } else { \
+ __verify_ret = 0; \
+ } \
+ __verify_ret; \
+})
+
+extern unsigned long __must_check
+__arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+extern unsigned long __must_check
+__arch_copy_from_user_fpsimd(void *to, const void __user *from, unsigned long n);
+
+static __always_inline unsigned long __must_check
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long __acfu_ret;
+
+ if (sysctl_copy_from_user_threshold == -1 || n < sysctl_copy_from_user_threshold) {
+ uaccess_ttbr0_enable();
+ __acfu_ret = __arch_copy_from_user(to,
+ __uaccess_mask_ptr(from), n);
+ uaccess_ttbr0_disable();
+ } else {
+ if (kernel_fpsimd_begin()) {
+ unsigned long __acfu_ret_fpsimd;
+
+ uaccess_enable_privileged();
+ __acfu_ret_fpsimd = __arch_copy_from_user_fpsimd((to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_disable_privileged();
+
+ __acfu_ret = __acfu_ret_fpsimd;
+ kernel_fpsimd_end();
+#ifdef CONFIG_VECTORIZED_COPY_VALIDATE
+ if (verify_fpsimd_copy(to, __uaccess_mask_ptr(from), n,
+ __acfu_ret)) {
+
+ uaccess_ttbr0_enable();
+ __acfu_ret = __arch_copy_from_user((to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_ttbr0_disable();
+
+ compare_fpsimd_copy(to, __uaccess_mask_ptr(from), n,
+ __acfu_ret_fpsimd, __acfu_ret);
+ }
+#endif
+ } else {
+ uaccess_ttbr0_enable();
+ __acfu_ret = __arch_copy_from_user((to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_ttbr0_disable();
+ }
+ }
+
+
+ return __acfu_ret;
+}
+#else
+extern unsigned long __must_check
+__arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+
#define raw_copy_from_user(to, from, n) \
({ \
unsigned long __acfu_ret; \
@@ -402,7 +552,66 @@ extern unsigned long __must_check __arch_copy_from_user(void *to, const void __u
__acfu_ret; \
})
-extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
+#endif
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+
+extern int sysctl_copy_to_user_threshold;
+
+extern unsigned long __must_check
+__arch_copy_to_user(void __user *to, const void *from, unsigned long n);
+
+extern unsigned long __must_check
+__arch_copy_to_user_fpsimd(void __user *to, const void *from, unsigned long n);
+
+static __always_inline unsigned long __must_check
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ unsigned long __actu_ret;
+
+
+ if (sysctl_copy_to_user_threshold == -1 || n < sysctl_copy_to_user_threshold) {
+ uaccess_ttbr0_enable();
+ __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_ttbr0_disable();
+ } else {
+ if (kernel_fpsimd_begin()) {
+ unsigned long __actu_ret_fpsimd;
+
+ uaccess_enable_privileged();
+ __actu_ret_fpsimd = __arch_copy_to_user_fpsimd(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_disable_privileged();
+
+ kernel_fpsimd_end();
+ __actu_ret = __actu_ret_fpsimd;
+#ifdef CONFIG_VECTORIZED_COPY_VALIDATE
+ if (verify_fpsimd_copy(__uaccess_mask_ptr(to), from, n,
+ __actu_ret)) {
+ uaccess_ttbr0_enable();
+ __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_ttbr0_disable();
+
+ compare_fpsimd_copy(__uaccess_mask_ptr(to), from, n,
+ __actu_ret_fpsimd, __actu_ret);
+ }
+#endif
+ } else {
+ uaccess_ttbr0_enable();
+ __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_ttbr0_disable();
+ }
+ }
+
+ return __actu_ret;
+}
+#else
+extern unsigned long __must_check
+__arch_copy_to_user(void __user *to, const void *from, unsigned long n);
+
#define raw_copy_to_user(to, from, n) \
({ \
unsigned long __actu_ret; \
@@ -412,6 +621,7 @@ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const voi
uaccess_ttbr0_disable(); \
__actu_ret; \
})
+#endif
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 6325db1a2179..6660465f1b7c 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -11,6 +11,28 @@
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
+#ifdef CONFIG_USE_VECTORIZED_COPY
+/*
+ * Save the FP registers.
+ *
+ * x0 - pointer to struct fpsimd_state
+ */
+SYM_FUNC_START(fpsimd_save_state_light)
+ fpsimd_save_light x0
+ ret
+SYM_FUNC_END(fpsimd_save_state_light)
+
+/*
+ * Load the FP registers.
+ *
+ * x0 - pointer to struct fpsimd_state
+ */
+SYM_FUNC_START(fpsimd_load_state_light)
+ fpsimd_restore_light x0
+ ret
+SYM_FUNC_END(fpsimd_load_state_light)
+#endif
+
/*
* Save the FP registers.
*
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index b86a50646700..103559cccb07 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1579,6 +1579,11 @@ void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs)
current);
}
+#ifdef CONFIG_USE_VECTORIZED_COPY
+static void kernel_fpsimd_rollback_changes(void);
+static void kernel_fpsimd_restore_changes(struct task_struct *tsk);
+#endif
+
void fpsimd_thread_switch(struct task_struct *next)
{
bool wrong_task, wrong_cpu;
@@ -1587,10 +1592,11 @@ void fpsimd_thread_switch(struct task_struct *next)
return;
__get_cpu_fpsimd_context();
-
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ kernel_fpsimd_rollback_changes();
+#endif
/* Save unsaved fpsimd state, if any: */
fpsimd_save();
-
/*
* Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
* state. For kernel threads, FPSIMD registers are never loaded
@@ -1603,6 +1609,9 @@ void fpsimd_thread_switch(struct task_struct *next)
update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
wrong_task || wrong_cpu);
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ kernel_fpsimd_restore_changes(next);
+#endif
__put_cpu_fpsimd_context();
}
@@ -1933,6 +1942,95 @@ void kernel_neon_end(void)
}
EXPORT_SYMBOL_GPL(kernel_neon_end);
+#ifdef CONFIG_USE_VECTORIZED_COPY
+bool kernel_fpsimd_begin(void)
+{
+ if (WARN_ON(!system_capabilities_finalized()) ||
+ !system_supports_fpsimd() ||
+ in_irq() || irqs_disabled() || in_nmi())
+ return false;
+
+ preempt_disable();
+ if (test_and_set_thread_flag(TIF_KERNEL_FPSIMD)) {
+ preempt_enable();
+
+ WARN_ON(1);
+ return false;
+ }
+
+ /*
+ * Leaving streaming mode enabled will cause issues for any kernel
+ * NEON and leaving streaming mode or ZA enabled may increase power
+ * consumption.
+ */
+ if (system_supports_sme())
+ sme_smstop();
+
+	fpsimd_save_state_light(&current->thread.ustate);
+ preempt_enable();
+
+ return true;
+}
+EXPORT_SYMBOL(kernel_fpsimd_begin);
+
+void kernel_fpsimd_end(void)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ preempt_disable();
+ if (test_and_clear_thread_flag(TIF_KERNEL_FPSIMD))
+		fpsimd_load_state_light(&current->thread.ustate);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(kernel_fpsimd_end);
+
+void _kernel_fpsimd_save(struct fpsimd_state *state)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_thread_flag(TIF_KERNEL_FPSIMD))
+ fpsimd_save_state_light(state);
+}
+
+void _kernel_fpsimd_load(struct fpsimd_state *state)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_thread_flag(TIF_KERNEL_FPSIMD))
+ fpsimd_load_state_light(state);
+}
+
+static void kernel_fpsimd_rollback_changes(void)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_thread_flag(TIF_KERNEL_FPSIMD)) {
+		fpsimd_save_state_light(&current->thread.kstate);
+		fpsimd_load_state_light(&current->thread.ustate);
+ }
+}
+
+static void kernel_fpsimd_restore_changes(struct task_struct *tsk)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_ti_thread_flag(task_thread_info(tsk), TIF_KERNEL_FPSIMD)) {
+ fpsimd_save_state_light(&tsk->thread.ustate);
+ fpsimd_load_state_light(&tsk->thread.kstate);
+ }
+}
+#endif
+
#ifdef CONFIG_EFI
static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index e9e5ce956f15..fd895189cb7e 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -529,7 +529,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *next)
{
struct task_struct *last;
-
+ uaccess_priviliged_context_switch(next);
fpsimd_thread_switch(next);
tls_thread_switch(next);
hw_breakpoint_thread_switch(next);
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 34e317907524..60dc63e10233 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -71,3 +71,33 @@ USER(9998f, ldtrb tmp1w, [srcin])
ret
SYM_FUNC_END(__arch_copy_from_user)
EXPORT_SYMBOL(__arch_copy_from_user)
+
+
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ .macro ldsve reg1, reg2, reg3, reg4, ptr
+ USER(9997f, ld1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+ .macro stsve reg1, reg2, reg3, reg4, ptr
+ KERNEL_ME_SAFE(9998f, st1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+SYM_FUNC_START(__arch_copy_from_user_fpsimd)
+ add end, x0, x2
+ mov srcin, x1
+#include "copy_template_fpsimd.S"
+ mov x0, #0 // Nothing to copy
+ ret
+
+ // Exception fixups
+9997: cmp dst, dstin
+ b.ne 9998f
+ // Before being absolutely sure we couldn't copy anything, try harder
+USER(9998f, ldtrb tmp1w, [srcin])
+ strb tmp1w, [dst], #1
+9998: sub x0, end, dst // bytes not copied
+ ret
+SYM_FUNC_END(__arch_copy_from_user_fpsimd)
+EXPORT_SYMBOL(__arch_copy_from_user_fpsimd)
+#endif
\ No newline at end of file
diff --git a/arch/arm64/lib/copy_template_fpsimd.S b/arch/arm64/lib/copy_template_fpsimd.S
new file mode 100644
index 000000000000..9b2e7ce1e4d2
--- /dev/null
+++ b/arch/arm64/lib/copy_template_fpsimd.S
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on the glibc cortex-strings work originally
+ * authored by Linaro, which can be found at:
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
+ */
+
+/*
+ * Copy a buffer from src to dest (alignment handled by the hardware)
+ *
+ * Parameters:
+ * x0 - dest
+ * x1 - src
+ * x2 - n
+ * Returns:
+ * x0 - dest
+ */
+dstin .req x0
+src .req x1
+count .req x2
+tmp1 .req x3
+tmp1w .req w3
+tmp2 .req x4
+tmp2w .req w4
+dst .req x6
+
+A_l .req x7
+A_h .req x8
+B_l .req x9
+B_h .req x10
+C_l .req x11
+C_h .req x12
+D_l .req x13
+D_h .req x14
+
+V_a .req v20
+V_b .req v21
+V_c .req v22
+V_d .req v23
+
+ mov dst, dstin
+ cmp count, #16
+	/* When the copy length is less than 16, the accesses are not aligned. */
+ b.lo .Ltiny15_fpsimd
+
+ neg tmp2, src
+ ands tmp2, tmp2, #15/* Bytes to reach alignment. */
+ b.eq .LSrcAligned_fpsimd
+ sub count, count, tmp2
+	/*
+	 * Copy the leading memory data from src to dst in increasing
+	 * address order. This eliminates the risk of overwriting the
+	 * source data when the distance between src and dst is less
+	 * than 16. The memory accesses here are aligned.
+	 */
+ tbz tmp2, #0, 1f
+ ldrb1 tmp1w, src, #1
+ strb1 tmp1w, dst, #1
+1:
+ tbz tmp2, #1, 2f
+ ldrh1 tmp1w, src, #2
+ strh1 tmp1w, dst, #2
+2:
+ tbz tmp2, #2, 3f
+ ldr1 tmp1w, src, #4
+ str1 tmp1w, dst, #4
+3:
+ tbz tmp2, #3, .LSrcAligned_fpsimd
+ ldr1 tmp1, src, #8
+ str1 tmp1, dst, #8
+
+.LSrcAligned_fpsimd:
+ cmp count, #64
+ b.ge .Lcpy_over64_fpsimd
+ /*
+ * Deal with small copies quickly by dropping straight into the
+ * exit block.
+ */
+.Ltail63_fpsimd:
+ /*
+ * Copy up to 48 bytes of data. At this point we only need the
+ * bottom 6 bits of count to be accurate.
+ */
+ ands tmp1, count, #0x30
+ b.eq .Ltiny15_fpsimd
+ cmp tmp1w, #0x20
+ b.eq 1f
+ b.lt 2f
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+1:
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+2:
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+.Ltiny15_fpsimd:
+	/*
+	 * Prefer to break one ldp/stp into several loads/stores that access
+	 * memory in increasing address order, rather than loading/storing 16
+	 * bytes from (src-16) to (dst-16) after winding src back to an
+	 * aligned address, as the original cortex memcpy does. If the
+	 * original scheme were kept here, memmove would have to satisfy the
+	 * precondition that src is at least 16 bytes above dst, otherwise
+	 * some source data would be overwritten when memmove calls memcpy
+	 * directly. To keep memmove simple and decouple memcpy from memmove,
+	 * the original scheme was dropped.
+	 */
+ tbz count, #3, 1f
+ ldr1 tmp1, src, #8
+ str1 tmp1, dst, #8
+1:
+ tbz count, #2, 2f
+ ldr1 tmp1w, src, #4
+ str1 tmp1w, dst, #4
+2:
+ tbz count, #1, 3f
+ ldrh1 tmp1w, src, #2
+ strh1 tmp1w, dst, #2
+3:
+ tbz count, #0, .Lexitfunc_fpsimd
+ ldrb1 tmp1w, src, #1
+ strb1 tmp1w, dst, #1
+
+ b .Lexitfunc_fpsimd
+
+.Lcpy_over64_fpsimd:
+ subs count, count, #128
+ b.ge .Lcpy_body_large_fpsimd
+ /*
+ * Less than 128 bytes to copy, so handle 64 here and then jump
+ * to the tail.
+ */
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+ ldp1 B_l, B_h, src, #16
+ ldp1 C_l, C_h, src, #16
+ stp1 B_l, B_h, dst, #16
+ stp1 C_l, C_h, dst, #16
+ ldp1 D_l, D_h, src, #16
+ stp1 D_l, D_h, dst, #16
+
+ tst count, #0x3f
+ b.ne .Ltail63_fpsimd
+ b .Lexitfunc_fpsimd
+
+ /*
+ * Critical loop. Start at a new cache line boundary. Assuming
+ * 64 bytes per line this ensures the entire loop is in one line.
+ */
+ .p2align L1_CACHE_SHIFT
+.Lcpy_body_large_fpsimd:
+	/* Preload the first 64 bytes of data. */
+ ldsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, src
+ add src, src, #64
+
+1:
+	/*
+	 * Interleave the load of the next 64-byte block with the store of
+	 * the previously loaded 64 bytes.
+	 */
+ stsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, dst
+ ldsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, src
+ add dst, dst, #64
+ add src, src, #64
+
+ subs count, count, #64
+ b.ge 1b
+
+ stsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, dst
+ add dst, dst, #64
+
+ tst count, #0x3f
+ b.ne .Ltail63_fpsimd
+.Lexitfunc_fpsimd:
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 2ac716c0d6d8..c190e5f8a989 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -71,3 +71,33 @@ USER(9998f, sttrb tmp1w, [dst])
ret
SYM_FUNC_END(__arch_copy_to_user)
EXPORT_SYMBOL(__arch_copy_to_user)
+
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ .macro stsve reg1, reg2, reg3, reg4, ptr
+ USER(9997f, st1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+ .macro ldsve reg1, reg2, reg3, reg4, ptr
+ KERNEL_ME_SAFE(9998f, ld1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+SYM_FUNC_START(__arch_copy_to_user_fpsimd)
+ add end, x0, x2
+ mov srcin, x1
+#include "copy_template_fpsimd.S"
+ mov x0, #0
+ ret
+
+ // Exception fixups
+9997: cmp dst, dstin
+ b.ne 9998f
+ // Before being absolutely sure we couldn't copy anything, try harder
+KERNEL_ME_SAFE(9998f, ldrb tmp1w, [srcin])
+USER(9998f, sttrb tmp1w, [dst])
+ add dst, dst, #1
+9998: sub x0, end, dst // bytes not copied
+ ret
+SYM_FUNC_END(__arch_copy_to_user_fpsimd)
+EXPORT_SYMBOL(__arch_copy_to_user_fpsimd)
+#endif
diff --git a/kernel/softirq.c b/kernel/softirq.c
index bd10ff418865..9935a11be1e8 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -30,6 +30,10 @@
#include <asm/softirq_stack.h>
+#ifdef CONFIG_USE_VECTORIZED_COPY
+#include <asm/fpsimd.h>
+#endif
+
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
@@ -542,6 +546,9 @@ static void handle_softirqs(bool ksirqd)
__u32 pending;
int softirq_bit;
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ struct fpsimd_state state;
+#endif
/*
* Mask out PF_MEMALLOC as the current task context is borrowed for the
* softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
@@ -551,10 +558,16 @@ static void handle_softirqs(bool ksirqd)
pending = local_softirq_pending();
+
softirq_handle_begin();
in_hardirq = lockdep_softirq_start();
account_softirq_enter(current);
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ _kernel_fpsimd_save(&state);
+ uaccess_priviliged_state_save();
+#endif
+
restart:
/* Reset the pending bitmask before enabling irqs */
set_softirq_pending(0);
@@ -603,7 +616,14 @@ static void handle_softirqs(bool ksirqd)
account_softirq_exit(current);
lockdep_softirq_end(in_hardirq);
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ uaccess_priviliged_state_restore();
+ _kernel_fpsimd_load(&state);
+#endif
+
softirq_handle_end();
+
current_restore_flags(old_flags, PF_MEMALLOC);
}
@@ -837,12 +857,21 @@ static void tasklet_action_common(struct softirq_action *a,
{
struct tasklet_struct *list;
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ struct fpsimd_state state;
+#endif
+
local_irq_disable();
list = tl_head->head;
tl_head->head = NULL;
tl_head->tail = &tl_head->head;
local_irq_enable();
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ _kernel_fpsimd_save(&state);
+ uaccess_priviliged_state_save();
+#endif
+
while (list) {
struct tasklet_struct *t = list;
@@ -874,6 +903,11 @@ static void tasklet_action_common(struct softirq_action *a,
__raise_softirq_irqoff(softirq_nr);
local_irq_enable();
}
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ uaccess_priviliged_state_restore();
+ _kernel_fpsimd_load(&state);
+#endif
}
static __latent_entropy void tasklet_action(struct softirq_action *a)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index e84df0818517..6f8e22102bdc 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -137,6 +137,17 @@ int sysctl_legacy_va_layout;
#endif /* CONFIG_SYSCTL */
+#ifdef CONFIG_USE_VECTORIZED_COPY
+int sysctl_copy_to_user_threshold = -1;
+EXPORT_SYMBOL(sysctl_copy_to_user_threshold);
+
+int sysctl_copy_from_user_threshold = -1;
+EXPORT_SYMBOL(sysctl_copy_from_user_threshold);
+
+int sysctl_copy_in_user_threshold = -1;
+EXPORT_SYMBOL(sysctl_copy_in_user_threshold);
+#endif
+
/*
* /proc/sys support
*/
@@ -2250,6 +2261,29 @@ static struct ctl_table vm_table[] = {
.extra1 = (void *)&mmap_rnd_compat_bits_min,
.extra2 = (void *)&mmap_rnd_compat_bits_max,
},
+#endif
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ {
+ .procname = "copy_to_user_threshold",
+ .data = &sysctl_copy_to_user_threshold,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "copy_from_user_threshold",
+ .data = &sysctl_copy_from_user_threshold,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "copy_in_user_threshold",
+ .data = &sysctl_copy_in_user_threshold,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
#endif
{ }
};
--
2.34.1
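As an aside, the lightweight context that the fpsimd_save_light/fpsimd_restore_light macros above spill is just the four quad registers v20-v23, i.e. 64 bytes of state; a sketch that mirrors the definition in this patch:

/* Mirrors struct fpsimd_state from asm/processor.h in this patch. */
struct fpsimd_state {
	__uint128_t v[4];	/* v20..v23: 4 x 128-bit vector registers */
};
_Static_assert(sizeof(struct fpsimd_state) == 64,
	       "lightweight FPSIMD context is 64 bytes");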
28 Jan '26
From: Artem Kuzin <artem.kuzin(a)huawei.com>
kunpeng inclusion
category: feature
bugzilla: https://atomgit.com/openeuler/kernel/issues/8445
-------------------------------------------------
1. This implementation uses st1/ld1 4-vector instructions, which allow copying 64 bytes at once
2. The vectorized copy code is used only if the block to copy is larger than 128 bytes
3. To use this functionality you need to set the configuration switch CONFIG_USE_VECTORIZED_COPY=y
4. The code can be used on any ARMv8 variant
5. In-kernel copy functions such as memcpy() are not supported yet, but can be enabled in the future
6. For now we use a lightweight version of register context saving/restoration (4 registers)
We introduce vectorization support for the copy_from/to/in_user functions. It currently
works alongside the original FPSIMD/SVE vectorization and does not affect it in any way.
A special flag in the task struct, TIF_KERNEL_FPSIMD, is set while the kernel is using
lightweight vectorization. The task struct gains two fields: a user-space fpsimd state
and a kernel fpsimd state. The user-space fpsimd state is used by the
kernel_fpsimd_begin() and kernel_fpsimd_end() functions that wrap lightweight FPSIMD
context usage in kernel space. The kernel fpsimd state is used to manage thread switches.
Nested calls of kernel_neon_begin()/kernel_fpsimd_begin() are not supported, and there
are no plans to support them in the future; it is not necessary.
We save the lightweight FPSIMD context in kernel_fpsimd_begin() and restore it in
kernel_fpsimd_end(). On a thread switch we preserve the kernel FPSIMD context and
restore the user-space one, if any. This prevents corruption of the user-space FPSIMD
state. Before switching to the next thread we restore its kernel FPSIMD context, if any.
FPSIMD may be used in bottom halves: when a BH preempts a task, we check the
TIF_KERNEL_FPSIMD flag and save/restore the contexts accordingly.
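Condensed, that bottom-half bracket looks as follows (a sketch mirroring the handle_softirqs() and tasklet_action_common() hunks later in this patch; the function name is hypothetical, and the save/load helpers assert that preemption is disabled, which BH context guarantees):

#include <asm/neon.h>
#include <asm/uaccess.h>

static void bh_bracket_example(void)
{
#ifdef CONFIG_USE_VECTORIZED_COPY
	struct fpsimd_state state;

	_kernel_fpsimd_save(&state);		/* park v20-v23 of the preempted copy */
	uaccess_priviliged_state_save();	/* drop privileged uaccess, if enabled */
#endif
	/* ... run BH handlers, which may themselves use the FPSIMD copy ... */
#ifdef CONFIG_USE_VECTORIZED_COPY
	uaccess_priviliged_state_restore();	/* re-enable privileged uaccess */
	_kernel_fpsimd_load(&state);		/* restore the preempted copy's registers */
#endif
}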
Context management is quite lightweight and is executed only when the
TIF_KERNEL_FPSIMD flag is set.
To enable this feature, you need to manually modify one of the
appropriate entries:
/proc/sys/vm/copy_from_user_threshold
/proc/sys/vm/copy_in_user_threshold
/proc/sys/vm/copy_to_user_threshold
Allowed values are as follows:
-1 - feature disabled (this is the default)
0 - feature always enabled
n (n > 0) - feature enabled when the copy size is at least n bytes
P.S.:
What I personally don't like about the current approach:
1. The additional fields and flag in the task struct look quite ugly
2. There is no way to configure the FPSIMD copy chunk size from user space
3. FPSIMD-based memory movement is not generic; it needs to be enabled for
memmove(), memcpy() and friends in the future.
Co-developed-by: Alexander Kozhevnikov <alexander.kozhevnikov(a)huawei-partners.com>
Signed-off-by: Alexander Kozhevnikov <alexander.kozhevnikov(a)huawei-partners.com>
Co-developed-by: Nikita Panov <panov.nikita(a)huawei.com>
Signed-off-by: Nikita Panov <panov.nikita(a)huawei.com>
Signed-off-by: Artem Kuzin <artem.kuzin(a)huawei.com>
---
arch/arm64/Kconfig | 15 ++
arch/arm64/configs/openeuler_defconfig | 2 +
arch/arm64/include/asm/fpsimd.h | 15 ++
arch/arm64/include/asm/fpsimdmacros.h | 14 ++
arch/arm64/include/asm/neon.h | 28 +++
arch/arm64/include/asm/processor.h | 10 +
arch/arm64/include/asm/thread_info.h | 4 +
arch/arm64/include/asm/uaccess.h | 274 ++++++++++++++++++++++++-
arch/arm64/kernel/entry-fpsimd.S | 22 ++
arch/arm64/kernel/fpsimd.c | 102 ++++++++-
arch/arm64/kernel/process.c | 2 +-
arch/arm64/lib/copy_from_user.S | 18 ++
arch/arm64/lib/copy_in_user.S | 19 ++
arch/arm64/lib/copy_template_fpsimd.S | 180 ++++++++++++++++
arch/arm64/lib/copy_to_user.S | 19 ++
kernel/softirq.c | 31 ++-
kernel/sysctl.c | 35 ++++
17 files changed, 782 insertions(+), 8 deletions(-)
create mode 100644 arch/arm64/lib/copy_template_fpsimd.S
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index eb30ef59aca2..959af31f7e70 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1470,6 +1470,21 @@ config ARM64_ILP32
is an ABI where long and pointers are 32bits but it uses the AARCH64
instruction set.
+config USE_VECTORIZED_COPY
+ bool "Use vectorized instructions in copy_to/from user"
+ depends on KERNEL_MODE_NEON
+ default y
+ help
+ This option turns on vectorization to speed up copy_to/from_user routines.
+
+config VECTORIZED_COPY_VALIDATE
+ bool "Validate result of vectorized copy using regular implementation"
+ depends on KERNEL_MODE_NEON
+ depends on USE_VECTORIZED_COPY
+ default n
+ help
+	  This option re-checks the result of each vectorized copy against the regular implementation and falls back to it on mismatch.
+
menuconfig AARCH32_EL0
bool "Kernel support for 32-bit EL0"
depends on ARM64_4K_PAGES || EXPERT
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index be1faf2da008..84408352a95e 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -484,6 +484,8 @@ CONFIG_ARM64_PMEM_RESERVE=y
CONFIG_ARM64_PMEM_LEGACY=m
# CONFIG_ARM64_SW_TTBR0_PAN is not set
CONFIG_ARM64_TAGGED_ADDR_ABI=y
+CONFIG_USE_VECTORIZED_COPY=y
+# CONFIG_VECTORIZED_COPY_VALIDATE is not set
CONFIG_AARCH32_EL0=y
# CONFIG_KUSER_HELPERS is not set
CONFIG_ARMV8_DEPRECATED=y
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 22f6c6e23441..cb53767105ef 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -46,6 +46,21 @@
struct task_struct;
+#ifdef CONFIG_USE_VECTORIZED_COPY
+extern void fpsimd_save_state_light(struct fpsimd_state *state);
+extern void fpsimd_load_state_light(struct fpsimd_state *state);
+#else
+static inline void fpsimd_save_state_light(struct fpsimd_state *state)
+{
+ (void) state;
+}
+
+static inline void fpsimd_load_state_light(struct fpsimd_state *state)
+{
+ (void) state;
+}
+#endif
+
extern void fpsimd_save_state(struct user_fpsimd_state *state);
extern void fpsimd_load_state(struct user_fpsimd_state *state);
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index ea2577e159f6..62f5f8a0540a 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -8,6 +8,20 @@
#include <asm/assembler.h>
+#ifdef CONFIG_USE_VECTORIZED_COPY
+/*
+ * Lightweight FPSIMD context save/restore, needed by the
+ * vectorized kernel memory movement implementation.
+ */
+.macro fpsimd_save_light state
+ st1 {v20.16b, v21.16b, v22.16b, v23.16b}, [\state]
+.endm
+
+.macro fpsimd_restore_light state
+ ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [\state]
+.endm
+#endif
+
.macro fpsimd_save state, tmpnr
stp q0, q1, [\state, #16 * 0]
stp q2, q3, [\state, #16 * 2]
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
index d4b1d172a79b..ab84b194d7b3 100644
--- a/arch/arm64/include/asm/neon.h
+++ b/arch/arm64/include/asm/neon.h
@@ -16,4 +16,32 @@
void kernel_neon_begin(void);
void kernel_neon_end(void);
+#ifdef CONFIG_USE_VECTORIZED_COPY
+bool kernel_fpsimd_begin(void);
+void kernel_fpsimd_end(void);
+/* Functions to use in non-preemptible context */
+void _kernel_fpsimd_save(struct fpsimd_state *state);
+void _kernel_fpsimd_load(struct fpsimd_state *state);
+#else
+static inline bool kernel_fpsimd_begin(void)
+{
+ return false;
+}
+
+static inline void kernel_fpsimd_end(void)
+{
+}
+
+/* Functions to use in non-preemptible context */
+static inline void _kernel_fpsimd_save(struct fpsimd_state *state)
+{
+ (void) state;
+}
+
+static inline void _kernel_fpsimd_load(struct fpsimd_state *state)
+{
+ (void) state;
+}
+#endif
+
#endif /* ! __ASM_NEON_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 66186f3ab550..d6ca823f7f0f 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -137,6 +137,10 @@ struct cpu_context {
unsigned long pc;
};
+struct fpsimd_state {
+ __uint128_t v[4];
+};
+
struct thread_struct {
struct cpu_context cpu_context; /* cpu context */
@@ -174,6 +178,12 @@ struct thread_struct {
KABI_RESERVE(6)
KABI_RESERVE(7)
KABI_RESERVE(8)
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ KABI_EXTEND(
+ struct fpsimd_state ustate;
+ struct fpsimd_state kstate;
+ )
+#endif
};
static inline unsigned int thread_get_vl(struct thread_struct *thread,
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 390d9612546b..2e395ebcc856 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -89,6 +89,8 @@ void arch_release_task_struct(struct task_struct *tsk);
#define TIF_PATCH_PENDING 28 /* pending live patching update */
#define TIF_SME 29 /* SME in use */
#define TIF_SME_VL_INHERIT 30 /* Inherit SME vl_onexec across exec */
+#define TIF_KERNEL_FPSIMD 31 /* Use FPSIMD in kernel */
+#define TIF_PRIV_UACC_ENABLED	32 /* Whether privileged uaccess was manually enabled */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -108,6 +110,8 @@ void arch_release_task_struct(struct task_struct *tsk);
#define _TIF_32BIT_AARCH64 (1 << TIF_32BIT_AARCH64)
#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
+#define _TIF_KERNEL_FPSIMD (1 << TIF_KERNEL_FPSIMD)
+#define _TIF_PRIV_UACC_ENABLED	(1UL << TIF_PRIV_UACC_ENABLED)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 03c2db710f92..4e4eec098cbc 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -24,6 +24,10 @@
#include <asm/memory.h>
#include <asm/extable.h>
+#ifndef __GENKSYMS__
+#include <asm/neon.h>
+#endif
+
#define HAVE_GET_KERNEL_NOFAULT
/*
@@ -174,7 +178,7 @@ static inline void __uaccess_enable_hw_pan(void)
CONFIG_ARM64_PAN));
}
-static inline void uaccess_disable_privileged(void)
+static inline void __uaccess_disable_privileged(void)
{
if (uaccess_ttbr0_disable())
return;
@@ -182,7 +186,22 @@ static inline void uaccess_disable_privileged(void)
__uaccess_enable_hw_pan();
}
-static inline void uaccess_enable_privileged(void)
+static inline void uaccess_disable_privileged(void)
+{
+ preempt_disable();
+
+ if (!test_and_clear_thread_flag(TIF_PRIV_UACC_ENABLED)) {
+ WARN_ON(1);
+ preempt_enable();
+ return;
+ }
+
+ __uaccess_disable_privileged();
+
+ preempt_enable();
+}
+
+static inline void __uaccess_enable_privileged(void)
{
if (uaccess_ttbr0_enable())
return;
@@ -190,6 +209,47 @@ static inline void uaccess_enable_privileged(void)
__uaccess_disable_hw_pan();
}
+static inline void uaccess_enable_privileged(void)
+{
+ preempt_disable();
+
+ if (test_and_set_thread_flag(TIF_PRIV_UACC_ENABLED)) {
+ WARN_ON(1);
+ preempt_enable();
+ return;
+ }
+
+ __uaccess_enable_privileged();
+
+ preempt_enable();
+}
+
+static inline void uaccess_priviliged_context_switch(struct task_struct *next)
+{
+ bool curr_enabled = !!test_thread_flag(TIF_PRIV_UACC_ENABLED);
+ bool next_enabled = !!test_ti_thread_flag(&next->thread_info, TIF_PRIV_UACC_ENABLED);
+
+ if (curr_enabled == next_enabled)
+ return;
+
+ if (curr_enabled)
+ __uaccess_disable_privileged();
+ else
+ __uaccess_enable_privileged();
+}
+
+static inline void uaccess_priviliged_state_save(void)
+{
+ if (test_thread_flag(TIF_PRIV_UACC_ENABLED))
+ __uaccess_disable_privileged();
+}
+
+static inline void uaccess_priviliged_state_restore(void)
+{
+ if (test_thread_flag(TIF_PRIV_UACC_ENABLED))
+ __uaccess_enable_privileged();
+}
+
/*
* Sanitise a uaccess pointer such that it becomes NULL if above the maximum
* user address. In case the pointer is tagged (has the top byte set), untag
@@ -386,7 +446,97 @@ do { \
goto err_label; \
} while(0)
-extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+#define USER_COPY_CHUNK_SIZE 4096
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+
+extern int sysctl_copy_from_user_threshold;
+
+#define verify_fpsimd_copy(to, from, n, ret) \
+({ \
+ unsigned long __verify_ret = 0; \
+ __verify_ret = memcmp(to, from, ret ? n - ret : n); \
+ if (__verify_ret) \
+ pr_err("FPSIMD:%s inconsistent state\n", __func__); \
+ if (ret) \
+ pr_err("FPSIMD:%s failed to copy data, expected=%lu, copied=%lu\n", __func__, n, n - ret); \
+ __verify_ret |= ret; \
+ __verify_ret; \
+})
+
+#define compare_fpsimd_copy(to, from, n, ret_fpsimd, ret) \
+({ \
+ unsigned long __verify_ret = 0; \
+ __verify_ret = memcmp(to, from, ret ? n - ret : n); \
+ if (__verify_ret) \
+ pr_err("FIXUP:%s inconsistent state\n", __func__); \
+ if (ret) \
+ pr_err("FIXUP:%s failed to copy data, expected=%lu, copied=%lu\n", __func__, n, n - ret); \
+ __verify_ret |= ret; \
+ if (ret_fpsimd != ret) { \
+ pr_err("FIXUP:%s difference between FPSIMD %lu and regular %lu\n", __func__, n - ret_fpsimd, n - ret); \
+ __verify_ret |= 1; \
+ } else { \
+ __verify_ret = 0; \
+ } \
+ __verify_ret; \
+})
+
+extern unsigned long __must_check
+__arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+extern unsigned long __must_check
+__arch_copy_from_user_fpsimd(void *to, const void __user *from, unsigned long n);
+
+static __always_inline unsigned long __must_check
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long __acfu_ret;
+
+ if (sysctl_copy_from_user_threshold == -1 || n < sysctl_copy_from_user_threshold) {
+ uaccess_ttbr0_enable();
+ __acfu_ret = __arch_copy_from_user(to,
+ __uaccess_mask_ptr(from), n);
+ uaccess_ttbr0_disable();
+ } else {
+ if (kernel_fpsimd_begin()) {
+ unsigned long __acfu_ret_fpsimd;
+
+ uaccess_enable_privileged();
+ __acfu_ret_fpsimd = __arch_copy_from_user_fpsimd((to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_disable_privileged();
+
+ __acfu_ret = __acfu_ret_fpsimd;
+ kernel_fpsimd_end();
+#ifdef CONFIG_VECTORIZED_COPY_VALIDATE
+ if (verify_fpsimd_copy(to, __uaccess_mask_ptr(from), n,
+ __acfu_ret)) {
+
+ uaccess_ttbr0_enable();
+ __acfu_ret = __arch_copy_from_user((to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_ttbr0_disable();
+
+ compare_fpsimd_copy(to, __uaccess_mask_ptr(from), n,
+ __acfu_ret_fpsimd, __acfu_ret);
+ }
+#endif
+ } else {
+ uaccess_ttbr0_enable();
+ __acfu_ret = __arch_copy_from_user((to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_ttbr0_disable();
+ }
+ }
+
+
+ return __acfu_ret;
+}
+#else
+extern unsigned long __must_check
+__arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+
#define raw_copy_from_user(to, from, n) \
({ \
unsigned long __acfu_ret; \
@@ -397,7 +547,66 @@ extern unsigned long __must_check __arch_copy_from_user(void *to, const void __u
__acfu_ret; \
})
-extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
+#endif
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+
+extern int sysctl_copy_to_user_threshold;
+
+extern unsigned long __must_check
+__arch_copy_to_user(void __user *to, const void *from, unsigned long n);
+
+extern unsigned long __must_check
+__arch_copy_to_user_fpsimd(void __user *to, const void *from, unsigned long n);
+
+static __always_inline unsigned long __must_check
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ unsigned long __actu_ret;
+
+
+ if (sysctl_copy_to_user_threshold == -1 || n < sysctl_copy_to_user_threshold) {
+ uaccess_ttbr0_enable();
+ __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_ttbr0_disable();
+ } else {
+ if (kernel_fpsimd_begin()) {
+ unsigned long __actu_ret_fpsimd;
+
+ uaccess_enable_privileged();
+ __actu_ret_fpsimd = __arch_copy_to_user_fpsimd(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_disable_privileged();
+
+ kernel_fpsimd_end();
+ __actu_ret = __actu_ret_fpsimd;
+#ifdef CONFIG_VECTORIZED_COPY_VALIDATE
+ if (verify_fpsimd_copy(__uaccess_mask_ptr(to), from, n,
+ __actu_ret)) {
+ uaccess_ttbr0_enable();
+ __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_ttbr0_disable();
+
+ compare_fpsimd_copy(__uaccess_mask_ptr(to), from, n,
+ __actu_ret_fpsimd, __actu_ret);
+ }
+#endif
+ } else {
+ uaccess_ttbr0_enable();
+ __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_ttbr0_disable();
+ }
+ }
+
+ return __actu_ret;
+}
+#else
+extern unsigned long __must_check
+__arch_copy_to_user(void __user *to, const void *from, unsigned long n);
+
#define raw_copy_to_user(to, from, n) \
({ \
unsigned long __actu_ret; \
@@ -407,7 +616,62 @@ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const voi
uaccess_ttbr0_disable(); \
__actu_ret; \
})
+#endif
+#ifdef CONFIG_USE_VECTORIZED_COPY
+
+extern int sysctl_copy_in_user_threshold;
+
+extern unsigned long __must_check
+__arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
+
+extern unsigned long __must_check
+__arch_copy_in_user_fpsimd(void __user *to, const void __user *from, unsigned long n);
+
+static __always_inline unsigned long __must_check
+raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+ unsigned long __aciu_ret;
+
+ if (sysctl_copy_in_user_threshold == -1 || n < sysctl_copy_in_user_threshold) {
+ uaccess_ttbr0_enable();
+ __aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_ttbr0_disable();
+ } else {
+ if (kernel_fpsimd_begin()) {
+ unsigned long __aciu_ret_fpsimd;
+
+ uaccess_enable_privileged();
+ __aciu_ret_fpsimd = __arch_copy_in_user_fpsimd(__uaccess_mask_ptr(to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_disable_privileged();
+
+ kernel_fpsimd_end();
+ __aciu_ret = __aciu_ret_fpsimd;
+#ifdef CONFIG_VECTORIZED_COPY_VALIDATE
+ if (verify_fpsimd_copy(__uaccess_mask_ptr(to), __uaccess_mask_ptr(from), n,
+ __aciu_ret)) {
+ uaccess_ttbr0_enable();
+ __aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_ttbr0_disable();
+
+ compare_fpsimd_copy(__uaccess_mask_ptr(to), __uaccess_mask_ptr(from), n,
+ __aciu_ret_fpsimd, __aciu_ret);
+ }
+#endif
+ } else {
+ uaccess_ttbr0_enable();
+ __aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_ttbr0_disable();
+ }
+ }
+
+ return __aciu_ret;
+}
+#else
extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n) \
({ \
@@ -419,6 +683,8 @@ extern unsigned long __must_check __arch_copy_in_user(void __user *to, const voi
__aciu_ret; \
})
+#endif
+
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 8d12aaac7862..848ca6a351d7 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -11,6 +11,28 @@
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
+#ifdef CONFIG_USE_VECTORIZED_COPY
+/*
+ * Save the FP registers.
+ *
+ * x0 - pointer to struct fpsimd_state
+ */
+SYM_FUNC_START(fpsimd_save_state_light)
+ fpsimd_save_light x0
+ ret
+SYM_FUNC_END(fpsimd_save_state_light)
+
+/*
+ * Load the FP registers.
+ *
+ * x0 - pointer to struct fpsimd_state
+ */
+SYM_FUNC_START(fpsimd_load_state_light)
+ fpsimd_restore_light x0
+ ret
+SYM_FUNC_END(fpsimd_load_state_light)
+#endif
+
/*
* Save the FP registers.
*
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index c2489a72b0b9..1a08c19a181f 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1492,6 +1492,11 @@ void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
current);
}
+#ifdef CONFIG_USE_VECTORIZED_COPY
+static void kernel_fpsimd_rollback_changes(void);
+static void kernel_fpsimd_restore_changes(struct task_struct *tsk);
+#endif
+
void fpsimd_thread_switch(struct task_struct *next)
{
bool wrong_task, wrong_cpu;
@@ -1500,10 +1505,11 @@ void fpsimd_thread_switch(struct task_struct *next)
return;
__get_cpu_fpsimd_context();
-
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ kernel_fpsimd_rollback_changes();
+#endif
/* Save unsaved fpsimd state, if any: */
fpsimd_save();
-
/*
* Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
* state. For kernel threads, FPSIMD registers are never loaded
@@ -1516,6 +1522,9 @@ void fpsimd_thread_switch(struct task_struct *next)
update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
wrong_task || wrong_cpu);
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ kernel_fpsimd_restore_changes(next);
+#endif
__put_cpu_fpsimd_context();
}
@@ -1835,6 +1844,95 @@ void kernel_neon_end(void)
}
EXPORT_SYMBOL(kernel_neon_end);
+#ifdef CONFIG_USE_VECTORIZED_COPY
+bool kernel_fpsimd_begin(void)
+{
+ if (WARN_ON(!system_capabilities_finalized()) ||
+ !system_supports_fpsimd() ||
+ in_irq() || irqs_disabled() || in_nmi())
+ return false;
+
+ preempt_disable();
+ if (test_and_set_thread_flag(TIF_KERNEL_FPSIMD)) {
+ preempt_enable();
+
+ WARN_ON(1);
+ return false;
+ }
+
+ /*
+ * Leaving streaming mode enabled will cause issues for any kernel
+ * NEON and leaving streaming mode or ZA enabled may increase power
+ * consumption.
+ */
+ if (system_supports_sme())
+ sme_smstop();
+
+ fpsimd_save_state_light(¤t->thread.ustate);
+ preempt_enable();
+
+ return true;
+}
+EXPORT_SYMBOL(kernel_fpsimd_begin);
+
+void kernel_fpsimd_end(void)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ preempt_disable();
+ if (test_and_clear_thread_flag(TIF_KERNEL_FPSIMD))
+ fpsimd_load_state_light(¤t->thread.ustate);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(kernel_fpsimd_end);
+
+void _kernel_fpsimd_save(struct fpsimd_state *state)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_thread_flag(TIF_KERNEL_FPSIMD))
+ fpsimd_save_state_light(state);
+}
+
+void _kernel_fpsimd_load(struct fpsimd_state *state)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_thread_flag(TIF_KERNEL_FPSIMD))
+ fpsimd_load_state_light(state);
+}
+
+static void kernel_fpsimd_rollback_changes(void)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_thread_flag(TIF_KERNEL_FPSIMD)) {
+ fpsimd_save_state_light(¤t->thread.kstate);
+ fpsimd_load_state_light(¤t->thread.ustate);
+ }
+}
+
+static void kernel_fpsimd_restore_changes(struct task_struct *tsk)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_ti_thread_flag(task_thread_info(tsk), TIF_KERNEL_FPSIMD)) {
+ fpsimd_save_state_light(&tsk->thread.ustate);
+ fpsimd_load_state_light(&tsk->thread.kstate);
+ }
+}
+#endif
+
#ifdef CONFIG_EFI
static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
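For orientation, a simplified sketch (not part of the patch) of how the
raw_copy_*_user() helpers in uaccess.h drive the functions above; pointer
masking and the CONFIG_VECTORIZED_COPY_VALIDATE path are omitted:

	static unsigned long sketch_copy_to_user(void __user *to,
						 const void *from,
						 unsigned long n)
	{
		unsigned long left;

		if (!kernel_fpsimd_begin()) {
			/* FPSIMD unusable (irq context, nesting, ...) */
			uaccess_ttbr0_enable();
			left = __arch_copy_to_user(to, from, n);
			uaccess_ttbr0_disable();
			return left;
		}

		/* v20-v23 are now saved in current->thread.ustate */
		uaccess_enable_privileged();
		left = __arch_copy_to_user_fpsimd(to, from, n);
		uaccess_disable_privileged();
		kernel_fpsimd_end();	/* v20-v23 restored from ustate */

		return left;		/* bytes NOT copied */
	}
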
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 14300c9e06d5..338d40725a5d 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -572,7 +572,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *next)
{
struct task_struct *last;
-
+ uaccess_priviliged_context_switch(next);
fpsimd_thread_switch(next);
tls_thread_switch(next);
hw_breakpoint_thread_switch(next);
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index dfc33ce09e72..94290069d97d 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -63,6 +63,24 @@ SYM_FUNC_START(__arch_copy_from_user)
SYM_FUNC_END(__arch_copy_from_user)
EXPORT_SYMBOL(__arch_copy_from_user)
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ .macro ldsve reg1, reg2, reg3, reg4, ptr
+ USER(9997f, ld1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+ .macro stsve reg1, reg2, reg3, reg4, ptr
+ USER_MC(9998f, st1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+SYM_FUNC_START(__arch_copy_from_user_fpsimd)
+ add end, x0, x2
+ mov srcin, x1
+#include "copy_template_fpsimd.S"
+ mov x0, #0 // Nothing to copy
+ ret
+SYM_FUNC_END(__arch_copy_from_user_fpsimd)
+EXPORT_SYMBOL(__arch_copy_from_user_fpsimd)
+#endif
.section .fixup,"ax"
.align 2
9997: cmp dst, dstin
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index dbea3799c3ef..cbc09c377050 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -64,6 +64,25 @@ SYM_FUNC_START(__arch_copy_in_user)
SYM_FUNC_END(__arch_copy_in_user)
EXPORT_SYMBOL(__arch_copy_in_user)
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ .macro ldsve reg1, reg2, reg3, reg4, ptr
+ USER(9997f, ld1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+ .macro stsve reg1, reg2, reg3, reg4, ptr
+ USER(9997f, st1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+SYM_FUNC_START(__arch_copy_in_user_fpsimd)
+ add end, x0, x2
+ mov srcin, x1
+#include "copy_template_fpsimd.S"
+ mov x0, #0
+ ret
+SYM_FUNC_END(__arch_copy_in_user_fpsimd)
+EXPORT_SYMBOL(__arch_copy_in_user_fpsimd)
+#endif
+
.section .fixup,"ax"
.align 2
9997: cmp dst, dstin
diff --git a/arch/arm64/lib/copy_template_fpsimd.S b/arch/arm64/lib/copy_template_fpsimd.S
new file mode 100644
index 000000000000..9b2e7ce1e4d2
--- /dev/null
+++ b/arch/arm64/lib/copy_template_fpsimd.S
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on the glibc cortex strings work originally authored
+ * by Linaro; the original code can be found at:
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
+ */
+
+/*
+ * Copy a buffer from src to dest (alignment handled by the hardware)
+ *
+ * Parameters:
+ * x0 - dest
+ * x1 - src
+ * x2 - n
+ * Returns:
+ * x0 - dest
+ */
+dstin .req x0
+src .req x1
+count .req x2
+tmp1 .req x3
+tmp1w .req w3
+tmp2 .req x4
+tmp2w .req w4
+dst .req x6
+
+A_l .req x7
+A_h .req x8
+B_l .req x9
+B_h .req x10
+C_l .req x11
+C_h .req x12
+D_l .req x13
+D_h .req x14
+
+V_a .req v20
+V_b .req v21
+V_c .req v22
+V_d .req v23
+
+ mov dst, dstin
+ cmp count, #16
+	/* When the copy length is less than 16 bytes, the accesses may be unaligned. */
+ b.lo .Ltiny15_fpsimd
+
+ neg tmp2, src
+ ands tmp2, tmp2, #15/* Bytes to reach alignment. */
+ b.eq .LSrcAligned_fpsimd
+ sub count, count, tmp2
+ /*
+	 * Copy the leading data from src to dst in increasing address
+	 * order. This eliminates the risk of overwriting source data
+	 * when the distance between src and dst is less than 16. The
+	 * memory accesses here are aligned.
+ */
+ tbz tmp2, #0, 1f
+ ldrb1 tmp1w, src, #1
+ strb1 tmp1w, dst, #1
+1:
+ tbz tmp2, #1, 2f
+ ldrh1 tmp1w, src, #2
+ strh1 tmp1w, dst, #2
+2:
+ tbz tmp2, #2, 3f
+ ldr1 tmp1w, src, #4
+ str1 tmp1w, dst, #4
+3:
+ tbz tmp2, #3, .LSrcAligned_fpsimd
+ ldr1 tmp1, src, #8
+ str1 tmp1, dst, #8
+
+.LSrcAligned_fpsimd:
+ cmp count, #64
+ b.ge .Lcpy_over64_fpsimd
+ /*
+ * Deal with small copies quickly by dropping straight into the
+ * exit block.
+ */
+.Ltail63_fpsimd:
+ /*
+ * Copy up to 48 bytes of data. At this point we only need the
+ * bottom 6 bits of count to be accurate.
+ */
+ ands tmp1, count, #0x30
+ b.eq .Ltiny15_fpsimd
+ cmp tmp1w, #0x20
+ b.eq 1f
+ b.lt 2f
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+1:
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+2:
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+.Ltiny15_fpsimd:
+ /*
+	 * Prefer to break one ldp/stp into several loads/stores so that
+	 * memory is accessed in increasing address order, rather than
+	 * loading/storing 16 bytes from (src-16) to (dst-16) and winding
+	 * src back to an aligned address, as the original cortex memcpy
+	 * does. If that scheme were kept here, memmove would have to
+	 * satisfy the precondition that src is at least 16 bytes above
+	 * dst, otherwise some source data would be overwritten when
+	 * memmove calls memcpy directly. To keep memmove simple and to
+	 * decouple memcpy from memmove, the original scheme was dropped.
+ */
+ tbz count, #3, 1f
+ ldr1 tmp1, src, #8
+ str1 tmp1, dst, #8
+1:
+ tbz count, #2, 2f
+ ldr1 tmp1w, src, #4
+ str1 tmp1w, dst, #4
+2:
+ tbz count, #1, 3f
+ ldrh1 tmp1w, src, #2
+ strh1 tmp1w, dst, #2
+3:
+ tbz count, #0, .Lexitfunc_fpsimd
+ ldrb1 tmp1w, src, #1
+ strb1 tmp1w, dst, #1
+
+ b .Lexitfunc_fpsimd
+
+.Lcpy_over64_fpsimd:
+ subs count, count, #128
+ b.ge .Lcpy_body_large_fpsimd
+ /*
+ * Less than 128 bytes to copy, so handle 64 here and then jump
+ * to the tail.
+ */
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+ ldp1 B_l, B_h, src, #16
+ ldp1 C_l, C_h, src, #16
+ stp1 B_l, B_h, dst, #16
+ stp1 C_l, C_h, dst, #16
+ ldp1 D_l, D_h, src, #16
+ stp1 D_l, D_h, dst, #16
+
+ tst count, #0x3f
+ b.ne .Ltail63_fpsimd
+ b .Lexitfunc_fpsimd
+
+ /*
+ * Critical loop. Start at a new cache line boundary. Assuming
+ * 64 bytes per line this ensures the entire loop is in one line.
+ */
+ .p2align L1_CACHE_SHIFT
+.Lcpy_body_large_fpsimd:
+	/* Preload the first 64 bytes of data. */
+ ldsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, src
+ add src, src, #64
+
+1:
+ /*
+	 * Interleave the load of the next 64-byte block with the store
+	 * of the previously loaded 64 bytes.
+ */
+ stsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, dst
+ ldsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, src
+ add dst, dst, #64
+ add src, src, #64
+
+ subs count, count, #64
+ b.ge 1b
+
+ stsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, dst
+ add dst, dst, #64
+
+ tst count, #0x3f
+ b.ne .Ltail63_fpsimd
+.Lexitfunc_fpsimd:
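In rough C terms, the template above amounts to the following (illustrative
sketch only; fault handling via the fixup sections and the exact head
alignment steps are simplified):

	static void copy_template_sketch(unsigned char *dst,
					 const unsigned char *src,
					 unsigned long count)
	{
		/* Head: copy up to 15 bytes so src becomes 16-byte aligned */
		while (((unsigned long)src & 15) && count) {
			*dst++ = *src++;
			count--;
		}

		/* Main loop: 64 bytes per iteration via four 16-byte
		 * vectors (ld1/st1 on v20-v23 in the assembly) */
		while (count >= 64) {
			__builtin_memcpy(dst, src, 64);	/* stands in for ldsve/stsve */
			dst += 64;
			src += 64;
			count -= 64;
		}

		/* Tail: remaining 0-63 bytes in progressively smaller steps */
		while (count) {
			*dst++ = *src++;
			count--;
		}
	}
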
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 34154e7c8577..d0211fce4923 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -62,6 +62,25 @@ SYM_FUNC_START(__arch_copy_to_user)
SYM_FUNC_END(__arch_copy_to_user)
EXPORT_SYMBOL(__arch_copy_to_user)
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ .macro stsve reg1, reg2, reg3, reg4, ptr
+ USER(9997f, st1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+ .macro ldsve reg1, reg2, reg3, reg4, ptr
+ USER_MC(9998f, ld1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+SYM_FUNC_START(__arch_copy_to_user_fpsimd)
+ add end, x0, x2
+ mov srcin, x1
+#include "copy_template_fpsimd.S"
+ mov x0, #0
+ ret
+SYM_FUNC_END(__arch_copy_to_user_fpsimd)
+EXPORT_SYMBOL(__arch_copy_to_user_fpsimd)
+#endif
+
.section .fixup,"ax"
.align 2
9997: cmp dst, dstin
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 9fc69e6e2c11..e3f73422829d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -26,6 +26,10 @@
#include <linux/tick.h>
#include <linux/irq.h>
+#ifdef CONFIG_USE_VECTORIZED_COPY
+#include <asm/fpsimd.h>
+#endif
+
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
@@ -262,6 +266,9 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
__u32 pending;
int softirq_bit;
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ struct fpsimd_state state;
+#endif
/*
* Mask out PF_MEMALLOC as the current task context is borrowed for the
* softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
@@ -273,8 +280,11 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
account_irq_enter_time(current);
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ _kernel_fpsimd_save(&state);
+ uaccess_priviliged_state_save();
+#endif
in_hardirq = lockdep_softirq_start();
-
restart:
/* Reset the pending bitmask before enabling irqs */
set_softirq_pending(0);
@@ -322,6 +332,11 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
lockdep_softirq_end(in_hardirq);
account_irq_exit_time(current);
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ uaccess_priviliged_state_restore();
+ _kernel_fpsimd_load(&state);
+#endif
__local_bh_enable(SOFTIRQ_OFFSET);
WARN_ON_ONCE(in_interrupt());
current_restore_flags(old_flags, PF_MEMALLOC);
@@ -612,12 +627,21 @@ static void tasklet_action_common(struct softirq_action *a,
{
struct tasklet_struct *list;
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ struct fpsimd_state state;
+#endif
+
local_irq_disable();
list = tl_head->head;
tl_head->head = NULL;
tl_head->tail = &tl_head->head;
local_irq_enable();
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ _kernel_fpsimd_save(&state);
+ uaccess_priviliged_state_save();
+#endif
+
while (list) {
struct tasklet_struct *t = list;
@@ -645,6 +669,11 @@ static void tasklet_action_common(struct softirq_action *a,
__raise_softirq_irqoff(softirq_nr);
local_irq_enable();
}
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ uaccess_priviliged_state_restore();
+ _kernel_fpsimd_load(&state);
+#endif
}
static __latent_entropy void tasklet_action(struct softirq_action *a)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0b1c13a05332..9ec07294429b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -210,6 +210,17 @@ static int max_extfrag_threshold = 1000;
#endif /* CONFIG_SYSCTL */
+#ifdef CONFIG_USE_VECTORIZED_COPY
+int sysctl_copy_to_user_threshold = -1;
+EXPORT_SYMBOL(sysctl_copy_to_user_threshold);
+
+int sysctl_copy_from_user_threshold = -1;
+EXPORT_SYMBOL(sysctl_copy_from_user_threshold);
+
+int sysctl_copy_in_user_threshold = -1;
+EXPORT_SYMBOL(sysctl_copy_in_user_threshold);
+#endif
+
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_SYSCTL)
static int bpf_stats_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
@@ -3385,6 +3396,30 @@ static struct ctl_table vm_table[] = {
.extra2 = SYSCTL_ONE,
},
#endif
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ {
+ .procname = "copy_to_user_threshold",
+ .data = &sysctl_copy_to_user_threshold,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "copy_from_user_threshold",
+ .data = &sysctl_copy_from_user_threshold,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "copy_in_user_threshold",
+ .data = &sysctl_copy_in_user_threshold,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+#endif
{ }
};
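With the patch applied, the three thresholds can be tuned at run time
through procfs. As implemented in the raw_copy_*_user() helpers: -1 keeps
the regular copy path, 0 always takes the FPSIMD path, and n > 0 takes the
FPSIMD path once the copy size in bytes reaches n. A hypothetical
userspace snippet:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/vm/copy_to_user_threshold", "w");

		if (!f)
			return 1;
		fprintf(f, "%d\n", 4096);	/* vectorize copies >= 4 KiB */
		fclose(f);
		return 0;
	}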
--
2.34.1
+ uaccess_ttbr0_disable();
+
+ compare_fpsimd_copy(to, __uaccess_mask_ptr(from), n,
+ __acfu_ret_fpsimd, __acfu_ret);
+ }
+#endif
+ } else {
+ uaccess_ttbr0_enable();
+ __acfu_ret = __arch_copy_from_user((to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_ttbr0_disable();
+ }
+ }
+
+
+ return __acfu_ret;
+}
+#else
+extern unsigned long __must_check
+__arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+
#define raw_copy_from_user(to, from, n) \
({ \
unsigned long __acfu_ret; \
@@ -397,7 +544,66 @@ extern unsigned long __must_check __arch_copy_from_user(void *to, const void __u
__acfu_ret; \
})
-extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
+#endif
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+
+extern int sysctl_copy_to_user_threshold;
+
+extern unsigned long __must_check
+__arch_copy_to_user(void __user *to, const void *from, unsigned long n);
+
+extern unsigned long __must_check
+__arch_copy_to_user_fpsimd(void __user *to, const void *from, unsigned long n);
+
+static __always_inline unsigned long __must_check
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ unsigned long __actu_ret;
+
+
+ if (sysctl_copy_to_user_threshold == -1 || n < sysctl_copy_to_user_threshold) {
+ uaccess_ttbr0_enable();
+ __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_ttbr0_disable();
+ } else {
+ if (kernel_fpsimd_begin()) {
+ unsigned long __actu_ret_fpsimd;
+
+ uaccess_enable_privileged();
+ __actu_ret_fpsimd = __arch_copy_to_user_fpsimd(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_disable_privileged();
+
+ kernel_fpsimd_end();
+ __actu_ret = __actu_ret_fpsimd;
+#ifdef CONFIG_VECTORIZED_COPY_VALIDATE
+ if (verify_fpsimd_copy(__uaccess_mask_ptr(to), from, n,
+ __actu_ret)) {
+ uaccess_ttbr0_enable();
+ __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_ttbr0_disable();
+
+ compare_fpsimd_copy(__uaccess_mask_ptr(to), from, n,
+ __actu_ret_fpsimd, __actu_ret);
+ }
+#endif
+ } else {
+ uaccess_ttbr0_enable();
+ __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),
+ from, n);
+ uaccess_ttbr0_disable();
+ }
+ }
+
+ return __actu_ret;
+}
+#else
+extern unsigned long __must_check
+__arch_copy_to_user(void __user *to, const void *from, unsigned long n);
+
#define raw_copy_to_user(to, from, n) \
({ \
unsigned long __actu_ret; \
@@ -407,7 +613,62 @@ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const voi
uaccess_ttbr0_disable(); \
__actu_ret; \
})
+#endif
+#ifdef CONFIG_USE_VECTORIZED_COPY
+
+extern int sysctl_copy_in_user_threshold;
+
+extern unsigned long __must_check
+__arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
+
+extern unsigned long __must_check
+__arch_copy_in_user_fpsimd(void __user *to, const void __user *from, unsigned long n);
+
+static __always_inline unsigned long __must_check
+raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+ unsigned long __aciu_ret;
+
+ if (sysctl_copy_in_user_threshold == -1 || n < sysctl_copy_in_user_threshold) {
+ uaccess_ttbr0_enable();
+ __aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_ttbr0_disable();
+ } else {
+ if (kernel_fpsimd_begin()) {
+ unsigned long __aciu_ret_fpsimd;
+
+ uaccess_enable_privileged();
+ __aciu_ret_fpsimd = __arch_copy_in_user_fpsimd(__uaccess_mask_ptr(to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_disable_privileged();
+
+ kernel_fpsimd_end();
+ __aciu_ret = __aciu_ret_fpsimd;
+#ifdef CONFIG_VECTORIZED_COPY_VALIDATE
+ if (verify_fpsimd_copy(__uaccess_mask_ptr(to), __uaccess_mask_ptr(from), n,
+ __aciu_ret)) {
+ uaccess_ttbr0_enable();
+ __aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_ttbr0_disable();
+
+ compare_fpsimd_copy(__uaccess_mask_ptr(to), __uaccess_mask_ptr(from), n,
+ __aciu_ret_fpsimd, __aciu_ret);
+ }
+#endif
+ } else {
+ uaccess_ttbr0_enable();
+ __aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),
+ __uaccess_mask_ptr(from), n);
+ uaccess_ttbr0_disable();
+ }
+ }
+
+ return __aciu_ret;
+}
+#else
extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n) \
({ \
@@ -419,6 +680,8 @@ extern unsigned long __must_check __arch_copy_in_user(void __user *to, const voi
__aciu_ret; \
})
+#endif
+
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 8d12aaac7862..848ca6a351d7 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -11,6 +11,28 @@
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
+#ifdef CONFIG_USE_VECTORIZED_COPY
+/*
+ * Save the FP registers.
+ *
+ * x0 - pointer to struct fpsimd_state
+ */
+SYM_FUNC_START(fpsimd_save_state_light)
+ fpsimd_save_light x0
+ ret
+SYM_FUNC_END(fpsimd_save_state_light)
+
+/*
+ * Load the FP registers.
+ *
+ * x0 - pointer to struct fpsimd_state
+ */
+SYM_FUNC_START(fpsimd_load_state_light)
+ fpsimd_restore_light x0
+ ret
+SYM_FUNC_END(fpsimd_load_state_light)
+#endif
+
/*
* Save the FP registers.
*
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index c2489a72b0b9..1a08c19a181f 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1492,6 +1492,11 @@ void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
current);
}
+#ifdef CONFIG_USE_VECTORIZED_COPY
+static void kernel_fpsimd_rollback_changes(void);
+static void kernel_fpsimd_restore_changes(struct task_struct *tsk);
+#endif
+
void fpsimd_thread_switch(struct task_struct *next)
{
bool wrong_task, wrong_cpu;
@@ -1500,10 +1505,11 @@ void fpsimd_thread_switch(struct task_struct *next)
return;
__get_cpu_fpsimd_context();
-
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ kernel_fpsimd_rollback_changes();
+#endif
/* Save unsaved fpsimd state, if any: */
fpsimd_save();
-
/*
* Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
* state. For kernel threads, FPSIMD registers are never loaded
@@ -1516,6 +1522,9 @@ void fpsimd_thread_switch(struct task_struct *next)
update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
wrong_task || wrong_cpu);
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ kernel_fpsimd_restore_changes(next);
+#endif
__put_cpu_fpsimd_context();
}
@@ -1835,6 +1844,95 @@ void kernel_neon_end(void)
}
EXPORT_SYMBOL(kernel_neon_end);
+#ifdef CONFIG_USE_VECTORIZED_COPY
+bool kernel_fpsimd_begin(void)
+{
+ if (WARN_ON(!system_capabilities_finalized()) ||
+ !system_supports_fpsimd() ||
+ in_irq() || irqs_disabled() || in_nmi())
+ return false;
+
+ preempt_disable();
+ if (test_and_set_thread_flag(TIF_KERNEL_FPSIMD)) {
+ preempt_enable();
+
+ WARN_ON(1);
+ return false;
+ }
+
+ /*
+ * Leaving streaming mode enabled will cause issues for any kernel
+ * NEON and leaving streaming mode or ZA enabled may increase power
+ * consumption.
+ */
+ if (system_supports_sme())
+ sme_smstop();
+
+	fpsimd_save_state_light(&current->thread.ustate);
+ preempt_enable();
+
+ return true;
+}
+EXPORT_SYMBOL(kernel_fpsimd_begin);
+
+void kernel_fpsimd_end(void)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ preempt_disable();
+ if (test_and_clear_thread_flag(TIF_KERNEL_FPSIMD))
+		fpsimd_load_state_light(&current->thread.ustate);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL(kernel_fpsimd_end);
+
+void _kernel_fpsimd_save(struct fpsimd_state *state)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_thread_flag(TIF_KERNEL_FPSIMD))
+ fpsimd_save_state_light(state);
+}
+
+void _kernel_fpsimd_load(struct fpsimd_state *state)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_thread_flag(TIF_KERNEL_FPSIMD))
+ fpsimd_load_state_light(state);
+}
+
+static void kernel_fpsimd_rollback_changes(void)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_thread_flag(TIF_KERNEL_FPSIMD)) {
+		fpsimd_save_state_light(&current->thread.kstate);
+		fpsimd_load_state_light(&current->thread.ustate);
+ }
+}
+
+static void kernel_fpsimd_restore_changes(struct task_struct *tsk)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ BUG_ON(preemptible());
+ if (test_ti_thread_flag(task_thread_info(tsk), TIF_KERNEL_FPSIMD)) {
+ fpsimd_save_state_light(&tsk->thread.ustate);
+ fpsimd_load_state_light(&tsk->thread.kstate);
+ }
+}
+#endif
+
#ifdef CONFIG_EFI
static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 14300c9e06d5..338d40725a5d 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -572,7 +572,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *next)
{
struct task_struct *last;
-
+ uaccess_priviliged_context_switch(next);
fpsimd_thread_switch(next);
tls_thread_switch(next);
hw_breakpoint_thread_switch(next);
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index dfc33ce09e72..94290069d97d 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -63,6 +63,24 @@ SYM_FUNC_START(__arch_copy_from_user)
SYM_FUNC_END(__arch_copy_from_user)
EXPORT_SYMBOL(__arch_copy_from_user)
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ .macro ldsve reg1, reg2, reg3, reg4, ptr
+ USER(9997f, ld1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+ .macro stsve reg1, reg2, reg3, reg4, ptr
+ USER_MC(9998f, st1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+SYM_FUNC_START(__arch_copy_from_user_fpsimd)
+ add end, x0, x2
+ mov srcin, x1
+#include "copy_template_fpsimd.S"
+ mov x0, #0 // Nothing to copy
+ ret
+SYM_FUNC_END(__arch_copy_from_user_fpsimd)
+EXPORT_SYMBOL(__arch_copy_from_user_fpsimd)
+#endif
.section .fixup,"ax"
.align 2
9997: cmp dst, dstin
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index dbea3799c3ef..cbc09c377050 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -64,6 +64,25 @@ SYM_FUNC_START(__arch_copy_in_user)
SYM_FUNC_END(__arch_copy_in_user)
EXPORT_SYMBOL(__arch_copy_in_user)
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ .macro ldsve reg1, reg2, reg3, reg4, ptr
+ USER(9997f, ld1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+ .macro stsve reg1, reg2, reg3, reg4, ptr
+ USER(9997f, st1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+SYM_FUNC_START(__arch_copy_in_user_fpsimd)
+ add end, x0, x2
+ mov srcin, x1
+#include "copy_template_fpsimd.S"
+ mov x0, #0
+ ret
+SYM_FUNC_END(__arch_copy_in_user_fpsimd)
+EXPORT_SYMBOL(__arch_copy_in_user_fpsimd)
+#endif
+
.section .fixup,"ax"
.align 2
9997: cmp dst, dstin
diff --git a/arch/arm64/lib/copy_template_fpsimd.S b/arch/arm64/lib/copy_template_fpsimd.S
new file mode 100644
index 000000000000..9b2e7ce1e4d2
--- /dev/null
+++ b/arch/arm64/lib/copy_template_fpsimd.S
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored
+ * by Linaro; the original code can be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
+ */
+
+/*
+ * Copy a buffer from src to dest (alignment handled by the hardware)
+ *
+ * Parameters:
+ * x0 - dest
+ * x1 - src
+ * x2 - n
+ * Returns:
+ * x0 - dest
+ */
+dstin .req x0
+src .req x1
+count .req x2
+tmp1 .req x3
+tmp1w .req w3
+tmp2 .req x4
+tmp2w .req w4
+dst .req x6
+
+A_l .req x7
+A_h .req x8
+B_l .req x9
+B_h .req x10
+C_l .req x11
+C_h .req x12
+D_l .req x13
+D_h .req x14
+
+V_a .req v20
+V_b .req v21
+V_c .req v22
+V_d .req v23
+
+ mov dst, dstin
+ cmp count, #16
+	/* When memory length is less than 16, the accesses are not aligned. */
+ b.lo .Ltiny15_fpsimd
+
+ neg tmp2, src
+ ands tmp2, tmp2, #15/* Bytes to reach alignment. */
+ b.eq .LSrcAligned_fpsimd
+ sub count, count, tmp2
+ /*
+	 * Copy the leading memory data from src to dst in increasing
+	 * address order. This way, the risk of overwriting source data
+	 * is eliminated when the distance between src and dst is less
+	 * than 16. The memory accesses here are aligned.
+ */
+ tbz tmp2, #0, 1f
+ ldrb1 tmp1w, src, #1
+ strb1 tmp1w, dst, #1
+1:
+ tbz tmp2, #1, 2f
+ ldrh1 tmp1w, src, #2
+ strh1 tmp1w, dst, #2
+2:
+ tbz tmp2, #2, 3f
+ ldr1 tmp1w, src, #4
+ str1 tmp1w, dst, #4
+3:
+ tbz tmp2, #3, .LSrcAligned_fpsimd
+ ldr1 tmp1, src, #8
+ str1 tmp1, dst, #8
+
+.LSrcAligned_fpsimd:
+ cmp count, #64
+ b.ge .Lcpy_over64_fpsimd
+ /*
+ * Deal with small copies quickly by dropping straight into the
+ * exit block.
+ */
+.Ltail63_fpsimd:
+ /*
+ * Copy up to 48 bytes of data. At this point we only need the
+ * bottom 6 bits of count to be accurate.
+ */
+ ands tmp1, count, #0x30
+ b.eq .Ltiny15_fpsimd
+ cmp tmp1w, #0x20
+ b.eq 1f
+ b.lt 2f
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+1:
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+2:
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+.Ltiny15_fpsimd:
+ /*
+	 * Prefer to break one ldp/stp into several load/store accesses
+	 * so that memory is accessed in increasing address order, rather
+	 * than loading/storing 16 bytes from (src-16) to (dst-16) and
+	 * winding src back to an aligned address, as the original cortex
+	 * memcpy does. If that scheme were kept here, memmove would have
+	 * to satisfy the precondition that src is at least 16 bytes
+	 * above dst, otherwise source data would be overwritten when
+	 * memmove calls memcpy directly. To keep memmove simple and
+	 * decouple memcpy from memmove, the original scheme was dropped.
+ */
+ tbz count, #3, 1f
+ ldr1 tmp1, src, #8
+ str1 tmp1, dst, #8
+1:
+ tbz count, #2, 2f
+ ldr1 tmp1w, src, #4
+ str1 tmp1w, dst, #4
+2:
+ tbz count, #1, 3f
+ ldrh1 tmp1w, src, #2
+ strh1 tmp1w, dst, #2
+3:
+ tbz count, #0, .Lexitfunc_fpsimd
+ ldrb1 tmp1w, src, #1
+ strb1 tmp1w, dst, #1
+
+ b .Lexitfunc_fpsimd
+
+.Lcpy_over64_fpsimd:
+ subs count, count, #128
+ b.ge .Lcpy_body_large_fpsimd
+ /*
+ * Less than 128 bytes to copy, so handle 64 here and then jump
+ * to the tail.
+ */
+ ldp1 A_l, A_h, src, #16
+ stp1 A_l, A_h, dst, #16
+ ldp1 B_l, B_h, src, #16
+ ldp1 C_l, C_h, src, #16
+ stp1 B_l, B_h, dst, #16
+ stp1 C_l, C_h, dst, #16
+ ldp1 D_l, D_h, src, #16
+ stp1 D_l, D_h, dst, #16
+
+ tst count, #0x3f
+ b.ne .Ltail63_fpsimd
+ b .Lexitfunc_fpsimd
+
+ /*
+ * Critical loop. Start at a new cache line boundary. Assuming
+ * 64 bytes per line this ensures the entire loop is in one line.
+ */
+ .p2align L1_CACHE_SHIFT
+.Lcpy_body_large_fpsimd:
+	/* Preload the first 64 bytes of data. */
+ ldsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, src
+ add src, src, #64
+
+1:
+ /*
+	 * Interleave the load of the next 64-byte data block with the
+	 * store of the previously loaded 64 bytes.
+ */
+ stsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, dst
+ ldsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, src
+ add dst, dst, #64
+ add src, src, #64
+
+ subs count, count, #64
+ b.ge 1b
+
+ stsve V_a.16b, V_b.16b, V_c.16b, V_d.16b, dst
+ add dst, dst, #64
+
+ tst count, #0x3f
+ b.ne .Ltail63_fpsimd
+.Lexitfunc_fpsimd:
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 34154e7c8577..d0211fce4923 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -62,6 +62,25 @@ SYM_FUNC_START(__arch_copy_to_user)
SYM_FUNC_END(__arch_copy_to_user)
EXPORT_SYMBOL(__arch_copy_to_user)
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ .macro stsve reg1, reg2, reg3, reg4, ptr
+ USER(9997f, st1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+ .macro ldsve reg1, reg2, reg3, reg4, ptr
+ USER_MC(9998f, ld1 {\reg1, \reg2, \reg3, \reg4}, [\ptr])
+ .endm
+
+SYM_FUNC_START(__arch_copy_to_user_fpsimd)
+ add end, x0, x2
+ mov srcin, x1
+#include "copy_template_fpsimd.S"
+ mov x0, #0
+ ret
+SYM_FUNC_END(__arch_copy_to_user_fpsimd)
+EXPORT_SYMBOL(__arch_copy_to_user_fpsimd)
+#endif
+
.section .fixup,"ax"
.align 2
9997: cmp dst, dstin
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 9fc69e6e2c11..e3f73422829d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -26,6 +26,10 @@
#include <linux/tick.h>
#include <linux/irq.h>
+#ifdef CONFIG_USE_VECTORIZED_COPY
+#include <asm/fpsimd.h>
+#endif
+
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
@@ -262,6 +266,9 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
__u32 pending;
int softirq_bit;
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ struct fpsimd_state state;
+#endif
/*
* Mask out PF_MEMALLOC as the current task context is borrowed for the
* softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
@@ -273,8 +280,11 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
account_irq_enter_time(current);
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ _kernel_fpsimd_save(&state);
+ uaccess_priviliged_state_save();
+#endif
in_hardirq = lockdep_softirq_start();
-
restart:
/* Reset the pending bitmask before enabling irqs */
set_softirq_pending(0);
@@ -322,6 +332,11 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
lockdep_softirq_end(in_hardirq);
account_irq_exit_time(current);
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ uaccess_priviliged_state_restore();
+ _kernel_fpsimd_load(&state);
+#endif
__local_bh_enable(SOFTIRQ_OFFSET);
WARN_ON_ONCE(in_interrupt());
current_restore_flags(old_flags, PF_MEMALLOC);
@@ -612,12 +627,21 @@ static void tasklet_action_common(struct softirq_action *a,
{
struct tasklet_struct *list;
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ struct fpsimd_state state;
+#endif
+
local_irq_disable();
list = tl_head->head;
tl_head->head = NULL;
tl_head->tail = &tl_head->head;
local_irq_enable();
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ _kernel_fpsimd_save(&state);
+ uaccess_priviliged_state_save();
+#endif
+
while (list) {
struct tasklet_struct *t = list;
@@ -645,6 +669,11 @@ static void tasklet_action_common(struct softirq_action *a,
__raise_softirq_irqoff(softirq_nr);
local_irq_enable();
}
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ uaccess_priviliged_state_restore();
+ _kernel_fpsimd_load(&state);
+#endif
}
static __latent_entropy void tasklet_action(struct softirq_action *a)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0b1c13a05332..9ec07294429b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -210,6 +210,17 @@ static int max_extfrag_threshold = 1000;
#endif /* CONFIG_SYSCTL */
+#ifdef CONFIG_USE_VECTORIZED_COPY
+int sysctl_copy_to_user_threshold = -1;
+EXPORT_SYMBOL(sysctl_copy_to_user_threshold);
+
+int sysctl_copy_from_user_threshold = -1;
+EXPORT_SYMBOL(sysctl_copy_from_user_threshold);
+
+int sysctl_copy_in_user_threshold = -1;
+EXPORT_SYMBOL(sysctl_copy_in_user_threshold);
+#endif
+
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_SYSCTL)
static int bpf_stats_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
@@ -3385,6 +3396,30 @@ static struct ctl_table vm_table[] = {
.extra2 = SYSCTL_ONE,
},
#endif
+
+#ifdef CONFIG_USE_VECTORIZED_COPY
+ {
+ .procname = "copy_to_user_threshold",
+ .data = &sysctl_copy_to_user_threshold,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "copy_from_user_threshold",
+ .data = &sysctl_copy_from_user_threshold,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "copy_in_user_threshold",
+ .data = &sysctl_copy_in_user_threshold,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+#endif
{ }
};
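A usage note, not part of the patch: per the raw_copy_from_user() logic
in uaccess.h above, each threshold is compared directly against the copy
size in bytes, and -1 keeps the regular copy routines. A hypothetical
privileged helper that enables the FPSIMD path for copies of 4096 bytes
and larger could look like this (illustrative sketch only):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/copy_from_user_threshold", "w");

	if (!f)
		return 1;
	/* The FPSIMD path is taken once the copy size reaches this value. */
	fprintf(f, "%d\n", 4096);
	return fclose(f) ? 1 : 0;
}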
--
2.34.1
hulk inclusion
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/8330
----------------------------------------------------------------------
A bitmap inconsistency issue was observed during stress tests under
mixed huge-page workloads. Ext4 reported multiple e4b bitmap check
failures like:
ext4_mb_complex_scan_group:2508: group 350, 8179 free clusters as
per group info. But got 8192 blocks
Analysis and experimentation confirmed that the issue is caused by a
race condition between page migration and bitmap modification. Although
this timing window is extremely narrow, it is still hit in practice:
folio_lock ext4_mb_load_buddy
__migrate_folio
check ref count
folio_mc_copy __filemap_get_folio
folio_try_get(folio)
......
mb_mark_used
ext4_mb_unload_buddy
__folio_migrate_mapping
folio_ref_freeze
folio_unlock
The root cause of this issue is that the fast path of load_buddy only
increments the folio's reference count, which is insufficient to prevent
concurrent folio migration. We observed that the folio migration process
acquires the folio lock. Therefore, we can determine whether to take the
fast path in load_buddy by checking the lock status. If the folio is
locked, we opt for the slow path (which acquires the lock) to close this
concurrency window.
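In rough pseudo-C, the reworked fast-path check is sketched below
(simplified; mapping, pnum and gfp stand for the values already used by
ext4_mb_load_buddy_gfp(), see the diff at the end of this message):

	page = find_get_page_flags(mapping, pnum, FGP_ACCESSED);
	if (page == NULL || !PageUptodate(page) || PageLocked(page)) {
		/*
		 * Locked: a migration may be in flight; not uptodate: the
		 * page is still being initialized. Either way, fall back
		 * to the slow path, which takes the page lock itself.
		 */
		if (page)
			put_page(page);
		page = find_or_create_page(mapping, pnum, gfp);
	}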
Additionally, this change addresses the following issue:
When the DOUBLE_CHECK macro is enabled to inspect bitmap-related
issues, the following error may be triggered:
corruption in group 324 at byte 784(6272): f in copy != ff on
disk/prealloc
Analysis reveals that this is a false positive. There is a specific race
window where the bitmap and the group descriptor become momentarily
inconsistent, leading to this error report:
ext4_mb_load_buddy ext4_mb_load_buddy
__filemap_get_folio(create|lock)
folio_lock
ext4_mb_init_cache
folio_mark_uptodate
__filemap_get_folio(no lock)
......
mb_mark_used
mb_mark_used_double
mb_cmp_bitmaps
mb_set_bits(e4b->bd_bitmap)
folio_unlock
The original logic assumed that, since mb_cmp_bitmaps is called when the
bitmap is newly loaded from disk, the folio lock would be sufficient to
prevent concurrent access. However, this overlooks a specific race
condition: if another process attempts to load the buddy and finds the
folio already uptodate, it will immediately begin using it without
holding the folio lock.
Fixes: 9bb3fd60f91e ("arm64: mm: Add copy mc support for all migrate_page")
Signed-off-by: Yongjian Sun <sunyongjian1(a)huawei.com>
---
fs/ext4/mballoc.c | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 522d2ec128ef..9d4e8e3c74e2 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1217,16 +1217,17 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
/* we could use find_or_create_page(), but it locks page
* what we'd like to avoid in fast path ... */
page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
- if (page == NULL || !PageUptodate(page)) {
+ if (page == NULL || !PageUptodate(page) || PageLocked(page)) {
+ /*
+		 * PageLocked is used to detect an ongoing page
+		 * migration, since a concurrent migration can lead to
+		 * bitmap inconsistency. If the page is not uptodate,
+		 * somebody has just created it but has not yet
+		 * initialized it. In both cases, drop the page
+		 * reference and retake the page with the lock held to
+		 * avoid the concurrency.
+ */
if (page)
- /*
- * drop the page reference and try
- * to get the page with lock. If we
- * are not uptodate that implies
- * somebody just created the page but
- * is yet to initialize the same. So
- * wait for it to initialize.
- */
put_page(page);
page = find_or_create_page(inode->i_mapping, pnum, gfp);
if (page) {
@@ -1261,7 +1262,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
poff = block % blocks_per_page;
page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
- if (page == NULL || !PageUptodate(page)) {
+ if (page == NULL || !PageUptodate(page) || PageLocked(page)) {
if (page)
put_page(page);
page = find_or_create_page(inode->i_mapping, pnum, gfp);
--
2.39.2
[PATCH OLK-6.6] udp: Deal with race between UDP socket address change and rehash
by Zhang Changzhong, 28 Jan '26
From: Stefano Brivio <sbrivio(a)redhat.com>
mainline inclusion
from mainline-v6.14-rc1
commit a502ea6fa94b1f7be72a24bcf9e3f5f6b7e6e90c
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/10960
CVE: CVE-2024-57974
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
-------------------------------------------------
If a UDP socket changes its local address while it's receiving
datagrams, as a result of connect(), there is a period during which
a lookup operation might fail to find it, after the address is changed
but before the secondary hash (port and address) and the four-tuple
hash (local and remote ports and addresses) are updated.
Secondary hash chains were introduced by commit 30fff9231fad ("udp:
bind() optimisation") and, as a result, a rehash operation became
needed to make a bound socket reachable again after a connect().
This operation was introduced by commit 719f835853a9 ("udp: add
rehash on connect()") which isn't however a complete fix: the
socket will be found once the rehashing completes, but not while
it's pending.
This is noticeable with a socat(1) server in UDP4-LISTEN mode, and a
client sending datagrams to it. After the server receives the first
datagram (cf. _xioopen_ipdgram_listen()), it issues a connect() to
the address of the sender, in order to set up a directed flow.
Now, if the client, running on a different CPU thread, happens to
send a (subsequent) datagram while the server's socket changes its
address, but is not rehashed yet, this will result in a failed
lookup and a port unreachable error delivered to the client, as
apparent from the following reproducer:
LEN=$(($(cat /proc/sys/net/core/wmem_default) / 4))
dd if=/dev/urandom bs=1 count=${LEN} of=tmp.in
while :; do
taskset -c 1 socat UDP4-LISTEN:1337,null-eof OPEN:tmp.out,create,trunc &
sleep 0.1 || sleep 1
taskset -c 2 socat OPEN:tmp.in UDP4:localhost:1337,shut-null
wait
done
where the client will eventually get ECONNREFUSED on a write()
(typically the second or third one of a given iteration):
2024/11/13 21:28:23 socat[46901] E write(6, 0x556db2e3c000, 8192): Connection refused
This issue was first observed as a seldom failure in Podman's tests
checking UDP functionality while using pasta(1) to connect the
container's network namespace, which leads us to a reproducer with
the lookup error resulting in an ICMP packet on a tap device:
LOCAL_ADDR="$(ip -j -4 addr show|jq -rM '.[] | .addr_info[0] | select(.scope == "global").local')"
while :; do
./pasta --config-net -p pasta.pcap -u 1337 socat UDP4-LISTEN:1337,null-eof OPEN:tmp.out,create,trunc &
sleep 0.2 || sleep 1
socat OPEN:tmp.in UDP4:${LOCAL_ADDR}:1337,shut-null
wait
cmp tmp.in tmp.out
done
Once this fails:
tmp.in tmp.out differ: char 8193, line 29
we can finally have a look at what's going on:
$ tshark -r pasta.pcap
1 0.000000 :: → ff02::16 ICMPv6 110 Multicast Listener Report Message v2
2 0.168690 88.198.0.161 → 88.198.0.164 UDP 8234 60260 → 1337 Len=8192
3 0.168767 88.198.0.161 → 88.198.0.164 UDP 8234 60260 → 1337 Len=8192
4 0.168806 88.198.0.161 → 88.198.0.164 UDP 8234 60260 → 1337 Len=8192
5 0.168827 c6:47:05:8d:dc:04 → Broadcast ARP 42 Who has 88.198.0.161? Tell 88.198.0.164
6 0.168851 9a:55:9a:55:9a:55 → c6:47:05:8d:dc:04 ARP 42 88.198.0.161 is at 9a:55:9a:55:9a:55
7 0.168875 88.198.0.161 → 88.198.0.164 UDP 8234 60260 → 1337 Len=8192
8 0.168896 88.198.0.164 → 88.198.0.161 ICMP 590 Destination unreachable (Port unreachable)
9 0.168926 88.198.0.161 → 88.198.0.164 UDP 8234 60260 → 1337 Len=8192
10 0.168959 88.198.0.161 → 88.198.0.164 UDP 8234 60260 → 1337 Len=8192
11 0.168989 88.198.0.161 → 88.198.0.164 UDP 4138 60260 → 1337 Len=4096
12 0.169010 88.198.0.161 → 88.198.0.164 UDP 42 60260 → 1337 Len=0
On the third datagram received, the network namespace of the container
initiates an ARP lookup to deliver the ICMP message.
In another variant of this reproducer, starting the client with:
strace -f pasta --config-net -u 1337 socat UDP4-LISTEN:1337,null-eof OPEN:tmp.out,create,trunc 2>strace.log &
and connecting to the socat server using a loopback address:
socat OPEN:tmp.in UDP4:localhost:1337,shut-null
we can more clearly observe a sendmmsg() call failing after the
first datagram is delivered:
[pid 278012] connect(173, 0x7fff96c95fc0, 16) = 0
[...]
[pid 278012] recvmmsg(173, 0x7fff96c96020, 1024, MSG_DONTWAIT, NULL) = -1 EAGAIN (Resource temporarily unavailable)
[pid 278012] sendmmsg(173, 0x561c5ad0a720, 1, MSG_NOSIGNAL) = 1
[...]
[pid 278012] sendmmsg(173, 0x561c5ad0a720, 1, MSG_NOSIGNAL) = -1 ECONNREFUSED (Connection refused)
and, somewhat confusingly, after a connect() on the same socket
succeeded.
Until commit 4cdeeee9252a ("net: udp: prefer listeners bound to an
address"), the race between receive address change and lookup didn't
actually cause visible issues, because, once the lookup based on the
secondary hash chain failed, we would still attempt a lookup based on
the primary hash (destination port only), and find the socket with the
outdated secondary hash.
That change, however, dropped port-only lookups altogether and, as a
side effect, made the race visible.
To fix this, while avoiding the need to make address changes and
rehash atomic against lookups, reintroduce primary hash lookups as
fallback, if lookups based on four-tuple and secondary hashes fail.
To this end, introduce a simplified lookup implementation, which
doesn't take care of SO_REUSEPORT groups: if we have one, there are
multiple sockets that would match the four-tuple or secondary hash,
meaning that we can't run into this race at all.
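Schematically, the resulting lookup order in __udp4_lib_lookup() is the
sketch below (simplified; the existing wildcard-address step is omitted
here but kept in the diff):

	/* 1. Four-tuple, then secondary hash (port + address). */
	result = udp4_lib_lookup2(net, saddr, sport, daddr, hnum,
				  dif, sdif, hslot2, skb);
	if (!IS_ERR_OR_NULL(result))
		goto done;

	/*
	 * 2. Fallback: primary hash (destination port only). It needs no
	 * rehash after connect(), so it still finds the socket while the
	 * secondary and four-tuple hashes are being updated.
	 */
	result = udp4_lib_lookup1(net, saddr, sport, daddr, hnum,
				  dif, sdif, udptable);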
v2:
- instead of synchronising lookup operations against address change
plus rehash, reintroduce a simplified version of the original
primary hash lookup as fallback
v1:
- fix build with CONFIG_IPV6=n: add ifdef around sk_v6_rcv_saddr
usage (Kuniyuki Iwashima)
- directly use sk_rcv_saddr for IPv4 receive addresses instead of
fetching inet_rcv_saddr (Kuniyuki Iwashima)
- move inet_update_saddr() to inet_hashtables.h and use that
to set IPv4/IPv6 addresses as suitable (Kuniyuki Iwashima)
- rebase onto net-next, update commit message accordingly
Reported-by: Ed Santiago <santiago(a)redhat.com>
Link: https://github.com/containers/podman/issues/24147
Analysed-by: David Gibson <david(a)gibson.dropbear.id.au>
Fixes: 30fff9231fad ("udp: bind() optimisation")
Signed-off-by: Stefano Brivio <sbrivio(a)redhat.com>
Reviewed-by: Eric Dumazet <edumazet(a)google.com>
Reviewed-by: Willem de Bruijn <willemb(a)google.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Conflicts:
net/ipv4/udp.c
net/ipv6/udp.c
[context conflicts, and fix some discards qualifiers error.]
Signed-off-by: Liu Jian <liujian56(a)huawei.com>
Signed-off-by: Zhang Changzhong <zhangchangzhong(a)huawei.com>
---
net/ipv4/udp.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
net/ipv6/udp.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 108 insertions(+)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8a34e22..4c1f82f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -421,6 +421,49 @@ u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
udp_ehash_secret + net_hash_mix(net));
}
+/**
+ * udp4_lib_lookup1() - Simplified lookup using primary hash (destination port)
+ * @net: Network namespace
+ * @saddr: Source address, network order
+ * @sport: Source port, network order
+ * @daddr: Destination address, network order
+ * @hnum: Destination port, host order
+ * @dif: Destination interface index
+ * @sdif: Destination bridge port index, if relevant
+ * @udptable: Set of UDP hash tables
+ *
+ * Simplified lookup to be used as fallback if no sockets are found due to a
+ * potential race between (receive) address change, and lookup happening before
+ * the rehash operation. This function ignores SO_REUSEPORT groups while scoring
+ * result sockets, because if we have one, we don't need the fallback at all.
+ *
+ * Called under rcu_read_lock().
+ *
+ * Return: socket with highest matching score if any, NULL if none
+ */
+static struct sock *udp4_lib_lookup1(const struct net *net,
+ __be32 saddr, __be16 sport,
+ __be32 daddr, unsigned int hnum,
+ int dif, int sdif,
+ const struct udp_table *udptable)
+{
+ unsigned int slot = udp_hashfn(net, hnum, udptable->mask);
+ struct udp_hslot *hslot = &udptable->hash[slot];
+ struct sock *sk, *result = NULL;
+ int score, badness = 0;
+
+ sk_for_each_rcu(sk, &hslot->head) {
+ score = compute_score(sk, (struct net *)net,
+ saddr, sport, daddr, hnum, dif, sdif);
+ if (score > badness) {
+ result = sk;
+ badness = score;
+ }
+ }
+
+ return result;
+}
+
/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
__be32 saddr, __be16 sport,
@@ -526,6 +569,19 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
result = udp4_lib_lookup2(net, saddr, sport,
htonl(INADDR_ANY), hnum, dif, sdif,
hslot2, skb);
+ if (!IS_ERR_OR_NULL(result))
+ goto done;
+
+ /* Primary hash (destination port) lookup as fallback for this race:
+ * 1. __ip4_datagram_connect() sets sk_rcv_saddr
+ * 2. lookup (this function): new sk_rcv_saddr, hashes not updated yet
+ * 3. rehash operation updating _secondary and four-tuple_ hashes
+ * The primary hash doesn't need an update after 1., so, thanks to this
+ * further step, 1. and 3. don't need to be atomic against the lookup.
+ */
+ result = udp4_lib_lookup1(net, saddr, sport, daddr, hnum, dif, sdif,
+ udptable);
+
done:
if (IS_ERR(result))
return NULL;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 9ff8e72..7c9f77f 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -162,6 +162,51 @@ static int compute_score(struct sock *sk, struct net *net,
return score;
}
+/**
+ * udp6_lib_lookup1() - Simplified lookup using primary hash (destination port)
+ * @net: Network namespace
+ * @saddr: Source address, network order
+ * @sport: Source port, network order
+ * @daddr: Destination address, network order
+ * @hnum: Destination port, host order
+ * @dif: Destination interface index
+ * @sdif: Destination bridge port index, if relevant
+ * @udptable: Set of UDP hash tables
+ *
+ * Simplified lookup to be used as fallback if no sockets are found due to a
+ * potential race between (receive) address change, and lookup happening before
+ * the rehash operation. This function ignores SO_REUSEPORT groups while scoring
+ * result sockets, because if we have one, we don't need the fallback at all.
+ *
+ * Called under rcu_read_lock().
+ *
+ * Return: socket with highest matching score if any, NULL if none
+ */
+static struct sock *udp6_lib_lookup1(const struct net *net,
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr,
+ unsigned int hnum, int dif, int sdif,
+ const struct udp_table *udptable)
+{
+ unsigned int slot = udp_hashfn(net, hnum, udptable->mask);
+ struct udp_hslot *hslot = &udptable->hash[slot];
+ struct sock *sk, *result = NULL;
+ int score, badness = 0;
+
+ sk_for_each_rcu(sk, &hslot->head) {
+ score = compute_score(sk, (struct net *)net,
+ (struct in6_addr *)saddr, sport,
+ (struct in6_addr *)daddr, hnum,
+ dif, sdif);
+ if (score > badness) {
+ result = sk;
+ badness = score;
+ }
+ }
+
+ return result;
+}
+
/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
const struct in6_addr *saddr, __be16 sport,
@@ -266,6 +311,13 @@ struct sock *__udp6_lib_lookup(struct net *net,
result = udp6_lib_lookup2(net, saddr, sport,
&in6addr_any, hnum, dif, sdif,
hslot2, skb);
+ if (!IS_ERR_OR_NULL(result))
+ goto done;
+
+ /* Cover address change/lookup/rehash race: see __udp4_lib_lookup() */
+ result = udp6_lib_lookup1(net, saddr, sport, daddr, hnum, dif, sdif,
+ udptable);
+
done:
if (IS_ERR(result))
return NULL;
--
2.9.5
[PATCH OLK-6.6] x86/fpu: Clear XSTATE_BV[i] in guest XSAVE state whenever XFD[i]=1
by Zhang Kunbo, 28 Jan '26
From: Sean Christopherson <seanjc(a)google.com>
stable inclusion
from stable-v6.12.67
commit f577508cc8a0adb8b4ebe9480bba7683b6149930
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13516
CVE: CVE-2026-23005
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit b45f721775947a84996deb5c661602254ce25ce6 upstream.
When loading guest XSAVE state via KVM_SET_XSAVE, and when updating XFD in
response to a guest WRMSR, clear XFD-disabled features in the saved (or to
be restored) XSTATE_BV to ensure KVM doesn't attempt to load state for
features that are disabled via the guest's XFD. Because the kernel
executes XRSTOR with the guest's XFD, saving XSTATE_BV[i]=1 with XFD[i]=1
will cause XRSTOR to #NM and panic the kernel.
E.g. if fpu_update_guest_xfd() sets XFD without clearing XSTATE_BV:
------------[ cut here ]------------
WARNING: arch/x86/kernel/traps.c:1524 at exc_device_not_available+0x101/0x110, CPU#29: amx_test/848
Modules linked in: kvm_intel kvm irqbypass
CPU: 29 UID: 1000 PID: 848 Comm: amx_test Not tainted 6.19.0-rc2-ffa07f7fd437-x86_amx_nm_xfd_non_init-vm #171 NONE
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
RIP: 0010:exc_device_not_available+0x101/0x110
Call Trace:
<TASK>
asm_exc_device_not_available+0x1a/0x20
RIP: 0010:restore_fpregs_from_fpstate+0x36/0x90
switch_fpu_return+0x4a/0xb0
kvm_arch_vcpu_ioctl_run+0x1245/0x1e40 [kvm]
kvm_vcpu_ioctl+0x2c3/0x8f0 [kvm]
__x64_sys_ioctl+0x8f/0xd0
do_syscall_64+0x62/0x940
entry_SYSCALL_64_after_hwframe+0x4b/0x53
</TASK>
---[ end trace 0000000000000000 ]---
This can happen if the guest executes WRMSR(MSR_IA32_XFD) to set XFD[18] = 1,
and a host IRQ triggers kernel_fpu_begin() prior to the vmexit handler's
call to fpu_update_guest_xfd().
and if userspace stuffs XSTATE_BV[i]=1 via KVM_SET_XSAVE:
------------[ cut here ]------------
WARNING: arch/x86/kernel/traps.c:1524 at exc_device_not_available+0x101/0x110, CPU#14: amx_test/867
Modules linked in: kvm_intel kvm irqbypass
CPU: 14 UID: 1000 PID: 867 Comm: amx_test Not tainted 6.19.0-rc2-2dace9faccd6-x86_amx_nm_xfd_non_init-vm #168 NONE
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
RIP: 0010:exc_device_not_available+0x101/0x110
Call Trace:
<TASK>
asm_exc_device_not_available+0x1a/0x20
RIP: 0010:restore_fpregs_from_fpstate+0x36/0x90
fpu_swap_kvm_fpstate+0x6b/0x120
kvm_load_guest_fpu+0x30/0x80 [kvm]
kvm_arch_vcpu_ioctl_run+0x85/0x1e40 [kvm]
kvm_vcpu_ioctl+0x2c3/0x8f0 [kvm]
__x64_sys_ioctl+0x8f/0xd0
do_syscall_64+0x62/0x940
entry_SYSCALL_64_after_hwframe+0x4b/0x53
</TASK>
---[ end trace 0000000000000000 ]---
The new behavior is consistent with the AMX architecture. Per Intel's SDM,
XSAVE saves XSTATE_BV as '0' for components that are disabled via XFD
(and non-compacted XSAVE saves the initial configuration of the state
component):
If XSAVE, XSAVEC, XSAVEOPT, or XSAVES is saving the state component i,
the instruction does not generate #NM when XCR0[i] = IA32_XFD[i] = 1;
instead, it operates as if XINUSE[i] = 0 (and the state component was
in its initial state): it saves bit i of XSTATE_BV field of the XSAVE
header as 0; in addition, XSAVE saves the initial configuration of the
state component (the other instructions do not save state component i).
Alternatively, KVM could always do XRSTOR with XFD=0, e.g. by using
a constant XFD based on the set of enabled features when XSAVEing for
a struct fpu_guest. However, having XSTATE_BV[i]=1 for XFD-disabled
features can only happen in the above interrupt case, or in similar
scenarios involving preemption on preemptible kernels, because
fpu_swap_kvm_fpstate()'s call to save_fpregs_to_fpstate() saves the
outgoing FPU state with the current XFD; and that is (on all but the
first WRMSR to XFD) the guest XFD.
Therefore, XFD can only go out of sync with XSTATE_BV in the above
interrupt case, or in similar scenarios involving preemption on
preemptible kernels, and we can consider it (de facto) part of the KVM
ABI that KVM_GET_XSAVE returns XSTATE_BV[i]=0 for XFD-disabled features.
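The enforced invariant can be stated in one line: XSTATE_BV & XFD must
be 0 before XRSTOR runs with the guest's XFD. The masking below is the
core operation the patch applies in both fpu_update_guest_xfd() and
kvm_vcpu_ioctl_x86_set_xsave() (here xsave and xfd stand for the
respective fpstate fields):

	/*
	 * Clear XSTATE_BV for every XFD-disabled feature so that XRSTOR
	 * treats it as being in its initial state instead of raising #NM.
	 */
	xsave->header.xfeatures &= ~xfd;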
Reported-by: Paolo Bonzini <pbonzini(a)redhat.com>
Cc: stable(a)vger.kernel.org
Fixes: 820a6ee944e7 ("kvm: x86: Add emulation for IA32_XFD", 2022-01-14)
Signed-off-by: Sean Christopherson <seanjc(a)google.com>
[Move clearing of XSTATE_BV from fpu_copy_uabi_to_guest_fpstate
to kvm_vcpu_ioctl_x86_set_xsave. - Paolo]
Reviewed-by: Binbin Wu <binbin.wu(a)linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini(a)redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Conflicts:
arch/x86/kvm/x86.c
[Context difference. ]
Signed-off-by: Zhang Kunbo <zhangkunbo(a)huawei.com>
---
arch/x86/kernel/fpu/core.c | 32 +++++++++++++++++++++++++++++---
arch/x86/kvm/x86.c | 9 +++++++++
2 files changed, 38 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 1f0871be9d53..fd53ff26cd42 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -296,10 +296,29 @@ EXPORT_SYMBOL_GPL(fpu_enable_guest_xfd_features);
#ifdef CONFIG_X86_64
void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd)
{
+ struct fpstate *fpstate = guest_fpu->fpstate;
+
fpregs_lock();
- guest_fpu->fpstate->xfd = xfd;
- if (guest_fpu->fpstate->in_use)
- xfd_update_state(guest_fpu->fpstate);
+
+ /*
+ * KVM's guest ABI is that setting XFD[i]=1 *can* immediately revert the
+ * save state to its initial configuration. Likewise, KVM_GET_XSAVE does
+ * the same as XSAVE and returns XSTATE_BV[i]=0 whenever XFD[i]=1.
+ *
+ * If the guest's FPU state is in hardware, just update XFD: the XSAVE
+ * in fpu_swap_kvm_fpstate will clear XSTATE_BV[i] whenever XFD[i]=1.
+ *
+ * If however the guest's FPU state is NOT resident in hardware, clear
+ * disabled components in XSTATE_BV now, or a subsequent XRSTOR will
+ * attempt to load disabled components and generate #NM _in the host_.
+ */
+ if (xfd && test_thread_flag(TIF_NEED_FPU_LOAD))
+ fpstate->regs.xsave.header.xfeatures &= ~xfd;
+
+ fpstate->xfd = xfd;
+ if (fpstate->in_use)
+ xfd_update_state(fpstate);
+
fpregs_unlock();
}
EXPORT_SYMBOL_GPL(fpu_update_guest_xfd);
@@ -407,6 +426,13 @@ int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf,
if (ustate->xsave.header.xfeatures & ~xcr0)
return -EINVAL;
+ /*
+ * Disabled features must be in their initial state, otherwise XRSTOR
+ * causes an exception.
+ */
+ if (WARN_ON_ONCE(ustate->xsave.header.xfeatures & kstate->xfd))
+ return -EINVAL;
+
/*
* Nullify @vpkru to preserve its current value if PKRU's bit isn't set
* in the header. KVM's odd ABI is to leave PKRU untouched in this
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2139f728aecc..57fc6916906b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5541,9 +5541,18 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
+ union fpregs_state *xstate = (union fpregs_state *)guest_xsave->region;
+
if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
return 0;
+ /*
+ * For backwards compatibility, do not expect disabled features to be in
+ * their initial state. XSTATE_BV[i] must still be cleared whenever
+ * XFD[i]=1, or XRSTOR would cause a #NM.
+ */
+ xstate->xsave.header.xfeatures &= ~vcpu->arch.guest_fpu.fpstate->xfd;
+
return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu,
guest_xsave->region,
kvm_caps.supported_xcr0,
--
2.34.1
Currently, x86, RISC-V and LoongArch use the Generic Entry, which makes
maintainers' work easier and the code more elegant. arm64 has already
successfully switched to the Generic IRQ Entry in commit
b3cf07851b6c ("arm64: entry: Switch to generic IRQ entry"), so it is
time to completely convert arm64 to the Generic Entry.
The goal is to bring arm64 in line with other architectures that already
use the generic entry infrastructure, reducing duplicated code and
making it easier to share future changes in entry/exit paths, such as
"Syscall User Dispatch".
This patch set is rebased on arm64 (for-next/entry). Performance was
measured on a Kunpeng 920 using "perf bench basic syscall" with
"arm64.nopauth selinux=0 audit=1".
After the switch to Generic Entry, the performance is as follows:
| Metric     | W/O Generic Framework | With Generic Framework | Change |
| ---------- | --------------------- | ---------------------- | ------ |
| Total time | 2.487 [sec]           | 2.393 [sec]            | ↓3.8%  |
| usecs/op   | 0.248780              | 0.239361               | ↓3.8%  |
| ops/sec    | 4,019,620             | 4,177,789              | ↑3.9%  |
Compared to the earlier arch-specific handling, performance improved
by approximately 3.9%.
With syscall_get_arguments()[1], el0_svc_common() and
syscall_exit_work() additionally optimized, the performance is as follows:
| Metric     | W/O Generic Entry | With Generic Entry opt | Change |
| ---------- | ----------------- | ---------------------- | ------ |
| Total time | 2.487 [sec]       | 2.264 [sec]            | ↓9.0%  |
| usecs/op   | 0.248780          | 0.226481               | ↓9.0%  |
| ops/sec    | 4,019,620         | 4,415,383              | ↑9.8%  |
Therefore, after these optimizations, arm64 system call performance
improved by approximately 9%.
It was tested OK with the following test cases on Kunpeng 920 and the
QEMU virt platform:
- Perf tests.
- Different `dynamic preempt` mode switch.
- Pseudo NMI tests.
- Stress-ng CPU stress test.
- Hackbench stress test.
- MTE test case in Documentation/arch/arm64/memory-tagging-extension.rst
and all test cases in tools/testing/selftests/arm64/mte/*.
- "sud" selftest testcase.
- get_set_sud, get_syscall_info, set_syscall_info, peeksiginfo
in tools/testing/selftests/ptrace.
- breakpoint_test_arm64 in selftests/breakpoints.
- syscall-abi and ptrace in tools/testing/selftests/arm64/abi
- fp-ptrace, sve-ptrace, za-ptrace in selftests/arm64/fp.
- vdso_test_getrandom in tools/testing/selftests/vDSO
- Strace tests.
The test QEMU configuration is as follows:
qemu-system-aarch64 \
-M virt,gic-version=3,virtualization=on,mte=on \
-cpu max,pauth-impdef=on \
-kernel Image \
-smp 8,sockets=1,cores=4,threads=2 \
-m 512m \
-nographic \
-no-reboot \
-device virtio-rng-pci \
-append "root=/dev/vda rw console=ttyAMA0 kgdboc=ttyAMA0,115200 \
earlycon preempt=voluntary irqchip.gicv3_pseudo_nmi=1" \
-drive if=none,file=images/rootfs.ext4,format=raw,id=hd0 \
-device virtio-blk-device,drive=hd0 \
[1]: https://kernel.googlesource.com/pub/scm/linux/kernel/git/akpm/mm/+/89bf683c…
Changes in v11:
- Remove unused syscall in syscall_trace_enter().
- Update and provide a detailed explanation of the differences after
moving rseq_syscall() before audit_syscall_exit().
- Rebased on arm64 (for-next/entry), and removed the first 3 already-applied patches.
- syscall_exit_to_user_mode_work() for arch reuse instead of adding
new syscall_exit_to_user_mode_work_prepare() helper.
- Link to v10: https://lore.kernel.org/all/20251222114737.1334364-1-ruanjinjie@huawei.com/
Changes in v10:
- Rebased on v6.19-rc1, rename syscall_exit_to_user_mode_prepare() to
syscall_exit_to_user_mode_work_prepare() to avoid conflict.
- Also inline syscall_trace_enter().
- Support aarch64 for sud_benchmark.
- Update and correct the commit message.
- Add Reviewed-by.
- Link to v9: https://lore.kernel.org/all/20251204082123.2792067-1-ruanjinjie@huawei.com/
Changes in v9:
- Move "Return early for ptrace_report_syscall_entry() error" patch ahead
to make it not introduce a regression.
- Not check _TIF_SECCOMP/SYSCALL_EMU for syscall_exit_work() in
a separate patch.
- Do not report_syscall_exit() for PTRACE_SYSEMU_SINGLESTEP in a separate
patch.
- Add two performance patch to improve the arm64 performance.
- Add Reviewed-by.
- Link to v8: https://lore.kernel.org/all/20251126071446.3234218-1-ruanjinjie@huawei.com/
Changes in v8:
- Rename "report_syscall_enter()" to "report_syscall_entry()".
- Add ptrace_save_reg() to avoid duplication.
- Remove unused _TIF_WORK_MASK in a standalone patch.
- Align syscall_trace_enter() return value with the generic version.
- Use "scno" instead of regs->syscallno in el0_svc_common().
- Move rseq_syscall() ahead in a standalone patch to clarify it clearly.
- Rename "syscall_trace_exit()" to "syscall_exit_work()".
- Keep the goto in el0_svc_common().
- No argument was passed to __secure_computing() and check -1 not -1L.
- Remove "Add has_syscall_work() helper" patch.
- Move "Add syscall_exit_to_user_mode_prepare() helper" patch later.
- Add missing header for asm/entry-common.h.
- Update the implementation of arch_syscall_is_vdso_sigreturn().
- Add "ARCH_SYSCALL_WORK_EXIT" to be defined as "SECCOMP | SYSCALL_EMU"
to keep the behaviour unchanged.
- Add more testcases test.
- Add Reviewed-by.
- Update the commit message.
- Link to v7: https://lore.kernel.org/all/20251117133048.53182-1-ruanjinjie@huawei.com/
Changes in v7:
- Support "Syscall User Dispatch" by implementing
arch_syscall_is_vdso_sigreturn() as kemal suggested.
- Add aarch64 support for "sud" selftest testcase, which tested ok with
the patch series.
- Fix the kernel test robot warning for arch_ptrace_report_syscall_entry()
and arch_ptrace_report_syscall_exit() in asm/entry-common.h.
- Add perf syscall performance test.
- Link to v6: https://lore.kernel.org/all/20250916082611.2972008-1-ruanjinjie@huawei.com/
Changes in v6:
- Rebased on v6.17-rc5-next as arm64 generic irq entry has merged.
- Update the commit message.
- Link to v5: https://lore.kernel.org/all/20241206101744.4161990-1-ruanjinjie@huawei.com/
Changes in v5:
- Do not change arm32 and keep the interrupts_enabled() macro for the gicv3 driver.
- Move irqentry_state definition into arch/arm64/kernel/entry-common.c.
- Avoid removing the __enter_from_*() and __exit_to_*() wrappers.
- Update "irqentry_state_t ret/irq_state" to "state"
to keep it consistently.
- Use generic irq entry header for PREEMPT_DYNAMIC after split
the generic entry.
- Also refactor the ARM64 syscall code.
- Introduce arch_ptrace_report_syscall_entry/exit(), instead of
arch_pre/post_report_syscall_entry/exit() to simplify code.
- Give the syscall patches a clear separation.
- Update the commit message.
- Link to v4: https://lore.kernel.org/all/20241025100700.3714552-1-ruanjinjie@huawei.com/
Changes in v4:
- Rework/cleanup split into a few patches as Mark suggested.
- Replace interrupts_enabled() macro with regs_irqs_disabled(), instead
of leaving it here.
- Remove rcu and lockdep state in pt_regs by using temporary
irqentry_state_t as Mark suggested.
- Remove some unnecessary intermediate functions to make it clear.
- Rework preempt irq and PREEMPT_DYNAMIC code
to make the switch more clear.
- arch_prepare_*_entry/exit() -> arch_pre_*_entry/exit().
- Expand the arch functions comment.
- Make arch functions closer to its caller.
- Declare saved_reg in for block.
- Remove arch_exit_to_kernel_mode_prepare(), arch_enter_from_kernel_mode().
- Adjust "Add few arch functions to use generic entry" patch to be
the penultimate.
- Update the commit message.
- Add suggested-by.
- Link to v3: https://lore.kernel.org/all/20240629085601.470241-1-ruanjinjie@huawei.com/
Changes in v3:
- Test the MTE test cases.
- Handle forget_syscall() in arch_post_report_syscall_entry()
- Make the arch funcs not use __weak as Thomas suggested, so move
the arch funcs to entry-common.h, and make arch_forget_syscall() folded
in arch_post_report_syscall_entry() as suggested.
- Move report_single_step() to thread_info.h for arm64
- Change __always_inline() to inline, add inline for the other arch funcs.
- Remove unused signal.h for entry-common.h.
- Add Suggested-by.
- Update the commit message.
Changes in v2:
- Add tested-by.
- Fix a bug where arch_post_report_syscall_entry() was not called in
syscall_trace_enter() if ptrace_report_syscall_entry() returned non-zero.
- Refactor report_syscall().
- Add comment for arch_prepare_report_syscall_exit().
- Adjust entry-common.h header file inclusion to alphabetical order.
- Update the commit message.
Jinjie Ruan (13):
entry: Remove unused syscall in syscall_trace_enter()
arm64/ptrace: Refactor syscall_trace_enter/exit()
arm64: ptrace: Move rseq_syscall() before audit_syscall_exit()
arm64: syscall: Rework el0_svc_common()
arm64/ptrace: Not check _TIF_SECCOMP/SYSCALL_EMU for
syscall_exit_work()
arm64/ptrace: Do not report_syscall_exit() for
PTRACE_SYSEMU_SINGLESTEP
arm64/ptrace: Expand secure_computing() in place
arm64/ptrace: Use syscall_get_arguments() helper
entry: Rework syscall_exit_to_user_mode_work() for arch reuse
entry: Add arch_ptrace_report_syscall_entry/exit()
arm64: entry: Convert to generic entry
arm64: Inline el0_svc_common()
entry: Inline syscall_exit_work() and syscall_trace_enter()
kemal (1):
selftests: sud_test: Support aarch64
arch/arm64/Kconfig | 2 +-
arch/arm64/include/asm/entry-common.h | 76 +++++++++
arch/arm64/include/asm/syscall.h | 19 ++-
arch/arm64/include/asm/thread_info.h | 16 +-
arch/arm64/kernel/debug-monitors.c | 7 +
arch/arm64/kernel/ptrace.c | 115 -------------
arch/arm64/kernel/signal.c | 2 +-
arch/arm64/kernel/syscall.c | 29 +---
include/linux/entry-common.h | 158 ++++++++++++++++--
kernel/entry/common.h | 7 -
kernel/entry/syscall-common.c | 96 +----------
kernel/entry/syscall_user_dispatch.c | 4 +-
.../syscall_user_dispatch/sud_benchmark.c | 2 +-
.../syscall_user_dispatch/sud_test.c | 4 +
14 files changed, 268 insertions(+), 269 deletions(-)
delete mode 100644 kernel/entry/common.h
--
2.34.1
Fix an exception on umount when the dev fd is released.
Hongbo Li (2):
mfs: Fix wild-memory-access error when mfs event is destroyed.
mfs: Do not release the async event immediately when read failed
fs/mfs/cache.c | 2 ++
fs/mfs/dev.c | 12 ++++++++----
fs/mfs/super.c | 6 +++++-
3 files changed, 15 insertions(+), 5 deletions(-)
--
2.34.1
From: Steven Rostedt <rostedt(a)goodmis.org>
mainline inclusion
from mainline-v6.19-rc4
commit 90f9f5d64cae4e72defd96a2a22760173cb3c9ec
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/8435
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
When creating a synthetic event based on an existing synthetic event that
had a stacktrace field and the new synthetic event used that field a
kernel crash occurred:
~# cd /sys/kernel/tracing
~# echo 's:stack unsigned long stack[];' > dynamic_events
~# echo 'hist:keys=prev_pid:s0=common_stacktrace if prev_state & 3' >> events/sched/sched_switch/trigger
~# echo 'hist:keys=next_pid:s1=$s0:onmatch(sched.sched_switch).trace(stack,$s1)' >> events/sched/sched_switch/trigger
The above creates a synthetic event that takes a stacktrace when a task
schedules out in a non-running state and passes that stacktrace to the
sched_switch event when that task schedules back in. It triggers the
"stack" synthetic event that has a stacktrace as its field (called "stack").
~# echo 's:syscall_stack s64 id; unsigned long stack[];' >> dynamic_events
~# echo 'hist:keys=common_pid:s2=stack' >> events/synthetic/stack/trigger
~# echo 'hist:keys=common_pid:s3=$s2,i0=id:onmatch(synthetic.stack).trace(syscall_stack,$i0,$s3)' >> events/raw_syscalls/sys_exit/trigger
The above makes another synthetic event called "syscall_stack" that
attaches the first synthetic event (stack) to the sys_exit trace event and
records the stacktrace from the stack event with the id of the system call
that is exiting.
When enabling this event (or using it in a histogram):
~# echo 1 > events/synthetic/syscall_stack/enable
Produces a kernel crash!
BUG: unable to handle page fault for address: 0000000000400010
#PF: supervisor read access in kernel mode
#PF: error_code(0x0000) - not-present page
PGD 0 P4D 0
Oops: Oops: 0000 [#1] SMP PTI
CPU: 6 UID: 0 PID: 1257 Comm: bash Not tainted 6.16.3+deb14-amd64 #1 PREEMPT(lazy) Debian 6.16.3-1
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.17.0-debian-1.17.0-1 04/01/2014
RIP: 0010:trace_event_raw_event_synth+0x90/0x380
Code: c5 00 00 00 00 85 d2 0f 84 e1 00 00 00 31 db eb 34 0f 1f 00 66 66 2e 0f 1f 84 00 00 00 00 00 66 66 2e 0f 1f 84 00 00 00 00 00 <49> 8b 04 24 48 83 c3 01 8d 0c c5 08 00 00 00 01 cd 41 3b 5d 40 0f
RSP: 0018:ffffd2670388f958 EFLAGS: 00010202
RAX: ffff8ba1065cc100 RBX: 0000000000000000 RCX: 0000000000000000
RDX: 0000000000000001 RSI: fffff266ffda7b90 RDI: ffffd2670388f9b0
RBP: 0000000000000010 R08: ffff8ba104e76000 R09: ffffd2670388fa50
R10: ffff8ba102dd42e0 R11: ffffffff9a908970 R12: 0000000000400010
R13: ffff8ba10a246400 R14: ffff8ba10a710220 R15: fffff266ffda7b90
FS: 00007fa3bc63f740(0000) GS:ffff8ba2e0f48000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000000400010 CR3: 0000000107f9e003 CR4: 0000000000172ef0
Call Trace:
<TASK>
? __tracing_map_insert+0x208/0x3a0
action_trace+0x67/0x70
event_hist_trigger+0x633/0x6d0
event_triggers_call+0x82/0x130
trace_event_buffer_commit+0x19d/0x250
trace_event_raw_event_sys_exit+0x62/0xb0
syscall_exit_work+0x9d/0x140
do_syscall_64+0x20a/0x2f0
? trace_event_raw_event_sched_switch+0x12b/0x170
? save_fpregs_to_fpstate+0x3e/0x90
? _raw_spin_unlock+0xe/0x30
? finish_task_switch.isra.0+0x97/0x2c0
? __rseq_handle_notify_resume+0xad/0x4c0
? __schedule+0x4b8/0xd00
? restore_fpregs_from_fpstate+0x3c/0x90
? switch_fpu_return+0x5b/0xe0
? do_syscall_64+0x1ef/0x2f0
? do_fault+0x2e9/0x540
? __handle_mm_fault+0x7d1/0xf70
? count_memcg_events+0x167/0x1d0
? handle_mm_fault+0x1d7/0x2e0
? do_user_addr_fault+0x2c3/0x7f0
entry_SYSCALL_64_after_hwframe+0x76/0x7e
The reason is that the stacktrace field is not labeled as such, and is
treated as a normal field rather than the dynamic array that it is.
In trace_event_raw_event_synth() the field is still treated as a
dynamic array, but the retrieval of the data is handled as a normal
field, and the reference is just the meta data:
// Meta data is retrieved instead of a dynamic array
str_val = (char *)(long)var_ref_vals[val_idx];
// Then when it tries to process it:
len = *((unsigned long *)str_val) + 1;
It triggers a kernel page fault.
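To make the failure mode concrete, here is a small userspace sketch (illustrative only, not the tracing code): a stacktrace variable is meant to be passed around as a pointer to a { length, entries[] } blob, so applying that dereference to a slot that actually holds hist-field meta data reads through a bogus address.

#include <stdio.h>

/* Assumed layout of a synthetic-event stacktrace variable: a length
 * word followed by the saved return addresses. */
struct stack_var {
	unsigned long len;
	unsigned long entries[8];
};

int main(void)
{
	struct stack_var sv = { 3, { 0x101, 0x102, 0x103 } };
	unsigned long var_ref_vals[1];

	/* Correct case: the variable slot carries a pointer to the blob,
	 * and the first dereference yields the element count. */
	var_ref_vals[0] = (unsigned long)&sv;
	printf("words to copy (incl. header): %lu\n",
	       *(unsigned long *)var_ref_vals[0] + 1);

	/* The bug: without FILTER_STACKTRACE the slot held meta data
	 * instead of this pointer, and the very same dereference hit an
	 * unmapped address such as the 0x400010 seen above. */
	return 0;
}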
To fix this, first when defining the fields of the first synthetic event,
set the filter type to FILTER_STACKTRACE. This is used later by the second
synthetic event to know that this field is a stacktrace. When creating
the field of the new synthetic event, have it use this FILTER_STACKTRACE
to know to create a stacktrace field to copy the stacktrace into.
Cc: stable(a)vger.kernel.org
Cc: Masami Hiramatsu <mhiramat(a)kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers(a)efficios.com>
Cc: Tom Zanussi <zanussi(a)kernel.org>
Link: https://patch.msgid.link/20260122194824.6905a38e@gandalf.local.home
Fixes: 00cf3d672a9d ("tracing: Allow synthetic events to pass around stacktraces")
Signed-off-by: Steven Rostedt (Google) <rostedt(a)goodmis.org>
Signed-off-by: Tengda Wu <wutengda2(a)huawei.com>
---
kernel/trace/trace_events_hist.c | 9 +++++++++
kernel/trace/trace_events_synth.c | 8 +++++++-
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 2d6f3c63bc62..f22aa4afaa6d 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -2047,6 +2047,15 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
hist_field->fn_num = HIST_FIELD_FN_RELDYNSTRING;
else
hist_field->fn_num = HIST_FIELD_FN_PSTRING;
+ } else if (field->filter_type == FILTER_STACKTRACE) {
+ flags |= HIST_FIELD_FL_STACKTRACE;
+
+ hist_field->size = MAX_FILTER_STR_VAL;
+ hist_field->type = kstrdup_const(field->type, GFP_KERNEL);
+ if (!hist_field->type)
+ goto free;
+
+ hist_field->fn_num = HIST_FIELD_FN_STACK;
} else {
hist_field->size = field->size;
hist_field->is_signed = field->is_signed;
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index 794e72dbb12d..13c2d3ad7185 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -137,7 +137,9 @@ static int synth_event_define_fields(struct trace_event_call *call)
struct synth_event *event = call->data;
unsigned int i, size, n_u64;
char *name, *type;
+ int filter_type;
bool is_signed;
+ bool is_stack;
int ret = 0;
for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
@@ -145,8 +147,12 @@ static int synth_event_define_fields(struct trace_event_call *call)
is_signed = event->fields[i]->is_signed;
type = event->fields[i]->type;
name = event->fields[i]->name;
+ is_stack = event->fields[i]->is_stack;
+
+ filter_type = is_stack ? FILTER_STACKTRACE : FILTER_OTHER;
+
ret = trace_define_field(call, type, name, offset, size,
- is_signed, FILTER_OTHER);
+ is_signed, filter_type);
if (ret)
break;
--
2.34.1
[PATCH OLK-6.6] ima: don't clear IMA_DIGSIG flag when setting or removing non-IMA xattr
by Zhao Yipeng 27 Jan '26
From: Coiby Xu <coxu(a)redhat.com>
stable inclusion
from stable-v6.6.117
commit d2993a7e98eb70c737c6f5365a190e79c72b8407
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/11626
CVE: CVE-2025-68183
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 88b4cbcf6b041ae0f2fc8a34554a5b6a83a2b7cd ]
Currently when both IMA and EVM are in fix mode, the IMA signature will
be reset to IMA hash if a program first stores IMA signature in
security.ima and then writes/removes some other security xattr for the
file.
For example, on Fedora, after booting the kernel with "ima_appraise=fix
evm=fix ima_policy=appraise_tcb" and installing rpm-plugin-ima,
installing/reinstalling a package will not make good reference IMA
signature generated. Instead IMA hash is generated,
# getfattr -m - -d -e hex /usr/bin/bash
# file: usr/bin/bash
security.ima=0x0404...
This happens because when setting security.selinux, the IMA_DIGSIG flag
that had been set early was cleared. As a result, IMA hash is generated
when the file is closed.
Similarly, IMA signature can be cleared on file close after removing
security xattr like security.evm or setting/removing ACL.
Prevent replacing the IMA file signature with a file hash, by preventing
the IMA_DIGSIG flag from being reset.
Here's a minimal C reproducer which sets security.selinux as the last
step; this step can also be replaced by removing security.evm or setting an ACL:
#include <stdio.h>
#include <sys/xattr.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
int main() {
	const char* file_path = "/usr/sbin/test_binary";
	const char* hex_string = "030204d33204490066306402304";
	int length = strlen(hex_string);
	char* ima_attr_value;
	int fd;

	fd = open(file_path, O_WRONLY|O_CREAT|O_EXCL, 0644);
	if (fd == -1) {
		perror("Error opening file");
		return 1;
	}
	ima_attr_value = (char*)malloc(length / 2);
	for (int i = 0, j = 0; i < length; i += 2, j++) {
		sscanf(hex_string + i, "%2hhx", &ima_attr_value[j]);
	}
	if (fsetxattr(fd, "security.ima", ima_attr_value, length/2, 0) == -1) {
		perror("Error setting extended attribute");
		close(fd);
		return 1;
	}
	const char* selinux_value = "system_u:object_r:bin_t:s0";
	if (fsetxattr(fd, "security.selinux", selinux_value, strlen(selinux_value), 0) == -1) {
		perror("Error setting extended attribute");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
Signed-off-by: Coiby Xu <coxu(a)redhat.com>
Signed-off-by: Mimi Zohar <zohar(a)linux.ibm.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Zhao Yipeng <zhaoyipeng5(a)huawei.com>
---
security/integrity/ima/ima_appraise.c | 23 ++++++++++++++++++-----
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 8a54399d212d..a9e31d8addc6 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -809,6 +809,15 @@ static int ima_protect_xattr(struct dentry *dentry, const char *xattr_name,
return 0;
}
+/*
+ * ima_reset_appraise_flags - reset ima_iint_cache flags
+ *
+ * @digsig: whether to clear/set IMA_DIGSIG flag, tristate values
+ * 0: clear IMA_DIGSIG
+ * 1: set IMA_DIGSIG
+ * -1: don't change IMA_DIGSIG
+ *
+ */
static void ima_reset_appraise_flags(struct inode *inode, int digsig)
{
struct integrity_iint_cache *iint;
@@ -821,9 +830,9 @@ static void ima_reset_appraise_flags(struct inode *inode, int digsig)
return;
iint->measured_pcrs = 0;
set_bit(IMA_CHANGE_XATTR, &iint->atomic_flags);
- if (digsig)
+ if (digsig == 1)
set_bit(IMA_DIGSIG, &iint->atomic_flags);
- else
+ else if (digsig == 0)
clear_bit(IMA_DIGSIG, &iint->atomic_flags);
}
@@ -908,6 +917,8 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
digsig = (xvalue->type == EVM_IMA_XATTR_DIGSIG);
} else if (!strcmp(xattr_name, XATTR_NAME_EVM) && xattr_value_len > 0) {
digsig = (xvalue->type == EVM_XATTR_PORTABLE_DIGSIG);
+ } else {
+ digsig = -1;
}
if (result == 1 || evm_revalidate_status(xattr_name)) {
ima_reset_appraise_flags(d_backing_inode(dentry), digsig);
@@ -921,18 +932,20 @@ int ima_inode_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
const char *acl_name, struct posix_acl *kacl)
{
if (evm_revalidate_status(acl_name))
- ima_reset_appraise_flags(d_backing_inode(dentry), 0);
+ ima_reset_appraise_flags(d_backing_inode(dentry), -1);
return 0;
}
int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name)
{
- int result;
+ int result, digsig = -1;
result = ima_protect_xattr(dentry, xattr_name, NULL, 0);
if (result == 1 || evm_revalidate_status(xattr_name)) {
- ima_reset_appraise_flags(d_backing_inode(dentry), 0);
+ if (!strcmp(xattr_name, XATTR_NAME_IMA))
+ digsig = 0;
+ ima_reset_appraise_flags(d_backing_inode(dentry), digsig);
if (result == 1)
result = 0;
}
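A compact sketch of the tristate convention the fix introduces (illustrative bit position and names, not the kernel's):

#include <stdio.h>

#define IMA_DIGSIG_BIT (1UL << 0) /* illustrative flag bit */

enum { DIGSIG_CLEAR = 0, DIGSIG_SET = 1, DIGSIG_KEEP = -1 };

static void reset_appraise_flags(unsigned long *flags, int digsig)
{
	if (digsig == DIGSIG_SET)
		*flags |= IMA_DIGSIG_BIT;
	else if (digsig == DIGSIG_CLEAR)
		*flags &= ~IMA_DIGSIG_BIT;
	/* DIGSIG_KEEP: a non-IMA xattr changed; leave the flag alone */
}

int main(void)
{
	unsigned long flags = IMA_DIGSIG_BIT; /* file carries a signature */

	reset_appraise_flags(&flags, DIGSIG_KEEP);  /* security.selinux set */
	printf("after KEEP:  %lu\n", flags & IMA_DIGSIG_BIT);

	reset_appraise_flags(&flags, DIGSIG_CLEAR); /* security.ima removed */
	printf("after CLEAR: %lu\n", flags & IMA_DIGSIG_BIT);
	return 0;
}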
--
2.34.1
From: Mathy Vanhoef <Mathy.Vanhoef(a)kuleuven.be>
stable inclusion
from stable-v6.1.146
commit e2c8a3c0388aef6bfc4aabfba07bc7dff16eea80
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/7758
CVE: CVE-2025-27558
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 737bb912ebbe4571195c56eba557c4d7315b26fb upstream.
This patch is a mitigation to prevent the A-MSDU spoofing vulnerability
for mesh networks. The initial update to the IEEE 802.11 standard, in
response to the FragAttacks, missed this case (CVE-2025-27558). It can
be considered a variant of CVE-2020-24588 but for mesh networks.
This patch tries to detect if a standard MSDU was turned into an A-MSDU
by an adversary. This is done by parsing a received A-MSDU as a standard
MSDU, calculating the length of the Mesh Control header, and seeing if
the 6 bytes after this header equal the start of an rfc1042 header. If
equal, this is a strong indication of an ongoing attack attempt.
This defense was tested with mac80211_hwsim against a mesh network that
uses an empty Mesh Address Extension field, i.e., when four addresses
are used, and when using a 12-byte Mesh Address Extension field, i.e.,
when six addresses are used. Functionality of normal MSDUs and A-MSDUs
was also tested, and confirmed working, when using both an empty and
12-byte Mesh Address Extension field.
It was also tested with mac80211_hwsim that A-MSDU attacks in non-mesh
networks keep being detected and prevented.
Note that the vulnerability being patched, and the defense being
implemented, was also discussed in the following paper and in the
following IEEE 802.11 presentation:
https://papers.mathyvanhoef.com/wisec2025.pdf
https://mentor.ieee.org/802.11/dcn/25/11-25-0949-00-000m-a-msdu-mesh-spoof-…
Cc: stable(a)vger.kernel.org
Signed-off-by: Mathy Vanhoef <Mathy.Vanhoef(a)kuleuven.be>
Link: https://patch.msgid.link/20250616004635.224344-1-Mathy.Vanhoef@kuleuven.be
Signed-off-by: Johannes Berg <johannes.berg(a)intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Conflicts:
net/wireless/util.c
[Fanhua Li: context conflict]
Signed-off-by: Fanhua Li <lifanhua5(a)huawei.com>
---
net/wireless/util.c | 51 +++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 49 insertions(+), 2 deletions(-)
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 15773db362a1..81ac8d1da085 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -742,6 +742,51 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
return frame;
}
+/*
+ * Detects if an MSDU frame was maliciously converted into an A-MSDU
+ * frame by an adversary. This is done by parsing the received frame
+ * as if it were a regular MSDU, even though the A-MSDU flag is set.
+ *
+ * For non-mesh interfaces, detection involves checking whether the
+ * payload, when interpreted as an MSDU, begins with a valid RFC1042
+ * header. This is done by comparing the A-MSDU subheader's destination
+ * address to the start of the RFC1042 header.
+ *
+ * For mesh interfaces, the MSDU includes a 6-byte Mesh Control field
+ * and an optional variable-length Mesh Address Extension field before
+ * the RFC1042 header. The position of the RFC1042 header must therefore
+ * be calculated based on the mesh header length.
+ *
+ * Since this function intentionally parses an A-MSDU frame as an MSDU,
+ * it only assumes that the A-MSDU subframe header is present, and
+ * beyond this it performs its own bounds checks under the assumption
+ * that the frame is instead parsed as a non-aggregated MSDU.
+ */
+static bool
+is_amsdu_aggregation_attack(struct ethhdr *eth, struct sk_buff *skb,
+ enum nl80211_iftype iftype)
+{
+ int offset;
+
+ /* Non-mesh case can be directly compared */
+ if (iftype != NL80211_IFTYPE_MESH_POINT)
+ return ether_addr_equal(eth->h_dest, rfc1042_header);
+
+ offset = __ieee80211_get_mesh_hdrlen(eth->h_dest[0]);
+ if (offset == 6) {
+ /* Mesh case with empty address extension field */
+ return ether_addr_equal(eth->h_source, rfc1042_header);
+ } else if (offset + ETH_ALEN <= skb->len) {
+ /* Mesh case with non-empty address extension field */
+ u8 temp[ETH_ALEN];
+
+ skb_copy_bits(skb, offset, temp, ETH_ALEN);
+ return ether_addr_equal(temp, rfc1042_header);
+ }
+
+ return false;
+}
+
void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
const u8 *addr, enum nl80211_iftype iftype,
const unsigned int extra_headroom,
@@ -774,8 +819,10 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
/* the last MSDU has no padding */
if (subframe_len > remaining)
goto purge;
- /* mitigate A-MSDU aggregation injection attacks */
- if (ether_addr_equal(eth.h_dest, rfc1042_header))
+ /* mitigate A-MSDU aggregation injection attacks, to be
+ * checked when processing first subframe (offset == 0).
+ */
+ if (offset == 0 && is_amsdu_aggregation_attack(&eth, skb, iftype))
goto purge;
offset += sizeof(struct ethhdr);
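For context, here is a userspace re-implementation of the mesh header length logic the new check leans on (a sketch: the constants mirror the IEEE 802.11 Mesh Control Address Extension modes as used by __ieee80211_get_mesh_hdrlen(); treat the exact values as assumptions).

#include <stdio.h>

#define MESH_FLAGS_AE_A4    0x1
#define MESH_FLAGS_AE_A5_A6 0x2
#define MESH_FLAGS_AE       0x3

static int mesh_hdrlen(unsigned char flags)
{
	switch (flags & MESH_FLAGS_AE) {
	case MESH_FLAGS_AE_A4:
		return 12; /* one extra address in the extension field  */
	case MESH_FLAGS_AE_A5_A6:
		return 18; /* two extra addresses in the extension field */
	default:
		return 6;  /* empty Mesh Address Extension field         */
	}
}

int main(void)
{
	/* offset == 6 selects the "empty address extension" fast path
	 * in is_amsdu_aggregation_attack() above. */
	printf("%d %d %d\n", mesh_hdrlen(0), mesh_hdrlen(1), mesh_hdrlen(2));
	return 0;
}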
--
2.22.0
From: Mathy Vanhoef <Mathy.Vanhoef(a)kuleuven.be>
stable inclusion
from stable-v6.6.99
commit ec6392061de6681148b63ee6c8744da833498cdd
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/7758
CVE: CVE-2025-27558
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 737bb912ebbe4571195c56eba557c4d7315b26fb upstream.
This patch is a mitigation to prevent the A-MSDU spoofing vulnerability
for mesh networks. The initial update to the IEEE 802.11 standard, in
response to the FragAttacks, missed this case (CVE-2025-27558). It can
be considered a variant of CVE-2020-24588 but for mesh networks.
This patch tries to detect if a standard MSDU was turned into an A-MSDU
by an adversary. This is done by parsing a received A-MSDU as a standard
MSDU, calculating the length of the Mesh Control header, and seeing if
the 6 bytes after this header equal the start of an rfc1042 header. If
equal, this is a strong indication of an ongoing attack attempt.
This defense was tested with mac80211_hwsim against a mesh network that
uses an empty Mesh Address Extension field, i.e., when four addresses
are used, and when using a 12-byte Mesh Address Extension field, i.e.,
when six addresses are used. Functionality of normal MSDUs and A-MSDUs
was also tested, and confirmed working, when using both an empty and
12-byte Mesh Address Extension field.
It was also tested with mac80211_hwsim that A-MSDU attacks in non-mesh
networks keep being detected and prevented.
Note that the vulnerability being patched, and the defense being
implemented, was also discussed in the following paper and in the
following IEEE 802.11 presentation:
https://papers.mathyvanhoef.com/wisec2025.pdf
https://mentor.ieee.org/802.11/dcn/25/11-25-0949-00-000m-a-msdu-mesh-spoof-…
Cc: stable(a)vger.kernel.org
Signed-off-by: Mathy Vanhoef <Mathy.Vanhoef(a)kuleuven.be>
Link: https://patch.msgid.link/20250616004635.224344-1-Mathy.Vanhoef@kuleuven.be
Signed-off-by: Johannes Berg <johannes.berg(a)intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Wang Hai <wanghai38(a)huawei.com>
---
net/wireless/util.c | 52 +++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 50 insertions(+), 2 deletions(-)
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 7acd8d0db61a..24e5af65da58 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -826,6 +826,52 @@ bool ieee80211_is_valid_amsdu(struct sk_buff *skb, u8 mesh_hdr)
}
EXPORT_SYMBOL(ieee80211_is_valid_amsdu);
+
+/*
+ * Detects if an MSDU frame was maliciously converted into an A-MSDU
+ * frame by an adversary. This is done by parsing the received frame
+ * as if it were a regular MSDU, even though the A-MSDU flag is set.
+ *
+ * For non-mesh interfaces, detection involves checking whether the
+ * payload, when interpreted as an MSDU, begins with a valid RFC1042
+ * header. This is done by comparing the A-MSDU subheader's destination
+ * address to the start of the RFC1042 header.
+ *
+ * For mesh interfaces, the MSDU includes a 6-byte Mesh Control field
+ * and an optional variable-length Mesh Address Extension field before
+ * the RFC1042 header. The position of the RFC1042 header must therefore
+ * be calculated based on the mesh header length.
+ *
+ * Since this function intentionally parses an A-MSDU frame as an MSDU,
+ * it only assumes that the A-MSDU subframe header is present, and
+ * beyond this it performs its own bounds checks under the assumption
+ * that the frame is instead parsed as a non-aggregated MSDU.
+ */
+static bool
+is_amsdu_aggregation_attack(struct ethhdr *eth, struct sk_buff *skb,
+ enum nl80211_iftype iftype)
+{
+ int offset;
+
+ /* Non-mesh case can be directly compared */
+ if (iftype != NL80211_IFTYPE_MESH_POINT)
+ return ether_addr_equal(eth->h_dest, rfc1042_header);
+
+ offset = __ieee80211_get_mesh_hdrlen(eth->h_dest[0]);
+ if (offset == 6) {
+ /* Mesh case with empty address extension field */
+ return ether_addr_equal(eth->h_source, rfc1042_header);
+ } else if (offset + ETH_ALEN <= skb->len) {
+ /* Mesh case with non-empty address extension field */
+ u8 temp[ETH_ALEN];
+
+ skb_copy_bits(skb, offset, temp, ETH_ALEN);
+ return ether_addr_equal(temp, rfc1042_header);
+ }
+
+ return false;
+}
+
void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
const u8 *addr, enum nl80211_iftype iftype,
const unsigned int extra_headroom,
@@ -867,8 +913,10 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
/* the last MSDU has no padding */
if (subframe_len > remaining)
goto purge;
- /* mitigate A-MSDU aggregation injection attacks */
- if (ether_addr_equal(hdr.eth.h_dest, rfc1042_header))
+ /* mitigate A-MSDU aggregation injection attacks, to be
+ * checked when processing first subframe (offset == 0).
+ */
+ if (offset == 0 && is_amsdu_aggregation_attack(&hdr.eth, skb, iftype))
goto purge;
offset += sizeof(struct ethhdr);
--
2.22.0
From: Mathy Vanhoef <Mathy.Vanhoef(a)kuleuven.be>
stable inclusion
from stable-v6.1.146
commit e2c8a3c0388aef6bfc4aabfba07bc7dff16eea80
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/9288
CVE: CVE-2025-38512
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 737bb912ebbe4571195c56eba557c4d7315b26fb upstream.
This patch is a mitigation to prevent the A-MSDU spoofing vulnerability
for mesh networks. The initial update to the IEEE 802.11 standard, in
response to the FragAttacks, missed this case (CVE-2025-27558). It can
be considered a variant of CVE-2020-24588 but for mesh networks.
This patch tries to detect if a standard MSDU was turned into an A-MSDU
by an adversary. This is done by parsing a received A-MSDU as a standard
MSDU, calculating the length of the Mesh Control header, and seeing if
the 6 bytes after this header equal the start of an rfc1042 header. If
equal, this is a strong indication of an ongoing attack attempt.
This defense was tested with mac80211_hwsim against a mesh network that
uses an empty Mesh Address Extension field, i.e., when four addresses
are used, and when using a 12-byte Mesh Address Extension field, i.e.,
when six addresses are used. Functionality of normal MSDUs and A-MSDUs
was also tested, and confirmed working, when using both an empty and
12-byte Mesh Address Extension field.
It was also tested with mac80211_hwsim that A-MSDU attacks in non-mesh
networks keep being detected and prevented.
Note that the vulnerability being patched, and the defense being
implemented, was also discussed in the following paper and in the
following IEEE 802.11 presentation:
https://papers.mathyvanhoef.com/wisec2025.pdf
https://mentor.ieee.org/802.11/dcn/25/11-25-0949-00-000m-a-msdu-mesh-spoof-…
Cc: stable(a)vger.kernel.org
Signed-off-by: Mathy Vanhoef <Mathy.Vanhoef(a)kuleuven.be>
Link: https://patch.msgid.link/20250616004635.224344-1-Mathy.Vanhoef@kuleuven.be
Signed-off-by: Johannes Berg <johannes.berg(a)intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Conflicts:
net/wireless/util.c
[Fanhua Li: context conflict]
Signed-off-by: Fanhua Li <lifanhua5(a)huawei.com>
---
net/wireless/util.c | 51 +++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 49 insertions(+), 2 deletions(-)
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 15773db362a15..81ac8d1da0854 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -742,6 +742,51 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
return frame;
}
+/*
+ * Detects if an MSDU frame was maliciously converted into an A-MSDU
+ * frame by an adversary. This is done by parsing the received frame
+ * as if it were a regular MSDU, even though the A-MSDU flag is set.
+ *
+ * For non-mesh interfaces, detection involves checking whether the
+ * payload, when interpreted as an MSDU, begins with a valid RFC1042
+ * header. This is done by comparing the A-MSDU subheader's destination
+ * address to the start of the RFC1042 header.
+ *
+ * For mesh interfaces, the MSDU includes a 6-byte Mesh Control field
+ * and an optional variable-length Mesh Address Extension field before
+ * the RFC1042 header. The position of the RFC1042 header must therefore
+ * be calculated based on the mesh header length.
+ *
+ * Since this function intentionally parses an A-MSDU frame as an MSDU,
+ * it only assumes that the A-MSDU subframe header is present, and
+ * beyond this it performs its own bounds checks under the assumption
+ * that the frame is instead parsed as a non-aggregated MSDU.
+ */
+static bool
+is_amsdu_aggregation_attack(struct ethhdr *eth, struct sk_buff *skb,
+ enum nl80211_iftype iftype)
+{
+ int offset;
+
+ /* Non-mesh case can be directly compared */
+ if (iftype != NL80211_IFTYPE_MESH_POINT)
+ return ether_addr_equal(eth->h_dest, rfc1042_header);
+
+ offset = __ieee80211_get_mesh_hdrlen(eth->h_dest[0]);
+ if (offset == 6) {
+ /* Mesh case with empty address extension field */
+ return ether_addr_equal(eth->h_source, rfc1042_header);
+ } else if (offset + ETH_ALEN <= skb->len) {
+ /* Mesh case with non-empty address extension field */
+ u8 temp[ETH_ALEN];
+
+ skb_copy_bits(skb, offset, temp, ETH_ALEN);
+ return ether_addr_equal(temp, rfc1042_header);
+ }
+
+ return false;
+}
+
void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
const u8 *addr, enum nl80211_iftype iftype,
const unsigned int extra_headroom,
@@ -774,8 +819,10 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
/* the last MSDU has no padding */
if (subframe_len > remaining)
goto purge;
- /* mitigate A-MSDU aggregation injection attacks */
- if (ether_addr_equal(eth.h_dest, rfc1042_header))
+ /* mitigate A-MSDU aggregation injection attacks, to be
+ * checked when processing first subframe (offset == 0).
+ */
+ if (offset == 0 && is_amsdu_aggregation_attack(&eth, skb, iftype))
goto purge;
offset += sizeof(struct ethhdr);
--
2.43.0
From: Ido Schimmel <idosch(a)nvidia.com>
stable inclusion
from stable-v5.10.248
commit a2dfe6758fc63e542105bee8b17a3a7485684db0
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13354
CVE: CVE-2025-68801
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 8b0e69763ef948fb872a7767df4be665d18f5fd4 ]
We sometimes observe use-after-free when dereferencing a neighbour [1].
The problem seems to be that the driver stores a pointer to the
neighbour, but without holding a reference on it. A reference is only
taken when the neighbour is used by a nexthop.
Fix by simplifying the reference counting scheme. Always take a
reference when storing a neighbour pointer in a neighbour entry. Avoid
taking a reference when the neighbour is used by a nexthop, as the
neighbour entry associated with the nexthop already holds a reference.
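A toy model of the ownership rule the fix adopts (illustrative, not driver code): the neigh_entry that stores the pointer owns exactly one reference for the pointer's lifetime, and nexthops piggyback on that reference instead of taking their own.

#include <assert.h>
#include <stddef.h>

struct neigh { int refcnt; };

static void neigh_hold(struct neigh *n)    { n->refcnt++; }
static void neigh_release(struct neigh *n) { n->refcnt--; }

struct neigh_entry { struct neigh *n; };

/* Storing the pointer always takes the one driver reference ... */
static void entry_store(struct neigh_entry *e, struct neigh *n)
{
	neigh_hold(n);
	e->n = n;
}

/* ... and dropping the pointer releases it; nexthops using e->n
 * never hold a reference of their own. */
static void entry_drop(struct neigh_entry *e)
{
	neigh_release(e->n);
	e->n = NULL;
}

int main(void)
{
	struct neigh n = { 1 }; /* the core's own reference */
	struct neigh_entry e;

	entry_store(&e, &n);
	assert(n.refcnt == 2); /* the stored pointer cannot go stale */
	entry_drop(&e);
	assert(n.refcnt == 1);
	return 0;
}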
Tested by running the test that uncovered the problem over 300 times.
Without this patch the problem was reproduced after a handful of
iterations.
[1]
BUG: KASAN: slab-use-after-free in mlxsw_sp_neigh_entry_update+0x2d4/0x310
Read of size 8 at addr ffff88817f8e3420 by task ip/3929
CPU: 3 UID: 0 PID: 3929 Comm: ip Not tainted 6.18.0-rc4-virtme-g36b21a067510 #3 PREEMPT(full)
Hardware name: Nvidia SN5600/VMOD0013, BIOS 5.13 05/31/2023
Call Trace:
<TASK>
dump_stack_lvl+0x6f/0xa0
print_address_description.constprop.0+0x6e/0x300
print_report+0xfc/0x1fb
kasan_report+0xe4/0x110
mlxsw_sp_neigh_entry_update+0x2d4/0x310
mlxsw_sp_router_rif_gone_sync+0x35f/0x510
mlxsw_sp_rif_destroy+0x1ea/0x730
mlxsw_sp_inetaddr_port_vlan_event+0xa1/0x1b0
__mlxsw_sp_inetaddr_lag_event+0xcc/0x130
__mlxsw_sp_inetaddr_event+0xf5/0x3c0
mlxsw_sp_router_netdevice_event+0x1015/0x1580
notifier_call_chain+0xcc/0x150
call_netdevice_notifiers_info+0x7e/0x100
__netdev_upper_dev_unlink+0x10b/0x210
netdev_upper_dev_unlink+0x79/0xa0
vrf_del_slave+0x18/0x50
do_set_master+0x146/0x7d0
do_setlink.isra.0+0x9a0/0x2880
rtnl_newlink+0x637/0xb20
rtnetlink_rcv_msg+0x6fe/0xb90
netlink_rcv_skb+0x123/0x380
netlink_unicast+0x4a3/0x770
netlink_sendmsg+0x75b/0xc90
__sock_sendmsg+0xbe/0x160
____sys_sendmsg+0x5b2/0x7d0
___sys_sendmsg+0xfd/0x180
__sys_sendmsg+0x124/0x1c0
do_syscall_64+0xbb/0xfd0
entry_SYSCALL_64_after_hwframe+0x4b/0x53
[...]
Allocated by task 109:
kasan_save_stack+0x30/0x50
kasan_save_track+0x14/0x30
__kasan_kmalloc+0x7b/0x90
__kmalloc_noprof+0x2c1/0x790
neigh_alloc+0x6af/0x8f0
___neigh_create+0x63/0xe90
mlxsw_sp_nexthop_neigh_init+0x430/0x7e0
mlxsw_sp_nexthop_type_init+0x212/0x960
mlxsw_sp_nexthop6_group_info_init.constprop.0+0x81f/0x1280
mlxsw_sp_nexthop6_group_get+0x392/0x6a0
mlxsw_sp_fib6_entry_create+0x46a/0xfd0
mlxsw_sp_router_fib6_replace+0x1ed/0x5f0
mlxsw_sp_router_fib6_event_work+0x10a/0x2a0
process_one_work+0xd57/0x1390
worker_thread+0x4d6/0xd40
kthread+0x355/0x5b0
ret_from_fork+0x1d4/0x270
ret_from_fork_asm+0x11/0x20
Freed by task 154:
kasan_save_stack+0x30/0x50
kasan_save_track+0x14/0x30
__kasan_save_free_info+0x3b/0x60
__kasan_slab_free+0x43/0x70
kmem_cache_free_bulk.part.0+0x1eb/0x5e0
kvfree_rcu_bulk+0x1f2/0x260
kfree_rcu_work+0x130/0x1b0
process_one_work+0xd57/0x1390
worker_thread+0x4d6/0xd40
kthread+0x355/0x5b0
ret_from_fork+0x1d4/0x270
ret_from_fork_asm+0x11/0x20
Last potentially related work creation:
kasan_save_stack+0x30/0x50
kasan_record_aux_stack+0x8c/0xa0
kvfree_call_rcu+0x93/0x5b0
mlxsw_sp_router_neigh_event_work+0x67d/0x860
process_one_work+0xd57/0x1390
worker_thread+0x4d6/0xd40
kthread+0x355/0x5b0
ret_from_fork+0x1d4/0x270
ret_from_fork_asm+0x11/0x20
Fixes: 6cf3c971dc84 ("mlxsw: spectrum_router: Add private neigh table")
Signed-off-by: Ido Schimmel <idosch(a)nvidia.com>
Reviewed-by: Petr Machata <petrm(a)nvidia.com>
Signed-off-by: Petr Machata <petrm(a)nvidia.com>
Reviewed-by: Simon Horman <horms(a)kernel.org>
Link: https://patch.msgid.link/92d75e21d95d163a41b5cea67a15cd33f547cba6.176469565…
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Cai Xinchen <caixinchen1(a)huawei.com>
---
.../ethernet/mellanox/mlxsw/spectrum_router.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index d2887ae508bb..e22ee1336d74 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -2032,6 +2032,7 @@ mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
if (!neigh_entry)
return NULL;
+ neigh_hold(n);
neigh_entry->key.n = n;
neigh_entry->rif = rif;
INIT_LIST_HEAD(&neigh_entry->nexthop_list);
@@ -2041,6 +2042,7 @@ mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
+ neigh_release(neigh_entry->key.n);
kfree(neigh_entry);
}
@@ -3607,6 +3609,8 @@ mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_neigh_entry_insert;
+ neigh_release(old_n);
+
read_lock_bh(&n->lock);
nud_state = n->nud_state;
dead = n->dead;
@@ -3615,14 +3619,10 @@ mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
list_for_each_entry(nh, &neigh_entry->nexthop_list,
neigh_list_node) {
- neigh_release(old_n);
- neigh_clone(n);
__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
}
- neigh_release(n);
-
return 0;
err_neigh_entry_insert:
@@ -3711,6 +3711,11 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
}
}
+ /* Release the reference taken by neigh_lookup() / neigh_create() since
+ * neigh_entry already holds one.
+ */
+ neigh_release(n);
+
/* If that is the first nexthop connected to that neigh, add to
* nexthop_neighs_list
*/
@@ -3737,11 +3742,9 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
- struct neighbour *n;
if (!neigh_entry)
return;
- n = neigh_entry->key.n;
__mlxsw_sp_nexthop_neigh_update(nh, true);
list_del(&nh->neigh_list_node);
@@ -3755,8 +3758,6 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
-
- neigh_release(n);
}
static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
--
2.34.1
[PATCH OLK-5.10] mlxsw: spectrum_mr: Fix use-after-free when updating multicast route stats
by Cai Xinchen 27 Jan '26
From: Ido Schimmel <idosch(a)nvidia.com>
stable inclusion
from stable-v5.10.248
commit b957366f5611bbaba03dd10ef861283347ddcc88
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13353
CVE: CVE-2025-68800
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 8ac1dacec458f55f871f7153242ed6ab60373b90 ]
Cited commit added a dedicated mutex (instead of RTNL) to protect the
multicast route list, so that it will not change while the driver
periodically traverses it in order to update the kernel about multicast
route stats that were queried from the device.
One instance of list entry deletion (during route replace) was missed
and it can result in a use-after-free [1].
Fix by acquiring the mutex before deleting the entry from the list and
releasing it afterwards.
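The rule the fix restores, as a small pthreads sketch (illustrative; the driver uses a kernel mutex and list helpers): every deletion from the route list must run under the same lock the stats walker takes.

#include <pthread.h>

struct node { struct node *prev, *next; };

static pthread_mutex_t route_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Deletion must be serialized against the periodic stats walker. */
static void route_list_del(struct node *n)
{
	pthread_mutex_lock(&route_list_lock);
	n->prev->next = n->next;
	n->next->prev = n->prev;
	pthread_mutex_unlock(&route_list_lock);
	/* free n only after it is unreachable from the walker */
}

int main(void)
{
	struct node a, b;

	a.prev = a.next = &b;
	b.prev = b.next = &a;
	route_list_del(&b);
	return 0;
}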
[1]
BUG: KASAN: slab-use-after-free in mlxsw_sp_mr_stats_update+0x4a5/0x540 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c:1006 [mlxsw_spectrum]
Read of size 8 at addr ffff8881523c2fa8 by task kworker/2:5/22043
CPU: 2 UID: 0 PID: 22043 Comm: kworker/2:5 Not tainted 6.18.0-rc1-custom-g1a3d6d7cd014 #1 PREEMPT(full)
Hardware name: Mellanox Technologies Ltd. MSN2010/SA002610, BIOS 5.6.5 08/24/2017
Workqueue: mlxsw_core mlxsw_sp_mr_stats_update [mlxsw_spectrum]
Call Trace:
<TASK>
dump_stack_lvl+0xba/0x110
print_report+0x174/0x4f5
kasan_report+0xdf/0x110
mlxsw_sp_mr_stats_update+0x4a5/0x540 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c:1006 [mlxsw_spectrum]
process_one_work+0x9cc/0x18e0
worker_thread+0x5df/0xe40
kthread+0x3b8/0x730
ret_from_fork+0x3e9/0x560
ret_from_fork_asm+0x1a/0x30
</TASK>
Allocated by task 29933:
kasan_save_stack+0x30/0x50
kasan_save_track+0x14/0x30
__kasan_kmalloc+0x8f/0xa0
mlxsw_sp_mr_route_add+0xd8/0x4770 [mlxsw_spectrum]
mlxsw_sp_router_fibmr_event_work+0x371/0xad0 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c:7965 [mlxsw_spectrum]
process_one_work+0x9cc/0x18e0
worker_thread+0x5df/0xe40
kthread+0x3b8/0x730
ret_from_fork+0x3e9/0x560
ret_from_fork_asm+0x1a/0x30
Freed by task 29933:
kasan_save_stack+0x30/0x50
kasan_save_track+0x14/0x30
__kasan_save_free_info+0x3b/0x70
__kasan_slab_free+0x43/0x70
kfree+0x14e/0x700
mlxsw_sp_mr_route_add+0x2dea/0x4770 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c:444 [mlxsw_spectrum]
mlxsw_sp_router_fibmr_event_work+0x371/0xad0 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c:7965 [mlxsw_spectrum]
process_one_work+0x9cc/0x18e0
worker_thread+0x5df/0xe40
kthread+0x3b8/0x730
ret_from_fork+0x3e9/0x560
ret_from_fork_asm+0x1a/0x30
Fixes: f38656d06725 ("mlxsw: spectrum_mr: Protect multicast route list with a lock")
Signed-off-by: Ido Schimmel <idosch(a)nvidia.com>
Reviewed-by: Petr Machata <petrm(a)nvidia.com>
Signed-off-by: Petr Machata <petrm(a)nvidia.com>
Reviewed-by: Simon Horman <horms(a)kernel.org>
Link: https://patch.msgid.link/f996feecfd59fde297964bfc85040b6d83ec6089.176469565…
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Cai Xinchen <caixinchen1(a)huawei.com>
---
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index ee308d9aedcd..d8a4bbb8e899 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -440,7 +440,9 @@ int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
rhashtable_remove_fast(&mr_table->route_ht,
&mr_orig_route->ht_node,
mlxsw_sp_mr_route_ht_params);
+ mutex_lock(&mr_table->route_list_lock);
list_del(&mr_orig_route->node);
+ mutex_unlock(&mr_table->route_list_lock);
mlxsw_sp_mr_route_destroy(mr_table, mr_orig_route);
}
--
2.34.1
hulk inclusion
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/7679
--------------------------------
For a hugetlb folio, the memcg may be NULL, which triggers the warning. To
avoid the warning in mem_cgroup_migrate(), gate it on !folio_test_hugetlb(old).
mem_cgroup_replace_folio() is only used by fuse or shmem, so the
!folio_test_hugetlb(old) check there is unnecessary; remove it and keep the
code the same as mainline.
Fixes: d151025ac030 ("hugetlb: memcg: account hugetlb-backed memory in memory controller")
Signed-off-by: Chen Ridong <chenridong(a)huawei.com>
---
mm/memcontrol.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 45352cee74b18..51b438146ceef 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -8803,12 +8803,7 @@ void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
return;
memcg = folio_memcg(old);
- /*
- * Note that it is normal to see !memcg for a hugetlb folio.
- * For e.g, itt could have been allocated when memory_hugetlb_accounting
- * was not selected.
- */
- VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
+ VM_WARN_ON_ONCE_FOLIO(!memcg, old);
if (!memcg)
return;
@@ -8852,7 +8847,12 @@ void mem_cgroup_migrate(struct folio *old, struct folio *new)
return;
memcg = folio_memcg(old);
- VM_WARN_ON_ONCE_FOLIO(!memcg, old);
+ /*
+ * Note that it is normal to see !memcg for a hugetlb folio.
+ * For e.g., it could have been allocated when memory_hugetlb_accounting
+ * was not selected.
+ */
+ VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old);
if (!memcg)
return;
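As a toy illustration of the two warning policies after this change (illustrative, not kernel code): the replace path warns on any uncharged folio, while the migrate path exempts hugetlb folios.

#include <stdbool.h>
#include <stdio.h>

struct folio { bool is_hugetlb; void *memcg; };

/* replace path: only fuse/shmem folios reach it, and those are always
 * charged, so a NULL memcg is always suspicious. */
static void warn_replace(const struct folio *old)
{
	if (!old->memcg)
		fprintf(stderr, "WARN: uncharged folio in replace\n");
}

/* migrate path: hugetlb folios may legitimately be uncharged, e.g.
 * allocated while memory_hugetlb_accounting was not selected. */
static void warn_migrate(const struct folio *old)
{
	if (!old->is_hugetlb && !old->memcg)
		fprintf(stderr, "WARN: uncharged folio in migrate\n");
}

int main(void)
{
	struct folio huge = { true, NULL };

	warn_migrate(&huge); /* silent: legal for hugetlb */
	warn_replace(&huge); /* warns: replace never sees hugetlb */
	return 0;
}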
--
2.34.1
Fix CVE-2025-37947
Namjae Jeon (1):
ksmbd: fix stream write failure
Norbert Szetei (1):
ksmbd: prevent out-of-bounds stream writes by validating *pos
fs/ksmbd/vfs.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
--
2.39.2
[PATCH OLK-6.6] s390/pci: Avoid deadlock between PCI error recovery and mlx5 crdump
by Zhang Yuwei 27 Jan '26
From: Gerd Bayer <gbayer(a)linux.ibm.com>
stable inclusion
from stable-v6.6.117
commit b63c061be622b17b495cbf78a6d5f2d4c3147f8e
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/11503
CVE: CVE-2025-68310
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 0fd20f65df6aa430454a0deed8f43efa91c54835 ]
Do not block PCI config accesses through pci_cfg_access_lock() when
executing the s390 variant of PCI error recovery: Acquire just
device_lock() instead of pci_dev_lock() as powerpc's EEH and
generic PCI AER processing do.
During error recovery testing a pair of tasks was reported to be hung:
mlx5_core 0000:00:00.1: mlx5_health_try_recover:338:(pid 5553): health recovery flow aborted, PCI reads still not working
INFO: task kmcheck:72 blocked for more than 122 seconds.
Not tainted 5.14.0-570.12.1.bringup7.el9.s390x #1
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:kmcheck state:D stack:0 pid:72 tgid:72 ppid:2 flags:0x00000000
Call Trace:
[<000000065256f030>] __schedule+0x2a0/0x590
[<000000065256f356>] schedule+0x36/0xe0
[<000000065256f572>] schedule_preempt_disabled+0x22/0x30
[<0000000652570a94>] __mutex_lock.constprop.0+0x484/0x8a8
[<000003ff800673a4>] mlx5_unload_one+0x34/0x58 [mlx5_core]
[<000003ff8006745c>] mlx5_pci_err_detected+0x94/0x140 [mlx5_core]
[<0000000652556c5a>] zpci_event_attempt_error_recovery+0xf2/0x398
[<0000000651b9184a>] __zpci_event_error+0x23a/0x2c0
INFO: task kworker/u1664:6:1514 blocked for more than 122 seconds.
Not tainted 5.14.0-570.12.1.bringup7.el9.s390x #1
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:kworker/u1664:6 state:D stack:0 pid:1514 tgid:1514 ppid:2 flags:0x00000000
Workqueue: mlx5_health0000:00:00.0 mlx5_fw_fatal_reporter_err_work [mlx5_core]
Call Trace:
[<000000065256f030>] __schedule+0x2a0/0x590
[<000000065256f356>] schedule+0x36/0xe0
[<0000000652172e28>] pci_wait_cfg+0x80/0xe8
[<0000000652172f94>] pci_cfg_access_lock+0x74/0x88
[<000003ff800916b6>] mlx5_vsc_gw_lock+0x36/0x178 [mlx5_core]
[<000003ff80098824>] mlx5_crdump_collect+0x34/0x1c8 [mlx5_core]
[<000003ff80074b62>] mlx5_fw_fatal_reporter_dump+0x6a/0xe8 [mlx5_core]
[<0000000652512242>] devlink_health_do_dump.part.0+0x82/0x168
[<0000000652513212>] devlink_health_report+0x19a/0x230
[<000003ff80075a12>] mlx5_fw_fatal_reporter_err_work+0xba/0x1b0 [mlx5_core]
No kernel log of the exact same error with an upstream kernel is
available - but the very same deadlock situation can be constructed there,
too (a runnable sketch of the ABBA ordering follows the task list):
- task: kmcheck
mlx5_unload_one() tries to acquire devlink lock while the PCI error
recovery code has set pdev->block_cfg_access by way of
pci_cfg_access_lock()
- task: kworker
mlx5_crdump_collect() tries to set block_cfg_access through
pci_cfg_access_lock() while devlink_health_report() had acquired
the devlink lock.
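Here is that sketch, with stand-in pthread mutexes for the devlink lock and pci_cfg_access_lock() (illustrative only; the real config-access lock is a flag plus wait queue):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t devlink_lock    = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t cfg_access_lock = PTHREAD_MUTEX_INITIALIZER;

/* Recovery path (kmcheck): cfg access first, then devlink. */
static void *recovery(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&cfg_access_lock); /* pci_cfg_access_lock()  */
	pthread_mutex_lock(&devlink_lock);    /* mlx5_unload_one()      */
	pthread_mutex_unlock(&devlink_lock);
	pthread_mutex_unlock(&cfg_access_lock);
	return NULL;
}

/* crdump path (kworker): devlink first, then cfg access. */
static void *crdump(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&devlink_lock);    /* devlink_health_report() */
	pthread_mutex_lock(&cfg_access_lock); /* mlx5_vsc_gw_lock()      */
	pthread_mutex_unlock(&cfg_access_lock);
	pthread_mutex_unlock(&devlink_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	/* Running both concurrently can hang: the classic ABBA shape the
	 * patch removes by not blocking config access during recovery. */
	pthread_create(&a, NULL, recovery, NULL);
	pthread_create(&b, NULL, crdump, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("no deadlock this time");
	return 0;
}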
A similar deadlock situation can be reproduced by requesting a
crdump with
> devlink health dump show pci/<BDF> reporter fw_fatal
while PCI error recovery is executed on the same <BDF> physical function
by mlx5_core's pci_error_handlers. On s390 this can be injected with
> zpcictl --reset-fw <BDF>
Tests with this patch failed to reproduce that second deadlock situation,
the devlink command is rejected with "kernel answers: Permission denied" -
and we get a kernel log message of:
mlx5_core 1ed0:00:00.1: mlx5_crdump_collect:50:(pid 254382): crdump: failed to lock vsc gw err -5
because the config read of VSC_SEMAPHORE is rejected by the underlying
hardware.
Two prior attempts to address this issue have been discussed and
ultimately rejected [see link], with the primary argument that s390's
implementation of PCI error recovery is imposing restrictions that
neither powerpc's EEH nor PCI AER handling need. Tests show that PCI
error recovery on s390 is running to completion even without blocking
access to PCI config space.
Link: https://lore.kernel.org/all/20251007144826.2825134-1-gbayer@linux.ibm.com/
Cc: stable(a)vger.kernel.org
Fixes: 4cdf2f4e24ff ("s390/pci: implement minimal PCI error recovery")
Reviewed-by: Niklas Schnelle <schnelle(a)linux.ibm.com>
Signed-off-by: Gerd Bayer <gbayer(a)linux.ibm.com>
Signed-off-by: Heiko Carstens <hca(a)linux.ibm.com>
[ Adjust context ]
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Wang Hai <wanghai38(a)huawei.com>
---
arch/s390/pci/pci_event.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 3291241806bb..4cb77079ef2d 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -176,7 +176,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
* is unbound or probed and that userspace can't access its
* configuration space while we perform recovery.
*/
- pci_dev_lock(pdev);
+ device_lock(&pdev->dev);
if (pdev->error_state == pci_channel_io_perm_failure) {
ers_res = PCI_ERS_RESULT_DISCONNECT;
goto out_unlock;
@@ -224,7 +224,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
if (driver->err_handler->resume)
driver->err_handler->resume(pdev);
out_unlock:
- pci_dev_unlock(pdev);
+ device_unlock(&pdev->dev);
return ers_res;
}
--
2.22.0
[PATCH OLK-5.10] s390/pci: Avoid deadlock between PCI error recovery and mlx5 crdump
by Zhang Yuwei 27 Jan '26
From: Gerd Bayer <gbayer(a)linux.ibm.com>
stable inclusion
from stable-v6.6.117
commit b63c061be622b17b495cbf78a6d5f2d4c3147f8e
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/8287
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 0fd20f65df6aa430454a0deed8f43efa91c54835 ]
Do not block PCI config accesses through pci_cfg_access_lock() when
executing the s390 variant of PCI error recovery: Acquire just
device_lock() instead of pci_dev_lock() as powerpc's EEH and
generic PCI AER processing do.
During error recovery testing a pair of tasks was reported to be hung:
mlx5_core 0000:00:00.1: mlx5_health_try_recover:338:(pid 5553): health recovery flow aborted, PCI reads still not working
INFO: task kmcheck:72 blocked for more than 122 seconds.
Not tainted 5.14.0-570.12.1.bringup7.el9.s390x #1
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:kmcheck state:D stack:0 pid:72 tgid:72 ppid:2 flags:0x00000000
Call Trace:
[<000000065256f030>] __schedule+0x2a0/0x590
[<000000065256f356>] schedule+0x36/0xe0
[<000000065256f572>] schedule_preempt_disabled+0x22/0x30
[<0000000652570a94>] __mutex_lock.constprop.0+0x484/0x8a8
[<000003ff800673a4>] mlx5_unload_one+0x34/0x58 [mlx5_core]
[<000003ff8006745c>] mlx5_pci_err_detected+0x94/0x140 [mlx5_core]
[<0000000652556c5a>] zpci_event_attempt_error_recovery+0xf2/0x398
[<0000000651b9184a>] __zpci_event_error+0x23a/0x2c0
INFO: task kworker/u1664:6:1514 blocked for more than 122 seconds.
Not tainted 5.14.0-570.12.1.bringup7.el9.s390x #1
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:kworker/u1664:6 state:D stack:0 pid:1514 tgid:1514 ppid:2 flags:0x00000000
Workqueue: mlx5_health0000:00:00.0 mlx5_fw_fatal_reporter_err_work [mlx5_core]
Call Trace:
[<000000065256f030>] __schedule+0x2a0/0x590
[<000000065256f356>] schedule+0x36/0xe0
[<0000000652172e28>] pci_wait_cfg+0x80/0xe8
[<0000000652172f94>] pci_cfg_access_lock+0x74/0x88
[<000003ff800916b6>] mlx5_vsc_gw_lock+0x36/0x178 [mlx5_core]
[<000003ff80098824>] mlx5_crdump_collect+0x34/0x1c8 [mlx5_core]
[<000003ff80074b62>] mlx5_fw_fatal_reporter_dump+0x6a/0xe8 [mlx5_core]
[<0000000652512242>] devlink_health_do_dump.part.0+0x82/0x168
[<0000000652513212>] devlink_health_report+0x19a/0x230
[<000003ff80075a12>] mlx5_fw_fatal_reporter_err_work+0xba/0x1b0 [mlx5_core]
No kernel log of the exact same error with an upstream kernel is
available - but the very same deadlock situation can be constructed there,
too:
- task: kmcheck
mlx5_unload_one() tries to acquire devlink lock while the PCI error
recovery code has set pdev->block_cfg_access by way of
pci_cfg_access_lock()
- task: kworker
mlx5_crdump_collect() tries to set block_cfg_access through
pci_cfg_access_lock() while devlink_health_report() had acquired
the devlink lock.
A similar deadlock situation can be reproduced by requesting a
crdump with
> devlink health dump show pci/<BDF> reporter fw_fatal
while PCI error recovery is executed on the same <BDF> physical function
by mlx5_core's pci_error_handlers. On s390 this can be injected with
> zpcictl --reset-fw <BDF>
Tests with this patch failed to reproduce that second deadlock situation,
the devlink command is rejected with "kernel answers: Permission denied" -
and we get a kernel log message of:
mlx5_core 1ed0:00:00.1: mlx5_crdump_collect:50:(pid 254382): crdump: failed to lock vsc gw err -5
because the config read of VSC_SEMAPHORE is rejected by the underlying
hardware.
Two prior attempts to address this issue have been discussed and
ultimately rejected [see link], with the primary argument that s390's
implementation of PCI error recovery is imposing restrictions that
neither powerpc's EEH nor PCI AER handling need. Tests show that PCI
error recovery on s390 is running to completion even without blocking
access to PCI config space.
Link: https://lore.kernel.org/all/20251007144826.2825134-1-gbayer@linux.ibm.com/
Cc: stable(a)vger.kernel.org
Fixes: 4cdf2f4e24ff ("s390/pci: implement minimal PCI error recovery")
Reviewed-by: Niklas Schnelle <schnelle(a)linux.ibm.com>
Signed-off-by: Gerd Bayer <gbayer(a)linux.ibm.com>
Signed-off-by: Heiko Carstens <hca(a)linux.ibm.com>
[ Adjust context ]
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Wang Hai <wanghai38(a)huawei.com>
---
arch/s390/pci/pci_event.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 3291241806bb..4cb77079ef2d 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -176,7 +176,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
* is unbound or probed and that userspace can't access its
* configuration space while we perform recovery.
*/
- pci_dev_lock(pdev);
+ device_lock(&pdev->dev);
if (pdev->error_state == pci_channel_io_perm_failure) {
ers_res = PCI_ERS_RESULT_DISCONNECT;
goto out_unlock;
@@ -224,7 +224,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
if (driver->err_handler->resume)
driver->err_handler->resume(pdev);
out_unlock:
- pci_dev_unlock(pdev);
+ device_unlock(&pdev->dev);
return ers_res;
}
--
2.22.0
[PATCH OLK-6.6] KVM: SVM: Reject SEV{-ES} intra host migration if vCPU creation is in-flight
by Lin Ruifeng 27 Jan '26
From: Sean Christopherson <seanjc(a)google.com>
stable inclusion
from stable-v6.6.99
commit 8c8e8d4d7544bb783e15078eda8ba2580e192246
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/9350
CVE: CVE-2025-38455
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit ecf371f8b02d5e31b9aa1da7f159f1b2107bdb01 upstream.
Reject migration of SEV{-ES} state if either the source or destination VM
is actively creating a vCPU, i.e. if kvm_vm_ioctl_create_vcpu() is in the
section between incrementing created_vcpus and online_vcpus. The bulk of
vCPU creation runs _outside_ of kvm->lock to allow creating multiple vCPUs
in parallel, and so sev_info.es_active can get toggled from false=>true in
the destination VM after (or during) svm_vcpu_create(), resulting in an
SEV{-ES} VM effectively having a non-SEV{-ES} vCPU.
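For orientation, here is a condensed sketch of the window the new check closes; this is a hedged paraphrase of kvm_vm_ioctl_create_vcpu(), not its exact code:
```c
mutex_lock(&kvm->lock);
kvm->created_vcpus++;            /* window opens */
mutex_unlock(&kvm->lock);

/* the bulk of vCPU creation runs here, without kvm->lock held */

atomic_inc(&kvm->online_vcpus);  /* window closes */
```
While created_vcpus and online_vcpus disagree, migration can race with a half-created vCPU, which is why the fix below rejects migration whenever the two counters differ.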
The issue manifests most visibly as a crash when trying to free a vCPU's
NULL VMSA page in an SEV-ES VM, but any number of things can go wrong.
BUG: unable to handle page fault for address: ffffebde00000000
#PF: supervisor read access in kernel mode
#PF: error_code(0x0000) - not-present page
PGD 0 P4D 0
Oops: Oops: 0000 [#1] SMP KASAN NOPTI
CPU: 227 UID: 0 PID: 64063 Comm: syz.5.60023 Tainted: G U O 6.15.0-smp-DEV #2 NONE
Tainted: [U]=USER, [O]=OOT_MODULE
Hardware name: Google, Inc. Arcadia_IT_80/Arcadia_IT_80, BIOS 12.52.0-0 10/28/2024
RIP: 0010:constant_test_bit arch/x86/include/asm/bitops.h:206 [inline]
RIP: 0010:arch_test_bit arch/x86/include/asm/bitops.h:238 [inline]
RIP: 0010:_test_bit include/asm-generic/bitops/instrumented-non-atomic.h:142 [inline]
RIP: 0010:PageHead include/linux/page-flags.h:866 [inline]
RIP: 0010:___free_pages+0x3e/0x120 mm/page_alloc.c:5067
Code: <49> f7 06 40 00 00 00 75 05 45 31 ff eb 0c 66 90 4c 89 f0 4c 39 f0
RSP: 0018:ffff8984551978d0 EFLAGS: 00010246
RAX: 0000777f80000001 RBX: 0000000000000000 RCX: ffffffff918aeb98
RDX: 0000000000000000 RSI: 0000000000000008 RDI: ffffebde00000000
RBP: 0000000000000000 R08: ffffebde00000007 R09: 1ffffd7bc0000000
R10: dffffc0000000000 R11: fffff97bc0000001 R12: dffffc0000000000
R13: ffff8983e19751a8 R14: ffffebde00000000 R15: 1ffffd7bc0000000
FS: 0000000000000000(0000) GS:ffff89ee661d3000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: ffffebde00000000 CR3: 000000793ceaa000 CR4: 0000000000350ef0
DR0: 0000000000000000 DR1: 0000000000000b5f DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
Call Trace:
<TASK>
sev_free_vcpu+0x413/0x630 arch/x86/kvm/svm/sev.c:3169
svm_vcpu_free+0x13a/0x2a0 arch/x86/kvm/svm/svm.c:1515
kvm_arch_vcpu_destroy+0x6a/0x1d0 arch/x86/kvm/x86.c:12396
kvm_vcpu_destroy virt/kvm/kvm_main.c:470 [inline]
kvm_destroy_vcpus+0xd1/0x300 virt/kvm/kvm_main.c:490
kvm_arch_destroy_vm+0x636/0x820 arch/x86/kvm/x86.c:12895
kvm_put_kvm+0xb8e/0xfb0 virt/kvm/kvm_main.c:1310
kvm_vm_release+0x48/0x60 virt/kvm/kvm_main.c:1369
__fput+0x3e4/0x9e0 fs/file_table.c:465
task_work_run+0x1a9/0x220 kernel/task_work.c:227
exit_task_work include/linux/task_work.h:40 [inline]
do_exit+0x7f0/0x25b0 kernel/exit.c:953
do_group_exit+0x203/0x2d0 kernel/exit.c:1102
get_signal+0x1357/0x1480 kernel/signal.c:3034
arch_do_signal_or_restart+0x40/0x690 arch/x86/kernel/signal.c:337
exit_to_user_mode_loop kernel/entry/common.c:111 [inline]
exit_to_user_mode_prepare include/linux/entry-common.h:329 [inline]
__syscall_exit_to_user_mode_work kernel/entry/common.c:207 [inline]
syscall_exit_to_user_mode+0x67/0xb0 kernel/entry/common.c:218
do_syscall_64+0x7c/0x150 arch/x86/entry/syscall_64.c:100
entry_SYSCALL_64_after_hwframe+0x76/0x7e
RIP: 0033:0x7f87a898e969
</TASK>
Modules linked in: gq(O)
gsmi: Log Shutdown Reason 0x03
CR2: ffffebde00000000
---[ end trace 0000000000000000 ]---
Deliberately don't check for a NULL VMSA when freeing the vCPU, as crashing
the host is likely desirable due to the VMSA being consumed by hardware.
E.g. if KVM manages to allow VMRUN on the vCPU, hardware may read/write a
bogus VMSA page. Accessing PFN 0 is "fine"-ish now that it's sequestered
away thanks to L1TF, but panicking in this scenario is preferable to
potentially running with corrupted state.
Reported-by: Alexander Potapenko <glider(a)google.com>
Tested-by: Alexander Potapenko <glider(a)google.com>
Fixes: 0b020f5af092 ("KVM: SEV: Add support for SEV-ES intra host migration")
Fixes: b56639318bb2 ("KVM: SEV: Add support for SEV intra host migration")
Cc: stable(a)vger.kernel.org
Cc: James Houghton <jthoughton(a)google.com>
Cc: Peter Gonda <pgonda(a)google.com>
Reviewed-by: Liam Merwick <liam.merwick(a)oracle.com>
Tested-by: Liam Merwick <liam.merwick(a)oracle.com>
Reviewed-by: James Houghton <jthoughton(a)google.com>
Link: https://lore.kernel.org/r/20250602224459.41505-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc(a)google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Lin Ruifeng <linruifeng4(a)huawei.com>
---
arch/x86/kvm/svm/sev.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 63611572ae10..9da6ce316dc4 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1895,6 +1895,10 @@ static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
struct kvm_vcpu *src_vcpu;
unsigned long i;
+ if (src->created_vcpus != atomic_read(&src->online_vcpus) ||
+ dst->created_vcpus != atomic_read(&dst->online_vcpus))
+ return -EBUSY;
+
if (!sev_es_guest(src))
return 0;
--
2.43.0
[PATCH OLK-6.6] atm: atmtcp: Prevent arbitrary write in atmtcp_recv_control().
by Lin Ruifeng 27 Jan '26
From: Kuniyuki Iwashima <kuniyu(a)google.com>
stable inclusion
from stable-v6.6.104
commit 3c80c230d6e3e6f63d43f4c3f0bb344e3e8b119b
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/12387
CVE: CVE-2025-39828
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit ec79003c5f9d2c7f9576fc69b8dbda80305cbe3a ]
syzbot reported the splat below. [0]
When atmtcp_v_open() or atmtcp_v_close() is called via connect()
or close(), atmtcp_send_control() is called to send an in-kernel
special message.
The message has ATMTCP_HDR_MAGIC in atmtcp_control.hdr.length.
Also, a pointer of struct atm_vcc is set to atmtcp_control.vcc.
The notable thing is struct atmtcp_control is uAPI but has a
space for an in-kernel pointer.
struct atmtcp_control {
struct atmtcp_hdr hdr; /* must be first */
...
atm_kptr_t vcc; /* both directions */
...
} __ATM_API_ALIGN;
typedef struct { unsigned char _[8]; } __ATM_API_ALIGN atm_kptr_t;
The special message is processed in atmtcp_recv_control() called
from atmtcp_c_send().
atmtcp_c_send() is vcc->dev->ops->send() and called from 2 paths:
1. .ndo_start_xmit() (vcc->send() == atm_send_aal0())
2. vcc_sendmsg()
The problem is that sendmsg() does not validate the message length, so
userspace can abuse atmtcp_recv_control() to overwrite an arbitrary
kernel pointer via atmtcp_control.
Let's add a new ->pre_send() hook to validate messages from sendmsg().
[0]:
Oops: general protection fault, probably for non-canonical address 0xdffffc00200000ab: 0000 [#1] SMP KASAN PTI
KASAN: probably user-memory-access in range [0x0000000100000558-0x000000010000055f]
CPU: 0 UID: 0 PID: 5865 Comm: syz-executor331 Not tainted 6.17.0-rc1-syzkaller-00215-gbab3ce404553 #0 PREEMPT(full)
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2025
RIP: 0010:atmtcp_recv_control drivers/atm/atmtcp.c:93 [inline]
RIP: 0010:atmtcp_c_send+0x1da/0x950 drivers/atm/atmtcp.c:297
Code: 4d 8d 75 1a 4c 89 f0 48 c1 e8 03 42 0f b6 04 20 84 c0 0f 85 15 06 00 00 41 0f b7 1e 4d 8d b7 60 05 00 00 4c 89 f0 48 c1 e8 03 <42> 0f b6 04 20 84 c0 0f 85 13 06 00 00 66 41 89 1e 4d 8d 75 1c 4c
RSP: 0018:ffffc90003f5f810 EFLAGS: 00010203
RAX: 00000000200000ab RBX: 0000000000000000 RCX: 0000000000000000
RDX: ffff88802a510000 RSI: 00000000ffffffff RDI: ffff888030a6068c
RBP: ffff88802699fb40 R08: ffff888030a606eb R09: 1ffff1100614c0dd
R10: dffffc0000000000 R11: ffffffff8718fc40 R12: dffffc0000000000
R13: ffff888030a60680 R14: 000000010000055f R15: 00000000ffffffff
FS: 00007f8d7e9236c0(0000) GS:ffff888125c1c000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 000000000045ad50 CR3: 0000000075bde000 CR4: 00000000003526f0
Call Trace:
<TASK>
vcc_sendmsg+0xa10/0xc60 net/atm/common.c:645
sock_sendmsg_nosec net/socket.c:714 [inline]
__sock_sendmsg+0x219/0x270 net/socket.c:729
____sys_sendmsg+0x505/0x830 net/socket.c:2614
___sys_sendmsg+0x21f/0x2a0 net/socket.c:2668
__sys_sendmsg net/socket.c:2700 [inline]
__do_sys_sendmsg net/socket.c:2705 [inline]
__se_sys_sendmsg net/socket.c:2703 [inline]
__x64_sys_sendmsg+0x19b/0x260 net/socket.c:2703
do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
do_syscall_64+0xfa/0x3b0 arch/x86/entry/syscall_64.c:94
entry_SYSCALL_64_after_hwframe+0x77/0x7f
RIP: 0033:0x7f8d7e96a4a9
Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 51 18 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f8d7e923198 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
RAX: ffffffffffffffda RBX: 00007f8d7e9f4308 RCX: 00007f8d7e96a4a9
RDX: 0000000000000000 RSI: 0000200000000240 RDI: 0000000000000005
RBP: 00007f8d7e9f4300 R08: 65732f636f72702f R09: 65732f636f72702f
R10: 65732f636f72702f R11: 0000000000000246 R12: 00007f8d7e9c10ac
R13: 00007f8d7e9231a0 R14: 0000200000000200 R15: 0000200000000250
</TASK>
Modules linked in:
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Reported-by: syzbot+1741b56d54536f4ec349(a)syzkaller.appspotmail.com
Closes: https://lore.kernel.org/netdev/68a6767c.050a0220.3d78fd.0011.GAE@google.com/
Tested-by: syzbot+1741b56d54536f4ec349(a)syzkaller.appspotmail.com
Signed-off-by: Kuniyuki Iwashima <kuniyu(a)google.com>
Link: https://patch.msgid.link/20250821021901.2814721-1-kuniyu@google.com
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Lin Ruifeng <linruifeng4(a)huawei.com>
---
drivers/atm/atmtcp.c | 17 ++++++++++++++---
include/linux/atmdev.h | 1 +
net/atm/common.c | 15 ++++++++++++---
3 files changed, 27 insertions(+), 6 deletions(-)
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index ff558908897f..9c83fb29b2f1 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -279,6 +279,19 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
return NULL;
}
+static int atmtcp_c_pre_send(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ struct atmtcp_hdr *hdr;
+
+ if (skb->len < sizeof(struct atmtcp_hdr))
+ return -EINVAL;
+
+ hdr = (struct atmtcp_hdr *)skb->data;
+ if (hdr->length == ATMTCP_HDR_MAGIC)
+ return -EINVAL;
+
+ return 0;
+}
static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
{
@@ -288,9 +301,6 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
struct sk_buff *new_skb;
int result = 0;
- if (skb->len < sizeof(struct atmtcp_hdr))
- goto done;
-
dev = vcc->dev_data;
hdr = (struct atmtcp_hdr *) skb->data;
if (hdr->length == ATMTCP_HDR_MAGIC) {
@@ -347,6 +357,7 @@ static const struct atmdev_ops atmtcp_v_dev_ops = {
static const struct atmdev_ops atmtcp_c_dev_ops = {
.close = atmtcp_c_close,
+ .pre_send = atmtcp_c_pre_send,
.send = atmtcp_c_send
};
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 45f2f278b50a..70807c679f1a 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -185,6 +185,7 @@ struct atmdev_ops { /* only send is required */
int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd,
void __user *arg);
#endif
+ int (*pre_send)(struct atm_vcc *vcc, struct sk_buff *skb);
int (*send)(struct atm_vcc *vcc,struct sk_buff *skb);
int (*send_bh)(struct atm_vcc *vcc, struct sk_buff *skb);
int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags);
diff --git a/net/atm/common.c b/net/atm/common.c
index 9cc82acbc735..48bb3f66a3f2 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -635,18 +635,27 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
skb->dev = NULL; /* for paths shared with net_device interfaces */
if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
- atm_return_tx(vcc, skb);
- kfree_skb(skb);
error = -EFAULT;
- goto out;
+ goto free_skb;
}
if (eff != size)
memset(skb->data + size, 0, eff-size);
+
+ if (vcc->dev->ops->pre_send) {
+ error = vcc->dev->ops->pre_send(vcc, skb);
+ if (error)
+ goto free_skb;
+ }
+
error = vcc->dev->ops->send(vcc, skb);
error = error ? error : size;
out:
release_sock(sk);
return error;
+free_skb:
+ atm_return_tx(vcc, skb);
+ kfree_skb(skb);
+ goto out;
}
__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
--
2.43.0
27 Jan '26
From: Ariel D'Alessandro <ariel.dalessandro(a)collabora.com>
stable inclusion
from stable-v6.6.117
commit df1ad5de2197ea1b527d13ae7b699e9ee7d724d4
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/11625
CVE: CVE-2025-68184
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 9882a40640036d5bbc590426a78981526d4f2345 ]
Commit c410fa9b07c3 ("drm/mediatek: Add AFBC support to Mediatek DRM
driver") added AFBC support to Mediatek DRM and enabled the
32x8/split/sparse modifier.
However, this is currently broken on Mediatek MT8188 (Genio 700 EVK
platform); tested using an upstream kernel and Mesa (v25.2.1), where AFBC
is used by default since Mesa v25.0.
The kernel trace constantly reports vblank timeouts, and the rendered
output is garbled:
```
[CRTC:62:crtc-0] vblank wait timed out
WARNING: CPU: 7 PID: 70 at drivers/gpu/drm/drm_atomic_helper.c:1835 drm_atomic_helper_wait_for_vblanks.part.0+0x24c/0x27c
[...]
Hardware name: MediaTek Genio-700 EVK (DT)
Workqueue: events_unbound commit_work
pstate: 60400009 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
pc : drm_atomic_helper_wait_for_vblanks.part.0+0x24c/0x27c
lr : drm_atomic_helper_wait_for_vblanks.part.0+0x24c/0x27c
sp : ffff80008337bca0
x29: ffff80008337bcd0 x28: 0000000000000061 x27: 0000000000000000
x26: 0000000000000001 x25: 0000000000000000 x24: ffff0000c9dcc000
x23: 0000000000000001 x22: 0000000000000000 x21: ffff0000c66f2f80
x20: ffff0000c0d7d880 x19: 0000000000000000 x18: 000000000000000a
x17: 000000040044ffff x16: 005000f2b5503510 x15: 0000000000000000
x14: 0000000000000000 x13: 74756f2064656d69 x12: 742074696177206b
x11: 0000000000000058 x10: 0000000000000018 x9 : ffff800082396a70
x8 : 0000000000057fa8 x7 : 0000000000000cce x6 : ffff8000823eea70
x5 : ffff0001fef5f408 x4 : ffff80017ccee000 x3 : ffff0000c12cb480
x2 : 0000000000000000 x1 : 0000000000000000 x0 : ffff0000c12cb480
Call trace:
drm_atomic_helper_wait_for_vblanks.part.0+0x24c/0x27c (P)
drm_atomic_helper_commit_tail_rpm+0x64/0x80
commit_tail+0xa4/0x1a4
commit_work+0x14/0x20
process_one_work+0x150/0x290
worker_thread+0x2d0/0x3ec
kthread+0x12c/0x210
ret_from_fork+0x10/0x20
---[ end trace 0000000000000000 ]---
```
Until this gets fixed upstream, disable AFBC support on this platform, as
it's currently broken with upstream Mesa.
Fixes: c410fa9b07c3 ("drm/mediatek: Add AFBC support to Mediatek DRM driver")
Cc: stable(a)vger.kernel.org
Signed-off-by: Ariel D'Alessandro <ariel.dalessandro(a)collabora.com>
Reviewed-by: Daniel Stone <daniels(a)collabora.com>
Reviewed-by: CK Hu <ck.hu(a)mediatek.com>
Reviewed-by: Macpaul Lin <macpaul.lin(a)mediatek.com>
Link: https://patchwork.kernel.org/project/dri-devel/patch/20251024202756.811425-…
Signed-off-by: Chun-Kuang Hu <chunkuang.hu(a)kernel.org>
[ Applied to mtk_drm_plane.c instead of mtk_plane.c ]
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Wang Hai <wanghai38(a)huawei.com>
---
drivers/gpu/drm/mediatek/mtk_drm_plane.c | 24 +-----------------------
1 file changed, 1 insertion(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index f10d4cc6c223..32038cff2730 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -21,9 +21,6 @@
static const u64 modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
- AFBC_FORMAT_MOD_SPLIT |
- AFBC_FORMAT_MOD_SPARSE),
DRM_FORMAT_MOD_INVALID,
};
@@ -71,26 +68,7 @@ static bool mtk_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
{
- if (modifier == DRM_FORMAT_MOD_LINEAR)
- return true;
-
- if (modifier != DRM_FORMAT_MOD_ARM_AFBC(
- AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
- AFBC_FORMAT_MOD_SPLIT |
- AFBC_FORMAT_MOD_SPARSE))
- return false;
-
- if (format != DRM_FORMAT_XRGB8888 &&
- format != DRM_FORMAT_ARGB8888 &&
- format != DRM_FORMAT_BGRX8888 &&
- format != DRM_FORMAT_BGRA8888 &&
- format != DRM_FORMAT_ABGR8888 &&
- format != DRM_FORMAT_XBGR8888 &&
- format != DRM_FORMAT_RGB888 &&
- format != DRM_FORMAT_BGR888)
- return false;
-
- return true;
+ return modifier == DRM_FORMAT_MOD_LINEAR;
}
static void mtk_drm_plane_destroy_state(struct drm_plane *plane,
--
2.22.0
27 Jan '26
From: Ariel D'Alessandro <ariel.dalessandro(a)collabora.com>
stable inclusion
from stable-v6.6.117
commit df1ad5de2197ea1b527d13ae7b699e9ee7d724d4
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/8287
CVE: CVE-2025-68184
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 9882a40640036d5bbc590426a78981526d4f2345 ]
Commit c410fa9b07c3 ("drm/mediatek: Add AFBC support to Mediatek DRM
driver") added AFBC support to Mediatek DRM and enabled the
32x8/split/sparse modifier.
However, this is currently broken on Mediatek MT8188 (Genio 700 EVK
platform); tested using an upstream kernel and Mesa (v25.2.1), where AFBC
is used by default since Mesa v25.0.
The kernel trace constantly reports vblank timeouts, and the rendered
output is garbled:
```
[CRTC:62:crtc-0] vblank wait timed out
WARNING: CPU: 7 PID: 70 at drivers/gpu/drm/drm_atomic_helper.c:1835 drm_atomic_helper_wait_for_vblanks.part.0+0x24c/0x27c
[...]
Hardware name: MediaTek Genio-700 EVK (DT)
Workqueue: events_unbound commit_work
pstate: 60400009 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
pc : drm_atomic_helper_wait_for_vblanks.part.0+0x24c/0x27c
lr : drm_atomic_helper_wait_for_vblanks.part.0+0x24c/0x27c
sp : ffff80008337bca0
x29: ffff80008337bcd0 x28: 0000000000000061 x27: 0000000000000000
x26: 0000000000000001 x25: 0000000000000000 x24: ffff0000c9dcc000
x23: 0000000000000001 x22: 0000000000000000 x21: ffff0000c66f2f80
x20: ffff0000c0d7d880 x19: 0000000000000000 x18: 000000000000000a
x17: 000000040044ffff x16: 005000f2b5503510 x15: 0000000000000000
x14: 0000000000000000 x13: 74756f2064656d69 x12: 742074696177206b
x11: 0000000000000058 x10: 0000000000000018 x9 : ffff800082396a70
x8 : 0000000000057fa8 x7 : 0000000000000cce x6 : ffff8000823eea70
x5 : ffff0001fef5f408 x4 : ffff80017ccee000 x3 : ffff0000c12cb480
x2 : 0000000000000000 x1 : 0000000000000000 x0 : ffff0000c12cb480
Call trace:
drm_atomic_helper_wait_for_vblanks.part.0+0x24c/0x27c (P)
drm_atomic_helper_commit_tail_rpm+0x64/0x80
commit_tail+0xa4/0x1a4
commit_work+0x14/0x20
process_one_work+0x150/0x290
worker_thread+0x2d0/0x3ec
kthread+0x12c/0x210
ret_from_fork+0x10/0x20
---[ end trace 0000000000000000 ]---
```
Until this gets fixed upstream, disable AFBC support on this platform, as
it's currently broken with upstream Mesa.
Fixes: c410fa9b07c3 ("drm/mediatek: Add AFBC support to Mediatek DRM driver")
Cc: stable(a)vger.kernel.org
Signed-off-by: Ariel D'Alessandro <ariel.dalessandro(a)collabora.com>
Reviewed-by: Daniel Stone <daniels(a)collabora.com>
Reviewed-by: CK Hu <ck.hu(a)mediatek.com>
Reviewed-by: Macpaul Lin <macpaul.lin(a)mediatek.com>
Link: https://patchwork.kernel.org/project/dri-devel/patch/20251024202756.811425-…
Signed-off-by: Chun-Kuang Hu <chunkuang.hu(a)kernel.org>
[ Applied to mtk_drm_plane.c instead of mtk_plane.c ]
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Wang Hai <wanghai38(a)huawei.com>
---
drivers/gpu/drm/mediatek/mtk_drm_plane.c | 24 +-----------------------
1 file changed, 1 insertion(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index f10d4cc6c223..32038cff2730 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -21,9 +21,6 @@
static const u64 modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
- AFBC_FORMAT_MOD_SPLIT |
- AFBC_FORMAT_MOD_SPARSE),
DRM_FORMAT_MOD_INVALID,
};
@@ -71,26 +68,7 @@ static bool mtk_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
{
- if (modifier == DRM_FORMAT_MOD_LINEAR)
- return true;
-
- if (modifier != DRM_FORMAT_MOD_ARM_AFBC(
- AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
- AFBC_FORMAT_MOD_SPLIT |
- AFBC_FORMAT_MOD_SPARSE))
- return false;
-
- if (format != DRM_FORMAT_XRGB8888 &&
- format != DRM_FORMAT_ARGB8888 &&
- format != DRM_FORMAT_BGRX8888 &&
- format != DRM_FORMAT_BGRA8888 &&
- format != DRM_FORMAT_ABGR8888 &&
- format != DRM_FORMAT_XBGR8888 &&
- format != DRM_FORMAT_RGB888 &&
- format != DRM_FORMAT_BGR888)
- return false;
-
- return true;
+ return modifier == DRM_FORMAT_MOD_LINEAR;
}
static void mtk_drm_plane_destroy_state(struct drm_plane *plane,
--
2.22.0
From: Keith Busch <kbusch(a)kernel.org>
mainline inclusion
from mainline-v6.18-rc7
commit 03b3bcd319b3ab5182bc9aaa0421351572c78ac0
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/11536
CVE: CVE-2025-68265
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
The namespaces can access the controller's admin request_queue, and
stale references on the namespaces may exist after tearing down the
controller. Ensure the admin request_queue is active by moving the
controller's 'put' to after all controller references have been released
so that no one can access the request_queue. This fixes a reported
use-after-free bug:
BUG: KASAN: slab-use-after-free in blk_queue_enter+0x41c/0x4a0
Read of size 8 at addr ffff88c0a53819f8 by task nvme/3287
CPU: 67 UID: 0 PID: 3287 Comm: nvme Tainted: G E 6.13.2-ga1582f1a031e #15
Tainted: [E]=UNSIGNED_MODULE
Hardware name: Jabil /EGS 2S MB1, BIOS 1.00 06/18/2025
Call Trace:
<TASK>
dump_stack_lvl+0x4f/0x60
print_report+0xc4/0x620
? _raw_spin_lock_irqsave+0x70/0xb0
? _raw_read_unlock_irqrestore+0x30/0x30
? blk_queue_enter+0x41c/0x4a0
kasan_report+0xab/0xe0
? blk_queue_enter+0x41c/0x4a0
blk_queue_enter+0x41c/0x4a0
? __irq_work_queue_local+0x75/0x1d0
? blk_queue_start_drain+0x70/0x70
? irq_work_queue+0x18/0x20
? vprintk_emit.part.0+0x1cc/0x350
? wake_up_klogd_work_func+0x60/0x60
blk_mq_alloc_request+0x2b7/0x6b0
? __blk_mq_alloc_requests+0x1060/0x1060
? __switch_to+0x5b7/0x1060
nvme_submit_user_cmd+0xa9/0x330
nvme_user_cmd.isra.0+0x240/0x3f0
? force_sigsegv+0xe0/0xe0
? nvme_user_cmd64+0x400/0x400
? vfs_fileattr_set+0x9b0/0x9b0
? cgroup_update_frozen_flag+0x24/0x1c0
? cgroup_leave_frozen+0x204/0x330
? nvme_ioctl+0x7c/0x2c0
blkdev_ioctl+0x1a8/0x4d0
? blkdev_common_ioctl+0x1930/0x1930
? fdget+0x54/0x380
__x64_sys_ioctl+0x129/0x190
do_syscall_64+0x5b/0x160
entry_SYSCALL_64_after_hwframe+0x4b/0x53
RIP: 0033:0x7f765f703b0b
Code: ff ff ff 85 c0 79 9b 49 c7 c4 ff ff ff ff 5b 5d 4c 89 e0 41 5c c3 66 0f 1f 84 00 00 00 00 00 f3 0f 1e fa b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d dd 52 0f 00 f7 d8 64 89 01 48
RSP: 002b:00007ffe2cefe808 EFLAGS: 00000202 ORIG_RAX: 0000000000000010
RAX: ffffffffffffffda RBX: 00007ffe2cefe860 RCX: 00007f765f703b0b
RDX: 00007ffe2cefe860 RSI: 00000000c0484e41 RDI: 0000000000000003
RBP: 0000000000000000 R08: 0000000000000003 R09: 0000000000000000
R10: 00007f765f611d50 R11: 0000000000000202 R12: 0000000000000003
R13: 00000000c0484e41 R14: 0000000000000001 R15: 00007ffe2cefea60
</TASK>
Reported-by: Casey Chen <cachen(a)purestorage.com>
Reviewed-by: Christoph Hellwig <hch(a)lst.de>
Reviewed-by: Hannes Reinecke <hare(a)suse.de>
Reviewed-by: Ming Lei <ming.lei(a)redhat.com>
Reviewed-by: Chaitanya Kulkarni <kch(a)nvidia.com>
Signed-off-by: Keith Busch <kbusch(a)kernel.org>
Signed-off-by: Yifan Qiao <qiaoyifan4(a)huawei.com>
---
drivers/nvme/host/core.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 081f837f58d6..e79ff52ae0a0 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4330,7 +4330,6 @@ EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
{
blk_mq_destroy_queue(ctrl->admin_q);
- blk_put_queue(ctrl->admin_q);
if (ctrl->ops->flags & NVME_F_FABRICS) {
blk_mq_destroy_queue(ctrl->fabrics_q);
blk_put_queue(ctrl->fabrics_q);
@@ -4469,6 +4468,8 @@ static void nvme_free_ctrl(struct device *dev)
container_of(dev, struct nvme_ctrl, ctrl_device);
struct nvme_subsystem *subsys = ctrl->subsys;
+ if (ctrl->admin_q)
+ blk_put_queue(ctrl->admin_q);
if (!subsys || ctrl->instance != subsys->instance)
ida_free(&nvme_instance_ida, ctrl->instance);
--
2.39.2
[PATCH OLK-5.10] um: virtio_uml: Fix use-after-free after put_device in probe
by Zhang Yuwei 27 Jan '26
From: Miaoqian Lin <linmq006(a)gmail.com>
mainline inclusion
from mainline-v6.17-rc7
commit 7ebf70cf181651fe3f2e44e95e7e5073d594c9c0
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/8206
CVE: CVE-2025-39951
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
When register_virtio_device() fails in virtio_uml_probe(),
the code sets vu_dev->registered = 1 even though
the device was not successfully registered.
This can lead to use-after-free or other issues.
Fixes: 04e5b1fb0183 ("um: virtio: Remove device on disconnect")
Signed-off-by: Miaoqian Lin <linmq006(a)gmail.com>
Signed-off-by: Johannes Berg <johannes.berg(a)intel.com>
Conflicts:
arch/um/drivers/virtio_uml.c
[commit 1fcf9da38901 is not backported]
Signed-off-by: Zhang Yuwei <zhangyuwei20(a)huawei.com>
---
arch/um/drivers/virtio_uml.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
index d5d768188b3b..0178d33e5946 100644
--- a/arch/um/drivers/virtio_uml.c
+++ b/arch/um/drivers/virtio_uml.c
@@ -1129,10 +1129,12 @@ static int virtio_uml_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vu_dev);
rc = register_virtio_device(&vu_dev->vdev);
- if (rc)
+ if (rc) {
put_device(&vu_dev->vdev.dev);
+ return rc;
+ }
vu_dev->registered = 1;
- return rc;
+ return 0;
error_init:
os_close_file(vu_dev->sock);
--
2.22.0
[PATCH OLK-5.10] um: virtio_uml: Fix use-after-free after put_device in probe
by Zhang Yuwei 27 Jan '26
From: Miaoqian Lin <linmq006(a)gmail.com>
mainline inclusion
from mainline-v6.17-rc7
commit 7ebf70cf181651fe3f2e44e95e7e5073d594c9c0
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ID0U90
CVE: CVE-2025-39951
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
When register_virtio_device() fails in virtio_uml_probe(),
the code sets vu_dev->registered = 1 even though
the device was not successfully registered.
This can lead to use-after-free or other issues.
Fixes: 04e5b1fb0183 ("um: virtio: Remove device on disconnect")
Signed-off-by: Miaoqian Lin <linmq006(a)gmail.com>
Signed-off-by: Johannes Berg <johannes.berg(a)intel.com>
Conflicts:
arch/um/drivers/virtio_uml.c
[commit 1fcf9da38901 is not backported]
Signed-off-by: Zhang Yuwei <zhangyuwei20(a)huawei.com>
---
arch/um/drivers/virtio_uml.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
index d5d768188b3b..0178d33e5946 100644
--- a/arch/um/drivers/virtio_uml.c
+++ b/arch/um/drivers/virtio_uml.c
@@ -1129,10 +1129,12 @@ static int virtio_uml_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vu_dev);
rc = register_virtio_device(&vu_dev->vdev);
- if (rc)
+ if (rc) {
put_device(&vu_dev->vdev.dev);
+ return rc;
+ }
vu_dev->registered = 1;
- return rc;
+ return 0;
error_init:
os_close_file(vu_dev->sock);
--
2.22.0
[PATCH OLK-6.6 0/9] arm_mpam: Introduce the Narrow-PARTID feature for MPAM driver
by Zeng Heng 26 Jan '26
The narrow-partid feature in MPAM allows for a more efficient use of
PARTIDs by enabling a many-to-one mapping of reqpartids (requested PARTIDs)
to intpartids (internal PARTIDs). This mapping reduces the number of unique
PARTIDs needed, thus allowing more tasks or processes to be monitored and
managed with the available resources.
On a system with a mixture of MSCs, for the MSCs that do not support
narrow-partid we use the PARTIDs exceeding the number of closids as
reqPARTIDs for expanding the monitoring groups.
Therefore, we expand the information contained in the RMID so that it
includes not only the PMG but also the reqPARTID. The new RMID layout is:
RMID = (reqPARTID << shift) | PMG.
Each control group has m (req)PARTIDs, which are used to expand the
number of monitoring groups under one control group. Therefore, the number
of monitoring groups is no longer limited by the range of MPAM's PMG, which
enhances the extensibility of the system's monitoring capabilities.
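As an illustration of this layout, a minimal encode/decode sketch is shown below; the shift width and helper names are assumptions for illustration, not identifiers from the actual MPAM driver:
```c
/* Kernel-style sketch; field width and names are assumed. */
#define MPAM_PMG_SHIFT	8	/* assumed width of the PMG field */
#define MPAM_PMG_MASK	((1U << MPAM_PMG_SHIFT) - 1)

static inline u32 mpam_make_rmid(u32 reqpartid, u32 pmg)
{
	return (reqpartid << MPAM_PMG_SHIFT) | (pmg & MPAM_PMG_MASK);
}

static inline u32 mpam_rmid_to_reqpartid(u32 rmid)
{
	return rmid >> MPAM_PMG_SHIFT;
}

static inline u32 mpam_rmid_to_pmg(u32 rmid)
{
	return rmid & MPAM_PMG_MASK;
}
```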
Dave Martin (1):
arm_mpam: Set INTERNAL as needed when setting MSC controls
Zeng Heng (8):
arm_mpam: Introduce the definitions of intPARTID and reqPARTID
arm_mpam: Add limitation for the Narrow-PARTID feature
arm_mpam: Expand the composition of RMID
arm_mpam: Automatically synchronize the configuration of all
sub-monitoring groups
Revert "arm_mpam: Add limitation for the Narrow-PARTID feature"
fs/resctrl: Add resctrl_arch_expand_rmid()
fs/resctrl: Deploy resctrl_arch_expand_rmid()
arm64/mpam: Add limitation
arch/x86/include/asm/resctrl.h | 7 +
drivers/platform/mpam/mpam_devices.c | 61 ++++-
drivers/platform/mpam/mpam_internal.h | 5 +
drivers/platform/mpam/mpam_resctrl.c | 355 ++++++++++++++++++++------
fs/resctrl/monitor.c | 45 +++-
include/linux/arm_mpam.h | 16 ++
include/linux/resctrl.h | 18 ++
7 files changed, 415 insertions(+), 92 deletions(-)
--
2.25.1
Offering: HULK
hulk inclusion
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/8430
--------------------------------
There is a stale data issue caused by merging 9e891675aab8. After
the patch was merged, the data writeback that used to run before a
punch hole was no longer performed. This left some regions of
unwritten extents not zeroed, ultimately exposing stale data.
Fixes: 9e891675aab8 ("ext4: don't write back data before punch hole in nojournal mode")
Signed-off-by: Yongjian Sun <sunyongjian1(a)huawei.com>
---
fs/ext4/inode.c | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 84a09f3e6d49..cc4eeccc0dd7 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4696,13 +4696,12 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
ret = ext4_update_disksize_before_punch(inode, offset, length);
if (ret)
goto out_dio;
-
- ret = ext4_truncate_page_cache_block_range(inode,
- first_block_offset, last_block_offset + 1);
- if (ret)
- goto out_dio;
}
+ ret = ext4_truncate_page_cache_block_range(inode, offset, offset + length);
+ if (ret)
+ goto out_dio;
+
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
credits = ext4_writepage_trans_blocks(inode);
else
--
2.39.2
From: Jarkko Sakkinen <jarkko.sakkinen(a)opinsys.com>
stable inclusion
from stable-v6.6.120
commit d88481653d74d622d1d0d2c9bad845fc2cc6fd23
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13388
CVE: CVE-2025-71077
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit faf07e611dfa464b201223a7253e9dc5ee0f3c9e upstream.
tpm2_get_pcr_allocation() does not enforce any upper limit on the number of
banks. Cap the limit at eight banks so that out-of-bounds values coming
from external I/O cause only limited harm.
Cc: stable(a)vger.kernel.org # v5.10+
Fixes: bcfff8384f6c ("tpm: dynamically allocate the allocated_banks array")
Tested-by: Lai Yi <yi1.lai(a)linux.intel.com>
Reviewed-by: Jonathan McDowell <noodles(a)meta.com>
Reviewed-by: Roberto Sassu <roberto.sassu(a)huawei.com>
Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen(a)opinsys.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Chen Jinghuang <chenjinghuang2(a)huawei.com>
---
drivers/char/tpm/tpm-chip.c | 1 -
drivers/char/tpm/tpm1-cmd.c | 5 -----
drivers/char/tpm/tpm2-cmd.c | 8 +++-----
include/linux/tpm.h | 8 +++++---
4 files changed, 8 insertions(+), 14 deletions(-)
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 70e3fe20fdcf..458a3e9ea73a 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -279,7 +279,6 @@ static void tpm_dev_release(struct device *dev)
kfree(chip->work_space.context_buf);
kfree(chip->work_space.session_buf);
- kfree(chip->allocated_banks);
kfree(chip);
}
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index cf64c7385105..b49a790f1bd5 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -799,11 +799,6 @@ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr)
*/
int tpm1_get_pcr_allocation(struct tpm_chip *chip)
{
- chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
- GFP_KERNEL);
- if (!chip->allocated_banks)
- return -ENOMEM;
-
chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 93545be190a5..57bb3e34b770 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -574,11 +574,9 @@ ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
nr_possible_banks = be32_to_cpup(
(__be32 *)&buf.data[TPM_HEADER_SIZE + 5]);
-
- chip->allocated_banks = kcalloc(nr_possible_banks,
- sizeof(*chip->allocated_banks),
- GFP_KERNEL);
- if (!chip->allocated_banks) {
+ if (nr_possible_banks > TPM2_MAX_PCR_BANKS) {
+ pr_err("tpm: out of bank capacity: %u > %u\n",
+ nr_possible_banks, TPM2_MAX_PCR_BANKS);
rc = -ENOMEM;
goto out;
}
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index bf8a4ec8a01c..f5e7cca2f257 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -25,7 +25,9 @@
#include <crypto/hash_info.h>
#define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */
-#define TPM_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+
+#define TPM2_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define TPM2_MAX_PCR_BANKS 8
struct tpm_chip;
struct trusted_key_payload;
@@ -51,7 +53,7 @@ enum tpm_algorithms {
struct tpm_digest {
u16 alg_id;
- u8 digest[TPM_MAX_DIGEST_SIZE];
+ u8 digest[TPM2_MAX_DIGEST_SIZE];
} __packed;
struct tpm_bank_info {
@@ -157,7 +159,7 @@ struct tpm_chip {
unsigned int groups_cnt;
u32 nr_allocated_banks;
- struct tpm_bank_info *allocated_banks;
+ struct tpm_bank_info allocated_banks[TPM2_MAX_PCR_BANKS];
#ifdef CONFIG_ACPI
acpi_handle acpi_dev_handle;
char ppi_version[TPM_PPI_VERSION_LEN + 1];
--
2.34.1
[PATCH OLK-5.10] mm/kmemleak: move up cond_resched() call in page scanning loop
by Wupeng Ma 26 Jan '26
From: Waiman Long <longman(a)redhat.com>
mainline inclusion
from mainline-v6.6-rc1
commit e68d343d2720779362cb7160cb7f4bd24979b2b4
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/8429
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
Commit bde5f6bc68db ("kmemleak: add scheduling point to kmemleak_scan()")
added a cond_resched() call to the struct page scanning loop to prevent
soft lockup from happening. However, soft lockup can still happen in that
loop in some corner cases when the pages that satisfy the "!(pfn & 63)"
check are skipped for some reason.
Fix this corner case by moving up the cond_resched() check so that it will
be called every 64 pages unconditionally.
Link: https://lkml.kernel.org/r/20230825164947.1317981-1-longman@redhat.com
Fixes: bde5f6bc68db ("kmemleak: add scheduling point to kmemleak_scan()")
Signed-off-by: Waiman Long <longman(a)redhat.com>
Cc: Catalin Marinas <catalin.marinas(a)arm.com>
Cc: Yisheng Xie <xieyisheng1(a)huawei.com>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
Signed-off-by: Wupeng Ma <mawupeng1(a)huawei.com>
---
mm/kmemleak.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 90eb82299149d..1d9ea3901d472 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1456,6 +1456,9 @@ static void kmemleak_scan(void)
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
struct page *page = pfn_to_online_page(pfn);
+ if (!(pfn & 63))
+ cond_resched();
+
if (!page)
continue;
@@ -1466,8 +1469,6 @@ static void kmemleak_scan(void)
if (page_count(page) == 0)
continue;
scan_block(page, page + 1, NULL);
- if (!(pfn & 63))
- cond_resched();
}
}
put_online_mems();
--
2.43.0
Fix CVE-2026-22980.
Li Lingfeng (1):
Revert "nfsd: provide locking for v4_end_grace"
NeilBrown (1):
nfsd: provide locking for v4_end_grace
fs/nfsd/nfs4state.c | 25 +++++++++++++++++++------
1 file changed, 19 insertions(+), 6 deletions(-)
--
2.52.0
[PATCH OLK-6.6] cifs: Fix memory and information leak in smb3_reconfigure()
by Yongjian Sun 26 Jan '26
From: Zilin Guan <zilin(a)seu.edu.cn>
mainline inclusion
from mainline-v6.19-rc3
commit cb6d5aa9c0f10074f1ad056c3e2278ad2cc7ec8d
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13475
CVE: CVE-2025-71151
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
In smb3_reconfigure(), if smb3_sync_session_ctx_passwords() fails, the
function returns immediately without freeing and erasing the newly
allocated new_password and new_password2. This causes both a memory leak
and a potential information leak.
Fix this by calling kfree_sensitive() on both password buffers before
returning in this error case.
Fixes: 0f0e357902957 ("cifs: during remount, make sure passwords are in sync")
Signed-off-by: Zilin Guan <zilin(a)seu.edu.cn>
Reviewed-by: ChenXiaoSong <chenxiaosong(a)kylinos.cn>
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Signed-off-by: Yongjian Sun <sunyongjian1(a)huawei.com>
---
fs/smb/client/fs_context.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index 137d03781d52..2344d7150a7b 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -979,6 +979,8 @@ static int smb3_reconfigure(struct fs_context *fc)
rc = smb3_sync_session_ctx_passwords(cifs_sb, ses);
if (rc) {
mutex_unlock(&ses->session_mutex);
+ kfree_sensitive(new_password);
+ kfree_sensitive(new_password2);
return rc;
}
--
2.39.2
[PATCH OLK-5.10] software node: Correct a OOB check in software_node_get_reference_args()
by Zhang Yuwei 26 Jan '26
From: Zijun Hu <quic_zijuhu(a)quicinc.com>
stable inclusion
from stable-v5.10.239
commit 142acd739eb6f08c148a96ae8309256f1422ff4b
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/9453
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 31e4e12e0e9609850cefd4b2e1adf782f56337d6 ]
software_node_get_reference_args() wants to get the @index-th element, so
the property value requires at least '(index + 1) * sizeof(*ref)' bytes,
but that cannot be guaranteed by the current OOB check, which may cause an
out-of-bounds read for a malformed property.
Fix this by using '((index + 1) * sizeof(*ref) > prop->length)' as the OOB
check.
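A worked example with illustrative sizes shows the off-by-one; the struct below is a stand-in, not the real software_node_ref_args:
```c
#include <stdio.h>

struct ref { void *node; unsigned int nargs; };	/* 16 bytes on LP64 */

int main(void)
{
	size_t length = 24;	/* malformed property: only 1.5 elements */
	size_t index = 1;	/* caller asks for the second element */

	/* old check: 16 >= 24 is false, so the OOB read would proceed */
	int old_rejects = index * sizeof(struct ref) >= length;
	/* new check: 32 > 24 is true, so -ENOENT is returned */
	int new_rejects = (index + 1) * sizeof(struct ref) > length;

	printf("old rejects: %d, new rejects: %d\n", old_rejects, new_rejects);
	return 0;
}
```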
Reviewed-by: Sakari Ailus <sakari.ailus(a)linux.intel.com>
Signed-off-by: Zijun Hu <quic_zijuhu(a)quicinc.com>
Link: https://lore.kernel.org/r/20250414-fix_swnode-v2-1-9c9e6ae11eab@quicinc.com
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Liu Mingrui <liumingrui(a)huawei.com>
---
drivers/base/swnode.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index b664c36388e2..89b53ca086d6 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -508,7 +508,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
if (prop->is_inline)
return -EINVAL;
- if (index * sizeof(*ref) >= prop->length)
+ if ((index + 1) * sizeof(*ref) > prop->length)
return -ENOENT;
ref_array = prop->pointer;
--
2.22.0
From: Edward Adam Davis <eadavis(a)qq.com>
stable inclusion
from stable-v6.6.88
commit cc0bc4cb62ce5fa0c383e3bf0765d01f46bd49ac
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/10650
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit ddf2846f22e8575d6b4b6a66f2100f168b8cd73d ]
If the width in dmapctl of the AG is zero, it triggers a divide error when
calculating the control page level in dbAllocAG.
To avoid this issue, add a check for agwidth in dbAllocAG.
Reported-and-tested-by: syzbot+7c808908291a569281a9(a)syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=7c808908291a569281a9
Signed-off-by: Edward Adam Davis <eadavis(a)qq.com>
Signed-off-by: Dave Kleikamp <dave.kleikamp(a)oracle.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Wang Hai <wanghai38(a)huawei.com>
---
fs/jfs/jfs_dmap.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 07b81097b84f..d6a9aecf64cb 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -204,6 +204,10 @@ int dbMount(struct inode *ipbmap)
bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
+ if (!bmp->db_agwidth) {
+ err = -EINVAL;
+ goto err_release_metapage;
+ }
bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG ||
--
2.22.0
26 Jan '26
Commit 35c18f2933c5 ("Add a new optional ",cma" suffix to the
crashkernel= command line option") and commit ab475510e042 ("kdump:
implement reserve_crashkernel_cma") added CMA support for kdump
crashkernel reservation. This allows the kernel to dynamically allocate
contiguous memory for crash dumping when needed, rather than permanently
reserving a fixed region at boot time.
So extend crashkernel CMA reservation support to riscv. The following
changes are made to enable CMA reservation:
- Parse and obtain the CMA reservation size along with other crashkernel
parameters.
- Call reserve_crashkernel_cma() to allocate the CMA region for kdump.
- Include the CMA-reserved ranges for kdump kernel to use.
- Exclude the CMA-reserved ranges from the crash kernel memory to
prevent them from being exported through /proc/vmcore.
Update kernel-parameters.txt to document CMA support for crashkernel on
riscv architecture.
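For illustration, with this series applied a riscv kdump setup could combine a regular reservation with a CMA one on the kernel command line; the sizes here are arbitrary examples, and the right combination depends on the platform's kdump configuration:
```
crashkernel=256M crashkernel=512M,cma
```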
Signed-off-by: Jinjie Ruan <ruanjinjie(a)huawei.com>
---
Documentation/admin-guide/kernel-parameters.txt | 2 +-
arch/riscv/kernel/machine_kexec_file.c | 17 +++++++++++++++--
arch/riscv/mm/init.c | 5 +++--
3 files changed, 19 insertions(+), 5 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 36bb642a7edd..84e0e6f5d551 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1119,7 +1119,7 @@ Kernel parameters
It will be ignored when crashkernel=X,high is not used
or memory reserved is below 4G.
crashkernel=size[KMG],cma
- [KNL, X86, ARM64, ppc] Reserve additional crash kernel memory from
+ [KNL, X86, ARM64, RISCV, ppc] Reserve additional crash kernel memory from
CMA. This reservation is usable by the first system's
userspace memory and kernel movable allocations (memory
balloon, zswap). Pages allocated from this memory range
diff --git a/arch/riscv/kernel/machine_kexec_file.c b/arch/riscv/kernel/machine_kexec_file.c
index dd9d92a96517..5c8f3cccbc83 100644
--- a/arch/riscv/kernel/machine_kexec_file.c
+++ b/arch/riscv/kernel/machine_kexec_file.c
@@ -59,9 +59,9 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
{
struct crash_mem *cmem;
unsigned int nr_ranges;
- int ret;
+ int ret, i;
- nr_ranges = 1; /* For exclusion of crashkernel region */
+ nr_ranges = 1 + crashk_cma_cnt; /* For exclusion of crashkernel region */
walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback);
cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
@@ -74,11 +74,24 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
if (ret)
goto out;
+ for (i = 0; i < crashk_cma_cnt; i++) {
+ cmem->ranges[cmem->nr_ranges].start = crashk_cma_ranges[i].start;
+ cmem->ranges[cmem->nr_ranges].end = crashk_cma_ranges[i].end;
+ cmem->nr_ranges++;
+ }
+
/* Exclude crashkernel region */
ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
if (!ret)
ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
+ for (i = 0; i < crashk_cma_cnt; ++i) {
+ ret = crash_exclude_mem_range(cmem, crashk_cma_ranges[i].start,
+ crashk_cma_ranges[i].end);
+ if (ret)
+ goto out;
+ }
+
out:
kfree(cmem);
return ret;
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index addb8a9305be..074d2d5f79ee 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -1404,7 +1404,7 @@ static inline void setup_vm_final(void)
*/
static void __init arch_reserve_crashkernel(void)
{
- unsigned long long low_size = 0;
+ unsigned long long low_size = 0, cma_size = 0;
unsigned long long crash_base, crash_size;
bool high = false;
int ret;
@@ -1414,11 +1414,12 @@ static void __init arch_reserve_crashkernel(void)
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&crash_size, &crash_base,
- &low_size, NULL, &high);
+ &low_size, &cma_size, &high);
if (ret)
return;
reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
+ reserve_crashkernel_cma(cma_size);
}
void __init paging_init(void)
--
2.34.1
From: Edward Adam Davis <eadavis(a)qq.com>
stable inclusion
from stable-v6.6.88
commit cc0bc4cb62ce5fa0c383e3bf0765d01f46bd49ac
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IC8J7I
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit ddf2846f22e8575d6b4b6a66f2100f168b8cd73d ]
If the width in dmapctl of the AG is zero, it triggers a divide error when
calculating the control page level in dbAllocAG.
To avoid this issue, add a check for agwidth in dbAllocAG.
Reported-and-tested-by: syzbot+7c808908291a569281a9(a)syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=7c808908291a569281a9
Signed-off-by: Edward Adam Davis <eadavis(a)qq.com>
Signed-off-by: Dave Kleikamp <dave.kleikamp(a)oracle.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Wang Hai <wanghai38(a)huawei.com>
---
fs/jfs/jfs_dmap.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 07b81097b84f..d6a9aecf64cb 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -204,6 +204,10 @@ int dbMount(struct inode *ipbmap)
bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
+ if (!bmp->db_agwidth) {
+ err = -EINVAL;
+ goto err_release_metapage;
+ }
bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG ||
--
2.22.0
CVE-2025-68296
Thomas Zimmermann (2):
drm/fbdev-helper: Set and clear VGA switcheroo client from fb_info
drm, fbcon, vga_switcheroo: Avoid race condition in fbcon setup
drivers/gpu/drm/drm_fb_helper.c | 12 ++----------
drivers/video/fbdev/core/fbcon.c | 9 +++++++++
2 files changed, 11 insertions(+), 10 deletions(-)
--
2.34.1
26 Jan '26
Support mempool and NUMA.remote for 64KB page.
Jinjiang Tu (2):
mm/pfn_range_alloc: add support for CONFIG_ARM64_64K_PAGES
mm/numa_remote: remove depends on ARM64_4K_PAGES
arch/arm64/mm/Kconfig | 1 +
arch/arm64/mm/pfn_range_alloc.c | 29 ++++++++++++++++++++++-------
drivers/base/Kconfig | 2 +-
3 files changed, 24 insertions(+), 8 deletions(-)
--
2.43.0
mainline inclusion
from mainline-v6.14-rc4
commit 99333229dee41b992f3b0493f6aa2e3528138384
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/78
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
A softlockup issue was found with a stress test:
watchdog: BUG: soft lockup - CPU#27 stuck for 26s! [migration/27:181]
CPU: 27 UID: 0 PID: 181 Comm: migration/27 6.14.0-rc2-next-20250210 #1
Stopper: multi_cpu_stop <- stop_machine_from_inactive_cpu
RIP: 0010:stop_machine_yield+0x2/0x10
RSP: 0000:ff4a0dcecd19be48 EFLAGS: 00000246
RAX: ffffffff89c0108f RBX: ff4a0dcec03afe44 RCX: 0000000000000000
RDX: ff1cdaaf6eba5808 RSI: 0000000000000282 RDI: ff1cda80c1775a40
RBP: 0000000000000001 R08: 00000011620096c6 R09: 7fffffffffffffff
R10: 0000000000000001 R11: 0000000000000100 R12: ff1cda80c1775a40
R13: 0000000000000000 R14: 0000000000000001 R15: ff4a0dcec03afe20
FS: 0000000000000000(0000) GS:ff1cdaaf6eb80000(0000)
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000000000000 CR3: 00000025e2c2a001 CR4: 0000000000773ef0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
PKRU: 55555554
Call Trace:
multi_cpu_stop+0x8f/0x100
cpu_stopper_thread+0x90/0x140
smpboot_thread_fn+0xad/0x150
kthread+0xc2/0x100
ret_from_fork+0x2d/0x50
The stress test involves CPU hotplug operations and memory control group
(memcg) operations. The scenario can be described as follows:
echo xx > memory.max     cache_ap_online                  oom_reaper
(CPU23)                  (CPU50)
xx < usage               stop_machine_from_inactive_cpu
for(;;)                  // all active cpus
  trigger OOM              queue_stop_cpus_work
  // waiting oom_reaper    multi_cpu_stop(migration/xx)
                           // sync all active cpus ack
                           // waiting cpu23 ack
                           // CPU50 loops in multi_cpu_stop
                                                          waiting cpu50
Detailed explanation:
1. When the usage is larger than xx, an OOM may be triggered. If the
process does not handle the kill signal immediately, it will loop in
memory_max_write.
2. When cache_ap_online is triggered, multi_cpu_stop is queued to the
active cpus. Within the multi_cpu_stop function, it attempts to
synchronize the CPU states. However, CPU23 does not acknowledge
because it is stuck in a loop within the for(;;).
3. The oom_reaper process is blocked because CPU50 is in a loop, waiting
for CPU23 to acknowledge the synchronization request.
4. Finally, this forms a cyclic dependency and leads to a softlockup and
dead loop.
To fix this issue, add cond_resched() in memory_max_write(), so that it
will not block the migration task.
Link: https://lkml.kernel.org/r/20250211081819.33307-1-chenridong@huaweicloud.com
Fixes: b6e6edcfa405 ("mm: memcontrol: reclaim and OOM kill when shrinking memory.max below usage")
Signed-off-by: Chen Ridong <chenridong(a)huawei.com>
Acked-by: Michal Hocko <mhocko(a)suse.com>
Cc: Roman Gushchin <roman.gushchin(a)linux.dev>
Cc: Johannes Weiner <hannes(a)cmpxchg.org>
Cc: Shakeel Butt <shakeel.butt(a)linux.dev>
Cc: Muchun Song <songmuchun(a)bytedance.com>
Cc: Wang Weiyang <wangweiyang2(a)huawei.com>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
Signed-off-by: Chen Ridong <chenridong(a)huawei.com>
---
mm/memcontrol.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7a8795130546d..067a3dde6f87f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -8030,6 +8030,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
memcg_memory_event(memcg, MEMCG_OOM);
if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
break;
+ cond_resched();
}
memcg_wb_domain_size_changed(memcg);
--
2.34.1
[PATCH OLK-5.10] iommu/amd: Avoid stack buffer overflow from kernel cmdline
by Wang Wensheng 26 Jan '26
From: Kees Cook <kees(a)kernel.org>
stable inclusion
from stable-v5.10.241
commit a732502bf3bbe859613b6d7b2b0313b11f0474ac
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ICUXQR
CVE: CVE-2025-38676
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 8503d0fcb1086a7cfe26df67ca4bd9bd9e99bdec ]
While the kernel command line is considered trusted in most environments,
avoid writing 1 byte past the end of "acpiid" if the "str" argument is
maximum length.
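A hedged sketch of the arithmetic, assuming ACPIID_LEN is 16 purely for
illustration (see the driver for the real value); note that "str" still
carries the leading '=' at the point of the check:

    #include <stdio.h>
    #include <string.h>

    #define ACPIID_LEN 16  /* illustrative assumption */

    int main(void)
    {
        const char *str = "=AAAAAAAAAAAAAAAA";  /* '=' + 16 chars, strlen 17 */
        char acpiid[ACPIID_LEN + 1] = { 0 };    /* fixed: room for the NUL */

        /* old bound: strlen(str) > ACPIID_LEN + 1 -> 17 > 17 is false, so
         * sscanf() would store 16 chars + NUL = 17 bytes into the old
         * 16-byte buffer, one byte past the end */
        if (strlen(str) > ACPIID_LEN) {  /* new bound: 17 > 16, rejected */
            printf("rejected: length %zu exceeds %d\n", strlen(str), ACPIID_LEN);
            return 1;
        }
        sscanf(str, "=%s", acpiid);
        printf("acpiid = %s\n", acpiid);
        return 0;
    }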
Reported-by: Simcha Kosman <simcha.kosman(a)cyberark.com>
Closes: https://lore.kernel.org/all/AS8P193MB2271C4B24BCEDA31830F37AE84A52@AS8P193M…
Fixes: b6b26d86c61c ("iommu/amd: Add a length limitation for the ivrs_acpihid command-line parameter")
Signed-off-by: Kees Cook <kees(a)kernel.org>
Reviewed-by: Ankit Soni <Ankit.Soni(a)amd.com>
Link: https://lore.kernel.org/r/20250804154023.work.970-kees@kernel.org
Signed-off-by: Joerg Roedel <joerg.roedel(a)amd.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Wang Wensheng <wangwensheng4(a)huawei.com>
---
drivers/iommu/amd/init.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index d0a4ec42fd12..8e3570b77798 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -3223,7 +3223,7 @@ static int __init parse_ivrs_acpihid(char *str)
{
u32 seg = 0, bus, dev, fn;
char *hid, *uid, *p, *addr;
- char acpiid[ACPIID_LEN] = {0};
+ char acpiid[ACPIID_LEN + 1] = { }; /* size with NULL terminator */
int i;
addr = strchr(str, '@');
@@ -3249,7 +3249,7 @@ static int __init parse_ivrs_acpihid(char *str)
/* We have the '@', make it the terminator to get just the acpiid */
*addr++ = 0;
- if (strlen(str) > ACPIID_LEN + 1)
+ if (strlen(str) > ACPIID_LEN)
goto not_found;
if (sscanf(str, "=%s", acpiid) != 1)
--
2.22.0
[PATCH OLK-5.10] HID: core: Harden s32ton() against conversion to 0 bits
by Wang Wensheng 26 Jan '26
From: Alan Stern <stern(a)rowland.harvard.edu>
mainline inclusion
from mainline-v6.17-rc1
commit a6b87bfc2ab5bccb7ad953693c85d9062aef3fdd
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ICU7B0
CVE: CVE-2025-38556
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
Testing by the syzbot fuzzer showed that the HID core gets a
shift-out-of-bounds exception when it tries to convert a 32-bit
quantity to a 0-bit quantity. Ideally this should never occur, but
there are buggy devices and some might have a report field with size
set to zero; we shouldn't reject the report or the device just because
of that.
Instead, harden the s32ton() routine so that it returns a reasonable
result instead of crashing when it is called with the number of bits
set to 0 -- the same as what snto32() does.
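A small user-space illustration of the failure mode (the unsigned wrap
itself, not the kernel routine):

    #include <stdio.h>

    int main(void)
    {
        unsigned int n = 0;
        unsigned int shift = n - 1;  /* wraps to 0xffffffff */

        /* "value >> shift" with shift >= 32 is undefined for a 32-bit
         * type; this is the shift-out-of-bounds UBSAN reports. The
         * hardened s32ton() returns 0 before evaluating the shift. */
        printf("shift count for n == 0: %u\n", shift);
        return 0;
    }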
Signed-off-by: Alan Stern <stern(a)rowland.harvard.edu>
Reported-by: syzbot+b63d677d63bcac06cf90(a)syzkaller.appspotmail.com
Closes: https://lore.kernel.org/linux-usb/68753a08.050a0220.33d347.0008.GAE@google.…
Tested-by: syzbot+b63d677d63bcac06cf90(a)syzkaller.appspotmail.com
Fixes: dde5845a529f ("[PATCH] Generic HID layer - code split")
Cc: stable(a)vger.kernel.org
Link: https://patch.msgid.link/613a66cd-4309-4bce-a4f7-2905f9bce0c9@rowland.harva…
Signed-off-by: Benjamin Tissoires <bentiss(a)kernel.org>
Conflicts:
drivers/hid/hid-core.c
[Only a context conflict because commit c653ffc283404 is not applied]
Signed-off-by: Wang Wensheng <wangwensheng4(a)huawei.com>
---
drivers/hid/hid-core.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3094c33bb8f9..a602f572d458 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1347,7 +1347,12 @@ EXPORT_SYMBOL_GPL(hid_snto32);
static u32 s32ton(__s32 value, unsigned n)
{
- s32 a = value >> (n - 1);
+ s32 a;
+
+ if (!value || !n)
+ return 0;
+
+ a = value >> (n - 1);
if (a && a != -1)
return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
return value & ((1 << n) - 1);
--
2.22.0
26 Jan '26
From: Junrui Luo <moonafterrain(a)outlook.com>
mainline inclusion
from mainline-v6.19-rc2
commit 6946c726c3f4c36f0f049e6f97e88c510b15f65d
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13342
CVE: CVE-2025-68789
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
The ibmpex_high_low_store() function retrieves driver data using
dev_get_drvdata() and uses it without validation. This creates a race
condition where the sysfs callback can be invoked after the data
structure is freed, leading to a use-after-free.
Fix this by adding a NULL check after dev_get_drvdata(), and by
reordering operations in the deletion path to prevent a TOCTOU race.
Reported-by: Yuhao Jiang <danisjiang(a)gmail.com>
Reported-by: Junrui Luo <moonafterrain(a)outlook.com>
Closes: https://lore.kernel.org/r/MEYPR01MB7886BE2F51BFE41875B74B60AFA0A@MEYPR01MB7…
Fixes: 57c7c3a0fdea ("hwmon: IBM power meter driver")
Signed-off-by: Junrui Luo <moonafterrain(a)outlook.com>
Signed-off-by: Guenter Roeck <linux(a)roeck-us.net>
Conflicts:
drivers/hwmon/ibmpex.c
[commit aa1fd9cdabaae not merged]
Signed-off-by: Zhang Yuwei <zhangyuwei20(a)huawei.com>
---
drivers/hwmon/ibmpex.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
index fe90f0536d76..235d56e96879 100644
--- a/drivers/hwmon/ibmpex.c
+++ b/drivers/hwmon/ibmpex.c
@@ -282,6 +282,9 @@ static ssize_t ibmpex_high_low_store(struct device *dev,
{
struct ibmpex_bmc_data *data = dev_get_drvdata(dev);
+ if (!data)
+ return -ENODEV;
+
ibmpex_reset_high_low_data(data);
return count;
@@ -514,6 +517,9 @@ static void ibmpex_bmc_delete(struct ibmpex_bmc_data *data)
{
int i, j;
+ hwmon_device_unregister(data->hwmon_dev);
+ dev_set_drvdata(data->bmc_device, NULL);
+
device_remove_file(data->bmc_device,
&sensor_dev_attr_reset_high_low.dev_attr);
device_remove_file(data->bmc_device, &sensor_dev_attr_name.dev_attr);
@@ -527,8 +533,7 @@ static void ibmpex_bmc_delete(struct ibmpex_bmc_data *data)
}
list_del(&data->list);
- dev_set_drvdata(data->bmc_device, NULL);
- hwmon_device_unregister(data->hwmon_dev);
+
ipmi_destroy_user(data->user);
kfree(data->sensors);
kfree(data);
--
2.22.0
[PATCH OLK-5.10] of: check previous kernel's ima-kexec-buffer against memory bounds
by Wang Wensheng 26 Jan '26
From: Vaibhav Jain <vaibhav(a)linux.ibm.com>
mainline inclusion
from mainline-v6.0-rc1
commit cbf9c4b9617b6767886a913705ca14b7600c77db
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/ICGAML
CVE: CVE-2022-50159
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
Presently ima_get_kexec_buffer() doesn't check if the previous kernel's
ima-kexec-buffer lies outside the addressable memory range. This can result
in a kernel panic if the new kernel is booted with 'mem=X' arg and the
ima-kexec-buffer was allocated beyond that range by the previous kernel.
The panic is usually of the form below:
$ sudo kexec --initrd initrd vmlinux --append='mem=16G'
<snip>
BUG: Unable to handle kernel data access on read at 0xc000c01fff7f0000
Faulting instruction address: 0xc000000000837974
Oops: Kernel access of bad area, sig: 11 [#1]
<snip>
NIP [c000000000837974] ima_restore_measurement_list+0x94/0x6c0
LR [c00000000083b55c] ima_load_kexec_buffer+0xac/0x160
Call Trace:
[c00000000371fa80] [c00000000083b55c] ima_load_kexec_buffer+0xac/0x160
[c00000000371fb00] [c0000000020512c4] ima_init+0x80/0x108
[c00000000371fb70] [c0000000020514dc] init_ima+0x4c/0x120
[c00000000371fbf0] [c000000000012240] do_one_initcall+0x60/0x2c0
[c00000000371fcc0] [c000000002004ad0] kernel_init_freeable+0x344/0x3ec
[c00000000371fda0] [c0000000000128a4] kernel_init+0x34/0x1b0
[c00000000371fe10] [c00000000000ce64] ret_from_kernel_thread+0x5c/0x64
Instruction dump:
f92100b8 f92100c0 90e10090 910100a0 4182050c 282a0017 3bc00000 40810330
7c0802a6 fb610198 7c9b2378 f80101d0 <a1240000> 2c090001 40820614 e9240010
---[ end trace 0000000000000000 ]---
Fix this issue by checking the returned PFN range of the previous
kernel's ima-kexec-buffer with page_is_ram() to ensure it lies within
addressable memory.
Fixes: 467d27824920 ("powerpc: ima: get the kexec buffer passed by the previous kernel")
Cc: Frank Rowand <frowand.list(a)gmail.com>
Cc: Prakhar Srivastava <prsriva(a)linux.microsoft.com>
Cc: Lakshmi Ramasubramanian <nramas(a)linux.microsoft.com>
Cc: Thiago Jung Bauermann <bauerman(a)linux.ibm.com>
Cc: Rob Herring <robh(a)kernel.org>
Cc: Ritesh Harjani <ritesh.list(a)gmail.com>
Cc: Robin Murphy <robin.murphy(a)arm.com>
Signed-off-by: Vaibhav Jain <vaibhav(a)linux.ibm.com>
Signed-off-by: Rob Herring <robh(a)kernel.org>
Link: https://lore.kernel.org/r/20220531041446.3334259-1-vaibhav@linux.ibm.com
Signed-off-by: Wang Wensheng <wangwensheng4(a)huawei.com>
---
drivers/of/kexec.c | 17 +++++++++++++++++
1 file changed, 17 insertions(+)
diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c
index d8231c34e873..f0e6495d0536 100644
--- a/drivers/of/kexec.c
+++ b/drivers/of/kexec.c
@@ -133,6 +133,7 @@ int ima_get_kexec_buffer(void **addr, size_t *size)
{
int ret, len;
unsigned long tmp_addr;
+ unsigned long start_pfn, end_pfn;
size_t tmp_size;
const void *prop;
@@ -147,6 +148,22 @@ int ima_get_kexec_buffer(void **addr, size_t *size)
if (ret)
return ret;
+ /* Do some sanity on the returned size for the ima-kexec buffer */
+ if (!tmp_size)
+ return -ENOENT;
+
+ /*
+ * Calculate the PFNs for the buffer and ensure
+ * they are with in addressable memory.
+ */
+ start_pfn = PHYS_PFN(tmp_addr);
+ end_pfn = PHYS_PFN(tmp_addr + tmp_size - 1);
+ if (!page_is_ram(start_pfn) || !page_is_ram(end_pfn)) {
+ pr_warn("IMA buffer at 0x%lx, size = 0x%zx beyond memory\n",
+ tmp_addr, tmp_size);
+ return -EINVAL;
+ }
+
*addr = __va(tmp_addr);
*size = tmp_size;
--
2.22.0
[PATCH OLK-5.10] drm/amd/pm/powerplay/hwmgr/vega20_thermal: Prevent division by zero
by Wang Wensheng 26 Jan '26
From: Denis Arefev <arefev(a)swemel.ru>
stable inclusion
from stable-v5.10.237
commit ce773dd844ee19a605af27f11470887e0f2044a9
category: bugfix
bugzilla: 189268
CVE: CVE-2025-37766
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 4e3d9508c056d7e0a56b58d5c81253e2a0d22b6c upstream.
The user can set any speed value.
If speed is greater than UINT_MAX/8, division by zero is possible.
Found by Linux Verification Center (linuxtesting.org) with SVACE.
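A worked example of the wrap in 32-bit arithmetic (a user-space sketch;
the tach-period computation divides by speed * 8, hence the UINT_MAX/8
bound):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t speed = 536870912;    /* 2^29, just above UINT_MAX/8 = 536870911 */
        uint32_t divisor = 8 * speed;  /* 2^32 wraps to 0 */

        printf("8 * %u wraps to %u -> division by zero\n", speed, divisor);
        return 0;
    }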
Fixes: 031db09017da ("drm/amd/powerplay/vega20: enable fan RPM and pwm settings V2")
Signed-off-by: Denis Arefev <arefev(a)swemel.ru>
Signed-off-by: Alex Deucher <alexander.deucher(a)amd.com>
Cc: stable(a)vger.kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Liu Mingrui <liumingrui(a)huawei.com>
---
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
index 364162ddaa9c..87b7baa9492b 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
@@ -189,7 +189,7 @@ int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
uint32_t tach_period, crystal_clock_freq;
int result = 0;
- if (!speed)
+ if (!speed || speed > UINT_MAX/8)
return -EINVAL;
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {
--
2.22.0
26 Jan '26
From: Miaohe Lin <linmiaohe(a)huawei.com>
mainline inclusion
from mainline-v5.18-rc1
commit 0cbcc92917c5de80f15c24d033566539ad696892
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IBP3WP
CVE: CVE-2022-49190
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
Since commit ebff7d8f270d ("mem hotunplug: fix kfree() of bootmem
memory"), we could get a resource allocated during boot via
alloc_resource(), and such a resource must be released using
free_resource(). However, many callers use kfree() directly, which will
result in a kernel BUG. In order to fix this without fixing every call
site, just leak a couple of bytes in this corner case.
Link: https://lkml.kernel.org/r/20220217083619.19305-1-linmiaohe@huawei.com
Fixes: ebff7d8f270d ("mem hotunplug: fix kfree() of bootmem memory")
Signed-off-by: Miaohe Lin <linmiaohe(a)huawei.com>
Suggested-by: David Hildenbrand <david(a)redhat.com>
Cc: Dan Williams <dan.j.williams(a)intel.com>
Cc: Alistair Popple <apopple(a)nvidia.com>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds(a)linux-foundation.org>
Conflicts:
kernel/resource.c
[Just context conflicts.]
Signed-off-by: Wang Wensheng <wangwensheng4(a)huawei.com>
---
kernel/resource.c | 41 ++++++++---------------------------------
1 file changed, 8 insertions(+), 33 deletions(-)
diff --git a/kernel/resource.c b/kernel/resource.c
index 1087f33d70c4..ea4d7a02b8e8 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -53,14 +53,6 @@ struct resource_constraint {
static DEFINE_RWLOCK(resource_lock);
-/*
- * For memory hotplug, there is no way to free resource entries allocated
- * by boot mem after the system is up. So for reusing the resource entry
- * we need to remember the resource.
- */
-static struct resource *bootmem_resource_free;
-static DEFINE_SPINLOCK(bootmem_resource_lock);
-
static struct resource *next_resource(struct resource *p, bool sibling_only)
{
/* Caller wants to traverse through siblings only */
@@ -149,36 +141,19 @@ __initcall(ioresources_init);
static void free_resource(struct resource *res)
{
- if (!res)
- return;
-
- if (!PageSlab(virt_to_head_page(res))) {
- spin_lock(&bootmem_resource_lock);
- res->sibling = bootmem_resource_free;
- bootmem_resource_free = res;
- spin_unlock(&bootmem_resource_lock);
- } else {
+ /**
+ * If the resource was allocated using memblock early during boot
+ * we'll leak it here: we can only return full pages back to the
+ * buddy and trying to be smart and reusing them eventually in
+ * alloc_resource() overcomplicates resource handling.
+ */
+ if (res && PageSlab(virt_to_head_page(res)))
kfree(res);
- }
}
static struct resource *alloc_resource(gfp_t flags)
{
- struct resource *res = NULL;
-
- spin_lock(&bootmem_resource_lock);
- if (bootmem_resource_free) {
- res = bootmem_resource_free;
- bootmem_resource_free = res->sibling;
- }
- spin_unlock(&bootmem_resource_lock);
-
- if (res)
- memset(res, 0, sizeof(struct resource));
- else
- res = kzalloc(sizeof(struct resource), flags);
-
- return res;
+ return kzalloc(sizeof(struct resource), flags);
}
/* Return the conflict entry if you can't request it */
--
2.22.0
The CVE itself and its later fixes.
Duoming Zhou (1):
drivers: staging: rtl8723bs: Fix deadlock in
rtw_surveydone_event_callback()
Hans de Goede (1):
drivers: staging: rtl8723bs: Fix locking in rtw_scan_timeout_handler()
drivers/staging/rtl8723bs/core/rtw_mlme.c | 2 ++
1 file changed, 2 insertions(+)
--
2.22.0
26 Jan '26
hulk inclusion
category: bugfix
Link: https://gitee.com/openeuler/kernel/issues/ICBFCS
CVE: NA
--------------------------------
Initialize the flow table when rps_policy is enabled, and add the
vecls_flow_enabled static key to optimize performance.
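A minimal sketch of the static-key pattern relied on here (illustrative
only; the patch declares the key in net/core/dev.c and flips it from
vecls_flow_res_init()/vecls_flow_res_clean()):

    #include <linux/jump_label.h>

    DEFINE_STATIC_KEY_FALSE(vecls_flow_enabled);

    static bool hot_path_classify(void)
    {
        /* compiles to a patched NOP/jump: near-zero cost while disabled */
        if (!static_branch_unlikely(&vecls_flow_enabled))
            return false;
        /* ... per-packet flow-table lookup only when enabled ... */
        return true;
    }

    /* enable on init, disable on cleanup:
     *   static_branch_inc(&vecls_flow_enabled);
     *   static_branch_dec(&vecls_flow_enabled);
     */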
Signed-off-by: Yue Haibing <yuehaibing(a)huawei.com>
---
include/linux/venetcls.h | 3 +--
net/core/dev.c | 14 ++++++++++----
net/venetcls/venetcls_flow.c | 22 +++++++++++++---------
net/venetcls/venetcls_main.c | 4 ++--
4 files changed, 26 insertions(+), 17 deletions(-)
diff --git a/include/linux/venetcls.h b/include/linux/venetcls.h
index acbffdb91ee8..fdafe47e8f9f 100644
--- a/include/linux/venetcls.h
+++ b/include/linux/venetcls.h
@@ -16,6 +16,7 @@ struct vecls_hook_ops {
typedef int (*enqueue_f)(struct sk_buff *skb, int cpu, unsigned int *qtail);
extern const struct vecls_hook_ops __rcu *vecls_ops;
extern struct static_key_false vecls_localrps_needed;
+extern struct static_key_false vecls_flow_enabled;
static inline void venetcls_cfg_rxcls(struct sock *sk, int is_del)
{
@@ -75,8 +76,6 @@ venetcls_skb_set_localcpu(struct sk_buff *skb, enqueue_f enq_func, int *ret)
struct net_device *dev = skb->dev;
bool result = false;
- if (!static_branch_unlikely(&vecls_localrps_needed))
- return result;
if (!dev || !(dev->type == ARPHRD_LOOPBACK && dev->flags & IFF_LOOPBACK))
return result;
diff --git a/net/core/dev.c b/net/core/dev.c
index b62fcd0a6daf..10445e98c8a4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -166,6 +166,8 @@ const struct vecls_hook_ops __rcu *vecls_ops __read_mostly;
EXPORT_SYMBOL_GPL(vecls_ops);
struct static_key_false vecls_localrps_needed __read_mostly;
EXPORT_SYMBOL(vecls_localrps_needed);
+struct static_key_false vecls_flow_enabled __read_mostly;
+EXPORT_SYMBOL(vecls_flow_enabled);
#endif
static DEFINE_SPINLOCK(ptype_lock);
@@ -5889,9 +5891,11 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
rcu_read_lock();
#if IS_ENABLED(CONFIG_VENETCLS)
- if (venetcls_skb_set_cpu(skb, enqueue_to_backlog, &ret)) {
- rcu_read_unlock();
- return ret;
+ if (static_branch_unlikely(&vecls_flow_enabled)) {
+ if (venetcls_skb_set_cpu(skb, enqueue_to_backlog, &ret)) {
+ rcu_read_unlock();
+ return ret;
+ }
}
#endif
#ifdef CONFIG_RPS
@@ -5927,7 +5931,9 @@ static void netif_receive_skb_list_internal(struct list_head *head)
rcu_read_lock();
#if IS_ENABLED(CONFIG_VENETCLS)
- venetcls_skblist_set_cpu(head, enqueue_to_backlog);
+ if (static_branch_unlikely(&vecls_flow_enabled)) {
+ venetcls_skblist_set_cpu(head, enqueue_to_backlog);
+ }
#endif
#ifdef CONFIG_RPS
if (static_branch_unlikely(&rps_needed)) {
diff --git a/net/venetcls/venetcls_flow.c b/net/venetcls/venetcls_flow.c
index 758067a7c6f1..9562dc9ae03c 100644
--- a/net/venetcls/venetcls_flow.c
+++ b/net/venetcls/venetcls_flow.c
@@ -122,13 +122,13 @@ void _vecls_flow_update(struct sock *sk, struct sk_buff *skb)
rcu_read_unlock();
}
-static int flow_get_queue_idx(struct net_device *dev, int nid, struct sk_buff *skb)
+static int flow_get_queue_idx(struct net_device *dev, int nid, struct sk_buff *skb, u32 hash)
{
struct vecls_numa_bound_dev_info *bound_dev = NULL;
struct vecls_netdev_info *vecls_dev;
struct vecls_numa_info *numa_info;
int i, devid, rxq_num, rxq_id;
- u32 hash, index;
+ u32 index;
numa_info = get_vecls_numa_info(nid);
if (!numa_info)
@@ -154,7 +154,6 @@ static int flow_get_queue_idx(struct net_device *dev, int nid, struct sk_buff *s
}
if (rxq_num == 0)
return -1;
- hash = skb_get_hash(skb);
index = hash % rxq_num;
i = 0;
@@ -170,19 +169,19 @@ static int flow_get_queue_idx(struct net_device *dev, int nid, struct sk_buff *s
}
static void set_vecls_cpu(struct net_device *dev, struct sk_buff *skb,
- struct vecls_dev_flow *old_rflow, int old_rxq_id, u16 next_cpu)
+ struct vecls_dev_flow *old_rflow, int old_rxq_id, u16 next_cpu, u32 hash)
{
struct netdev_rx_queue *rxqueue;
struct vecls_dev_flow_table *dtb;
struct vecls_dev_flow *rflow;
- u32 flow_id, hash;
int rxq_index, rc;
+ u32 flow_id;
if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
!(dev->features & NETIF_F_NTUPLE))
return;
- rxq_index = flow_get_queue_idx(dev, cpu_to_node(next_cpu), skb);
+ rxq_index = flow_get_queue_idx(dev, cpu_to_node(next_cpu), skb, hash);
if (rxq_index == skb_get_rx_queue(skb) || rxq_index < 0) {
vecls_debug("%s skb:%p, old_rxq:%d, next_cpu:%d new_rxq:%d\n",
__func__, skb, old_rxq_id, next_cpu, rxq_index);
@@ -194,7 +193,6 @@ static void set_vecls_cpu(struct net_device *dev, struct sk_buff *skb,
if (!dtb)
return;
- hash = skb_get_hash(skb);
flow_id = hash & dtb->mask;
rflow = &dtb->flows[flow_id];
@@ -245,7 +243,6 @@ static void __vecls_set_cpu(struct sk_buff *skb, struct net_device *ndev,
u32 last_recv_cpu, hash, val, cpu, tcpu, newcpu;
struct vecls_dev_flow *rflow;
- cpu = raw_smp_processor_id();
skb_reset_network_header(skb);
hash = skb_get_hash(skb);
if (!hash)
@@ -260,17 +257,22 @@ static void __vecls_set_cpu(struct sk_buff *skb, struct net_device *ndev,
return;
newcpu = get_rps_cpu(last_recv_cpu, hash, rps_policy);
+ if (rps_policy)
+ *rcpu = newcpu;
+ vecls_debug("last:%u curcpu:%d newcpu:%d rcpu:%d\n",
+ last_recv_cpu, raw_smp_processor_id(), newcpu, *rcpu);
if (rflow->isvalid && cpu_to_node(rflow->cpu) == cpu_to_node(newcpu)) {
rflow->timeout = jiffies;
return;
}
+ cpu = raw_smp_processor_id();
if (cpu_to_node(cpu) == cpu_to_node(newcpu))
return;
if (tcpu >= nr_cpu_ids)
- set_vecls_cpu(ndev, skb, rflow, old_rxq_id, newcpu);
+ set_vecls_cpu(ndev, skb, rflow, old_rxq_id, newcpu, hash);
}
static inline void do_loopback_rps(struct sk_buff *skb, int *rcpu)
@@ -618,6 +620,7 @@ int vecls_flow_res_init(void)
if (mode != 0) //for lo rps
RCU_INIT_POINTER(vecls_ops, &vecls_flow_ops);
synchronize_rcu();
+ static_branch_inc(&vecls_flow_enabled);
return 0;
clean:
@@ -627,6 +630,7 @@ int vecls_flow_res_init(void)
void vecls_flow_res_clean(void)
{
+ static_branch_dec(&vecls_flow_enabled);
RCU_INIT_POINTER(vecls_ops, NULL);
synchronize_rcu();
vecls_sock_flow_table_release();
diff --git a/net/venetcls/venetcls_main.c b/net/venetcls/venetcls_main.c
index 00ec0b0e2498..d75f1fb9fff7 100644
--- a/net/venetcls/venetcls_main.c
+++ b/net/venetcls/venetcls_main.c
@@ -1125,7 +1125,7 @@ static __init int vecls_init(void)
err = vecls_ntuple_res_init();
if (err)
goto clean_rxq;
- if (lo_rps_policy)
+ if (lo_rps_policy || rps_policy)
err = vecls_flow_res_init();
} else {
err = vecls_flow_res_init();
@@ -1163,7 +1163,7 @@ static __exit void vecls_exit(void)
#endif
if (mode == 0) {
vecls_ntuple_res_clean();
- if (lo_rps_policy)
+ if (lo_rps_policy || rps_policy)
vecls_flow_res_clean();
} else {
vecls_flow_res_clean();
--
2.34.1
26 Jan '26
From: Guangshuo Li <lgs201920130244(a)gmail.com>
mainline inclusion
from mainline-v6.19-rc4
commit 9c72a5182ed92904d01057f208c390a303f00a0f
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13404
CVE: CVE-2025-71093
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
In e1000_tbi_should_accept() we read the last byte of the frame via
'data[length - 1]' to evaluate the TBI workaround. If the descriptor-
reported length is zero or larger than the actual RX buffer size, this
read goes out of bounds and can hit unrelated slab objects. The issue
is observed from the NAPI receive path (e1000_clean_rx_irq):
==================================================================
BUG: KASAN: slab-out-of-bounds in e1000_tbi_should_accept+0x610/0x790
Read of size 1 at addr ffff888014114e54 by task sshd/363
CPU: 0 PID: 363 Comm: sshd Not tainted 5.18.0-rc1 #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014
Call Trace:
<IRQ>
dump_stack_lvl+0x5a/0x74
print_address_description+0x7b/0x440
print_report+0x101/0x200
kasan_report+0xc1/0xf0
e1000_tbi_should_accept+0x610/0x790
e1000_clean_rx_irq+0xa8c/0x1110
e1000_clean+0xde2/0x3c10
__napi_poll+0x98/0x380
net_rx_action+0x491/0xa20
__do_softirq+0x2c9/0x61d
do_softirq+0xd1/0x120
</IRQ>
<TASK>
__local_bh_enable_ip+0xfe/0x130
ip_finish_output2+0x7d5/0xb00
__ip_queue_xmit+0xe24/0x1ab0
__tcp_transmit_skb+0x1bcb/0x3340
tcp_write_xmit+0x175d/0x6bd0
__tcp_push_pending_frames+0x7b/0x280
tcp_sendmsg_locked+0x2e4f/0x32d0
tcp_sendmsg+0x24/0x40
sock_write_iter+0x322/0x430
vfs_write+0x56c/0xa60
ksys_write+0xd1/0x190
do_syscall_64+0x43/0x90
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x7f511b476b10
Code: 73 01 c3 48 8b 0d 88 d3 2b 00 f7 d8 64 89 01 48 83 c8 ff c3 66 0f 1f 44 00 00 83 3d f9 2b 2c 00 00 75 10 b8 01 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 31 c3 48 83 ec 08 e8 8e 9b 01 00 48 89 04 24
RSP: 002b:00007ffc9211d4e8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
RAX: ffffffffffffffda RBX: 0000000000004024 RCX: 00007f511b476b10
RDX: 0000000000004024 RSI: 0000559a9385962c RDI: 0000000000000003
RBP: 0000559a9383a400 R08: fffffffffffffff0 R09: 0000000000004f00
R10: 0000000000000070 R11: 0000000000000246 R12: 0000000000000000
R13: 00007ffc9211d57f R14: 0000559a9347bde7 R15: 0000000000000003
</TASK>
Allocated by task 1:
__kasan_krealloc+0x131/0x1c0
krealloc+0x90/0xc0
add_sysfs_param+0xcb/0x8a0
kernel_add_sysfs_param+0x81/0xd4
param_sysfs_builtin+0x138/0x1a6
param_sysfs_init+0x57/0x5b
do_one_initcall+0x104/0x250
do_initcall_level+0x102/0x132
do_initcalls+0x46/0x74
kernel_init_freeable+0x28f/0x393
kernel_init+0x14/0x1a0
ret_from_fork+0x22/0x30
The buggy address belongs to the object at ffff888014114000
which belongs to the cache kmalloc-2k of size 2048
The buggy address is located 1620 bytes to the right of
2048-byte region [ffff888014114000, ffff888014114800]
The buggy address belongs to the physical page:
page:ffffea0000504400 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x14110
head:ffffea0000504400 order:3 compound_mapcount:0 compound_pincount:0
flags: 0x100000000010200(slab|head|node=0|zone=1)
raw: 0100000000010200 0000000000000000 dead000000000001 ffff888013442000
raw: 0000000000000000 0000000000080008 00000001ffffffff 0000000000000000
page dumped because: kasan: bad access detected
==================================================================
This happens because the TBI check unconditionally dereferences the last
byte without validating the reported length first:
u8 last_byte = *(data + length - 1);
Fix by rejecting the frame early if the length is zero, or if it exceeds
adapter->rx_buffer_len. This preserves the TBI workaround semantics for
valid frames and prevents touching memory beyond the RX buffer.
Fixes: 2037110c96d5 ("e1000: move tbi workaround code into helper function")
Cc: stable(a)vger.kernel.org
Signed-off-by: Guangshuo Li <lgs201920130244(a)gmail.com>
Reviewed-by: Simon Horman <horms(a)kernel.org>
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov(a)intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen(a)intel.com>
Signed-off-by: Pu Lehui <pulehui(a)huawei.com>
---
drivers/net/ethernet/intel/e1000/e1000_main.c | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 18b61be9e0b9..4da32f0b99e1 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4100,7 +4100,15 @@ static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
u32 length, const u8 *data)
{
struct e1000_hw *hw = &adapter->hw;
- u8 last_byte = *(data + length - 1);
+ u8 last_byte;
+
+ /* Guard against OOB on data[length - 1] */
+ if (unlikely(!length))
+ return false;
+ /* Upper bound: length must not exceed rx_buffer_len */
+ if (unlikely(length > adapter->rx_buffer_len))
+ return false;
+ last_byte = *(data + length - 1);
if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
unsigned long irq_flags;
--
2.34.1
26 Jan '26
From: Kohei Enju <enjuk(a)amazon.com>
stable inclusion
from stable-v5.10.248
commit ceb8459df28d22c225a82d74c0f725f2a935d194
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13398
CVE: CVE-2025-71087
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 6daa2893f323981c7894c68440823326e93a7d61 ]
There are off-by-one bugs when configuring the RSS hash key and lookup
table, causing out-of-bounds reads from memory [1] and out-of-bounds
writes to device registers.
Before commit 43a3d9ba34c9 ("i40evf: Allow PF driver to configure RSS"),
the loop upper bounds were:
i <= I40E_VFQF_{HKEY,HLUT}_MAX_INDEX
which is safe since the value is the last valid index.
That commit changed the bounds to:
i <= adapter->rss_{key,lut}_size / 4
where `rss_{key,lut}_size / 4` is the number of dwords, so the last
valid index is `(rss_{key,lut}_size / 4) - 1`. Therefore, using `<=`
accesses one element past the end.
Fix the issues by using `<` instead of `<=`, ensuring we do not exceed
the bounds.
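A worked example of the bound (the 52-byte key size matches the splat
below):

    #include <stdio.h>

    int main(void)
    {
        unsigned int rss_key_size = 52;       /* bytes */
        unsigned int ndw = rss_key_size / 4;  /* 13 dwords: dw[0] .. dw[12] */

        printf("old bound i <= %u touches dw[%u], one past the end\n", ndw, ndw);
        printf("fixed bound i < %u stops at dw[%u]\n", ndw, ndw - 1);
        return 0;
    }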
[1] KASAN splat about rss_key_size off-by-one
BUG: KASAN: slab-out-of-bounds in iavf_config_rss+0x619/0x800
Read of size 4 at addr ffff888102c50134 by task kworker/u8:6/63
CPU: 0 UID: 0 PID: 63 Comm: kworker/u8:6 Not tainted 6.18.0-rc2-enjuk-tnguy-00378-g3005f5b77652-dirty #156 PREEMPT(voluntary)
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
Workqueue: iavf iavf_watchdog_task
Call Trace:
<TASK>
dump_stack_lvl+0x6f/0xb0
print_report+0x170/0x4f3
kasan_report+0xe1/0x1a0
iavf_config_rss+0x619/0x800
iavf_watchdog_task+0x2be7/0x3230
process_one_work+0x7fd/0x1420
worker_thread+0x4d1/0xd40
kthread+0x344/0x660
ret_from_fork+0x249/0x320
ret_from_fork_asm+0x1a/0x30
</TASK>
Allocated by task 63:
kasan_save_stack+0x30/0x50
kasan_save_track+0x14/0x30
__kasan_kmalloc+0x7f/0x90
__kmalloc_noprof+0x246/0x6f0
iavf_watchdog_task+0x28fc/0x3230
process_one_work+0x7fd/0x1420
worker_thread+0x4d1/0xd40
kthread+0x344/0x660
ret_from_fork+0x249/0x320
ret_from_fork_asm+0x1a/0x30
The buggy address belongs to the object at ffff888102c50100
which belongs to the cache kmalloc-64 of size 64
The buggy address is located 0 bytes to the right of
allocated 52-byte region [ffff888102c50100, ffff888102c50134)
The buggy address belongs to the physical page:
page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x102c50
flags: 0x200000000000000(node=0|zone=2)
page_type: f5(slab)
raw: 0200000000000000 ffff8881000418c0 dead000000000122 0000000000000000
raw: 0000000000000000 0000000080200020 00000000f5000000 0000000000000000
page dumped because: kasan: bad access detected
Memory state around the buggy address:
ffff888102c50000: 00 00 00 00 00 00 00 fc fc fc fc fc fc fc fc fc
ffff888102c50080: 00 00 00 00 00 00 00 fc fc fc fc fc fc fc fc fc
>ffff888102c50100: 00 00 00 00 00 00 04 fc fc fc fc fc fc fc fc fc
^
ffff888102c50180: 00 00 00 00 00 00 00 00 fc fc fc fc fc fc fc fc
ffff888102c50200: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
Fixes: 43a3d9ba34c9 ("i40evf: Allow PF driver to configure RSS")
Signed-off-by: Kohei Enju <enjuk(a)amazon.com>
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov(a)intel.com>
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel(a)intel.com>
Tested-by: Rafal Romanowski <rafal.romanowski(a)intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen(a)intel.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Pu Lehui <pulehui(a)huawei.com>
---
drivers/net/ethernet/intel/iavf/iavf_main.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 65259722a572..4ed93c7f81d2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1262,11 +1262,11 @@ static int iavf_config_rss_reg(struct iavf_adapter *adapter)
u16 i;
dw = (u32 *)adapter->rss_key;
- for (i = 0; i <= adapter->rss_key_size / 4; i++)
+ for (i = 0; i < adapter->rss_key_size / 4; i++)
wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
dw = (u32 *)adapter->rss_lut;
- for (i = 0; i <= adapter->rss_lut_size / 4; i++)
+ for (i = 0; i < adapter->rss_lut_size / 4; i++)
wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
iavf_flush(hw);
--
2.34.1
From: Guangshuo Li <lgs201920130244(a)gmail.com>
stable inclusion
from stable-v5.10.248
commit 4ccfa56f272241e8d8e2c38191fdbb03df489d80
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13404
CVE: CVE-2025-71093
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 9c72a5182ed92904d01057f208c390a303f00a0f upstream.
In e1000_tbi_should_accept() we read the last byte of the frame via
'data[length - 1]' to evaluate the TBI workaround. If the descriptor-
reported length is zero or larger than the actual RX buffer size, this
read goes out of bounds and can hit unrelated slab objects. The issue
is observed from the NAPI receive path (e1000_clean_rx_irq):
==================================================================
BUG: KASAN: slab-out-of-bounds in e1000_tbi_should_accept+0x610/0x790
Read of size 1 at addr ffff888014114e54 by task sshd/363
CPU: 0 PID: 363 Comm: sshd Not tainted 5.18.0-rc1 #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014
Call Trace:
<IRQ>
dump_stack_lvl+0x5a/0x74
print_address_description+0x7b/0x440
print_report+0x101/0x200
kasan_report+0xc1/0xf0
e1000_tbi_should_accept+0x610/0x790
e1000_clean_rx_irq+0xa8c/0x1110
e1000_clean+0xde2/0x3c10
__napi_poll+0x98/0x380
net_rx_action+0x491/0xa20
__do_softirq+0x2c9/0x61d
do_softirq+0xd1/0x120
</IRQ>
<TASK>
__local_bh_enable_ip+0xfe/0x130
ip_finish_output2+0x7d5/0xb00
__ip_queue_xmit+0xe24/0x1ab0
__tcp_transmit_skb+0x1bcb/0x3340
tcp_write_xmit+0x175d/0x6bd0
__tcp_push_pending_frames+0x7b/0x280
tcp_sendmsg_locked+0x2e4f/0x32d0
tcp_sendmsg+0x24/0x40
sock_write_iter+0x322/0x430
vfs_write+0x56c/0xa60
ksys_write+0xd1/0x190
do_syscall_64+0x43/0x90
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x7f511b476b10
Code: 73 01 c3 48 8b 0d 88 d3 2b 00 f7 d8 64 89 01 48 83 c8 ff c3 66 0f 1f 44 00 00 83 3d f9 2b 2c 00 00 75 10 b8 01 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 31 c3 48 83 ec 08 e8 8e 9b 01 00 48 89 04 24
RSP: 002b:00007ffc9211d4e8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
RAX: ffffffffffffffda RBX: 0000000000004024 RCX: 00007f511b476b10
RDX: 0000000000004024 RSI: 0000559a9385962c RDI: 0000000000000003
RBP: 0000559a9383a400 R08: fffffffffffffff0 R09: 0000000000004f00
R10: 0000000000000070 R11: 0000000000000246 R12: 0000000000000000
R13: 00007ffc9211d57f R14: 0000559a9347bde7 R15: 0000000000000003
</TASK>
Allocated by task 1:
__kasan_krealloc+0x131/0x1c0
krealloc+0x90/0xc0
add_sysfs_param+0xcb/0x8a0
kernel_add_sysfs_param+0x81/0xd4
param_sysfs_builtin+0x138/0x1a6
param_sysfs_init+0x57/0x5b
do_one_initcall+0x104/0x250
do_initcall_level+0x102/0x132
do_initcalls+0x46/0x74
kernel_init_freeable+0x28f/0x393
kernel_init+0x14/0x1a0
ret_from_fork+0x22/0x30
The buggy address belongs to the object at ffff888014114000
which belongs to the cache kmalloc-2k of size 2048
The buggy address is located 1620 bytes to the right of
2048-byte region [ffff888014114000, ffff888014114800]
The buggy address belongs to the physical page:
page:ffffea0000504400 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x14110
head:ffffea0000504400 order:3 compound_mapcount:0 compound_pincount:0
flags: 0x100000000010200(slab|head|node=0|zone=1)
raw: 0100000000010200 0000000000000000 dead000000000001 ffff888013442000
raw: 0000000000000000 0000000000080008 00000001ffffffff 0000000000000000
page dumped because: kasan: bad access detected
==================================================================
This happens because the TBI check unconditionally dereferences the last
byte without validating the reported length first:
u8 last_byte = *(data + length - 1);
Fix by rejecting the frame early if the length is zero, or if it exceeds
adapter->rx_buffer_len. This preserves the TBI workaround semantics for
valid frames and prevents touching memory beyond the RX buffer.
Fixes: 2037110c96d5 ("e1000: move tbi workaround code into helper function")
Cc: stable(a)vger.kernel.org
Signed-off-by: Guangshuo Li <lgs201920130244(a)gmail.com>
Reviewed-by: Simon Horman <horms(a)kernel.org>
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov(a)intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen(a)intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Pu Lehui <pulehui(a)huawei.com>
---
drivers/net/ethernet/intel/e1000/e1000_main.c | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 5e28cf4fa2cd..0b7502902913 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4090,7 +4090,15 @@ static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
u32 length, const u8 *data)
{
struct e1000_hw *hw = &adapter->hw;
- u8 last_byte = *(data + length - 1);
+ u8 last_byte;
+
+ /* Guard against OOB on data[length - 1] */
+ if (unlikely(!length))
+ return false;
+ /* Upper bound: length must not exceed rx_buffer_len */
+ if (unlikely(length > adapter->rx_buffer_len))
+ return false;
+ last_byte = *(data + length - 1);
if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
unsigned long irq_flags;
--
2.34.1
[PATCH OLK-6.6] [Backport] ASoC: SOF: Intel: hda-dai: Ensure DAI widget is valid during params
by Lin Ruifeng 26 Jan '26
From: Bard Liao <yung-chuan.liao(a)linux.intel.com>
stable inclusion
from stable-v6.12.14
commit e012a77e4d7632cf615ba9625b1600ed8985c3b5
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/30
CVE: CVE-2024-58012
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 569922b82ca660f8b24e705f6cf674e6b1f99cc7 ]
Each cpu DAI should associate with a widget. However, the topology might
not create the right number of DAI widgets for aggregated amps, which
will cause a NULL pointer dereference.
Check that the DAI widget associated with the CPU DAI is valid to prevent
a NULL pointer dereference due to missing DAI widgets in topologies with
aggregated amps.
Signed-off-by: Bard Liao <yung-chuan.liao(a)linux.intel.com>
Reviewed-by: Ranjani Sridharan <ranjani.sridharan(a)linux.intel.com>
Reviewed-by: Péter Ujfalusi <peter.ujfalusi(a)linux.intel.com>
Reviewed-by: Liam Girdwood <liam.r.girdwood(a)intel.com>
Link: https://patch.msgid.link/20241203104853.56956-1-yung-chuan.liao@linux.intel…
Signed-off-by: Mark Brown <broonie(a)kernel.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Conflicts:
sound/soc/sof/intel/hda-dai.c
sound/soc/sof/intel/hda.c
Signed-off-by: Lin Ruifeng <linruifeng4(a)huawei.com>
---
sound/soc/sof/intel/hda-dai.c | 6 ++++++
sound/soc/sof/intel/hda.c | 5 +++++
2 files changed, 11 insertions(+)
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
index 19ec1a45737e..cebd9db94215 100644
--- a/sound/soc/sof/intel/hda-dai.c
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -439,6 +439,12 @@ int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_sof_dev *sdev;
int ret;
+ if (!w) {
+ dev_err(cpu_dai->dev, "%s widget not found, check amp link num in the topology\n",
+ cpu_dai->name);
+ return -EINVAL;
+ }
+
ret = non_hda_dai_hw_params(substream, params, cpu_dai);
if (ret < 0) {
dev_err(cpu_dai->dev, "%s: non_hda_dai_hw_params failed %d\n", __func__, ret);
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index 15e6779efaa3..aa3f9c961173 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -102,6 +102,11 @@ static int sdw_params_stream(struct device *dev,
struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(d, params_data->substream->stream);
struct snd_sof_dai_config_data data = { 0 };
+ if (!w) {
+ dev_err(dev, "%s widget not found, check amp link num in the topology\n",
+ d->name);
+ return -EINVAL;
+ }
data.dai_index = (params_data->link_id << 8) | d->id;
data.dai_data = params_data->alh_stream_id;
--
2.43.0
26 Jan '26
Commit 35c18f2933c5 ("Add a new optional ",cma" suffix to the
crashkernel= command line option") and commit ab475510e042 ("kdump:
implement reserve_crashkernel_cma") added CMA support for kdump
crashkernel reservation.
Crash kernel memory reservation wastes production resources if too
large, risks kdump failure if too small, and faces allocation difficulties
on fragmented systems due to contiguous block constraints. The new CMA-based
crashkernel reservation scheme splits the "large fixed reservation" into
a "small fixed region + large CMA dynamic region": the CMA memory is
available to userspace during normal operation to avoid waste, and is
reclaimed for kdump upon crash, saving memory while improving reliability.
So extend crashkernel CMA reservation support to arm64. The following changes
are made to enable CMA reservation:
- Parse and obtain the CMA reservation size along with other crashkernel
parameters.
- Call reserve_crashkernel_cma() to allocate the CMA region for kdump.
- Include the CMA-reserved ranges for kdump kernel to use.
- Exclude the CMA-reserved ranges from the crash kernel memory to
prevent them from being exported through /proc/vmcore.
Update kernel-parameters.txt to document CMA support for crashkernel on
arm64 architecture.
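A hypothetical command line on arm64 would then look like the following
(sizes are illustrative):

    crashkernel=256M crashkernel=1G,cma

This reserves a 256M fixed crash-kernel region plus a 1G CMA region that
remains usable for userspace and movable allocations until a crash, at
which point it is handed to the kdump kernel.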
Signed-off-by: Jinjie Ruan <ruanjinjie(a)huawei.com>
---
v2:
- Free cmem in prepare_elf_headers()
- Add the motivation.
---
Documentation/admin-guide/kernel-parameters.txt | 2 +-
arch/arm64/kernel/machine_kexec_file.c | 15 ++++++++++++++-
arch/arm64/mm/init.c | 5 +++--
3 files changed, 18 insertions(+), 4 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 1058f2a6d6a8..36bb642a7edd 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1119,7 +1119,7 @@ Kernel parameters
It will be ignored when crashkernel=X,high is not used
or memory reserved is below 4G.
crashkernel=size[KMG],cma
- [KNL, X86, ppc] Reserve additional crash kernel memory from
+ [KNL, X86, ARM64, ppc] Reserve additional crash kernel memory from
CMA. This reservation is usable by the first system's
userspace memory and kernel movable allocations (memory
balloon, zswap). Pages allocated from this memory range
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 410060ebd86d..ef6ce9aaba80 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -48,7 +48,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
u64 i;
phys_addr_t start, end;
- nr_ranges = 2; /* for exclusion of crashkernel region */
+ nr_ranges = 2 + crashk_cma_cnt; /* for exclusion of crashkernel region */
for_each_mem_range(i, &start, &end)
nr_ranges++;
@@ -64,6 +64,12 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
cmem->nr_ranges++;
}
+ for (i = 0; i < crashk_cma_cnt; i++) {
+ cmem->ranges[cmem->nr_ranges].start = crashk_cma_ranges[i].start;
+ cmem->ranges[cmem->nr_ranges].end = crashk_cma_ranges[i].end;
+ cmem->nr_ranges++;
+ }
+
/* Exclude crashkernel region */
ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
if (ret)
@@ -75,6 +81,13 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
goto out;
}
+ for (i = 0; i < crashk_cma_cnt; ++i) {
+ ret = crash_exclude_mem_range(cmem, crashk_cma_ranges[i].start,
+ crashk_cma_ranges[i].end);
+ if (ret)
+ goto out;
+ }
+
ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
out:
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 524d34a0e921..28165d94af08 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -96,8 +96,8 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;
static void __init arch_reserve_crashkernel(void)
{
+ unsigned long long crash_base, crash_size, cma_size = 0;
unsigned long long low_size = 0;
- unsigned long long crash_base, crash_size;
bool high = false;
int ret;
@@ -106,11 +106,12 @@ static void __init arch_reserve_crashkernel(void)
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&crash_size, &crash_base,
- &low_size, NULL, &high);
+ &low_size, &cma_size, &high);
if (ret)
return;
reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
+ reserve_crashkernel_cma(cma_size);
}
static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit)
--
2.34.1
[PATCH OLK-5.10] KVM: x86: use array_index_nospec with indices that come from guest
by Zhang Yuwei 24 Jan '26
From: Thijs Raymakers <thijs(a)raymakers.nl>
stable inclusion
from stable-v5.10.242
commit 31a0ad2f60cb4816e06218b63e695eb72ce74974
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/8676
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit c87bd4dd43a624109c3cc42d843138378a7f4548 upstream.
min and dest_id are guest-controlled indices. Using array_index_nospec()
after the bounds checks clamps these values to mitigate speculative execution
side-channels.
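Distilled, the hardening pattern looks like this (a sketch using the
fields from the hunks below, not the kernel code verbatim):

    #include <linux/nospec.h>

    static struct kvm_lapic *apic_lookup(struct kvm_apic_map *map,
                                         unsigned long dest_id)
    {
        if (dest_id > map->max_apic_id)  /* architectural bounds check */
            return NULL;
        /* clamp so a mispredicted branch cannot index past phys_map[] */
        dest_id = array_index_nospec(dest_id, map->max_apic_id + 1);
        return map->phys_map[dest_id];   /* may still be NULL */
    }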
Signed-off-by: Thijs Raymakers <thijs(a)raymakers.nl>
Cc: stable(a)vger.kernel.org
Cc: Sean Christopherson <seanjc(a)google.com>
Cc: Paolo Bonzini <pbonzini(a)redhat.com>
Cc: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Fixes: 715062970f37 ("KVM: X86: Implement PV sched yield hypercall")
Fixes: bdf7ffc89922 ("KVM: LAPIC: Fix pv ipis out-of-bounds access")
Fixes: 4180bf1b655a ("KVM: X86: Implement "send IPI" hypercall")
Link: https://lore.kernel.org/r/20250804064405.4802-1-thijs@raymakers.nl
Signed-off-by: Sean Christopherson <seanjc(a)google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Liu Mingrui <liumingrui(a)huawei.com>
---
arch/x86/kvm/lapic.c | 2 ++
arch/x86/kvm/x86.c | 7 +++++--
2 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 0c76022ea866..087e05214d8e 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -610,6 +610,8 @@ static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
if (min > map->max_apic_id)
return 0;
+ min = array_index_nospec(min, map->max_apic_id + 1);
+
for_each_set_bit(i, ipi_bitmap,
min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
if (map->phys_map[min + i]) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6a7e91116690..fc745dd45732 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8797,8 +8797,11 @@ static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id)
rcu_read_lock();
map = rcu_dereference(kvm->arch.apic_map);
- if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id])
- target = map->phys_map[dest_id]->vcpu;
+ if (likely(map) && dest_id <= map->max_apic_id) {
+ dest_id = array_index_nospec(dest_id, map->max_apic_id + 1);
+ if (map->phys_map[dest_id])
+ target = map->phys_map[dest_id]->vcpu;
+ }
rcu_read_unlock();
--
2.22.0
24 Jan '26
From: Ben Skeggs <bskeggs(a)redhat.com>
mainline inclusion
from mainline-v6.5-rc3
commit ea293f823a8805735d9e00124df81a8f448ed1ae
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13097
CVE: CVE-2023-54263
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
Fixes OOPS on boards with ANX9805 DP encoders.
Cc: stable(a)vger.kernel.org # 6.4+
Signed-off-by: Ben Skeggs <bskeggs(a)redhat.com>
Reviewed-by: Karol Herbst <kherbst(a)redhat.com>
Signed-off-by: Karol Herbst <kherbst(a)redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230719044051.6975-3-skeggsb…
Conflicts:
drivers/gpu/drm/nouveau/dispnv50/disp.c
[commit 1b255f1ccc883 not merged]
Signed-off-by: Zhang Yuwei <zhangyuwei20(a)huawei.com>
---
drivers/gpu/drm/nouveau/dispnv50/disp.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 0ac120225b4d..670ebd47b595 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1965,7 +1965,10 @@ nv50_pior_help = {
static void
nv50_pior_destroy(struct drm_encoder *encoder)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
drm_encoder_cleanup(encoder);
+ mutex_destroy(&nv_encoder->dp.hpd_irq_lock);
kfree(encoder);
}
@@ -2010,6 +2013,8 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
nv_encoder->i2c = ddc;
nv_encoder->aux = aux;
+ mutex_init(&nv_encoder->dp.hpd_irq_lock);
+
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
--
2.22.0
[PATCH OLK-6.6] ksmbd: fix use-after-free in ksmbd_tree_connect_put under concurrency
by Li Lingfeng 23 Jan '26
From: Namjae Jeon <linkinjeon(a)kernel.org>
stable inclusion
from stable-v6.6.120
commit d64977495e44855f2b28d8ce56107c963a7a50e4
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13369
CVE: CVE-2025-68817
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit b39a1833cc4a2755b02603eec3a71a85e9dff926 ]
Under high concurrency, a tree-connection object (tcon) is freed on
a disconnect path while another path still holds a reference and later
executes *_put()/write on it.
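The fix ties the object's lifetime to a plain reference count, so
whichever path drops the last reference frees it. A minimal sketch of
the resulting pattern (simplified from the diff below):
void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon)
{
	/* Free only on the final reference drop; there is no longer a
	 * waitqueue handshake for a racing holder to trip over. */
	if (atomic_dec_and_test(&tcon->refcount))
		kfree(tcon);
}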
Reported-by: Qianchang Zhao <pioooooooooip(a)gmail.com>
Reported-by: Zhitong Liu <liuzhitong1993(a)gmail.com>
Signed-off-by: Namjae Jeon <linkinjeon(a)kernel.org>
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Li Lingfeng <lilingfeng3(a)huawei.com>
---
fs/smb/server/mgmt/tree_connect.c | 18 ++++--------------
fs/smb/server/mgmt/tree_connect.h | 1 -
fs/smb/server/smb2pdu.c | 3 ---
3 files changed, 4 insertions(+), 18 deletions(-)
diff --git a/fs/smb/server/mgmt/tree_connect.c b/fs/smb/server/mgmt/tree_connect.c
index 94a52a75014a..9bde1b58f9c4 100644
--- a/fs/smb/server/mgmt/tree_connect.c
+++ b/fs/smb/server/mgmt/tree_connect.c
@@ -77,7 +77,6 @@ ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name)
tree_conn->t_state = TREE_NEW;
status.tree_conn = tree_conn;
atomic_set(&tree_conn->refcount, 1);
- init_waitqueue_head(&tree_conn->refcount_q);
ret = xa_err(xa_store(&sess->tree_conns, tree_conn->id, tree_conn,
GFP_KERNEL));
@@ -99,14 +98,8 @@ ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name)
void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon)
{
- /*
- * Checking waitqueue to releasing tree connect on
- * tree disconnect. waitqueue_active is safe because it
- * uses atomic operation for condition.
- */
- if (!atomic_dec_return(&tcon->refcount) &&
- waitqueue_active(&tcon->refcount_q))
- wake_up(&tcon->refcount_q);
+ if (atomic_dec_and_test(&tcon->refcount))
+ kfree(tcon);
}
int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
@@ -118,14 +111,11 @@ int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
xa_erase(&sess->tree_conns, tree_conn->id);
write_unlock(&sess->tree_conns_lock);
- if (!atomic_dec_and_test(&tree_conn->refcount))
- wait_event(tree_conn->refcount_q,
- atomic_read(&tree_conn->refcount) == 0);
-
ret = ksmbd_ipc_tree_disconnect_request(sess->id, tree_conn->id);
ksmbd_release_tree_conn_id(sess, tree_conn->id);
ksmbd_share_config_put(tree_conn->share_conf);
- kfree(tree_conn);
+ if (atomic_dec_and_test(&tree_conn->refcount))
+ kfree(tree_conn);
return ret;
}
diff --git a/fs/smb/server/mgmt/tree_connect.h b/fs/smb/server/mgmt/tree_connect.h
index a42cdd051041..f0023d86716f 100644
--- a/fs/smb/server/mgmt/tree_connect.h
+++ b/fs/smb/server/mgmt/tree_connect.h
@@ -33,7 +33,6 @@ struct ksmbd_tree_connect {
int maximal_access;
bool posix_extensions;
atomic_t refcount;
- wait_queue_head_t refcount_q;
unsigned int t_state;
};
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index bb1c7a399a18..5daa77c3652b 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -2166,7 +2166,6 @@ int smb2_tree_disconnect(struct ksmbd_work *work)
goto err_out;
}
- WARN_ON_ONCE(atomic_dec_and_test(&tcon->refcount));
tcon->t_state = TREE_DISCONNECTED;
write_unlock(&sess->tree_conns_lock);
@@ -2176,8 +2175,6 @@ int smb2_tree_disconnect(struct ksmbd_work *work)
goto err_out;
}
- work->tcon = NULL;
-
rsp->StructureSize = cpu_to_le16(4);
err = ksmbd_iov_pin_rsp(work, rsp,
sizeof(struct smb2_tree_disconnect_rsp));
--
2.52.0
[PATCH OLK-5.10] ntfs: set dummy blocksize to read boot_block when mounting
by Li Lingfeng 23 Jan '26
From: Pedro Demarchi Gomes <pedrodemargomes(a)gmail.com>
mainline inclusion
from mainline-v6.19-rc1
commit d1693a7d5a38acf6424235a6070bcf5b186a360d
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13378
CVE: CVE-2025-71067
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
When mounting, sb->s_blocksize is used to read the boot_block without
being defined or validated. Set a dummy blocksize before attempting to
read the boot_block.
The issue can be triggered with the following syz reproducer:
mkdirat(0xffffffffffffff9c, &(0x7f0000000080)='./file1\x00', 0x0)
r4 = openat$nullb(0xffffffffffffff9c, &(0x7f0000000040), 0x121403, 0x0)
ioctl$FS_IOC_SETFLAGS(r4, 0x40081271, &(0x7f0000000980)=0x4000)
mount(&(0x7f0000000140)=@nullb, &(0x7f0000000040)='./cgroup\x00',
&(0x7f0000000000)='ntfs3\x00', 0x2208004, 0x0)
syz_clone(0x88200200, 0x0, 0x0, 0x0, 0x0, 0x0)
Here, the ioctl sets the bdev block size to 16384. During mount,
get_tree_bdev_flags() calls sb_set_blocksize(sb, block_size(bdev)),
but since block_size(bdev) > PAGE_SIZE, sb_set_blocksize() leaves
sb->s_blocksize at zero.
Later, ntfs_init_from_boot() attempts to read the boot_block while
sb->s_blocksize is still zero, which triggers the bug.
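A minimal sketch of the fix, assuming sb_min_blocksize() behaves as
documented (it raises the requested size to the bdev's logical block
size if needed and returns the resulting block size, or 0 on failure):
	/* Set a dummy block size before the first read. A 16384-byte bdev
	 * block size cannot fit in PAGE_SIZE, so fail the mount instead of
	 * reading with sb->s_blocksize == 0. */
	if (!sb_min_blocksize(sb, PAGE_SIZE))
		return -EINVAL;
	bh = ntfs_bread(sb, 0);	/* now runs with a valid sb->s_blocksize */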
Reported-by: syzbot+f4f84b57a01d6b8364ad(a)syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=f4f84b57a01d6b8364ad
Signed-off-by: Pedro Demarchi Gomes <pedrodemargomes(a)gmail.com>
[almaz.alexandrovich(a)paragon-software.com: changed comment style, added
return value handling]
Signed-off-by: Konstantin Komarov <almaz.alexandrovich(a)paragon-software.com>
Conflicts:
fs/ntfs3/super.c
[Commit 6c3684e70383 ("ntfs: set dummy blocksize to read boot_block when
mounting") add tag of read_boot in ntfs_init_from_boot().]
Signed-off-by: Li Lingfeng <lilingfeng3(a)huawei.com>
---
fs/ntfs3/super.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 1d6b45b70538..cbeb8156ff30 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -693,6 +693,11 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
sbi->volume.blocks = dev_size >> PAGE_SHIFT;
+ /* Set dummy blocksize to read boot_block. */
+ if (!sb_min_blocksize(sb, PAGE_SIZE)) {
+ return -EINVAL;
+ }
+
bh = ntfs_bread(sb, 0);
if (!bh)
return -EIO;
--
2.52.0
[PATCH OLK-6.6] sched/fair: Track idle balance interval with idle_stamp in balance_fair
by Chen Jinghuang 23 Jan '26
hulk inclusion
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/8425
--------------------------------
Commit 70769fe636ef ("sched/fair: Hoist idle_stamp up from
idle_balance") forgot to add the complete idle_stamp start/end
tracking interval around sched_balance_newidle() in balance_fair().
Fixes: 70769fe636ef ("sched/fair: Hoist idle_stamp up from idle_balance")
Signed-off-by: Chen Jinghuang <chenjinghuang2(a)huawei.com>
---
kernel/sched/fair.c | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b21c1ba1ded1..e60f19cb0fee 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9467,10 +9467,19 @@ static void task_dead_fair(struct task_struct *p)
static int
balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
+ int new_tasks;
+
if (rq->nr_running)
return 1;
- return newidle_balance(rq, rf) != 0;
+ rq_idle_stamp_update(rq);
+
+ new_tasks = newidle_balance(rq, rf);
+
+ if (new_tasks)
+ rq_idle_stamp_clear(rq);
+
+ return new_tasks != 0;
}
#endif /* CONFIG_SMP */
--
2.34.1
From: Grzegorz Nitka <grzegorz.nitka(a)intel.com>
mainline inclusion
from mainline-v6.14-rc7
commit 23d97f18901ef5e4e264e3b1777fe65c760186b5
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/12664
CVE: CVE-2025-21981
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
Fix an aRFS (accelerated Receive Flow Steering) structures memory leak by
adding a check that verifies whether aRFS memory is already allocated while
configuring the VSI. aRFS objects are allocated in two cases:
- as part of VSI initialization (at probe), and
- as part of reset handling
However, VSI reconfiguration executed during reset allocates the memory
again without first releasing the already allocated resources. This led
to a memory leak with the following signature:
[root@os-delivery ~]# cat /sys/kernel/debug/kmemleak
unreferenced object 0xff3c1ca7252e6000 (size 8192):
comm "kworker/0:0", pid 8, jiffies 4296833052
hex dump (first 32 bytes):
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
backtrace (crc 0):
[<ffffffff991ec485>] __kmalloc_cache_noprof+0x275/0x340
[<ffffffffc0a6e06a>] ice_init_arfs+0x3a/0xe0 [ice]
[<ffffffffc09f1027>] ice_vsi_cfg_def+0x607/0x850 [ice]
[<ffffffffc09f244b>] ice_vsi_setup+0x5b/0x130 [ice]
[<ffffffffc09c2131>] ice_init+0x1c1/0x460 [ice]
[<ffffffffc09c64af>] ice_probe+0x2af/0x520 [ice]
[<ffffffff994fbcd3>] local_pci_probe+0x43/0xa0
[<ffffffff98f07103>] work_for_cpu_fn+0x13/0x20
[<ffffffff98f0b6d9>] process_one_work+0x179/0x390
[<ffffffff98f0c1e9>] worker_thread+0x239/0x340
[<ffffffff98f14abc>] kthread+0xcc/0x100
[<ffffffff98e45a6d>] ret_from_fork+0x2d/0x50
[<ffffffff98e083ba>] ret_from_fork_asm+0x1a/0x30
...
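The fix makes ice_init_arfs() idempotent: if the VSI already has aRFS
state (e.g. after a reset-driven rebuild), it returns early instead of
allocating again. Roughly:
void ice_init_arfs(struct ice_vsi *vsi)
{
	/* Bail out if aRFS is already set up for this VSI, so a reset-time
	 * VSI reconfiguration cannot leak the earlier allocations. */
	if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi))
		return;
	/* ... allocate arfs_fltr_list and related state ... */
}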
Fixes: 28bf26724fdb ("ice: Implement aRFS")
Reviewed-by: Michal Swiatkowski <michal.swiatkowski(a)linux.intel.com>
Signed-off-by: Grzegorz Nitka <grzegorz.nitka(a)intel.com>
Reviewed-by: Simon Horman <horms(a)kernel.org>
Tested-by: Rinitha S <sx.rinitha(a)intel.com> (A Contingent worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen(a)intel.com>
Conflicts:
drivers/net/ethernet/intel/ice/ice_arfs.c
[context conflict]
Signed-off-by: Tirui Yin <yintirui(a)huawei.com>
Reviewed-by: Weilong Chen <chenweilong(a)huawei.com>
---
drivers/net/ethernet/intel/ice/ice_arfs.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index cca0e753f38f..d7e0116f6773 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -510,7 +510,7 @@ void ice_init_arfs(struct ice_vsi *vsi)
struct hlist_head *arfs_fltr_list;
unsigned int i;
- if (!vsi || vsi->type != ICE_VSI_PF)
+ if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi))
return;
arfs_fltr_list = kzalloc(sizeof(*arfs_fltr_list) * ICE_MAX_ARFS_LIST,
--
2.43.0
23 Jan '26
Extend crashkernel CMA reservation support to arm64.
The following changes are made to enable CMA reservation on arm64:
- Parse and obtain the CMA reservation size along with other crashkernel
parameters.
- Call reserve_crashkernel_cma() to allocate the CMA region for kdump.
- Include the CMA-reserved ranges for the kdump kernel to use.
- Exclude the CMA-reserved ranges from the crash kernel memory to
prevent them from being exported through /proc/vmcore.
Update kernel-parameters.txt to document CMA support for crashkernel on
the arm64 architecture.
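With this in place, an arm64 kernel can combine a regular crashkernel
reservation with an additional CMA-backed one on the command line, for
example (sizes illustrative):
	crashkernel=512M crashkernel=128M,cma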
Signed-off-by: Jinjie Ruan <ruanjinjie(a)huawei.com>
---
Documentation/admin-guide/kernel-parameters.txt | 2 +-
arch/arm64/kernel/machine_kexec_file.c | 15 ++++++++++++++-
arch/arm64/mm/init.c | 5 +++--
3 files changed, 18 insertions(+), 4 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 1058f2a6d6a8..36bb642a7edd 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1119,7 +1119,7 @@ Kernel parameters
It will be ignored when crashkernel=X,high is not used
or memory reserved is below 4G.
crashkernel=size[KMG],cma
- [KNL, X86, ppc] Reserve additional crash kernel memory from
+ [KNL, X86, ARM64, ppc] Reserve additional crash kernel memory from
CMA. This reservation is usable by the first system's
userspace memory and kernel movable allocations (memory
balloon, zswap). Pages allocated from this memory range
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 410060ebd86d..0ee679167664 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -48,7 +48,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
u64 i;
phys_addr_t start, end;
- nr_ranges = 2; /* for exclusion of crashkernel region */
+ nr_ranges = 2 + crashk_cma_cnt; /* for exclusion of crashkernel region */
for_each_mem_range(i, &start, &end)
nr_ranges++;
@@ -64,6 +64,12 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
cmem->nr_ranges++;
}
+ for (i = 0; i < crashk_cma_cnt; i++) {
+ cmem->ranges[cmem->nr_ranges].start = crashk_cma_ranges[i].start;
+ cmem->ranges[cmem->nr_ranges].end = crashk_cma_ranges[i].end;
+ cmem->nr_ranges++;
+ }
+
/* Exclude crashkernel region */
ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
if (ret)
@@ -75,6 +81,13 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
goto out;
}
+ for (i = 0; i < crashk_cma_cnt; ++i) {
+ ret = crash_exclude_mem_range(cmem, crashk_cma_ranges[i].start,
+ crashk_cma_ranges[i].end);
+ if (ret)
+ return ret;
+ }
+
ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
out:
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 524d34a0e921..28165d94af08 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -96,8 +96,8 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;
static void __init arch_reserve_crashkernel(void)
{
+ unsigned long long crash_base, crash_size, cma_size = 0;
unsigned long long low_size = 0;
- unsigned long long crash_base, crash_size;
bool high = false;
int ret;
@@ -106,11 +106,12 @@ static void __init arch_reserve_crashkernel(void)
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&crash_size, &crash_base,
- &low_size, NULL, &high);
+ &low_size, &cma_size, &high);
if (ret)
return;
reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
+ reserve_crashkernel_cma(cma_size);
}
static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit)
--
2.34.1
23 Jan '26
From: Prithvi Tambewagh <activprithvi(a)gmail.com>
mainline inclusion
from mainline-v6.19-rc1
commit 039bef30e320827bac8990c9f29d2a68cd8adb5f
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13326
CVE: CVE-2025-68771
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
syzbot reported a kernel BUG in ocfs2_find_victim_chain() because the
`cl_next_free_rec` field of the allocation chain list (the next free slot
in the chain list) is 0, triggering the BUG_ON(!cl->cl_next_free_rec)
condition in ocfs2_find_victim_chain() and panicking the kernel.
To fix this, a check is introduced in ocfs2_claim_suballoc_bits(),
just before calling ocfs2_find_victim_chain(); it fires when either of
the following conditions is true:
1. `cl_next_free_rec` is equal to 0, indicating that there are no free
chains in the allocation chain list
2. `cl_next_free_rec` is greater than `cl_count` (the total number of
chains in the allocation chain list)
Either condition indicates that there are no chains left to use.
This case is handled with ocfs2_error(), which logs the error for
debugging purposes rather than panicking the kernel.
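A sketch of the added validation (the real hunk is in the diff below);
both fields are little-endian on disk, hence the le16_to_cpu()
conversions:
	u16 next_free = le16_to_cpu(cl->cl_next_free_rec);
	u16 count = le16_to_cpu(cl->cl_count);
	if (!next_free || next_free > count) {
		status = ocfs2_error(sb, "invalid chain list: next free %u, count %u\n",
				     next_free, count);
		goto bail;
	}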
Link: https://lkml.kernel.org/r/20251201130711.143900-1-activprithvi@gmail.com
Signed-off-by: Prithvi Tambewagh <activprithvi(a)gmail.com>
Reported-by: syzbot+96d38c6e1655c1420a72(a)syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=96d38c6e1655c1420a72
Tested-by: syzbot+96d38c6e1655c1420a72(a)syzkaller.appspotmail.com
Reviewed-by: Joseph Qi <joseph.qi(a)linux.alibaba.com>
Cc: Mark Fasheh <mark(a)fasheh.com>
Cc: Joel Becker <jlbec(a)evilplan.org>
Cc: Junxiao Bi <junxiao.bi(a)oracle.com>
Cc: Changwei Ge <gechangwei(a)live.cn>
Cc: Jun Piao <piaojun(a)huawei.com>
Cc: Heming Zhao <heming.zhao(a)suse.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
Signed-off-by: Li Lingfeng <lilingfeng3(a)huawei.com>
---
fs/ocfs2/suballoc.c | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 4f48003e4327..984bf3d24c23 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -1925,6 +1925,16 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
}
cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
+ if (!le16_to_cpu(cl->cl_next_free_rec) ||
+ le16_to_cpu(cl->cl_next_free_rec) > le16_to_cpu(cl->cl_count)) {
+ status = ocfs2_error(ac->ac_inode->i_sb,
+ "Chain allocator dinode %llu has invalid next "
+ "free chain record %u, but only %u total\n",
+ (unsigned long long)le64_to_cpu(fe->i_blkno),
+ le16_to_cpu(cl->cl_next_free_rec),
+ le16_to_cpu(cl->cl_count));
+ goto bail;
+ }
victim = ocfs2_find_victim_chain(cl);
ac->ac_chain = victim;
--
2.52.0
[PATCH v4 openEuler-25.03 0/1] NUMA ro-data replication for userspace applications
by Nikita Panov 22 Jan '26
This patchset implements support for replication of userspace translation
tables and private read-only data on AArch64, aiming to
improve latency and memory bandwidth by reducing cross-NUMA memory accesses.
openEuler 25.03 is used as a baseline.
The current implementation supports the following functionality:
1. Per-NUMA node replication of userspace translation tables and private read-only data.
We replicate only __private read-only__ data to avoid dealing with
replica coherence and consistency support. Translation tables, in turn, can be
replicated for any kind of underlying data.
2. Ability to enable userspace replication for a certain process via procfs or
for a group of processes via memory cgroup.
3. 4K and 64K pages are supported.
4. Replicated data pages can't be KSM, migration or swap/reclaim candidates by design.
For other pages, these mechanisms work as usual with replicated translation tables.
Once user replication is enabled for a process via either procfs or memory cgroup,
all its existing private read-only data is immediately replicated
together with the translation tables for it. Later, as the process runs, __any__ page fault
will cause replication of the translation tables related to the faulted address.
There is also a mechanism, implemented on top of the NUMA balancer, that
replicates private read-only pages on NUMA faults as the process runs
(the NUMA balancer must be enabled for this mechanism to work).
Known problems:
1. The current implementation doesn't support huge pages,
so you have to build the kernel with huge pages disabled for user replication to work.
Huge pages support will be added in the near future.
2. The mremap syscall doesn't work with replicated memory yet.
3. page_idle, uprobes and userfaultfd support replicated translation tables,
but not replicated data. Use these features with care when userspace replication is enabled.
4. When replicating translation tables during page faults,
there must be enough space on __each__ NUMA node for the table allocations.
Otherwise the OOM killer will be triggered.
The problems above are mostly unrelated to the workloads expected to
benefit from the user replication feature,
and such workloads will work properly with the feature enabled.
Nikita Panov (1):
mm: Support NUMA-aware replication of read-only data and translation
tables of user space applications
arch/arm64/include/asm/numa_replication.h | 3 +
arch/arm64/mm/init.c | 2 +-
arch/arm64/mm/pgd.c | 13 +-
fs/exec.c | 18 +
fs/proc/base.c | 76 +
fs/proc/task_mmu.c | 112 +-
include/asm-generic/pgalloc.h | 19 +-
include/asm-generic/tlb.h | 22 +
include/linux/cgroup.h | 1 +
include/linux/gfp_types.h | 12 +-
include/linux/memcontrol.h | 4 +
include/linux/mm.h | 77 +-
include/linux/mm_inline.h | 5 +
include/linux/mm_types.h | 52 +-
include/linux/numa_kernel_replication.h | 232 ++-
include/linux/numa_user_replication.h | 760 ++++++++++
include/linux/page-flags.h | 18 +-
include/trace/events/mmflags.h | 10 +-
include/uapi/asm-generic/mman-common.h | 3 +
kernel/cgroup/cgroup.c | 2 +-
kernel/events/uprobes.c | 5 +-
kernel/fork.c | 39 +
kernel/sched/fair.c | 8 +-
mm/Kconfig | 13 +
mm/Makefile | 1 +
mm/gup.c | 3 +-
mm/ksm.c | 15 +-
mm/madvise.c | 19 +-
mm/memcontrol.c | 137 +-
mm/memory.c | 548 +++++--
mm/mempolicy.c | 5 +
mm/migrate.c | 11 +-
mm/migrate_device.c | 17 +-
mm/mlock.c | 32 +
mm/mmap.c | 32 +
mm/mmu_gather.c | 55 +-
mm/mprotect.c | 411 +++---
mm/mremap.c | 97 +-
mm/numa_kernel_replication.c | 5 +-
mm/numa_user_replication.c | 1603 +++++++++++++++++++++
mm/page_alloc.c | 8 +-
mm/page_idle.c | 3 +-
mm/page_vma_mapped.c | 3 +-
mm/rmap.c | 41 +-
mm/swap.c | 7 +-
mm/swapfile.c | 3 +-
mm/userfaultfd.c | 7 +-
mm/userswap.c | 11 +-
48 files changed, 4145 insertions(+), 435 deletions(-)
create mode 100644 include/linux/numa_user_replication.h
create mode 100644 mm/numa_user_replication.c
--
2.34.1
[PATCH v3 openEuler-25.03 0/1] NUMA ro-data replication for userspace applications
by Nikita Panov 22 Jan '26
This patchset implements support for replication of userspace translation
tables and private read-only data on AArch64, aiming to
improve latency and memory bandwidth by reducing cross-NUMA memory accesses.
openEuler 25.03 is used as a baseline.
The current implementation supports the following functionality:
1. Per-NUMA node replication of userspace translation tables and private read-only data.
We replicate only __private read-only__ data to avoid dealing with
replica coherence and consistency support. Translation tables, in turn, can be
replicated for any kind of underlying data.
2. Ability to enable userspace replication for a certain process via procfs or
for a group of processes via memory cgroup.
3. 4K and 64K pages are supported.
4. Replicated data pages can't be KSM, migration or swap/reclaim candidates by design.
For other pages, these mechanisms work as usual with replicated translation tables.
Once user replication is enabled for a process via either procfs or memory cgroup,
all its existing private read-only data is immediately replicated
together with the translation tables for it. Later, as the process runs, __any__ page fault
will cause replication of the translation tables related to the faulted address.
There is also a mechanism, implemented on top of the NUMA balancer, that
replicates private read-only pages on NUMA faults as the process runs
(the NUMA balancer must be enabled for this mechanism to work).
Known problems:
1. The current implementation doesn't support huge pages,
so you have to build the kernel with huge pages disabled for user replication to work.
Huge pages support will be added in the near future.
2. The mremap syscall doesn't work with replicated memory yet.
3. page_idle, uprobes and userfaultfd support replicated translation tables,
but not replicated data. Use these features with care when userspace replication is enabled.
4. When replicating translation tables during page faults,
there must be enough space on __each__ NUMA node for the table allocations.
Otherwise the OOM killer will be triggered.
The problems above are mostly unrelated to the workloads expected to
benefit from the user replication feature,
and such workloads will work properly with the feature enabled.
Nikita Panov (1):
mm: Support NUMA-aware replication of read-only data and translation
tables of user space applications
arch/arm64/include/asm/numa_replication.h | 3 +
arch/arm64/mm/init.c | 2 +-
arch/arm64/mm/pgd.c | 13 +-
fs/exec.c | 18 +
fs/proc/base.c | 76 +
fs/proc/task_mmu.c | 112 +-
include/asm-generic/pgalloc.h | 19 +-
include/asm-generic/tlb.h | 22 +
include/linux/cgroup.h | 1 +
include/linux/gfp_types.h | 12 +-
include/linux/memcontrol.h | 4 +
include/linux/mm.h | 77 +-
include/linux/mm_inline.h | 5 +
include/linux/mm_types.h | 52 +-
include/linux/numa_kernel_replication.h | 232 ++-
include/linux/numa_user_replication.h | 760 ++++++++++
include/linux/page-flags.h | 18 +-
include/trace/events/mmflags.h | 10 +-
include/uapi/asm-generic/mman-common.h | 3 +
kernel/cgroup/cgroup.c | 2 +-
kernel/events/uprobes.c | 5 +-
kernel/fork.c | 39 +
kernel/sched/fair.c | 8 +-
mm/Kconfig | 13 +
mm/Makefile | 1 +
mm/gup.c | 3 +-
mm/ksm.c | 15 +-
mm/madvise.c | 19 +-
mm/memcontrol.c | 137 +-
mm/memory.c | 548 +++++--
mm/mempolicy.c | 5 +
mm/migrate.c | 11 +-
mm/migrate_device.c | 17 +-
mm/mlock.c | 32 +
mm/mmap.c | 32 +
mm/mmu_gather.c | 55 +-
mm/mprotect.c | 411 +++---
mm/mremap.c | 97 +-
mm/numa_kernel_replication.c | 5 +-
mm/numa_user_replication.c | 1603 +++++++++++++++++++++
mm/page_alloc.c | 8 +-
mm/page_idle.c | 3 +-
mm/page_vma_mapped.c | 3 +-
mm/rmap.c | 41 +-
mm/swap.c | 7 +-
mm/swapfile.c | 3 +-
mm/userfaultfd.c | 7 +-
mm/userswap.c | 11 +-
48 files changed, 4145 insertions(+), 435 deletions(-)
create mode 100644 include/linux/numa_user_replication.h
create mode 100644 mm/numa_user_replication.c
--
2.34.1
get_timer_this_cpu_base() and get_timer_cpu_base() have almost identical
implementations, so replace the former with the latter.
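Conceptually, the removed helper was just the current-CPU
specialization of the one that is kept, i.e. roughly:
static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
{
	return get_timer_cpu_base(tflags, smp_processor_id());
}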
Signed-off-by: Jinjie Ruan <ruanjinjie(a)huawei.com>
---
kernel/time/timer.c | 17 ++---------------
1 file changed, 2 insertions(+), 15 deletions(-)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 1f2364126894..da1295646d39 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -925,20 +925,6 @@ static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
return per_cpu_ptr(&timer_bases[index], cpu);
}
-static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
-{
- int index = tflags & TIMER_PINNED ? BASE_LOCAL : BASE_GLOBAL;
-
- /*
- * If the timer is deferrable and NO_HZ_COMMON is set then we need
- * to use the deferrable base.
- */
- if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
- index = BASE_DEF;
-
- return this_cpu_ptr(&timer_bases[index]);
-}
-
static inline struct timer_base *get_timer_base(u32 tflags)
{
return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
@@ -1019,6 +1005,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
{
unsigned long clk = 0, flags, bucket_expiry;
struct timer_base *base, *new_base;
+ int cpu = smp_processor_id();
unsigned int idx = UINT_MAX;
int ret = 0;
@@ -1098,7 +1085,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
if (!ret && (options & MOD_TIMER_PENDING_ONLY))
goto out_unlock;
- new_base = get_timer_this_cpu_base(timer->flags);
+ new_base = get_timer_cpu_base(timer->flags, cpu);
if (base != new_base) {
/*
--
2.34.1
[PATCH v2 openEuler-25.03 0/1] NUMA ro-data replication for userspace applications
by Nikita Panov 22 Jan '26
This patchset implements support for replication of userspace translation
tables and private read-only data on AArch64, aiming to
improve latency and memory bandwidth by reducing cross-NUMA memory accesses.
openEuler 25.03 is used as a baseline.
The current implementation supports the following functionality:
1. Per-NUMA node replication of userspace translation tables and private read-only data.
We replicate only __private read-only__ data to avoid dealing with
replica coherence and consistency support. Translation tables, in turn, can be
replicated for any kind of underlying data.
2. Ability to enable userspace replication for a certain process via procfs or
for a group of processes via memory cgroup.
3. 4K and 64K pages are supported.
4. Replicated data pages can't be KSM, migration or swap/reclaim candidates by design.
For other pages, these mechanisms work as usual with replicated translation tables.
Once user replication is enabled for a process via either procfs or memory cgroup,
all its existing private read-only data is immediately replicated
together with the translation tables for it. Later, as the process runs, __any__ page fault
will cause replication of the translation tables related to the faulted address.
There is also a mechanism, implemented on top of the NUMA balancer, that
replicates private read-only pages on NUMA faults as the process runs
(the NUMA balancer must be enabled for this mechanism to work).
Known problems:
1. The current implementation doesn't support huge pages,
so you have to build the kernel with huge pages disabled for user replication to work.
Huge pages support will be added in the near future.
2. The mremap syscall doesn't work with replicated memory yet.
3. page_idle, uprobes and userfaultfd support replicated translation tables,
but not replicated data. Use these features with care when userspace replication is enabled.
4. When replicating translation tables during page faults,
there must be enough space on __each__ NUMA node for the table allocations.
Otherwise the OOM killer will be triggered.
The problems above are mostly unrelated to the workloads expected to
benefit from the user replication feature,
and such workloads will work properly with the feature enabled.
Nikita Panov (1):
mm: Support NUMA-aware replication of read-only data and translation
tables of user space applications
arch/arm64/include/asm/numa_replication.h | 3 +
arch/arm64/mm/init.c | 2 +-
arch/arm64/mm/pgd.c | 13 +-
fs/exec.c | 18 +
fs/proc/base.c | 76 +
fs/proc/task_mmu.c | 112 +-
include/asm-generic/pgalloc.h | 19 +-
include/asm-generic/tlb.h | 22 +
include/linux/cgroup.h | 1 +
include/linux/gfp_types.h | 12 +-
include/linux/memcontrol.h | 4 +
include/linux/mm.h | 77 +-
include/linux/mm_inline.h | 5 +
include/linux/mm_types.h | 52 +-
include/linux/numa_kernel_replication.h | 232 ++-
include/linux/numa_user_replication.h | 760 ++++++++++
include/linux/page-flags.h | 18 +-
include/trace/events/mmflags.h | 11 +-
include/uapi/asm-generic/mman-common.h | 3 +
kernel/cgroup/cgroup.c | 2 +-
kernel/events/uprobes.c | 5 +-
kernel/fork.c | 39 +
kernel/sched/fair.c | 8 +-
mm/Kconfig | 13 +
mm/Makefile | 1 +
mm/gup.c | 3 +-
mm/ksm.c | 15 +-
mm/madvise.c | 19 +-
mm/memcontrol.c | 137 +-
mm/memory.c | 548 +++++--
mm/mempolicy.c | 5 +
mm/migrate.c | 11 +-
mm/migrate_device.c | 17 +-
mm/mlock.c | 32 +
mm/mmap.c | 32 +
mm/mmu_gather.c | 55 +-
mm/mprotect.c | 411 +++---
mm/mremap.c | 97 +-
mm/numa_kernel_replication.c | 5 +-
mm/numa_user_replication.c | 1603 +++++++++++++++++++++
mm/page_alloc.c | 8 +-
mm/page_idle.c | 3 +-
mm/page_vma_mapped.c | 3 +-
mm/rmap.c | 41 +-
mm/swap.c | 7 +-
mm/swapfile.c | 3 +-
mm/userfaultfd.c | 7 +-
mm/userswap.c | 11 +-
48 files changed, 4145 insertions(+), 436 deletions(-)
create mode 100644 include/linux/numa_user_replication.h
create mode 100644 mm/numa_user_replication.c
--
2.34.1
LeapIO inclusion
category: feature
bugzilla: https://atomgit.com/openeuler/kernel/issues/8338
------------------------------------------
The LeapRAID driver provides support for LeapRAID PCIe RAID controllers,
enabling communication between the host operating system, firmware, and
hardware for efficient storage management.
The main source files are organized as follows:
leapraid_os.c:
Implements the scsi_host_template functions, PCIe device probing, and
initialization routines, integrating the driver with the Linux SCSI
subsystem.
leapraid_func.c:
Provides the core functional routines that handle low-level interactions
with the controller firmware and hardware, including interrupt handling,
topology management, reset sequence processing, and other related
operations.
leapraid_app.c:
Implements the ioctl interface, providing user-space tools access to device
management and diagnostic operations.
leapraid_transport.c:
Interacts with the Linux SCSI transport layer to add SAS phys and ports.
leapraid_func.h:
Declares common data structures, constants, and function prototypes shared
across the driver.
leapraid.h:
Provides global constants, register mappings, and interface definitions
that facilitate communication between the driver and the controller
firmware.
The leapraid_probe function is called when the driver detects a supported
LeapRAID PCIe device. It allocates and initializes the Scsi_Host structure,
configures hardware and firmware interfaces, and registers the host adapter
with the Linux SCSI mid-layer.
After registration, the driver invokes scsi_scan_host() to initiate device
discovery. The firmware then reports discovered logical and physical
devices to the host through interrupt-driven events and synchronizes their
operational states.
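A heavily simplified sketch of that flow, using only standard SCSI
midlayer entry points (leapraid_template stands in for the driver's
scsi_host_template; the real probe also maps BARs, sets up interrupts,
and performs the firmware handshake):
static int leapraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *shost;
	int rc;
	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	/* Allocate the SCSI host with room for the adapter private data. */
	shost = scsi_host_alloc(&leapraid_template,
				sizeof(struct leapraid_adapter));
	if (!shost)
		return -ENOMEM;
	/* ... hardware and firmware interface initialization ... */
	rc = scsi_add_host(shost, &pdev->dev);	/* register with the mid-layer */
	if (rc) {
		scsi_host_put(shost);
		return rc;
	}
	scsi_scan_host(shost);	/* devices are then reported via firmware events */
	return 0;
}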
leapraid_adapter is the core data structure that encapsulates all resources
and runtime state information maintained during driver operation, described
as follows:
/**
* struct leapraid_adapter - Main LeapRaid adapter structure
* @list: List head for adapter management
* @shost: SCSI host structure
* @pdev: PCI device structure
* @iomem_base: I/O memory mapped base address
* @rep_msg_host_idx: Host index for reply messages
* @mask_int: Interrupt masking flag
* @timestamp_sync_cnt: Timestamp synchronization counter
* @adapter_attr: Adapter attributes
* @mem_desc: Memory descriptor
* @driver_cmds: Driver commands
* @dynamic_task_desc: Dynamic task descriptor
* @fw_evt_s: Firmware event structure
* @notification_desc: Notification descriptor
* @reset_desc: Reset descriptor
* @scan_dev_desc: Device scan descriptor
* @access_ctrl: Access control
* @fw_log_desc: Firmware log descriptor
* @dev_topo: Device topology
* @boot_devs: Boot devices
* @smart_poll_desc: SMART polling descriptor
*/
struct leapraid_adapter {
struct list_head list;
struct Scsi_Host *shost;
struct pci_dev *pdev;
struct leapraid_reg_base __iomem *iomem_base;
u32 rep_msg_host_idx;
bool mask_int;
u32 timestamp_sync_cnt;
struct leapraid_adapter_attr adapter_attr;
struct leapraid_mem_desc mem_desc;
struct leapraid_driver_cmds driver_cmds;
struct leapraid_dynamic_task_desc dynamic_task_desc;
struct leapraid_fw_evt_struct fw_evt_s;
struct leapraid_notification_desc notification_desc;
struct leapraid_reset_desc reset_desc;
struct leapraid_scan_dev_desc scan_dev_desc;
struct leapraid_access_ctrl access_ctrl;
struct leapraid_fw_log_desc fw_log_desc;
struct leapraid_dev_topo dev_topo;
struct leapraid_boot_devs boot_devs;
struct leapraid_smart_poll_desc smart_poll_desc;
};
Signed-off-by: haodongdong <doubled(a)leap-io.com>
---
arch/arm64/configs/openeuler_defconfig | 2 +-
arch/x86/configs/openeuler_defconfig | 2 +-
drivers/scsi/Kconfig | 2 +-
drivers/scsi/Makefile | 2 +-
drivers/scsi/leapioraid/Kconfig | 13 -
drivers/scsi/leapioraid/Makefile | 9 -
drivers/scsi/leapioraid/leapioraid.h | 2026 ----
drivers/scsi/leapioraid/leapioraid_app.c | 2253 ----
drivers/scsi/leapioraid/leapioraid_func.c | 7056 ------------
drivers/scsi/leapioraid/leapioraid_func.h | 1262 ---
drivers/scsi/leapioraid/leapioraid_os.c | 9825 -----------------
.../scsi/leapioraid/leapioraid_transport.c | 1926 ----
drivers/scsi/leapraid/Kconfig | 14 +
drivers/scsi/leapraid/Makefile | 10 +
drivers/scsi/leapraid/leapraid.h | 2070 ++++
drivers/scsi/leapraid/leapraid_app.c | 675 ++
drivers/scsi/leapraid/leapraid_func.c | 8264 ++++++++++++++
drivers/scsi/leapraid/leapraid_func.h | 1425 +++
drivers/scsi/leapraid/leapraid_os.c | 2365 ++++
drivers/scsi/leapraid/leapraid_transport.c | 1256 +++
20 files changed, 16083 insertions(+), 24374 deletions(-)
delete mode 100644 drivers/scsi/leapioraid/Kconfig
delete mode 100644 drivers/scsi/leapioraid/Makefile
delete mode 100644 drivers/scsi/leapioraid/leapioraid.h
delete mode 100644 drivers/scsi/leapioraid/leapioraid_app.c
delete mode 100644 drivers/scsi/leapioraid/leapioraid_func.c
delete mode 100644 drivers/scsi/leapioraid/leapioraid_func.h
delete mode 100644 drivers/scsi/leapioraid/leapioraid_os.c
delete mode 100644 drivers/scsi/leapioraid/leapioraid_transport.c
create mode 100644 drivers/scsi/leapraid/Kconfig
create mode 100644 drivers/scsi/leapraid/Makefile
create mode 100644 drivers/scsi/leapraid/leapraid.h
create mode 100644 drivers/scsi/leapraid/leapraid_app.c
create mode 100644 drivers/scsi/leapraid/leapraid_func.c
create mode 100644 drivers/scsi/leapraid/leapraid_func.h
create mode 100644 drivers/scsi/leapraid/leapraid_os.c
create mode 100644 drivers/scsi/leapraid/leapraid_transport.c
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 425616aa8422..7c5144c70018 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -2628,7 +2628,7 @@ CONFIG_SCSI_MPT2SAS=m
CONFIG_SCSI_PS3STOR=m
# CONFIG_SCSI_MPI3MR is not set
CONFIG_SCSI_3SNIC_SSSRAID=m
-CONFIG_SCSI_LEAPIORAID=m
+CONFIG_SCSI_LEAPRAID=m
CONFIG_SCSI_SMARTPQI=m
CONFIG_SCSI_HISI_RAID=m
# CONFIG_SCSI_HPTIOP is not set
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index e6c7a62045d9..7047dc6c010e 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -2587,7 +2587,7 @@ CONFIG_SCSI_MPT2SAS=m
CONFIG_SCSI_PS3STOR=m
# CONFIG_SCSI_MPI3MR is not set
CONFIG_SCSI_3SNIC_SSSRAID=m
-CONFIG_SCSI_LEAPIORAID=m
+CONFIG_SCSI_LEAPRAID=m
CONFIG_SCSI_SMARTPQI=m
CONFIG_SCSI_HISI_RAID=m
# CONFIG_SCSI_HPTIOP is not set
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 6e2f48159a44..2aec402c69c7 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -491,7 +491,7 @@ source "drivers/scsi/mpt3sas/Kconfig"
source "drivers/scsi/linkdata/Kconfig"
source "drivers/scsi/mpi3mr/Kconfig"
source "drivers/scsi/sssraid/Kconfig"
-source "drivers/scsi/leapioraid/Kconfig"
+source "drivers/scsi/leapraid/Kconfig"
source "drivers/scsi/smartpqi/Kconfig"
source "drivers/scsi/hisi_raid/Kconfig"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 26fcfc54dc7b..5f75bf1ad711 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_SCSI_BFA_FC) += bfa/
obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
obj-$(CONFIG_SCSI_HPSA) += hpsa.o
+obj-$(CONFIG_SCSI_LEAPRAID) += leapraid/
obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi/
obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
@@ -103,7 +104,6 @@ obj-$(CONFIG_SCSI_PS3STOR) += linkdata/
obj-$(CONFIG_SCSI_HISI_RAID) += hisi_raid/
obj-$(CONFIG_SCSI_MPI3MR) += mpi3mr/
obj-$(CONFIG_SCSI_3SNIC_SSSRAID) += sssraid/
-obj-$(CONFIG_SCSI_LEAPIORAID) += leapioraid/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
obj-$(CONFIG_SCSI_INITIO) += initio.o
diff --git a/drivers/scsi/leapioraid/Kconfig b/drivers/scsi/leapioraid/Kconfig
deleted file mode 100644
index a309d530284b..000000000000
--- a/drivers/scsi/leapioraid/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Kernel configuration file for the LEAPIORAID
-#
-
-config SCSI_LEAPIORAID
- tristate "LeapIO RAID Adapter"
- depends on PCI && SCSI
- select SCSI_SAS_ATTRS
- select RAID_ATTRS
- select IRQ_POLL
- help
- This driver supports LEAPIO RAID controller, which supports PCI Express Gen4 interface
- and supports SAS/SATA HDD/SSD.
diff --git a/drivers/scsi/leapioraid/Makefile b/drivers/scsi/leapioraid/Makefile
deleted file mode 100644
index 81f286f44bd0..000000000000
--- a/drivers/scsi/leapioraid/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for the LEAPIORAID drivers.
-#
-
-obj-$(CONFIG_SCSI_LEAPIORAID) += leapioraid.o
-leapioraid-objs += leapioraid_func.o \
- leapioraid_os.o \
- leapioraid_transport.o \
- leapioraid_app.o
\ No newline at end of file
diff --git a/drivers/scsi/leapioraid/leapioraid.h b/drivers/scsi/leapioraid/leapioraid.h
deleted file mode 100644
index 30908fffe43b..000000000000
--- a/drivers/scsi/leapioraid/leapioraid.h
+++ /dev/null
@@ -1,2026 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- *
- * Copyright 2000-2020 Broadcom Inc. All rights reserved.
- *
- * Copyright (C) 2024 LeapIO Tech Inc.
- *
- */
-
-#ifndef LEAPIORAID_H
-#define LEAPIORAID_H
-
-typedef u8 U8;
-typedef __le16 U16;
-typedef __le32 U32;
-typedef __le64 U64 __aligned(4);
-
-#define LEAPIORAID_IOC_STATE_RESET (0x00000000)
-#define LEAPIORAID_IOC_STATE_READY (0x10000000)
-#define LEAPIORAID_IOC_STATE_OPERATIONAL (0x20000000)
-#define LEAPIORAID_IOC_STATE_FAULT (0x40000000)
-#define LEAPIORAID_IOC_STATE_COREDUMP (0x50000000)
-#define LEAPIORAID_IOC_STATE_MASK (0xF0000000)
-
-struct LeapioraidSysInterfaceRegs_t {
- U32 Doorbell;
- U32 WriteSequence;
- U32 HostDiagnostic;
- U32 Reserved1;
- U32 DiagRWData;
- U32 DiagRWAddressLow;
- U32 DiagRWAddressHigh;
- U32 Reserved2[5];
- U32 HostInterruptStatus;
- U32 HostInterruptMask;
- U32 DCRData;
- U32 DCRAddress;
- U32 Reserved3[2];
- U32 ReplyFreeHostIndex;
- U32 Reserved4[8];
- U32 ReplyPostHostIndex;
- U32 Reserved5;
- U32 HCBSize;
- U32 HCBAddressLow;
- U32 HCBAddressHigh;
- U32 Reserved6[12];
- U32 Scratchpad[4];
- U32 RequestDescriptorPostLow;
- U32 RequestDescriptorPostHigh;
- U32 AtomicRequestDescriptorPost;
- U32 IocLogBufPosition;
- U32 HostLogBufPosition;
- U32 Reserved7[11];
-};
-
-#define LEAPIORAID_DOORBELL_USED (0x08000000)
-#define LEAPIORAID_DOORBELL_DATA_MASK (0x0000FFFF)
-#define LEAPIORAID_DOORBELL_FUNCTION_SHIFT (24)
-#define LEAPIORAID_DOORBELL_ADD_DWORDS_SHIFT (16)
-
-#define LEAPIORAID_DIAG_RESET_ADAPTER (0x00000004)
-
-#define LEAPIORAID_HIS_SYS2IOC_DB_STATUS (0x80000000)
-#define LEAPIORAID_HIS_IOC2SYS_DB_STATUS (0x00000001)
-
-#define LEAPIORAID_RPHI_MSIX_INDEX_SHIFT (24)
-
-#define LEAPIORAID_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
-#define LEAPIORAID_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
-#define LEAPIORAID_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08)
-#define LEAPIORAID_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C)
-
-struct LEAPIORAID_DEFAULT_REQUEST_DESCRIPTOR {
- U8 RequestFlags;
- U8 MSIxIndex;
- U16 SMID;
- U16 LMID;
- U16 DescriptorTypeDependent;
-};
-
-struct LEAPIORAID_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
- U8 RequestFlags;
- U8 MSIxIndex;
- U16 SMID;
- U16 LMID;
- U16 Reserved1;
-};
-
-struct LEAPIORAID_SCSI_IO_REQUEST_DESCRIPTOR {
- U8 RequestFlags;
- U8 MSIxIndex;
- U16 SMID;
- U16 LMID;
- U16 DevHandle;
-};
-
-typedef
-struct LEAPIORAID_SCSI_IO_REQUEST_DESCRIPTOR
- LEAPIORAID_FP_SCSI_IO_REQUEST_DESCRIPTOR;
-
-union LeapioraidReqDescUnion_t {
- struct LEAPIORAID_DEFAULT_REQUEST_DESCRIPTOR Default;
- struct LEAPIORAID_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
- struct LEAPIORAID_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
- LEAPIORAID_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO;
- U64 Words;
-};
-
-struct LeapioraidAtomicReqDesc_t {
- U8 RequestFlags;
- U8 MSIxIndex;
- U16 SMID;
-};
-
-#define LEAPIORAID_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
-#define LEAPIORAID_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
-#define LEAPIORAID_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01)
-#define LEAPIORAID_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06)
-#define LEAPIORAID_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
-
-struct LeapioraidDefaultRepDesc_t {
- U8 ReplyFlags;
- U8 MSIxIndex;
- U16 DescriptorTypeDependent1;
- U32 DescriptorTypeDependent2;
-};
-
-struct LEAPIORAID_ADDRESS_REPLY_DESCRIPTOR {
- U8 ReplyFlags;
- U8 MSIxIndex;
- U16 SMID;
- U32 ReplyFrameAddress;
-};
-
-struct LEAPIORAID_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR {
- U8 ReplyFlags;
- U8 MSIxIndex;
- U16 SMID;
- U16 TaskTag;
- U16 Reserved1;
-};
-
-typedef
-struct LEAPIORAID_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
- LEAPIORAID_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR;
-
-union LeapioraidRepDescUnion_t {
- struct LeapioraidDefaultRepDesc_t Default;
- struct LEAPIORAID_ADDRESS_REPLY_DESCRIPTOR AddressReply;
- struct LEAPIORAID_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
- LEAPIORAID_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess;
- U64 Words;
-};
-
-#define LEAPIORAID_FUNC_SCSI_IO_REQUEST (0x00)
-#define LEAPIORAID_FUNC_SCSI_TASK_MGMT (0x01)
-#define LEAPIORAID_FUNC_IOC_INIT (0x02)
-#define LEAPIORAID_FUNC_IOC_FACTS (0x03)
-#define LEAPIORAID_FUNC_CONFIG (0x04)
-#define LEAPIORAID_FUNC_PORT_FACTS (0x05)
-#define LEAPIORAID_FUNC_PORT_ENABLE (0x06)
-#define LEAPIORAID_FUNC_EVENT_NOTIFICATION (0x07)
-#define LEAPIORAID_FUNC_EVENT_ACK (0x08)
-#define LEAPIORAID_FUNC_FW_DOWNLOAD (0x09)
-#define LEAPIORAID_FUNC_FW_UPLOAD (0x12)
-#define LEAPIORAID_FUNC_RAID_ACTION (0x15)
-#define LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH (0x16)
-#define LEAPIORAID_FUNC_SCSI_ENCLOSURE_PROCESSOR (0x18)
-#define LEAPIORAID_FUNC_SMP_PASSTHROUGH (0x1A)
-#define LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL (0x1B)
-#define LEAPIORAID_FUNC_IO_UNIT_CONTROL (0x1B)
-#define LEAPIORAID_FUNC_SATA_PASSTHROUGH (0x1C)
-#define LEAPIORAID_FUNC_IOC_MESSAGE_UNIT_RESET (0x40)
-#define LEAPIORAID_FUNC_HANDSHAKE (0x42)
-#define LEAPIORAID_FUNC_LOG_INIT (0x57)
-
-#define LEAPIORAID_IOCSTATUS_MASK (0x7FFF)
-#define LEAPIORAID_IOCSTATUS_SUCCESS (0x0000)
-#define LEAPIORAID_IOCSTATUS_INVALID_FUNCTION (0x0001)
-#define LEAPIORAID_IOCSTATUS_BUSY (0x0002)
-#define LEAPIORAID_IOCSTATUS_INVALID_SGL (0x0003)
-#define LEAPIORAID_IOCSTATUS_INTERNAL_ERROR (0x0004)
-#define LEAPIORAID_IOCSTATUS_INVALID_VPID (0x0005)
-#define LEAPIORAID_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006)
-#define LEAPIORAID_IOCSTATUS_INVALID_FIELD (0x0007)
-#define LEAPIORAID_IOCSTATUS_INVALID_STATE (0x0008)
-#define LEAPIORAID_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009)
-#define LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER (0x000A)
-
-#define LEAPIORAID_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020)
-#define LEAPIORAID_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021)
-#define LEAPIORAID_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022)
-#define LEAPIORAID_IOCSTATUS_CONFIG_INVALID_DATA (0x0023)
-#define LEAPIORAID_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024)
-#define LEAPIORAID_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025)
-
-#define LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040)
-#define LEAPIORAID_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042)
-#define LEAPIORAID_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043)
-#define LEAPIORAID_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044)
-#define LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045)
-#define LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046)
-#define LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047)
-#define LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048)
-#define LEAPIORAID_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049)
-#define LEAPIORAID_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004A)
-#define LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B)
-#define LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C)
-
-#define LEAPIORAID_IOCSTATUS_EEDP_GUARD_ERROR (0x004D)
-#define LEAPIORAID_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E)
-#define LEAPIORAID_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F)
-
-#define LEAPIORAID_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062)
-#define LEAPIORAID_IOCSTATUS_TARGET_ABORTED (0x0063)
-#define LEAPIORAID_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064)
-#define LEAPIORAID_IOCSTATUS_TARGET_NO_CONNECTION (0x0065)
-#define LEAPIORAID_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A)
-#define LEAPIORAID_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006D)
-#define LEAPIORAID_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006E)
-#define LEAPIORAID_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006F)
-#define LEAPIORAID_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070)
-#define LEAPIORAID_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071)
-
-#define LEAPIORAID_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090)
-#define LEAPIORAID_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091)
-#define LEAPIORAID_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000)
-
-struct LeapioraidReqHeader_t {
- U16 FunctionDependent1;
- U8 ChainOffset;
- U8 Function;
- U16 FunctionDependent2;
- U8 FunctionDependent3;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved1;
-};
-
-struct LeapioraidDefaultRep_t {
- U16 FunctionDependent1;
- U8 MsgLength;
- U8 Function;
- U16 FunctionDependent2;
- U8 FunctionDependent3;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved1;
- U16 FunctionDependent5;
- U16 IOCStatus;
- U32 IOCLogInfo;
-};
-
-struct LEAPIORAID_VERSION_STRUCT {
- U8 Dev;
- U8 Unit;
- U8 Minor;
- U8 Major;
-};
-
-union LEAPIORAID_VERSION_UNION {
- struct LEAPIORAID_VERSION_STRUCT Struct;
- U32 Word;
-};
-
-struct LeapioSGESimple32_t {
- U32 FlagsLength;
- U32 Address;
-};
-
-struct LeapioSGESimple64_t {
- U32 FlagsLength;
- U64 Address;
-};
-
-struct LEAPIORAID_SGE_SIMPLE_UNION {
- U32 FlagsLength;
- union {
- U32 Address32;
- U64 Address64;
- } u;
-};
-
-struct LEAPIORAID_SGE_CHAIN_UNION {
- U16 Length;
- U8 NextChainOffset;
- U8 Flags;
- union {
- U32 Address32;
- U64 Address64;
- } u;
-};
-
-#define LEAPIORAID_SGE_FLAGS_LAST_ELEMENT (0x80)
-#define LEAPIORAID_SGE_FLAGS_END_OF_BUFFER (0x40)
-#define LEAPIORAID_SGE_FLAGS_END_OF_LIST (0x01)
-#define LEAPIORAID_SGE_FLAGS_SHIFT (24)
-#define LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT (0x10)
-#define LEAPIORAID_SGE_FLAGS_SYSTEM_ADDRESS (0x00)
-#define LEAPIORAID_SGE_FLAGS_HOST_TO_IOC (0x04)
-#define LEAPIORAID_SGE_FLAGS_32_BIT_ADDRESSING (0x00)
-#define LEAPIORAID_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
-
-struct LEAPIORAID_IEEE_SGE_SIMPLE32 {
- U32 Address;
- U32 FlagsLength;
-};
-
-struct LEAPIORAID_IEEE_SGE_SIMPLE64 {
- U64 Address;
- U32 Length;
- U16 Reserved1;
- U8 Reserved2;
- U8 Flags;
-};
-
-union LEAPIORAID_IEEE_SGE_SIMPLE_UNION {
- struct LEAPIORAID_IEEE_SGE_SIMPLE32 Simple32;
- struct LEAPIORAID_IEEE_SGE_SIMPLE64 Simple64;
-};
-
-union LEAPIORAID_IEEE_SGE_CHAIN_UNION {
- struct LEAPIORAID_IEEE_SGE_SIMPLE32 Chain32;
- struct LEAPIORAID_IEEE_SGE_SIMPLE64 Chain64;
-};
-
-struct LEAPIORAID_IEEE_SGE_CHAIN64 {
- U64 Address;
- U32 Length;
- U16 Reserved1;
- U8 NextChainOffset;
- U8 Flags;
-};
-
-union LEAPIORAID_IEEE_SGE_IO_UNION {
- struct LEAPIORAID_IEEE_SGE_SIMPLE64 IeeeSimple;
- struct LEAPIORAID_IEEE_SGE_CHAIN64 IeeeChain;
-};
-
-#define LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST (0x40)
-#define LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00)
-#define LEAPIORAID_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
-#define LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
-
-union LEAPIORAID_SIMPLE_SGE_UNION {
- struct LEAPIORAID_SGE_SIMPLE_UNION LeapioSimple;
- union LEAPIORAID_IEEE_SGE_SIMPLE_UNION IeeeSimple;
-};
-
-union LEAPIORAID_SGE_IO_UNION {
- struct LEAPIORAID_SGE_SIMPLE_UNION LeapioSimple;
- struct LEAPIORAID_SGE_CHAIN_UNION LeapioChain;
- union LEAPIORAID_IEEE_SGE_SIMPLE_UNION IeeeSimple;
- union LEAPIORAID_IEEE_SGE_CHAIN_UNION IeeeChain;
-};
-
-struct LEAPIORAID_CONFIG_PAGE_HEADER {
- U8 PageVersion;
- U8 PageLength;
- U8 PageNumber;
- U8 PageType;
-};
-
-struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER {
- U8 PageVersion;
- U8 Reserved1;
- U8 PageNumber;
- U8 PageType;
- U16 ExtPageLength;
- U8 ExtPageType;
- U8 Reserved2;
-};
-
-#define LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT (0x00)
-#define LEAPIORAID_CONFIG_PAGETYPE_IOC (0x01)
-#define LEAPIORAID_CONFIG_PAGETYPE_BIOS (0x02)
-#define LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME (0x08)
-#define LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING (0x09)
-#define LEAPIORAID_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A)
-#define LEAPIORAID_CONFIG_PAGETYPE_EXTENDED (0x0F)
-#define LEAPIORAID_CONFIG_PAGETYPE_MASK (0x0F)
-#define LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT (0x10)
-#define LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_EXPANDER (0x11)
-#define LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_DEVICE (0x12)
-#define LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PHY (0x13)
-#define LEAPIORAID_CONFIG_EXTPAGETYPE_LOG (0x14)
-#define LEAPIORAID_CONFIG_EXTPAGETYPE_ENCLOSURE (0x15)
-#define LEAPIORAID_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16)
-#define LEAPIORAID_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17)
-#define LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PORT (0x18)
-#define LEAPIORAID_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A)
-
-#define LEAPIORAID_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
-#define LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE (0x10000000)
-
-#define LEAPIORAID_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM (0x00000000)
-#define LEAPIORAID_PHYSDISK_PGAD_FORM_PHYSDISKNUM (0x10000000)
-
-#define LEAPIORAID_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL (0x00000000)
-#define LEAPIORAID_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM (0x10000000)
-#define LEAPIORAID_SAS_EXPAND_PGAD_FORM_HNDL (0x20000000)
-#define LEAPIORAID_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16)
-#define LEAPIORAID_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
-#define LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE (0x20000000)
-#define LEAPIORAID_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000)
-#define LEAPIORAID_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
-#define LEAPIORAID_SAS_ENCLOS_PGAD_FORM_HANDLE (0x10000000)
-#define LEAPIORAID_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM (0x00000000)
-
-struct LeapioraidCfgReq_t {
- U8 Action;
- U8 SGLFlags;
- U8 ChainOffset;
- U8 Function;
- U16 ExtPageLength;
- U8 ExtPageType;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved1;
- U8 Reserved2;
- U8 ProxyVF_ID;
- U16 Reserved4;
- U32 Reserved3;
- struct LEAPIORAID_CONFIG_PAGE_HEADER Header;
- U32 PageAddress;
- union LEAPIORAID_SGE_IO_UNION PageBufferSGE;
-};
-
-#define LEAPIORAID_CONFIG_ACTION_PAGE_HEADER (0x00)
-#define LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT (0x01)
-#define LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02)
-#define LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_NVRAM (0x04)
-
-struct LeapioraidCfgRep_t {
- U8 Action;
- U8 SGLFlags;
- U8 MsgLength;
- U8 Function;
- U16 ExtPageLength;
- U8 ExtPageType;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved1;
- U16 Reserved2;
- U16 IOCStatus;
- U32 IOCLogInfo;
- struct LEAPIORAID_CONFIG_PAGE_HEADER Header;
-};
-
-struct LeapioraidManP0_t {
- struct LEAPIORAID_CONFIG_PAGE_HEADER Header;
- U8 ChipName[16];
- U8 ChipRevision[8];
- U8 BoardName[16];
- U8 BoardAssembly[16];
- U8 BoardTracerNumber[16];
-};
-
-struct LEAPIORAID_MANPAGE7_CONNECTOR_INFO {
- U32 Pinout;
- U8 Connector[16];
- U8 Location;
- U8 ReceptacleID;
- U16 Slot;
- U16 Slotx2;
- U16 Slotx4;
-};
-
-struct LeapioraidIOUnitP0_t {
- struct LEAPIORAID_CONFIG_PAGE_HEADER Header;
- U64 UniqueValue;
- union LEAPIORAID_VERSION_UNION NvdataVersionDefault;
- union LEAPIORAID_VERSION_UNION NvdataVersionPersistent;
-};
-
-struct LeapioraidIOUnitP1_t {
- struct LEAPIORAID_CONFIG_PAGE_HEADER Header;
- U32 Flags;
-};
-
-#define LEAPIORAID_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE (0x00000100)
-#define LEAPIORAID_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020)
-
-struct LEAPIORAID_IOUNIT8_SENSOR {
- U16 Flags;
- U16 Reserved1;
- U16 Threshold[4];
- U32 Reserved2;
- U32 Reserved3;
- U32 Reserved4;
-};
-
-struct LeapioraidIOUnitP8_t {
- struct LEAPIORAID_CONFIG_PAGE_HEADER Header;
- U32 Reserved1;
- U32 Reserved2;
- U8 NumSensors;
- U8 PollingInterval;
- U16 Reserved3;
- struct LEAPIORAID_IOUNIT8_SENSOR Sensor[];
-};
-
-struct LeapioraidIOCP1_t {
- struct LEAPIORAID_CONFIG_PAGE_HEADER Header;
- U32 Flags;
- U32 CoalescingTimeout;
- U8 CoalescingDepth;
- U8 PCISlotNum;
- U8 PCIBusNum;
- U8 PCIDomainSegment;
- U32 Reserved1;
- U32 ProductSpecific;
-};
-
-struct LeapioraidIOCP8_t {
- struct LEAPIORAID_CONFIG_PAGE_HEADER Header;
- U8 NumDevsPerEnclosure;
- U8 Reserved1;
- U16 Reserved2;
- U16 MaxPersistentEntries;
- U16 MaxNumPhysicalMappedIDs;
- U16 Flags;
- U16 Reserved3;
- U16 IRVolumeMappingFlags;
- U16 Reserved4;
- U32 Reserved5;
-};
-
-#define LEAPIORAID_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE (0x00000003)
-#define LEAPIORAID_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING (0x00000000)
-
-struct LEAPIORAID_BOOT_DEVICE_ADAPTER_ORDER {
- U32 Reserved1;
- U32 Reserved2;
- U32 Reserved3;
- U32 Reserved4;
- U32 Reserved5;
- U32 Reserved6;
-};
-
-struct LEAPIORAID_BOOT_DEVICE_SAS_WWID {
- U64 SASAddress;
- U8 LUN[8];
- U32 Reserved1;
- U32 Reserved2;
-};
-
-struct LEAPIORAID_BOOT_DEVICE_ENCLOSURE_SLOT {
- U64 EnclosureLogicalID;
- U32 Reserved1;
- U32 Reserved2;
- U16 SlotNumber;
- U16 Reserved3;
- U32 Reserved4;
-};
-
-struct LEAPIORAID_BOOT_DEVICE_DEVICE_NAME {
- U64 DeviceName;
- U8 LUN[8];
- U32 Reserved1;
- U32 Reserved2;
-};
-
-union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE {
- struct LEAPIORAID_BOOT_DEVICE_ADAPTER_ORDER AdapterOrder;
- struct LEAPIORAID_BOOT_DEVICE_SAS_WWID SasWwid;
- struct LEAPIORAID_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot;
- struct LEAPIORAID_BOOT_DEVICE_DEVICE_NAME DeviceName;
-};
-
-struct LeapioraidBiosP2_t {
- struct LEAPIORAID_CONFIG_PAGE_HEADER Header;
- U32 Reserved1;
- U32 Reserved2;
- U32 Reserved3;
- U32 Reserved4;
- U32 Reserved5;
- U32 Reserved6;
- U8 ReqBootDeviceForm;
- U8 Reserved7;
- U16 Reserved8;
- union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE RequestedBootDevice;
- U8 ReqAltBootDeviceForm;
- U8 Reserved9;
- U16 Reserved10;
- union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE RequestedAltBootDevice;
- U8 CurrentBootDeviceForm;
- U8 Reserved11;
- U16 Reserved12;
- union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE CurrentBootDevice;
-};
-
-#define LEAPIORAID_BIOSPAGE2_FORM_MASK (0x0F)
-#define LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED (0x00)
-#define LEAPIORAID_BIOSPAGE2_FORM_SAS_WWID (0x05)
-#define LEAPIORAID_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06)
-#define LEAPIORAID_BIOSPAGE2_FORM_DEVICE_NAME (0x07)
-
-struct LEAPIORAID_ADAPTER_INFO {
- U8 PciBusNumber;
- U8 PciDeviceAndFunctionNumber;
- U16 AdapterFlags;
-};
-
-struct LEAPIORAID_ADAPTER_ORDER_AUX {
- U64 WWID;
- U32 Reserved1;
- U32 Reserved2;
-};
-
-struct LeapioraidBiosP3_t {
- struct LEAPIORAID_CONFIG_PAGE_HEADER Header;
- U32 GlobalFlags;
- U32 BiosVersion;
- struct LEAPIORAID_ADAPTER_INFO AdapterOrder[4];
- U32 Reserved1;
- struct LEAPIORAID_ADAPTER_ORDER_AUX AdapterOrderAux[4];
-};
-
-struct LEAPIORAID_RAIDVOL0_PHYS_DISK {
- U8 RAIDSetNum;
- U8 PhysDiskMap;
- U8 PhysDiskNum;
- U8 Reserved;
-};
-
-struct LEAPIORAID_RAIDVOL0_SETTINGS {
- U16 Settings;
- U8 HotSparePool;
- U8 Reserved;
-};
-
-struct LeapioraidRaidVolP0_t {
- struct LEAPIORAID_CONFIG_PAGE_HEADER Header;
- U16 DevHandle;
- U8 VolumeState;
- U8 VolumeType;
- U32 VolumeStatusFlags;
- struct LEAPIORAID_RAIDVOL0_SETTINGS VolumeSettings;
- U64 MaxLBA;
- U32 StripeSize;
- U16 BlockSize;
- U16 Reserved1;
- U8 SupportedPhysDisks;
- U8 ResyncRate;
- U16 DataScrubDuration;
- U8 NumPhysDisks;
- U8 Reserved2;
- U8 Reserved3;
- U8 InactiveStatus;
- struct LEAPIORAID_RAIDVOL0_PHYS_DISK PhysDisk[];
-};
-
-#define LEAPIORAID_RAID_VOL_STATE_MISSING (0x00)
-#define LEAPIORAID_RAID_VOL_STATE_FAILED (0x01)
-#define LEAPIORAID_RAID_VOL_STATE_INITIALIZING (0x02)
-#define LEAPIORAID_RAID_VOL_STATE_ONLINE (0x03)
-#define LEAPIORAID_RAID_VOL_STATE_DEGRADED (0x04)
-#define LEAPIORAID_RAID_VOL_STATE_OPTIMAL (0x05)
-#define LEAPIORAID_RAID_VOL_TYPE_RAID0 (0x00)
-#define LEAPIORAID_RAID_VOL_TYPE_RAID1E (0x01)
-#define LEAPIORAID_RAID_VOL_TYPE_RAID1 (0x02)
-#define LEAPIORAID_RAID_VOL_TYPE_RAID10 (0x05)
-#define LEAPIORAID_RAID_VOL_TYPE_UNKNOWN (0xFF)
-
-#define LEAPIORAID_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000)
-
-struct LeapioraidRaidVolP1_t {
- struct LEAPIORAID_CONFIG_PAGE_HEADER Header;
- U16 DevHandle;
- U16 Reserved0;
- U8 GUID[24];
- U8 Name[16];
- U64 WWID;
- U32 Reserved1;
- U32 Reserved2;
-};
-
-struct LEAPIORAID_RAIDPHYSDISK0_SETTINGS {
- U16 Reserved1;
- U8 HotSparePool;
- U8 Reserved2;
-};
-
-struct LEAPIORAID_RAIDPHYSDISK0_INQUIRY_DATA {
- U8 VendorID[8];
- U8 ProductID[16];
- U8 ProductRevLevel[4];
- U8 SerialNum[32];
-};
-
-struct LeapioraidRaidPDP0_t {
- struct LEAPIORAID_CONFIG_PAGE_HEADER Header;
- U16 DevHandle;
- U8 Reserved1;
- U8 PhysDiskNum;
- struct LEAPIORAID_RAIDPHYSDISK0_SETTINGS PhysDiskSettings;
- U32 Reserved2;
- struct LEAPIORAID_RAIDPHYSDISK0_INQUIRY_DATA InquiryData;
- U32 Reserved3;
- U8 PhysDiskState;
- U8 OfflineReason;
- U8 IncompatibleReason;
- U8 PhysDiskAttributes;
- U32 PhysDiskStatusFlags;
- U64 DeviceMaxLBA;
- U64 HostMaxLBA;
- U64 CoercedMaxLBA;
- U16 BlockSize;
- U16 Reserved5;
- U32 Reserved6;
-};
-
-#define LEAPIORAID_RAID_PD_STATE_NOT_CONFIGURED (0x00)
-#define LEAPIORAID_RAID_PD_STATE_NOT_COMPATIBLE (0x01)
-#define LEAPIORAID_RAID_PD_STATE_OFFLINE (0x02)
-#define LEAPIORAID_RAID_PD_STATE_ONLINE (0x03)
-#define LEAPIORAID_RAID_PD_STATE_HOT_SPARE (0x04)
-#define LEAPIORAID_RAID_PD_STATE_DEGRADED (0x05)
-#define LEAPIORAID_RAID_PD_STATE_REBUILDING (0x06)
-#define LEAPIORAID_RAID_PD_STATE_OPTIMAL (0x07)
-
-#define LEAPIORAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL (0x0F)
-#define LEAPIORAID_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00)
-#define LEAPIORAID_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01)
-#define LEAPIORAID_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02)
-#define LEAPIORAID_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03)
-#define LEAPIORAID_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04)
-#define LEAPIORAID_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05)
-#define LEAPIORAID_SAS_NEG_LINK_RATE_1_5 (0x08)
-#define LEAPIORAID_SAS_NEG_LINK_RATE_3_0 (0x09)
-#define LEAPIORAID_SAS_NEG_LINK_RATE_6_0 (0x0A)
-#define LEAPIORAID_SAS_NEG_LINK_RATE_12_0 (0x0B)
-
-#define LEAPIORAID_SAS_PHYINFO_VIRTUAL_PHY (0x00001000)
-
-#define LEAPIORAID_SAS_PRATE_MIN_RATE_MASK (0x0F)
-#define LEAPIORAID_SAS_HWRATE_MIN_RATE_MASK (0x0F)
-
-struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA {
- U8 Port;
- U8 PortFlags;
- U8 PhyFlags;
- U8 NegotiatedLinkRate;
- U32 ControllerPhyDeviceInfo;
- U16 AttachedDevHandle;
- U16 ControllerDevHandle;
- U32 DiscoveryStatus;
- U32 Reserved;
-};
-
-struct LeapioraidSasIOUnitP0_t {
- struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header;
- U32 Reserved1;
- U8 NumPhys;
- U8 Reserved2;
- U16 Reserved3;
- struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA PhyData[];
-};
-
-#define LEAPIORAID_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS (0x08)
-#define LEAPIORAID_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01)
-#define LEAPIORAID_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10)
-#define LEAPIORAID_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
-
-struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA {
- U8 Port;
- U8 PortFlags;
- U8 PhyFlags;
- U8 MaxMinLinkRate;
- U32 ControllerPhyDeviceInfo;
- U16 MaxTargetPortConnectTime;
- U16 Reserved1;
-};
-
-struct LeapioraidSasIOUnitP1_t {
- struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header;
- U16 ControlFlags;
- U16 SASNarrowMaxQueueDepth;
- U16 AdditionalControlFlags;
- U16 SASWideMaxQueueDepth;
- U8 NumPhys;
- U8 SATAMaxQDepth;
- U8 ReportDeviceMissingDelay;
- U8 IODeviceMissingDelay;
- struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA PhyData[];
-};
-
-#define LEAPIORAID_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK (0x7F)
-#define LEAPIORAID_SASIOUNIT1_REPORT_MISSING_UNIT_16 (0x80)
-#define LEAPIORAID_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10)
-#define LEAPIORAID_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
-
-struct LeapioraidExpanderP0_t {
- struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header;
- U8 PhysicalPort;
- U8 ReportGenLength;
- U16 EnclosureHandle;
- U64 SASAddress;
- U32 DiscoveryStatus;
- U16 DevHandle;
- U16 ParentDevHandle;
- U16 ExpanderChangeCount;
- U16 ExpanderRouteIndexes;
- U8 NumPhys;
- U8 SASLevel;
- U16 Flags;
- U16 STPBusInactivityTimeLimit;
- U16 STPMaxConnectTimeLimit;
- U16 STP_SMP_NexusLossTime;
- U16 MaxNumRoutedSasAddresses;
- U64 ActiveZoneManagerSASAddress;
- U16 ZoneLockInactivityLimit;
- U16 Reserved1;
- U8 TimeToReducedFunc;
- U8 InitialTimeToReducedFunc;
- U8 MaxReducedFuncTime;
- U8 Reserved2;
-};
-
-struct LeapioraidExpanderP1_t {
- struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header;
- U8 PhysicalPort;
- U8 Reserved1;
- U16 Reserved2;
- U8 NumPhys;
- U8 Phy;
- U16 NumTableEntriesProgrammed;
- U8 ProgrammedLinkRate;
- U8 HwLinkRate;
- U16 AttachedDevHandle;
- U32 PhyInfo;
- U32 AttachedDeviceInfo;
- U16 ExpanderDevHandle;
- U8 ChangeCount;
- U8 NegotiatedLinkRate;
- U8 PhyIdentifier;
- U8 AttachedPhyIdentifier;
- U8 Reserved3;
- U8 DiscoveryInfo;
- U32 AttachedPhyInfo;
- U8 ZoneGroup;
- U8 SelfConfigStatus;
- U16 Reserved4;
-};
-
-struct LeapioraidSasDevP0_t {
- struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header;
- U16 Slot;
- U16 EnclosureHandle;
- U64 SASAddress;
- U16 ParentDevHandle;
- U8 PhyNum;
- U8 AccessStatus;
- U16 DevHandle;
- U8 AttachedPhyIdentifier;
- U8 ZoneGroup;
- U32 DeviceInfo;
- U16 Flags;
- U8 PhysicalPort;
- U8 MaxPortConnections;
- U64 DeviceName;
- U8 PortGroups;
- U8 DmaGroup;
- U8 ControlGroup;
- U8 EnclosureLevel;
- U8 ConnectorName[4];
- U32 Reserved3;
-};
-
-#define LEAPIORAID_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00)
-#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01)
-#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02)
-#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT (0x03)
-#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION (0x04)
-#define LEAPIORAID_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x05)
-#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x06)
-#define LEAPIORAID_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x07)
-#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10)
-#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11)
-#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_DIAG (0x12)
-#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x13)
-#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x14)
-#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_PIO_SN (0x15)
-#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN (0x16)
-#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN (0x17)
-#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x18)
-#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x19)
-#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_MAX (0x1F)
-#define LEAPIORAID_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE (0x2000)
-#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400)
-#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200)
-#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED (0x0040)
-#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020)
-#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010)
-#define LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002)
-#define LEAPIORAID_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)
-
-struct LeapioraidSasPhyP0_t {
- struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header;
- U16 OwnerDevHandle;
- U16 Reserved1;
- U16 AttachedDevHandle;
- U8 AttachedPhyIdentifier;
- U8 Reserved2;
- U32 AttachedPhyInfo;
- U8 ProgrammedLinkRate;
- U8 HwLinkRate;
- U8 ChangeCount;
- U8 Flags;
- U32 PhyInfo;
- U8 NegotiatedLinkRate;
- U8 Reserved3;
- U16 Reserved4;
-};
-
-struct LeapioraidSasPhyP1_t {
- struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header;
- U32 Reserved1;
- U32 InvalidDwordCount;
- U32 RunningDisparityErrorCount;
- U32 LossDwordSynchCount;
- U32 PhyResetProblemCount;
-};
-
-struct LeapioraidSasEncP0_t {
- struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header;
- U32 Reserved1;
- U64 EnclosureLogicalID;
- U16 Flags;
- U16 EnclosureHandle;
- U16 NumSlots;
- U16 StartSlot;
- U8 ChassisSlot;
- U8 EnclosureLevel;
- U16 SEPDevHandle;
- U8 OEMRD;
- U8 Reserved1a;
- U16 Reserved2;
- U32 Reserved3;
-};
-
-#define LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
-
-struct LEAPIORAID_RAIDCONFIG0_CONFIG_ELEMENT {
- U16 ElementFlags;
- U16 VolDevHandle;
- U8 HotSparePool;
- U8 PhysDiskNum;
- U16 PhysDiskDevHandle;
-};
-
-#define LEAPIORAID_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE (0x000F)
-#define LEAPIORAID_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT (0x0001)
-#define LEAPIORAID_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT (0x0002)
-#define LEAPIORAID_RAIDCONFIG0_EFLAGS_OCE_ELEMENT (0x0003)
-
-struct LeapioraidRaidCfgP0_t {
- struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header;
- U8 NumHotSpares;
- U8 NumPhysDisks;
- U8 NumVolumes;
- U8 ConfigNum;
- U32 Flags;
- U8 ConfigGUID[24];
- U32 Reserved1;
- U8 NumElements;
- U8 Reserved2;
- U16 Reserved3;
- struct LEAPIORAID_RAIDCONFIG0_CONFIG_ELEMENT ConfigElement[];
-};
-
-struct LeapioraidFWImgHeader_t {
- U32 Signature;
- U32 Signature0;
- U32 Signature1;
- U32 Signature2;
- union LEAPIORAID_VERSION_UNION LEAPIOVersion;
- union LEAPIORAID_VERSION_UNION FWVersion;
- union LEAPIORAID_VERSION_UNION NVDATAVersion;
- union LEAPIORAID_VERSION_UNION PackageVersion;
- U16 VendorID;
- U16 ProductID;
- U16 ProtocolFlags;
- U16 Reserved26;
- U32 IOCCapabilities;
- U32 ImageSize;
- U32 NextImageHeaderOffset;
- U32 Checksum;
- U32 Reserved38;
- U32 Reserved3C;
- U32 Reserved40;
- U32 Reserved44;
- U32 Reserved48;
- U32 Reserved4C;
- U32 Reserved50;
- U32 Reserved54;
- U32 Reserved58;
- U32 Reserved5C;
- U32 BootFlags;
- U32 FirmwareVersionNameWhat;
- U8 FirmwareVersionName[32];
- U32 VendorNameWhat;
- U8 VendorName[32];
- U32 PackageNameWhat;
- U8 PackageName[32];
- U32 ReservedD0;
- U32 ReservedD4;
- U32 ReservedD8;
- U32 ReservedDC;
- U32 ReservedE0;
- U32 ReservedE4;
- U32 ReservedE8;
- U32 ReservedEC;
- U32 ReservedF0;
- U32 ReservedF4;
- U32 ReservedF8;
- U32 ReservedFC;
-};
-
-struct LEAPIORAID_HASH_EXCLUSION_FORMAT {
- U32 Offset;
- U32 Size;
-};
-
-struct LeapioraidComptImgHeader_t {
- U32 Signature0;
- U32 LoadAddress;
- U32 DataSize;
- U32 StartAddress;
- U32 Signature1;
- U32 FlashOffset;
- U32 FlashSize;
- U32 VersionStringOffset;
- U32 BuildDateStringOffset;
- U32 BuildTimeStringOffset;
- U32 EnvironmentVariableOffset;
- U32 ApplicationSpecific;
- U32 Signature2;
- U32 HeaderSize;
- U32 Crc;
- U8 NotFlashImage;
- U8 Compressed;
- U16 Reserved3E;
- U32 SecondaryFlashOffset;
- U32 Reserved44;
- U32 Reserved48;
- union LEAPIORAID_VERSION_UNION RMCInterfaceVersion;
- union LEAPIORAID_VERSION_UNION Reserved50;
- union LEAPIORAID_VERSION_UNION FWVersion;
- union LEAPIORAID_VERSION_UNION NvdataVersion;
- struct LEAPIORAID_HASH_EXCLUSION_FORMAT HashExclusion[4];
- U32 NextImageHeaderOffset;
- U32 Reserved80[32];
-};
-
-struct LEAPIORAID_SCSI_IO_CDB_EEDP32 {
- U8 CDB[20];
- __be32 PrimaryReferenceTag;
- U16 PrimaryApplicationTag;
- U16 PrimaryApplicationTagMask;
- U32 TransferLength;
-};
-
-union LEAPIO_SCSI_IO_CDB_UNION {
- U8 CDB32[32];
- struct LEAPIORAID_SCSI_IO_CDB_EEDP32 EEDP32;
- struct LEAPIORAID_SGE_SIMPLE_UNION SGE;
-};
-
-struct LeapioSCSIIOReq_t {
- U16 DevHandle;
- U8 ChainOffset;
- U8 Function;
- U16 Reserved1;
- U8 Reserved2;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved3;
- U32 SenseBufferLowAddress;
- U16 SGLFlags;
- U8 SenseBufferLength;
- U8 Reserved4;
- U8 SGLOffset0;
- U8 SGLOffset1;
- U8 SGLOffset2;
- U8 SGLOffset3;
- U32 SkipCount;
- U32 DataLength;
- U32 BidirectionalDataLength;
- U16 IoFlags;
- U16 EEDPFlags;
- U32 EEDPBlockSize;
- U32 SecondaryReferenceTag;
- U16 SecondaryApplicationTag;
- U16 ApplicationTagTranslationMask;
- U8 LUN[8];
- U32 Control;
- union LEAPIO_SCSI_IO_CDB_UNION CDB;
- union LEAPIORAID_SGE_IO_UNION SGL;
-};
-
-#define LEAPIORAID_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR (0x00)
-
-#define LEAPIORAID_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26)
-#define LEAPIORAID_SCSIIO_CONTROL_NODATATRANSFER (0x00000000)
-#define LEAPIORAID_SCSIIO_CONTROL_WRITE (0x01000000)
-#define LEAPIORAID_SCSIIO_CONTROL_READ (0x02000000)
-#define LEAPIORAID_SCSIIO_CONTROL_BIDIRECTIONAL (0x03000000)
-#define LEAPIORAID_SCSIIO_CONTROL_CMDPRI_SHIFT (11)
-#define LEAPIORAID_SCSIIO_CONTROL_SIMPLEQ (0x00000000)
-#define LEAPIORAID_SCSIIO_CONTROL_ORDEREDQ (0x00000200)
-#define LEAPIORAID_SCSIIO_CONTROL_TLR_ON (0x00000040)
-
-union LEAPIORAID_SCSI_IO_CDB_UNION {
- U8 CDB32[32];
- struct LEAPIORAID_SCSI_IO_CDB_EEDP32 EEDP32;
- struct LEAPIORAID_IEEE_SGE_SIMPLE64 SGE;
-};
-
-struct LeapioraidSCSIIOReq_t {
- U16 DevHandle;
- U8 ChainOffset;
- U8 Function;
- U16 Reserved1;
- U8 Reserved2;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved3;
- U32 SenseBufferLowAddress;
- U8 DMAFlags;
- U8 Reserved5;
- U8 SenseBufferLength;
- U8 Reserved4;
- U8 SGLOffset0;
- U8 SGLOffset1;
- U8 SGLOffset2;
- U8 SGLOffset3;
- U32 SkipCount;
- U32 DataLength;
- U32 BidirectionalDataLength;
- U16 IoFlags;
- U16 EEDPFlags;
- U16 EEDPBlockSize;
- U16 Reserved6;
- U32 SecondaryReferenceTag;
- U16 SecondaryApplicationTag;
- U16 ApplicationTagTranslationMask;
- U8 LUN[8];
- U32 Control;
- union LEAPIORAID_SCSI_IO_CDB_UNION CDB;
- union LEAPIORAID_IEEE_SGE_IO_UNION SGL;
-};
-
-struct LeapioraidSCSIIORep_t {
- U16 DevHandle;
- U8 MsgLength;
- U8 Function;
- U16 Reserved1;
- U8 Reserved2;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved3;
- U8 SCSIStatus;
- U8 SCSIState;
- U16 IOCStatus;
- U32 IOCLogInfo;
- U32 TransferCount;
- U32 SenseCount;
- U32 ResponseInfo;
- U16 TaskTag;
- U16 SCSIStatusQualifier;
- U32 BidirectionalTransferCount;
- U32 EEDPErrorOffset;
- U16 EEDPObservedAppTag;
- U16 EEDPObservedGuard;
- U32 EEDPObservedRefTag;
-};
-
-#define LEAPIORAID_SCSI_STATUS_GOOD (0x00)
-#define LEAPIORAID_SCSI_STATUS_CHECK_CONDITION (0x02)
-#define LEAPIORAID_SCSI_STATUS_CONDITION_MET (0x04)
-#define LEAPIORAID_SCSI_STATUS_BUSY (0x08)
-#define LEAPIORAID_SCSI_STATUS_INTERMEDIATE (0x10)
-#define LEAPIORAID_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14)
-#define LEAPIORAID_SCSI_STATUS_RESERVATION_CONFLICT (0x18)
-#define LEAPIORAID_SCSI_STATUS_COMMAND_TERMINATED (0x22)
-#define LEAPIORAID_SCSI_STATUS_TASK_SET_FULL (0x28)
-#define LEAPIORAID_SCSI_STATUS_ACA_ACTIVE (0x30)
-#define LEAPIORAID_SCSI_STATUS_TASK_ABORTED (0x40)
-#define LEAPIORAID_SCSI_STATE_RESPONSE_INFO_VALID (0x10)
-#define LEAPIORAID_SCSI_STATE_TERMINATED (0x08)
-#define LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS (0x04)
-#define LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED (0x02)
-#define LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID (0x01)
-
-struct LeapioraidSCSITmgReq_t {
- U16 DevHandle;
- U8 ChainOffset;
- U8 Function;
- U8 Reserved1;
- U8 TaskType;
- U8 Reserved2;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved3;
- U8 LUN[8];
- U32 Reserved4[7];
- U16 TaskMID;
- U16 Reserved5;
-};
-
-#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
-#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02)
-#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
-#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
-#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
-#define LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00)
-
-struct LeapioraidSCSITmgRep_t {
- U16 DevHandle;
- U8 MsgLength;
- U8 Function;
- U8 ResponseCode;
- U8 TaskType;
- U8 Reserved1;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved2;
- U16 Reserved3;
- U16 IOCStatus;
- U32 IOCLogInfo;
- U32 TerminationCount;
- U32 ResponseInfo;
-};
-
-#define LEAPIORAID_SCSITASKMGMT_RSP_TM_COMPLETE (0x00)
-#define LEAPIORAID_SCSITASKMGMT_RSP_INVALID_FRAME (0x02)
-#define LEAPIORAID_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04)
-#define LEAPIORAID_SCSITASKMGMT_RSP_TM_FAILED (0x05)
-#define LEAPIORAID_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
-#define LEAPIORAID_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
-#define LEAPIORAID_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)
-
-struct LeapioraidSepReq_t {
- U16 DevHandle;
- U8 ChainOffset;
- U8 Function;
- U8 Action;
- U8 Flags;
- U8 Reserved1;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved2;
- U32 SlotStatus;
- U32 Reserved3;
- U32 Reserved4;
- U32 Reserved5;
- U16 Slot;
- U16 EnclosureHandle;
-};
-
-#define LEAPIORAID_SEP_REQ_ACTION_WRITE_STATUS (0x00)
-#define LEAPIORAID_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS (0x00)
-#define LEAPIORAID_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01)
-#define LEAPIORAID_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
-
-struct LeapioraidSepRep_t {
- U16 DevHandle;
- U8 MsgLength;
- U8 Function;
- U8 Action;
- U8 Flags;
- U8 Reserved1;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved2;
- U16 Reserved3;
- U16 IOCStatus;
- U32 IOCLogInfo;
- U32 SlotStatus;
- U32 Reserved4;
- U16 Slot;
- U16 EnclosureHandle;
-};
-
-struct LeapioraidIOCInitReq_t {
- U8 WhoInit;
- U8 Reserved1;
- U8 ChainOffset;
- U8 Function;
- U16 Reserved2;
- U8 Reserved3;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved4;
- U16 MsgVersion;
- U16 HeaderVersion;
- U32 Reserved5;
- U16 ConfigurationFlags;
- U8 HostPageSize;
- U8 HostMSIxVectors;
- U16 Reserved8;
- U16 SystemRequestFrameSize;
- U16 ReplyDescriptorPostQueueDepth;
- U16 ReplyFreeQueueDepth;
- U32 SenseBufferAddressHigh;
- U32 SystemReplyAddressHigh;
- U64 SystemRequestFrameBaseAddress;
- U64 ReplyDescriptorPostQueueAddress;
- U64 ReplyFreeQueueAddress;
- U64 TimeStamp;
-};
-
-#define LEAPIORAID_WHOINIT_HOST_DRIVER (0x04)
-#define LEAPIORAID_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01)
-
-struct LeapioraidIOCInitRDPQArrayEntry {
- U64 RDPQBaseAddress;
- U32 Reserved1;
- U32 Reserved2;
-};
-
-struct LeapioraidIOCInitRep_t {
- U8 WhoInit;
- U8 Reserved1;
- U8 MsgLength;
- U8 Function;
- U16 Reserved2;
- U8 Reserved3;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved4;
- U16 Reserved5;
- U16 IOCStatus;
- U32 IOCLogInfo;
-};
-
-struct LeapioraidIOCLogReq_t {
- U16 Reserved1;
- U8 ChainOffset;
- U8 Function;
- U16 Reserved2;
- U8 Reserved3;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved4;
- U64 BufAddr;
- U32 BufSize;
-};
-
-struct LeapioraidIOCLogRep_t {
- U16 Reserved1;
- U8 MsgLength;
- U8 Function;
- U16 Reserved2;
- U8 Reserved3;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved4;
- U16 Reserved5;
- U16 IOCStatus;
- U32 IOCLogInfo;
-};
-
-struct LeapioraidIOCFactsReq_t {
- U16 Reserved1;
- U8 ChainOffset;
- U8 Function;
- U16 Reserved2;
- U8 Reserved3;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved4;
-};
-
-struct LeapioraidIOCFactsRep_t {
- U16 MsgVersion;
- U8 MsgLength;
- U8 Function;
- U16 HeaderVersion;
- U8 IOCNumber;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved1;
- U16 IOCExceptions;
- U16 IOCStatus;
- U32 IOCLogInfo;
- U8 MaxChainDepth;
- U8 WhoInit;
- U8 NumberOfPorts;
- U8 MaxMSIxVectors;
- U16 RequestCredit;
- U16 ProductID;
- U32 IOCCapabilities;
- union LEAPIORAID_VERSION_UNION FWVersion;
- U16 IOCRequestFrameSize;
- U16 IOCMaxChainSegmentSize;
- U16 MaxInitiators;
- U16 MaxTargets;
- U16 MaxSasExpanders;
- U16 MaxEnclosures;
- U16 ProtocolFlags;
- U16 HighPriorityCredit;
- U16 MaxReplyDescriptorPostQueueDepth;
- U8 ReplyFrameSize;
- U8 MaxVolumes;
- U16 MaxDevHandle;
- U16 MaxPersistentEntries;
- U16 MinDevHandle;
- U8 CurrentHostPageSize;
- U8 Reserved4;
- U8 SGEModifierMask;
- U8 SGEModifierValue;
- U8 SGEModifierShift;
- U8 Reserved5;
-};
-
-#define LEAPIORAID_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000)
-#define LEAPIORAID_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000)
-#define LEAPIORAID_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000)
-#define LEAPIORAID_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000)
-#define LEAPIORAID_IOCFACTS_CAPABILITY_INTEGRATED_RAID (0x00001000)
-#define LEAPIORAID_IOCFACTS_CAPABILITY_TLR (0x00000800)
-#define LEAPIORAID_IOCFACTS_CAPABILITY_MULTICAST (0x00000100)
-#define LEAPIORAID_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080)
-#define LEAPIORAID_IOCFACTS_CAPABILITY_EEDP (0x00000040)
-#define LEAPIORAID_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004)
-#define LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
-#define LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
-
-struct LeapioraidPortFactsReq_t {
- U16 Reserved1;
- U8 ChainOffset;
- U8 Function;
- U16 Reserved2;
- U8 PortNumber;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved3;
-};
-
-struct LeapioraidPortFactsRep_t {
- U16 Reserved1;
- U8 MsgLength;
- U8 Function;
- U16 Reserved2;
- U8 PortNumber;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved3;
- U16 Reserved4;
- U16 IOCStatus;
- U32 IOCLogInfo;
- U8 Reserved5;
- U8 PortType;
- U16 Reserved6;
- U16 MaxPostedCmdBuffers;
- U16 Reserved7;
-};
-
-struct LeapioraidPortEnableReq_t {
- U16 Reserved1;
- U8 ChainOffset;
- U8 Function;
- U8 Reserved2;
- U8 PortFlags;
- U8 Reserved3;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved4;
-};
-
-struct LeapioraidPortEnableRep_t {
- U16 Reserved1;
- U8 MsgLength;
- U8 Function;
- U8 Reserved2;
- U8 PortFlags;
- U8 Reserved3;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved4;
- U16 Reserved5;
- U16 IOCStatus;
- U32 IOCLogInfo;
-};
-
-#define LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS (4)
-struct LeapioraidEventNotificationReq_t {
- U16 Reserved1;
- U8 ChainOffset;
- U8 Function;
- U16 Reserved2;
- U8 Reserved3;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved4;
- U32 Reserved5;
- U32 Reserved6;
- U32 EventMasks[LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS];
- U16 SASBroadcastPrimitiveMasks;
- U16 SASNotifyPrimitiveMasks;
- U32 Reserved8;
-};
-
-struct LeapioraidEventNotificationRep_t {
- U16 EventDataLength;
- U8 MsgLength;
- U8 Function;
- U16 Reserved1;
- U8 AckRequired;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved2;
- U16 Reserved3;
- U16 IOCStatus;
- U32 IOCLogInfo;
- U16 Event;
- U16 Reserved4;
- U32 EventContext;
- U32 EventData[];
-};
-
-#define LEAPIORAID_EVENT_NOTIFICATION_ACK_REQUIRED (0x01)
-#define LEAPIORAID_EVENT_LOG_DATA (0x0001)
-#define LEAPIORAID_EVENT_STATE_CHANGE (0x0002)
-#define LEAPIORAID_EVENT_HARD_RESET_RECEIVED (0x0005)
-#define LEAPIORAID_EVENT_EVENT_CHANGE (0x000A)
-#define LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F)
-#define LEAPIORAID_EVENT_IR_OPERATION_STATUS (0x0014)
-#define LEAPIORAID_EVENT_SAS_DISCOVERY (0x0016)
-#define LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE (0x0017)
-#define LEAPIORAID_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x0018)
-#define LEAPIORAID_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019)
-#define LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C)
-#define LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D)
-#define LEAPIORAID_EVENT_IR_VOLUME (0x001E)
-#define LEAPIORAID_EVENT_IR_PHYSICAL_DISK (0x001F)
-#define LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020)
-#define LEAPIORAID_EVENT_LOG_ENTRY_ADDED (0x0021)
-#define LEAPIORAID_EVENT_SAS_QUIESCE (0x0025)
-#define LEAPIORAID_EVENT_TEMP_THRESHOLD (0x0027)
-#define LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x0035)
-
-struct LeapioraidEventDataSasDeviceStatusChange_t {
- U16 TaskTag;
- U8 ReasonCode;
- U8 PhysicalPort;
- U8 ASC;
- U8 ASCQ;
- U16 DevHandle;
- U32 Reserved2;
- U64 SASAddress;
- U8 LUN[8];
-};
-
-#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05)
-#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07)
-#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08)
-#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09)
-#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A)
-#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B)
-#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C)
-#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D)
-#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E)
-#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F)
-#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE (0x10)
-#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY (0x11)
-#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY (0x12)
-
-struct LeapioraidEventDataIrOpStatus_t {
- U16 VolDevHandle;
- U16 Reserved1;
- U8 RAIDOperation;
- U8 PercentComplete;
- U16 Reserved2;
- U32 ElapsedSeconds;
-};
-
-#define LEAPIORAID_EVENT_IR_RAIDOP_RESYNC (0x00)
-#define LEAPIORAID_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION (0x01)
-#define LEAPIORAID_EVENT_IR_RAIDOP_CONSISTENCY_CHECK (0x02)
-#define LEAPIORAID_EVENT_IR_RAIDOP_BACKGROUND_INIT (0x03)
-#define LEAPIORAID_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT (0x04)
-
-struct LeapioraidEventDataIrVol_t {
- U16 VolDevHandle;
- U8 ReasonCode;
- U8 Reserved1;
- U32 NewValue;
- U32 PreviousValue;
-};
-
-#define LEAPIORAID_EVENT_IR_VOLUME_RC_STATE_CHANGED (0x03)
-struct LeapioraidEventDataIrPhyDisk_t {
- U16 Reserved1;
- U8 ReasonCode;
- U8 PhysDiskNum;
- U16 PhysDiskDevHandle;
- U16 Reserved2;
- U16 Slot;
- U16 EnclosureHandle;
- U32 NewValue;
- U32 PreviousValue;
-};
-
-#define LEAPIORAID_EVENT_IR_PHYSDISK_RC_STATE_CHANGED (0x03)
-
-struct LeapioraidEventIrCfgEle_t {
- U16 ElementFlags;
- U16 VolDevHandle;
- U8 ReasonCode;
- U8 PhysDiskNum;
- U16 PhysDiskDevHandle;
-};
-
-#define LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK (0x000F)
-#define LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT (0x0000)
-#define LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT (0x0001)
-#define LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT (0x0002)
-#define LEAPIORAID_EVENT_IR_CHANGE_RC_ADDED (0x01)
-#define LEAPIORAID_EVENT_IR_CHANGE_RC_REMOVED (0x02)
-#define LEAPIORAID_EVENT_IR_CHANGE_RC_NO_CHANGE (0x03)
-#define LEAPIORAID_EVENT_IR_CHANGE_RC_HIDE (0x04)
-#define LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE (0x05)
-#define LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_CREATED (0x06)
-#define LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_DELETED (0x07)
-#define LEAPIORAID_EVENT_IR_CHANGE_RC_PD_CREATED (0x08)
-#define LEAPIORAID_EVENT_IR_CHANGE_RC_PD_DELETED (0x09)
-
-struct LeapioraidEventDataIrCfgChangeList_t {
- U8 NumElements;
- U8 Reserved1;
- U8 Reserved2;
- U8 ConfigNum;
- U32 Flags;
- struct LeapioraidEventIrCfgEle_t ConfigElement[];
-};
-
-#define LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG (0x00000001)
-struct LeapioraidEventDataSasDiscovery_t {
- U8 Flags;
- U8 ReasonCode;
- U8 PhysicalPort;
- U8 Reserved1;
- U32 DiscoveryStatus;
-};
-
-#define LEAPIORAID_EVENT_SAS_DISC_RC_STARTED (0x01)
-
-struct LeapioraidEventDataSasBroadcastPrimitive_t {
- U8 PhyNum;
- U8 Port;
- U8 PortWidth;
- U8 Primitive;
-};
-
-#define LEAPIORAID_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04)
-
-struct LEAPIORAID_EVENT_SAS_TOPO_PHY_ENTRY {
- U16 AttachedDevHandle;
- U8 LinkRate;
- U8 PhyStatus;
-};
-
-struct LeapioraidEventDataSasTopoChangeList_t {
- U16 EnclosureHandle;
- U16 ExpanderDevHandle;
- U8 NumPhys;
- U8 Reserved1;
- U16 Reserved2;
- U8 NumEntries;
- U8 StartPhyNum;
- U8 ExpStatus;
- U8 PhysicalPort;
- struct LEAPIORAID_EVENT_SAS_TOPO_PHY_ENTRY PHY[];
-};
-
-#define LEAPIORAID_EVENT_SAS_TOPO_ES_ADDED (0x01)
-#define LEAPIORAID_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02)
-#define LEAPIORAID_EVENT_SAS_TOPO_ES_RESPONDING (0x03)
-#define LEAPIORAID_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04)
-#define LEAPIORAID_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80)
-#define LEAPIORAID_EVENT_SAS_TOPO_RC_MASK (0x0F)
-#define LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_ADDED (0x01)
-#define LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING (0x02)
-#define LEAPIORAID_EVENT_SAS_TOPO_RC_PHY_CHANGED (0x03)
-#define LEAPIORAID_EVENT_SAS_TOPO_RC_NO_CHANGE (0x04)
-#define LEAPIORAID_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING (0x05)
-
-struct LeapioraidEventDataSasEnclDevStatusChange_t {
- U16 EnclosureHandle;
- U8 ReasonCode;
- U8 PhysicalPort;
- U64 EnclosureLogicalID;
- U16 NumSlots;
- U16 StartSlot;
- U32 PhyBits;
-};
-
-#define LEAPIORAID_EVENT_SAS_ENCL_RC_ADDED (0x01)
-#define LEAPIORAID_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02)
-
-struct LeapioraidEventDataSasDeviceDiscoveryError_t {
- U16 DevHandle;
- U8 ReasonCode;
- U8 PhysicalPort;
- U32 Reserved1[2];
- U64 SASAddress;
- U32 Reserved2[2];
-};
-
-#define LEAPIORAID_EVENT_SAS_DISC_ERR_SMP_FAILED (0x01)
-#define LEAPIORAID_EVENT_SAS_DISC_ERR_SMP_TIMEOUT (0x02)
-
-struct LeapioraidEventAckReq_t {
- U16 Reserved1;
- U8 ChainOffset;
- U8 Function;
- U16 Reserved2;
- U8 Reserved3;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved4;
- U16 Event;
- U16 Reserved5;
- U32 EventContext;
-};
-
-struct LeapioraidFWUploadReq_t {
- U8 ImageType;
- U8 Reserved1;
- U8 ChainOffset;
- U8 Function;
- U16 Reserved2;
- U8 Reserved3;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved4;
- U32 Reserved5;
- U32 Reserved6;
- U32 Reserved7;
- U32 ImageOffset;
- U32 ImageSize;
- union LEAPIORAID_IEEE_SGE_IO_UNION SGL;
-};
-
-struct LeapioraidFWUploadRep_t {
- U8 ImageType;
- U8 Reserved1;
- U8 MsgLength;
- U8 Function;
- U16 Reserved2;
- U8 Reserved3;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved4;
- U16 Reserved5;
- U16 IOCStatus;
- U32 IOCLogInfo;
- U32 ActualImageSize;
-};
-
-struct LeapioraidIoUnitControlReq_t {
- U8 Operation;
- U8 Reserved1;
- U8 ChainOffset;
- U8 Function;
- U16 DevHandle;
- U8 IOCParameter;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved3;
- U16 Reserved4;
- U8 PhyNum;
- U8 PrimFlags;
- U32 Primitive;
- U8 LookupMethod;
- U8 Reserved5;
- U16 SlotNumber;
- U64 LookupAddress;
- U32 IOCParameterValue;
- U32 IOCParameterValue2;
- U32 Reserved8;
-};
-
-#define LEAPIORAID_CTRL_OP_REMOVE_DEVICE (0x0D)
-
-struct LeapioraidIoUnitControlRep_t {
- U8 Operation;
- U8 Reserved1;
- U8 MsgLength;
- U8 Function;
- U16 DevHandle;
- U8 IOCParameter;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved3;
- U16 Reserved4;
- U16 IOCStatus;
- U32 IOCLogInfo;
-};
-
-struct LEAPIORAID_RAID_ACTION_RATE_DATA {
- U8 RateToChange;
- U8 RateOrMode;
- U16 DataScrubDuration;
-};
-
-struct LEAPIORAID_RAID_ACTION_START_RAID_FUNCTION {
- U8 RAIDFunction;
- U8 Flags;
- U16 Reserved1;
-};
-
-struct LEAPIORAID_RAID_ACTION_STOP_RAID_FUNCTION {
- U8 RAIDFunction;
- U8 Flags;
- U16 Reserved1;
-};
-
-struct LEAPIORAID_RAID_ACTION_HOT_SPARE {
- U8 HotSparePool;
- U8 Reserved1;
- U16 DevHandle;
-};
-
-struct LEAPIORAID_RAID_ACTION_FW_UPDATE_MODE {
- U8 Flags;
- U8 DeviceFirmwareUpdateModeTimeout;
- U16 Reserved1;
-};
-
-union LEAPIORAID_RAID_ACTION_DATA {
- U32 Word;
- struct LEAPIORAID_RAID_ACTION_RATE_DATA Rates;
- struct LEAPIORAID_RAID_ACTION_START_RAID_FUNCTION StartRaidFunction;
- struct LEAPIORAID_RAID_ACTION_STOP_RAID_FUNCTION StopRaidFunction;
- struct LEAPIORAID_RAID_ACTION_HOT_SPARE HotSpare;
- struct LEAPIORAID_RAID_ACTION_FW_UPDATE_MODE FwUpdateMode;
-};
-
-struct LeapioraidRaidActionReq_t {
- U8 Action;
- U8 Reserved1;
- U8 ChainOffset;
- U8 Function;
- U16 VolDevHandle;
- U8 PhysDiskNum;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved2;
- U32 Reserved3;
- union LEAPIORAID_RAID_ACTION_DATA ActionDataWord;
- struct LEAPIORAID_SGE_SIMPLE_UNION ActionDataSGE;
-};
-
-struct LEAPIORAID_RAID_VOL_INDICATOR {
- U64 TotalBlocks;
- U64 BlocksRemaining;
- U32 Flags;
- U32 ElapsedSeconds;
-};
-
-struct LEAPIORAID_RAID_COMPATIBILITY_RESULT_STRUCT {
- U8 State;
- U8 Reserved1;
- U16 Reserved2;
- U32 GenericAttributes;
- U32 OEMSpecificAttributes;
- U32 Reserved3;
- U32 Reserved4;
-};
-
-union LEAPIORAID_RAID_ACTION_REPLY_DATA {
- U32 Word[6];
- struct LEAPIORAID_RAID_VOL_INDICATOR RaidVolumeIndicator;
- U16 VolDevHandle;
- U8 VolumeState;
- U8 PhysDiskNum;
- struct LEAPIORAID_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult;
-};
-
-struct LeapioraidRaidActionRep_t {
- U8 Action;
- U8 Reserved1;
- U8 MsgLength;
- U8 Function;
- U16 VolDevHandle;
- U8 PhysDiskNum;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved2;
- U16 Reserved3;
- U16 IOCStatus;
- U32 IOCLogInfo;
- union LEAPIORAID_RAID_ACTION_REPLY_DATA ActionData;
-};
-
-#define LEAPIORAID_SAS_DEVICE_INFO_SEP (0x00004000)
-#define LEAPIORAID_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000)
-#define LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET (0x00000400)
-#define LEAPIORAID_SAS_DEVICE_INFO_STP_TARGET (0x00000200)
-#define LEAPIORAID_SAS_DEVICE_INFO_SMP_TARGET (0x00000100)
-#define LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080)
-#define LEAPIORAID_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040)
-#define LEAPIORAID_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020)
-#define LEAPIORAID_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010)
-#define LEAPIORAID_SAS_DEVICE_INFO_SATA_HOST (0x00000008)
-#define LEAPIORAID_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007)
-#define LEAPIORAID_SAS_DEVICE_INFO_NO_DEVICE (0x00000000)
-#define LEAPIORAID_SAS_DEVICE_INFO_END_DEVICE (0x00000001)
-#define LEAPIORAID_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002)
-#define LEAPIORAID_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003)
-
-struct LeapioraidSmpPassthroughReq_t {
- U8 PassthroughFlags;
- U8 PhysicalPort;
- U8 ChainOffset;
- U8 Function;
- U16 RequestDataLength;
- U8 SGLFlags;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved1;
- U32 Reserved2;
- U64 SASAddress;
- U32 Reserved3;
- U32 Reserved4;
- union LEAPIORAID_SIMPLE_SGE_UNION SGL;
-};
-
-struct LeapioraidSmpPassthroughRep_t {
- U8 PassthroughFlags;
- U8 PhysicalPort;
- U8 MsgLength;
- U8 Function;
- U16 ResponseDataLength;
- U8 SGLFlags;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved1;
- U8 Reserved2;
- U8 SASStatus;
- U16 IOCStatus;
- U32 IOCLogInfo;
- U32 Reserved3;
- U8 ResponseData[4];
-};
-
-struct LeapioraidSasIoUnitControlReq_t {
- U8 Operation;
- U8 Reserved1;
- U8 ChainOffset;
- U8 Function;
- U16 DevHandle;
- U8 IOCParameter;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved3;
- U16 Reserved4;
- U8 PhyNum;
- U8 PrimFlags;
- U32 Primitive;
- U8 LookupMethod;
- U8 Reserved5;
- U16 SlotNumber;
- U64 LookupAddress;
- U32 IOCParameterValue;
- U32 Reserved7;
- U32 Reserved8;
-};
-
-#define LEAPIORAID_SAS_OP_PHY_LINK_RESET (0x06)
-#define LEAPIORAID_SAS_OP_PHY_HARD_RESET (0x07)
-#define LEAPIORAID_SAS_OP_REMOVE_DEVICE (0x0D)
-struct LeapioraidSasIoUnitControlRep_t {
- U8 Operation;
- U8 Reserved1;
- U8 MsgLength;
- U8 Function;
- U16 DevHandle;
- U8 IOCParameter;
- U8 MsgFlags;
- U8 VP_ID;
- U8 VF_ID;
- U16 Reserved3;
- U16 Reserved4;
- U16 IOCStatus;
- U32 IOCLogInfo;
-};
-#endif
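
The header removed above encodes the usual MPT-style two-step config page protocol: a request with LEAPIORAID_CONFIG_ACTION_PAGE_HEADER first fetches only the LEAPIORAID_CONFIG_PAGE_HEADER (whose PageLength counts 32-bit words), and a second request with LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT then pulls the full page into a buffer of exactly that size. Below is a minimal, self-contained sketch of that flow; submit_cfg() is a hypothetical stand-in for the real LeapioraidCfgReq_t/LeapioraidCfgRep_t round trip, and the reported length is fabricated for the demo.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* mirrors struct LEAPIORAID_CONFIG_PAGE_HEADER from the deleted header */
struct cfg_page_header {
	uint8_t PageVersion;
	uint8_t PageLength;	/* page size in 4-byte words, filled in by the IOC */
	uint8_t PageNumber;
	uint8_t PageType;
};

#define ACTION_PAGE_HEADER	 0x00	/* LEAPIORAID_CONFIG_ACTION_PAGE_HEADER */
#define ACTION_PAGE_READ_CURRENT 0x01	/* LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT */
#define PAGETYPE_IO_UNIT	 0x00	/* LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT */

/*
 * Hypothetical stand-in for posting the request and waiting for the reply:
 * the header action fakes an IOC-reported length of three 32-bit words,
 * the read action fakes returning the page contents.
 */
static int submit_cfg(uint8_t action, struct cfg_page_header *hdr,
		      void *buf, size_t bufsz)
{
	if (action == ACTION_PAGE_HEADER)
		hdr->PageLength = 3;
	else if (action == ACTION_PAGE_READ_CURRENT && buf)
		memset(buf, 0, bufsz);
	return 0;
}

int main(void)
{
	struct cfg_page_header hdr = {
		.PageNumber = 1,
		.PageType = PAGETYPE_IO_UNIT,	/* e.g. IO Unit Page 1 (Flags) */
	};
	uint32_t page[64];
	size_t sz;

	/* step 1: fetch only the header to learn the real page length */
	if (submit_cfg(ACTION_PAGE_HEADER, &hdr, NULL, 0))
		return 1;
	sz = (size_t)hdr.PageLength * 4;

	/* step 2: read the current page into a buffer of exactly that size */
	if (sz > sizeof(page) ||
	    submit_cfg(ACTION_PAGE_READ_CURRENT, &hdr, page, sz))
		return 1;

	printf("read %zu-byte config page (type 0x%02x, number %u)\n",
	       sz, hdr.PageType, hdr.PageNumber);
	return 0;
}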
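
The event definitions above pair LeapioraidEventNotificationRep_t with a per-event payload; for LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C) each PHY[] entry carries its reason code in the low nibble of PhyStatus (LEAPIORAID_EVENT_SAS_TOPO_RC_MASK). A minimal sketch of decoding such a payload follows, with endianness conversion elided and the event buffer fabricated for the demo.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct topo_phy_entry {		/* LEAPIORAID_EVENT_SAS_TOPO_PHY_ENTRY */
	uint16_t AttachedDevHandle;
	uint8_t LinkRate;
	uint8_t PhyStatus;
};

struct topo_change_list {	/* LeapioraidEventDataSasTopoChangeList_t */
	uint16_t EnclosureHandle;
	uint16_t ExpanderDevHandle;
	uint8_t NumPhys;
	uint8_t Reserved1;
	uint16_t Reserved2;
	uint8_t NumEntries;
	uint8_t StartPhyNum;
	uint8_t ExpStatus;
	uint8_t PhysicalPort;
	struct topo_phy_entry PHY[];
};

#define RC_MASK			0x0F	/* LEAPIORAID_EVENT_SAS_TOPO_RC_MASK */
#define RC_TARG_ADDED		0x01	/* ..._RC_TARG_ADDED */
#define RC_TARG_NOT_RESPONDING	0x02	/* ..._RC_TARG_NOT_RESPONDING */

static void walk_topo(const struct topo_change_list *tc)
{
	unsigned int i;

	for (i = 0; i < tc->NumEntries; i++) {
		uint8_t rc = tc->PHY[i].PhyStatus & RC_MASK;
		unsigned int phy = tc->StartPhyNum + i;

		if (rc == RC_TARG_ADDED)
			printf("phy %u: handle 0x%04x added\n",
			       phy, tc->PHY[i].AttachedDevHandle);
		else if (rc == RC_TARG_NOT_RESPONDING)
			printf("phy %u: handle 0x%04x not responding\n",
			       phy, tc->PHY[i].AttachedDevHandle);
	}
}

int main(void)
{
	/* fabricated two-entry event starting at phy 4 */
	struct topo_change_list *tc =
	    calloc(1, sizeof(*tc) + 2 * sizeof(struct topo_phy_entry));

	if (!tc)
		return 1;
	tc->NumEntries = 2;
	tc->StartPhyNum = 4;
	tc->PHY[0] = (struct topo_phy_entry){ 0x0011, 0x0A, RC_TARG_ADDED };
	tc->PHY[1] = (struct topo_phy_entry){ 0x0012, 0x00, RC_TARG_NOT_RESPONDING };
	walk_topo(tc);
	free(tc);
	return 0;
}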
diff --git a/drivers/scsi/leapioraid/leapioraid_app.c b/drivers/scsi/leapioraid/leapioraid_app.c
deleted file mode 100644
index 039b4d8ffd02..000000000000
--- a/drivers/scsi/leapioraid/leapioraid_app.c
+++ /dev/null
@@ -1,2253 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Management Module Support for MPT (Message Passing Technology) based
- * controllers
- *
- * Copyright (C) 2013-2021 LSI Corporation
- * Copyright (C) 2013-2021 Avago Technologies
- * Copyright (C) 2013-2021 Broadcom Inc.
- * (mailto:MPT-FusionLinux.pdl@broadcom.com)
- *
- * Copyright (C) 2024 LeapIO Tech Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/compat.h>
-#include <linux/poll.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-
-#ifdef __KERNEL__
-#include <linux/miscdevice.h>
-#endif
-#include "leapioraid_func.h"
-
-#define LEAPIORAID_DEV_NAME "leapioraid_ctl"
-
-#define LEAPIORAID_MAGIC_NUMBER 'L'
-#define LEAPIORAID_IOCTL_DEFAULT_TIMEOUT (10)
-
-#define LEAPIORAID_IOCINFO \
- _IOWR(LEAPIORAID_MAGIC_NUMBER, 17, struct leapio_ioctl_iocinfo)
-#define LEAPIORAID_COMMAND \
- _IOWR(LEAPIORAID_MAGIC_NUMBER, 20, struct leapio_ioctl_command)
-#ifdef CONFIG_COMPAT
-#define LEAPIORAID_COMMAND32 \
- _IOWR(LEAPIORAID_MAGIC_NUMBER, 20, struct leapio_ioctl_command32)
-#endif
-#define LEAPIORAID_EVENTQUERY \
- _IOWR(LEAPIORAID_MAGIC_NUMBER, 21, struct leapio_ioctl_eventquery)
-#define LEAPIORAID_EVENTENABLE \
- _IOWR(LEAPIORAID_MAGIC_NUMBER, 22, struct leapio_ioctl_eventenable)
-#define LEAPIORAID_EVENTREPORT \
- _IOWR(LEAPIORAID_MAGIC_NUMBER, 23, struct leapio_ioctl_eventreport)
-#define LEAPIORAID_HARDRESET \
- _IOWR(LEAPIORAID_MAGIC_NUMBER, 24, struct leapio_ioctl_diag_reset)
-#define LEAPIORAID_BTDHMAPPING \
- _IOWR(LEAPIORAID_MAGIC_NUMBER, 31, struct leapio_ioctl_btdh_mapping)
-
-struct leapio_ioctl_header {
- uint32_t ioc_number;
- uint32_t port_number;
- uint32_t max_data_size;
-};
-
-struct leapio_ioctl_diag_reset {
- struct leapio_ioctl_header hdr;
-};
-
-struct leapio_ioctl_pci_info {
- union {
- struct {
- uint32_t device:5;
- uint32_t function:3;
- uint32_t bus:24;
- } bits;
- uint32_t word;
- } u;
- uint32_t segment_id;
-};
-
-struct leapio_ioctl_iocinfo {
- struct leapio_ioctl_header hdr;
- uint32_t adapter_type;
- uint32_t port_number;
- uint32_t pci_id;
- uint32_t hw_rev;
- uint32_t subsystem_device;
- uint32_t subsystem_vendor;
- uint32_t rsvd0;
- uint32_t firmware_version;
- uint32_t bios_version;
- uint8_t driver_version[32];
- uint8_t rsvd1;
- uint8_t scsi_id;
- uint16_t rsvd2;
- struct leapio_ioctl_pci_info pci_information;
-};
-
-#define LEAPIORAID_CTL_EVENT_LOG_SIZE (200)
-struct leapio_ioctl_eventquery {
- struct leapio_ioctl_header hdr;
- uint16_t event_entries;
- uint16_t rsvd;
- uint32_t event_types[LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS];
-};
-
-struct leapio_ioctl_eventenable {
- struct leapio_ioctl_header hdr;
- uint32_t event_types[4];
-};
-
-#define LEAPIORAID_EVENT_DATA_SIZE (192)
-struct LEAPIORAID_IOCTL_EVENTS {
- uint32_t event;
- uint32_t context;
- uint8_t data[LEAPIORAID_EVENT_DATA_SIZE];
-};
-
-struct leapio_ioctl_eventreport {
- struct leapio_ioctl_header hdr;
- struct LEAPIORAID_IOCTL_EVENTS event_data[];
-};
-
-struct leapio_ioctl_command {
- struct leapio_ioctl_header hdr;
- uint32_t timeout;
- void __user *reply_frame_buf_ptr;
- void __user *data_in_buf_ptr;
- void __user *data_out_buf_ptr;
- void __user *sense_data_ptr;
- uint32_t max_reply_bytes;
- uint32_t data_in_size;
- uint32_t data_out_size;
- uint32_t max_sense_bytes;
- uint32_t data_sge_offset;
- uint8_t mf[];
-};
-
-#ifdef CONFIG_COMPAT
-struct leapio_ioctl_command32 {
- struct leapio_ioctl_header hdr;
- uint32_t timeout;
- uint32_t reply_frame_buf_ptr;
- uint32_t data_in_buf_ptr;
- uint32_t data_out_buf_ptr;
- uint32_t sense_data_ptr;
- uint32_t max_reply_bytes;
- uint32_t data_in_size;
- uint32_t data_out_size;
- uint32_t max_sense_bytes;
- uint32_t data_sge_offset;
- uint8_t mf[];
-};
-#endif
-
-struct leapio_ioctl_btdh_mapping {
- struct leapio_ioctl_header hdr;
- uint32_t id;
- uint32_t bus;
- uint16_t handle;
- uint16_t rsvd;
-};
-
-static struct fasync_struct *leapioraid_async_queue;
-static DECLARE_WAIT_QUEUE_HEAD(leapioraid_ctl_poll_wait);
-
-enum leapioraid_block_state {
- NON_BLOCKING,
- BLOCKING,
-};
-
-static void
-leapioraid_ctl_display_some_debug(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid,
- char *calling_function_name,
- struct LeapioraidDefaultRep_t *mpi_reply)
-{
- struct LeapioraidCfgReq_t *mpi_request;
- char *desc = NULL;
-
- if (!(ioc->logging_level & LEAPIORAID_DEBUG_IOCTL))
- return;
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- switch (mpi_request->Function) {
- case LEAPIORAID_FUNC_SCSI_IO_REQUEST:
- {
- struct LeapioSCSIIOReq_t *scsi_request =
- (struct LeapioSCSIIOReq_t *) mpi_request;
- snprintf(ioc->tmp_string, LEAPIORAID_STRING_LENGTH,
- "scsi_io, cmd(0x%02x), cdb_len(%d)",
- scsi_request->CDB.CDB32[0],
- le16_to_cpu(scsi_request->IoFlags) & 0xF);
- desc = ioc->tmp_string;
- break;
- }
- case LEAPIORAID_FUNC_SCSI_TASK_MGMT:
- desc = "task_mgmt";
- break;
- case LEAPIORAID_FUNC_IOC_INIT:
- desc = "ioc_init";
- break;
- case LEAPIORAID_FUNC_IOC_FACTS:
- desc = "ioc_facts";
- break;
- case LEAPIORAID_FUNC_CONFIG:
- {
- struct LeapioraidCfgReq_t *config_request =
- (struct LeapioraidCfgReq_t *) mpi_request;
- snprintf(ioc->tmp_string, LEAPIORAID_STRING_LENGTH,
- "config, type(0x%02x), ext_type(0x%02x), number(%d)",
- (config_request->Header.PageType &
- LEAPIORAID_CONFIG_PAGETYPE_MASK),
- config_request->ExtPageType,
- config_request->Header.PageNumber);
- desc = ioc->tmp_string;
- break;
- }
- case LEAPIORAID_FUNC_PORT_FACTS:
- desc = "port_facts";
- break;
- case LEAPIORAID_FUNC_PORT_ENABLE:
- desc = "port_enable";
- break;
- case LEAPIORAID_FUNC_EVENT_NOTIFICATION:
- desc = "event_notification";
- break;
- case LEAPIORAID_FUNC_FW_DOWNLOAD:
- desc = "fw_download";
- break;
- case LEAPIORAID_FUNC_FW_UPLOAD:
- desc = "fw_upload";
- break;
- case LEAPIORAID_FUNC_RAID_ACTION:
- desc = "raid_action";
- break;
- case LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH:
- {
- struct LeapioSCSIIOReq_t *scsi_request =
- (struct LeapioSCSIIOReq_t *) mpi_request;
- snprintf(ioc->tmp_string, LEAPIORAID_STRING_LENGTH,
- "raid_pass, cmd(0x%02x), cdb_len(%d)",
- scsi_request->CDB.CDB32[0],
- le16_to_cpu(scsi_request->IoFlags) & 0xF);
- desc = ioc->tmp_string;
- break;
- }
- case LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL:
- desc = "sas_iounit_cntl";
- break;
- case LEAPIORAID_FUNC_SATA_PASSTHROUGH:
- desc = "sata_pass";
- break;
- case LEAPIORAID_FUNC_SMP_PASSTHROUGH:
- desc = "smp_passthrough";
- break;
- }
- if (!desc)
- return;
- pr_info("%s %s: %s, smid(%d)\n",
- ioc->name, calling_function_name, desc, smid);
- if (!mpi_reply)
- return;
- if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
- pr_info(
- "%s \tiocstatus(0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo));
- if (mpi_request->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST ||
- mpi_request->Function ==
- LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH) {
- struct LeapioraidSCSIIORep_t *scsi_reply =
- (struct LeapioraidSCSIIORep_t *) mpi_reply;
- struct leapioraid_sas_device *sas_device = NULL;
-
- sas_device = leapioraid_get_sdev_by_handle(ioc,
- le16_to_cpu(scsi_reply->DevHandle));
- if (sas_device) {
- pr_info("%s \tsas_address(0x%016llx), phy(%d)\n",
- ioc->name, (unsigned long long)
- sas_device->sas_address, sas_device->phy);
- if (sas_device->enclosure_handle != 0)
- pr_info(
- "%s \tenclosure_logical_id(0x%016llx), slot(%d)\n",
- ioc->name, (unsigned long long)
- sas_device->enclosure_logical_id,
- sas_device->slot);
- leapioraid_sas_device_put(sas_device);
- }
- if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
- pr_info(
- "%s \tscsi_state(0x%02x), scsi_status (0x%02x)\n",
- ioc->name, scsi_reply->SCSIState, scsi_reply->SCSIStatus);
- }
-}
-
-u8
-leapioraid_ctl_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index,
- u32 reply)
-{
- struct LeapioraidDefaultRep_t *mpi_reply;
- struct LeapioraidSCSIIORep_t *scsiio_reply;
- const void *sense_data;
- u32 sz;
-
- if (ioc->ctl_cmds.status == LEAPIORAID_CMD_NOT_USED)
- return 1;
- if (ioc->ctl_cmds.smid != smid)
- return 1;
- ioc->ctl_cmds.status |= LEAPIORAID_CMD_COMPLETE;
- mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply);
- if (mpi_reply) {
- memcpy(ioc->ctl_cmds.reply, mpi_reply,
- mpi_reply->MsgLength * 4);
- ioc->ctl_cmds.status |= LEAPIORAID_CMD_REPLY_VALID;
- if (mpi_reply->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST ||
- mpi_reply->Function ==
- LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH) {
- scsiio_reply = (struct LeapioraidSCSIIORep_t *) mpi_reply;
- if (scsiio_reply->SCSIState &
- LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID) {
- sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
- le32_to_cpu(scsiio_reply->SenseCount));
- sense_data =
- leapioraid_base_get_sense_buffer(ioc, smid);
- memcpy(ioc->ctl_cmds.sense, sense_data, sz);
- }
- }
- }
- leapioraid_ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
- ioc->ctl_cmds.status &= ~LEAPIORAID_CMD_PENDING;
- complete(&ioc->ctl_cmds.done);
- return 1;
-}
-
-static int leapioraid_ctl_check_event_type(
- struct LEAPIORAID_ADAPTER *ioc, u16 event)
-{
- u16 i;
- u32 desired_event;
-
- if (event >= 128 || !event || !ioc->event_log)
- return 0;
- desired_event = (1 << (event % 32));
- if (!desired_event)
- desired_event = 1;
- i = event / 32;
- return desired_event & ioc->event_type[i];
-}
-
-void
-leapioraid_ctl_add_to_event_log(
- struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidEventNotificationRep_t *mpi_reply)
-{
- struct LEAPIORAID_IOCTL_EVENTS *event_log;
- u16 event;
- int i;
- u32 sz, event_data_sz;
- u8 send_aen = 0;
-
- if (!ioc->event_log)
- return;
- event = le16_to_cpu(mpi_reply->Event);
- if (leapioraid_ctl_check_event_type(ioc, event)) {
- i = ioc->event_context % LEAPIORAID_CTL_EVENT_LOG_SIZE;
- event_log = ioc->event_log;
- event_log[i].event = event;
- event_log[i].context = ioc->event_context++;
- event_data_sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
- sz = min_t(u32, event_data_sz, LEAPIORAID_EVENT_DATA_SIZE);
- memset(event_log[i].data, 0, LEAPIORAID_EVENT_DATA_SIZE);
- memcpy(event_log[i].data, mpi_reply->EventData, sz);
- send_aen = 1;
- }
- if (event == LEAPIORAID_EVENT_LOG_ENTRY_ADDED ||
- (send_aen && !ioc->aen_event_read_flag)) {
- ioc->aen_event_read_flag = 1;
- wake_up_interruptible(&leapioraid_ctl_poll_wait);
- if (leapioraid_async_queue)
- kill_fasync(&leapioraid_async_queue, SIGIO, POLL_IN);
- }
-}
-
-u8
-leapioraid_ctl_event_callback(
- struct LEAPIORAID_ADAPTER *ioc, u8 msix_index,
- u32 reply)
-{
- struct LeapioraidEventNotificationRep_t *mpi_reply;
-
- mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply);
- if (mpi_reply)
- leapioraid_ctl_add_to_event_log(ioc, mpi_reply);
- return 1;
-}
-
-static int
-leapioraid_ctl_verify_adapter(
- int ioc_number, struct LEAPIORAID_ADAPTER **iocpp)
-{
- struct LEAPIORAID_ADAPTER *ioc;
-
- spin_lock(&leapioraid_gioc_lock);
- list_for_each_entry(ioc, &leapioraid_ioc_list, list) {
- if (ioc->id != ioc_number)
- continue;
- spin_unlock(&leapioraid_gioc_lock);
- *iocpp = ioc;
- return ioc_number;
- }
- spin_unlock(&leapioraid_gioc_lock);
- *iocpp = NULL;
- return -1;
-}
-
-void
-leapioraid_ctl_clear_outstanding_ioctls(struct LEAPIORAID_ADAPTER *ioc)
-{
- if (ioc->ctl_cmds.status & LEAPIORAID_CMD_PENDING) {
- ioc->ctl_cmds.status |= LEAPIORAID_CMD_RESET;
- leapioraid_base_free_smid(ioc, ioc->ctl_cmds.smid);
- complete(&ioc->ctl_cmds.done);
- }
-}
-
-void
-leapioraid_ctl_reset_handler(struct LEAPIORAID_ADAPTER *ioc, int reset_phase)
-{
- switch (reset_phase) {
- case LEAPIORAID_IOC_PRE_RESET_PHASE:
- dtmprintk(ioc, pr_info(
- "%s %s: LEAPIORAID_IOC_PRE_RESET_PHASE\n", ioc->name,
- __func__));
- break;
- case LEAPIORAID_IOC_AFTER_RESET_PHASE:
- dtmprintk(ioc, pr_info(
- "%s %s: LEAPIORAID_IOC_AFTER_RESET_PHASE\n", ioc->name,
- __func__));
- leapioraid_ctl_clear_outstanding_ioctls(ioc);
- break;
- case LEAPIORAID_IOC_DONE_RESET_PHASE:
- dtmprintk(ioc, pr_info(
- "%s %s: LEAPIORAID_IOC_DONE_RESET_PHASE\n", ioc->name,
- __func__));
- break;
- }
-}
-
-static int
-leapioraid_ctl_fasync(int fd, struct file *filep, int mode)
-{
- return fasync_helper(fd, filep, mode, &leapioraid_async_queue);
-}
-
-int
-leapioraid_ctl_release(struct inode *inode, struct file *filep)
-{
- return fasync_helper(-1, filep, 0, &leapioraid_async_queue);
-}
-
-static unsigned int
-leapioraid_ctl_poll(struct file *filep, poll_table *wait)
-{
- struct LEAPIORAID_ADAPTER *ioc;
-
- poll_wait(filep, &leapioraid_ctl_poll_wait, wait);
- spin_lock(&leapioraid_gioc_lock);
- list_for_each_entry(ioc, &leapioraid_ioc_list, list) {
- if (ioc->aen_event_read_flag) {
- spin_unlock(&leapioraid_gioc_lock);
- return POLLIN | POLLRDNORM;
- }
- }
- spin_unlock(&leapioraid_gioc_lock);
- return 0;
-}
-
-static int
-leapioraid_ctl_set_task_mid(struct LEAPIORAID_ADAPTER *ioc,
- struct leapio_ioctl_command *karg,
- struct LeapioraidSCSITmgReq_t *tm_request)
-{
- u8 found = 0;
- u16 smid;
- u16 handle;
- struct scsi_cmnd *scmd;
- struct LEAPIORAID_DEVICE *priv_data;
- struct LeapioraidSCSITmgRep_t *tm_reply;
- u32 sz;
- u32 lun;
- char *desc = NULL;
- struct leapioraid_scsiio_tracker *st = NULL;
-
- if (tm_request->TaskType == LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
- desc = "abort_task";
- else if (tm_request->TaskType ==
- LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
- desc = "query_task";
- else
- return 0;
- lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
- handle = le16_to_cpu(tm_request->DevHandle);
- for (smid = ioc->shost->can_queue; smid && !found; smid--) {
- scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid);
- if (scmd == NULL || scmd->device == NULL ||
- scmd->device->hostdata == NULL)
- continue;
- if (lun != scmd->device->lun)
- continue;
- priv_data = scmd->device->hostdata;
- if (priv_data->sas_target == NULL)
- continue;
- if (priv_data->sas_target->handle != handle)
- continue;
- st = leapioraid_base_scsi_cmd_priv(scmd);
- if ((!st) || (st->smid == 0))
- continue;
- if (!tm_request->TaskMID || tm_request->TaskMID == st->smid) {
- tm_request->TaskMID = cpu_to_le16(st->smid);
- found = 1;
- }
- }
- if (!found) {
- dctlprintk(ioc, pr_info(
- "%s %s: handle(0x%04x), lun(%d), no active mid!!\n",
- ioc->name, desc,
- le16_to_cpu(tm_request->DevHandle),
- lun));
- tm_reply = ioc->ctl_cmds.reply;
- tm_reply->DevHandle = tm_request->DevHandle;
- tm_reply->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT;
- tm_reply->TaskType = tm_request->TaskType;
- tm_reply->MsgLength =
- sizeof(struct LeapioraidSCSITmgRep_t) / 4;
- tm_reply->VP_ID = tm_request->VP_ID;
- tm_reply->VF_ID = tm_request->VF_ID;
- sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
- if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
- sz))
- pr_err("failure at %s:%d/%s()!\n", __FILE__,
- __LINE__, __func__);
- return 1;
- }
- dctlprintk(ioc, pr_info(
- "%s %s: handle(0x%04x), lun(%d), task_mid(%d)\n",
- ioc->name, desc,
- le16_to_cpu(tm_request->DevHandle), lun,
- le16_to_cpu(tm_request->TaskMID)));
- return 0;
-}
-
-static long
-leapioraid_ctl_do_command(struct LEAPIORAID_ADAPTER *ioc,
- struct leapio_ioctl_command karg, void __user *mf)
-{
- struct LeapioraidReqHeader_t *mpi_request = NULL, *request;
- struct LeapioraidDefaultRep_t *mpi_reply;
- u16 smid;
- unsigned long timeout;
- u8 issue_reset;
- u32 sz, sz_arg;
- void *psge;
- void *data_out = NULL;
- dma_addr_t data_out_dma = 0;
- size_t data_out_sz = 0;
- void *data_in = NULL;
- dma_addr_t data_in_dma = 0;
- size_t data_in_sz = 0;
- long ret;
- u16 device_handle = LEAPIORAID_INVALID_DEVICE_HANDLE;
-
- issue_reset = 0;
- if (ioc->ctl_cmds.status != LEAPIORAID_CMD_NOT_USED) {
- pr_err("%s %s: ctl_cmd in use\n",
- ioc->name, __func__);
- ret = -EAGAIN;
- goto out;
- }
- ret = leapioraid_wait_for_ioc_to_operational(ioc, 10);
- if (ret)
- goto out;
- mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
- if (!mpi_request) {
- ret = -ENOMEM;
- goto out;
- }
- if (karg.data_sge_offset * 4 > ioc->request_sz ||
- karg.data_sge_offset > (UINT_MAX / 4)) {
- ret = -EINVAL;
- goto out;
- }
- if (copy_from_user(mpi_request, mf, karg.data_sge_offset * 4)) {
- pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__,
- __func__);
- ret = -EFAULT;
- goto out;
- }
- if (mpi_request->Function == LEAPIORAID_FUNC_SCSI_TASK_MGMT) {
- smid = leapioraid_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
- if (!smid) {
- pr_err(
- "%s %s: failed obtaining a smid\n", ioc->name,
- __func__);
- ret = -EAGAIN;
- goto out;
- }
- } else {
- smid = ioc->shost->can_queue + LEAPIORAID_INTERNAL_SCSIIO_FOR_IOCTL;
- }
- ret = 0;
- ioc->ctl_cmds.status = LEAPIORAID_CMD_PENDING;
- memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
- request = leapioraid_base_get_msg_frame(ioc, smid);
- memset(request, 0, ioc->request_sz);
- memcpy(request, mpi_request, karg.data_sge_offset * 4);
- ioc->ctl_cmds.smid = smid;
- data_out_sz = karg.data_out_size;
- data_in_sz = karg.data_in_size;
- if (mpi_request->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST ||
- mpi_request->Function == LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH
- || mpi_request->Function == LEAPIORAID_FUNC_SCSI_TASK_MGMT
- || mpi_request->Function == LEAPIORAID_FUNC_SATA_PASSTHROUGH) {
- device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
- if (!device_handle || (device_handle > ioc->facts.MaxDevHandle)) {
- ret = -EINVAL;
- leapioraid_base_free_smid(ioc, smid);
- goto out;
- }
- }
- if (data_out_sz) {
- data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
- &data_out_dma, GFP_ATOMIC);
- if (!data_out) {
- ret = -ENOMEM;
- leapioraid_base_free_smid(ioc, smid);
- goto out;
- }
- if (copy_from_user(data_out, karg.data_out_buf_ptr,
- data_out_sz)) {
- pr_err("failure at %s:%d/%s()!\n", __FILE__,
- __LINE__, __func__);
- ret = -EFAULT;
- leapioraid_base_free_smid(ioc, smid);
- goto out;
- }
- }
- if (data_in_sz) {
- data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz,
- &data_in_dma, GFP_ATOMIC);
- if (!data_in) {
- ret = -ENOMEM;
- leapioraid_base_free_smid(ioc, smid);
- goto out;
- }
- }
- psge = (void *)request + (karg.data_sge_offset * 4);
- leapioraid_ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
- init_completion(&ioc->ctl_cmds.done);
- switch (mpi_request->Function) {
- case LEAPIORAID_FUNC_SCSI_IO_REQUEST:
- case LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH:
- {
- struct LeapioSCSIIOReq_t *scsiio_request =
- (struct LeapioSCSIIOReq_t *) request;
- scsiio_request->SenseBufferLength =
- SCSI_SENSE_BUFFERSIZE;
- scsiio_request->SenseBufferLowAddress =
- leapioraid_base_get_sense_buffer_dma(ioc, smid);
- memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
- ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
- data_in_dma, data_in_sz);
- if (test_bit
- (device_handle, ioc->device_remove_in_progress)) {
- dtmprintk(ioc,
- pr_info(
-					 "%s handle(0x%04x): ioctl failed due to device removal in progress\n",
- ioc->name, device_handle));
- leapioraid_base_free_smid(ioc, smid);
- ret = -EINVAL;
- goto out;
- }
- if (mpi_request->Function ==
- LEAPIORAID_FUNC_SCSI_IO_REQUEST)
- ioc->put_smid_scsi_io(ioc, smid, device_handle);
- else
- ioc->put_smid_default(ioc, smid);
- break;
- }
- case LEAPIORAID_FUNC_SCSI_TASK_MGMT:
- {
- struct LeapioraidSCSITmgReq_t *tm_request =
- (struct LeapioraidSCSITmgReq_t *) request;
- dtmprintk(ioc,
- pr_info("%s TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
- ioc->name,
- le16_to_cpu(tm_request->DevHandle),
- tm_request->TaskType));
- ioc->got_task_abort_from_ioctl = 1;
- if (tm_request->TaskType ==
- LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
- tm_request->TaskType ==
- LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
- if (leapioraid_ctl_set_task_mid(ioc, &karg, tm_request)) {
- leapioraid_base_free_smid(ioc, smid);
- ioc->got_task_abort_from_ioctl = 0;
- goto out;
- }
- }
- ioc->got_task_abort_from_ioctl = 0;
- if (test_bit
- (device_handle, ioc->device_remove_in_progress)) {
- dtmprintk(ioc,
- pr_info(
-					 "%s handle(0x%04x): ioctl failed due to device removal in progress\n",
- ioc->name, device_handle));
- leapioraid_base_free_smid(ioc, smid);
- ret = -EINVAL;
- goto out;
- }
- leapioraid_scsihost_set_tm_flag(ioc,
- le16_to_cpu(tm_request->DevHandle));
- ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
- data_in_dma, data_in_sz);
- ioc->put_smid_hi_priority(ioc, smid, 0);
- break;
- }
- case LEAPIORAID_FUNC_SMP_PASSTHROUGH:
- {
- struct LeapioraidSmpPassthroughReq_t *smp_request =
- (struct LeapioraidSmpPassthroughReq_t *) mpi_request;
- u8 *data;
-
- if (!ioc->multipath_on_hba)
- smp_request->PhysicalPort = 0xFF;
- if (smp_request->PassthroughFlags &
- 0x80)
- data = (u8 *) &smp_request->SGL;
- else {
- if (unlikely(data_out == NULL)) {
- pr_err(
- "failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- leapioraid_base_free_smid(ioc, smid);
- ret = -EINVAL;
- goto out;
- }
- data = data_out;
- }
- if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
- ioc->ioc_link_reset_in_progress = 1;
- ioc->ignore_loginfos = 1;
- }
- ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
- data_in_dma, data_in_sz);
- ioc->put_smid_default(ioc, smid);
- break;
- }
- case LEAPIORAID_FUNC_SATA_PASSTHROUGH:
- {
- ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
- data_in_dma, data_in_sz);
- if (test_bit
- (device_handle, ioc->device_remove_in_progress)) {
- dtmprintk(ioc,
- pr_info(
-					 "%s handle(0x%04x): ioctl failed due to device removal in progress\n",
- ioc->name, device_handle));
- leapioraid_base_free_smid(ioc, smid);
- ret = -EINVAL;
- goto out;
- }
- ioc->put_smid_default(ioc, smid);
- break;
- }
- case LEAPIORAID_FUNC_FW_DOWNLOAD:
- case LEAPIORAID_FUNC_FW_UPLOAD:
- {
- ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
- data_in_dma, data_in_sz);
- ioc->put_smid_default(ioc, smid);
- break;
- }
- case LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL:
- {
- struct LeapioraidSasIoUnitControlReq_t *sasiounit_request =
- (struct LeapioraidSasIoUnitControlReq_t *) mpi_request;
- if (sasiounit_request->Operation ==
- LEAPIORAID_SAS_OP_PHY_HARD_RESET
- || sasiounit_request->Operation ==
- LEAPIORAID_SAS_OP_PHY_LINK_RESET) {
- ioc->ioc_link_reset_in_progress = 1;
- ioc->ignore_loginfos = 1;
- }
- }
- fallthrough;
- default:
- ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
- data_in_dma, data_in_sz);
- ioc->put_smid_default(ioc, smid);
- break;
- }
- timeout = karg.timeout;
- if (timeout < LEAPIORAID_IOCTL_DEFAULT_TIMEOUT)
- timeout = LEAPIORAID_IOCTL_DEFAULT_TIMEOUT;
- wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout * HZ);
- if (mpi_request->Function == LEAPIORAID_FUNC_SCSI_TASK_MGMT) {
- struct LeapioraidSCSITmgReq_t *tm_request =
- (struct LeapioraidSCSITmgReq_t *) mpi_request;
- leapioraid_scsihost_clear_tm_flag(ioc,
- le16_to_cpu(tm_request->DevHandle));
- } else if ((mpi_request->Function == LEAPIORAID_FUNC_SMP_PASSTHROUGH
- || mpi_request->Function ==
- LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL)
- && ioc->ioc_link_reset_in_progress) {
- ioc->ioc_link_reset_in_progress = 0;
- ioc->ignore_loginfos = 0;
- }
- if (!(ioc->ctl_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
- leapioraid_check_cmd_timeout(ioc,
- ioc->ctl_cmds.status, mpi_request,
- karg.data_sge_offset, issue_reset);
- goto issue_host_reset;
- }
- mpi_reply = ioc->ctl_cmds.reply;
- if (mpi_reply->Function == LEAPIORAID_FUNC_SCSI_TASK_MGMT &&
- (ioc->logging_level & LEAPIORAID_DEBUG_TM)) {
- struct LeapioraidSCSITmgRep_t *tm_reply =
- (struct LeapioraidSCSITmgRep_t *) mpi_reply;
- pr_info(
- "%s TASK_MGMT: IOCStatus(0x%04x), IOCLogInfo(0x%08x), TerminationCount(0x%08x)\n",
- ioc->name,
- le16_to_cpu(tm_reply->IOCStatus),
- le32_to_cpu(tm_reply->IOCLogInfo),
- le32_to_cpu(tm_reply->TerminationCount));
- }
- if (data_in_sz) {
- if (copy_to_user(karg.data_in_buf_ptr, data_in, data_in_sz)) {
- pr_err("failure at %s:%d/%s()!\n", __FILE__,
- __LINE__, __func__);
- ret = -ENODATA;
- goto out;
- }
- }
- if (karg.max_reply_bytes) {
- sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
- if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
- sz)) {
- pr_err("failure at %s:%d/%s()!\n", __FILE__,
- __LINE__, __func__);
- ret = -ENODATA;
- goto out;
- }
- }
- if (karg.max_sense_bytes && (mpi_request->Function ==
- LEAPIORAID_FUNC_SCSI_IO_REQUEST
- || mpi_request->Function ==
- LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH)) {
- if (karg.sense_data_ptr == NULL) {
- pr_err(
- "%s Response buffer provided by application is NULL; Response data will not be returned.\n",
- ioc->name);
- goto out;
- }
- sz_arg = SCSI_SENSE_BUFFERSIZE;
- sz = min_t(u32, karg.max_sense_bytes, sz_arg);
- if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense, sz)) {
- pr_err("failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- ret = -ENODATA;
- goto out;
- }
- }
-issue_host_reset:
- if (issue_reset) {
- ret = -ENODATA;
- if ((mpi_request->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST
- || mpi_request->Function ==
- LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH
- || mpi_request->Function ==
- LEAPIORAID_FUNC_SATA_PASSTHROUGH)) {
- pr_err(
- "%s issue target reset: handle = (0x%04x)\n",
- ioc->name,
- le16_to_cpu(mpi_request->FunctionDependent1));
- leapioraid_halt_firmware(ioc, 0);
- leapioraid_scsihost_issue_locked_tm(ioc,
- le16_to_cpu
- (mpi_request->FunctionDependent1),
- 0, 0, 0,
- LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
- smid, 30,
- LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
- } else
- leapioraid_base_hard_reset_handler(ioc,
- FORCE_BIG_HAMMER);
- }
-out:
- if (data_in)
- dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in,
- data_in_dma);
- if (data_out)
- dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out,
- data_out_dma);
- kfree(mpi_request);
- ioc->ctl_cmds.status = LEAPIORAID_CMD_NOT_USED;
- return ret;
-}
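
The pass-through path above follows a classic bounce-buffer shape: the user's data-out payload is copied into a DMA-coherent allocation before the frame is posted, and the data-in allocation is copied back to userspace after completion. A minimal sketch of the staging step, assuming a hypothetical helper name (the driver open-codes this inline, and uses GFP_ATOMIC rather than GFP_KERNEL):

	static long stage_user_payload(struct pci_dev *pdev, void __user *ubuf,
				       size_t len, void **bounce,
				       dma_addr_t *bounce_dma)
	{
		/* coherent memory so the controller can DMA the payload directly */
		*bounce = dma_alloc_coherent(&pdev->dev, len, bounce_dma,
					     GFP_KERNEL);
		if (!*bounce)
			return -ENOMEM;
		/* host-to-device direction; data-in buffers skip this copy */
		if (copy_from_user(*bounce, ubuf, len)) {
			dma_free_coherent(&pdev->dev, len, *bounce, *bounce_dma);
			*bounce = NULL;
			return -EFAULT;
		}
		return 0;
	}
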
-
-static long
-leapioraid_ctl_getiocinfo(
- struct LEAPIORAID_ADAPTER *ioc, void __user *arg)
-{
- struct leapio_ioctl_iocinfo karg;
- u8 revision;
-
- dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name,
- __func__));
- memset(&karg, 0, sizeof(karg));
- if (ioc->pfacts)
- karg.port_number = ioc->pfacts[0].PortNumber;
- pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
- karg.hw_rev = revision;
- karg.pci_id = ioc->pdev->device;
- karg.subsystem_device = ioc->pdev->subsystem_device;
- karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
- karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
- karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
- karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
- karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
- karg.firmware_version = ioc->facts.FWVersion.Word;
- strscpy(karg.driver_version, ioc->driver_name, sizeof(karg.driver_version));
-	strcat(karg.driver_version, "-");
-	strcat(karg.driver_version, LEAPIORAID_DRIVER_VERSION);
-	karg.adapter_type = 0x07;
- karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
- if (copy_to_user(arg, &karg, sizeof(karg))) {
- pr_err("failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- return -EFAULT;
- }
- return 0;
-}
-
-static long
-leapioraid_ctl_eventquery(
- struct LEAPIORAID_ADAPTER *ioc, void __user *arg)
-{
- struct leapio_ioctl_eventquery karg;
-
- if (copy_from_user(&karg, arg, sizeof(karg))) {
- pr_err("failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- return -EFAULT;
- }
- dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name,
- __func__));
- karg.event_entries = LEAPIORAID_CTL_EVENT_LOG_SIZE;
- memcpy(karg.event_types, ioc->event_type,
- LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
- if (copy_to_user(arg, &karg, sizeof(karg))) {
- pr_err("failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- return -EFAULT;
- }
- return 0;
-}
-
-static long
-leapioraid_ctl_eventenable(
- struct LEAPIORAID_ADAPTER *ioc, void __user *arg)
-{
- struct leapio_ioctl_eventenable karg;
-
- if (copy_from_user(&karg, arg, sizeof(karg))) {
- pr_err("failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- return -EFAULT;
- }
- dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name,
- __func__));
- memcpy(ioc->event_type, karg.event_types,
- LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
- leapioraid_base_validate_event_type(ioc, ioc->event_type);
- if (ioc->event_log)
- return 0;
- ioc->event_context = 0;
- ioc->aen_event_read_flag = 0;
- ioc->event_log = kcalloc(LEAPIORAID_CTL_EVENT_LOG_SIZE,
- sizeof(struct LEAPIORAID_IOCTL_EVENTS),
- GFP_KERNEL);
- if (!ioc->event_log) {
- pr_err("failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- return -ENOMEM;
- }
- return 0;
-}
-
-static long
-leapioraid_ctl_eventreport(
- struct LEAPIORAID_ADAPTER *ioc, void __user *arg)
-{
- struct leapio_ioctl_eventreport karg;
- u32 number_bytes, max_events, max;
- struct leapio_ioctl_eventreport __user *uarg = arg;
-
- if (copy_from_user(&karg, arg, sizeof(karg))) {
- pr_err("failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- return -EFAULT;
- }
- dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name,
- __func__));
- number_bytes = karg.hdr.max_data_size -
- sizeof(struct leapio_ioctl_header);
- max_events = number_bytes / sizeof(struct LEAPIORAID_IOCTL_EVENTS);
- max = min_t(u32, LEAPIORAID_CTL_EVENT_LOG_SIZE, max_events);
- if (!max || !ioc->event_log)
- return -ENODATA;
- number_bytes = max * sizeof(struct LEAPIORAID_IOCTL_EVENTS);
- if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
- pr_err("failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- return -EFAULT;
- }
- ioc->aen_event_read_flag = 0;
- return 0;
-}
-
-static long
-leapioraid_ctl_do_reset(
- struct LEAPIORAID_ADAPTER *ioc, void __user *arg)
-{
- struct leapio_ioctl_diag_reset karg;
- int retval;
-
- if (copy_from_user(&karg, arg, sizeof(karg))) {
- pr_err("failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- return -EFAULT;
- }
- if (ioc->shost_recovery ||
- ioc->pci_error_recovery || ioc->is_driver_loading ||
- ioc->remove_host)
- return -EAGAIN;
- dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name,
- __func__));
- ioc->reset_from_user = 1;
- scsi_block_requests(ioc->shost);
- retval = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- scsi_unblock_requests(ioc->shost);
- pr_info("%s ioctl: host reset: %s\n",
- ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
- return 0;
-}
-
-static int
-leapioraid_ctl_btdh_search_sas_device(struct LEAPIORAID_ADAPTER *ioc,
- struct leapio_ioctl_btdh_mapping *btdh)
-{
- struct leapioraid_sas_device *sas_device;
- unsigned long flags;
- int rc = 0;
-
- if (list_empty(&ioc->sas_device_list))
- return rc;
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
- if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
- btdh->handle == sas_device->handle) {
- btdh->bus = sas_device->channel;
- btdh->id = sas_device->id;
- rc = 1;
- goto out;
- } else if (btdh->bus == sas_device->channel && btdh->id ==
- sas_device->id && btdh->handle == 0xFFFF) {
- btdh->handle = sas_device->handle;
- rc = 1;
- goto out;
- }
- }
-out:
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return rc;
-}
-
-static int
-leapioraid_ctl_btdh_search_raid_device(struct LEAPIORAID_ADAPTER *ioc,
- struct leapio_ioctl_btdh_mapping *btdh)
-{
- struct leapioraid_raid_device *raid_device;
- unsigned long flags;
- int rc = 0;
-
- if (list_empty(&ioc->raid_device_list))
- return rc;
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
- if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
- btdh->handle == raid_device->handle) {
- btdh->bus = raid_device->channel;
- btdh->id = raid_device->id;
- rc = 1;
- goto out;
- } else if (btdh->bus == raid_device->channel && btdh->id ==
- raid_device->id && btdh->handle == 0xFFFF) {
- btdh->handle = raid_device->handle;
- rc = 1;
- goto out;
- }
- }
-out:
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- return rc;
-}
-
-static long
-leapioraid_ctl_btdh_mapping(
- struct LEAPIORAID_ADAPTER *ioc, void __user *arg)
-{
- struct leapio_ioctl_btdh_mapping karg;
- int rc;
-
- if (copy_from_user(&karg, arg, sizeof(karg))) {
- pr_err("failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- return -EFAULT;
- }
- dctlprintk(ioc, pr_info("%s %s\n", ioc->name,
- __func__));
- rc = leapioraid_ctl_btdh_search_sas_device(ioc, &karg);
- if (!rc)
- leapioraid_ctl_btdh_search_raid_device(ioc, &karg);
- if (copy_to_user(arg, &karg, sizeof(karg))) {
- pr_err("failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- return -EFAULT;
- }
- return 0;
-}
-
-#ifdef CONFIG_COMPAT
-static long
-leapioraid_ctl_compat_command(
- struct LEAPIORAID_ADAPTER *ioc, unsigned int cmd,
- void __user *arg)
-{
- struct leapio_ioctl_command32 karg32;
- struct leapio_ioctl_command32 __user *uarg;
- struct leapio_ioctl_command karg;
-
- if (_IOC_SIZE(cmd) != sizeof(struct leapio_ioctl_command32))
- return -EINVAL;
- uarg = (struct leapio_ioctl_command32 __user *)arg;
- if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
- pr_err("failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- return -EFAULT;
- }
- memset(&karg, 0, sizeof(struct leapio_ioctl_command));
- karg.hdr.ioc_number = karg32.hdr.ioc_number;
- karg.hdr.port_number = karg32.hdr.port_number;
- karg.hdr.max_data_size = karg32.hdr.max_data_size;
- karg.timeout = karg32.timeout;
- karg.max_reply_bytes = karg32.max_reply_bytes;
- karg.data_in_size = karg32.data_in_size;
- karg.data_out_size = karg32.data_out_size;
- karg.max_sense_bytes = karg32.max_sense_bytes;
- karg.data_sge_offset = karg32.data_sge_offset;
- karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
- karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
- karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
- karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
- return leapioraid_ctl_do_command(ioc, karg, &uarg->mf);
-}
-#endif
-
-static long
-leapioraid_ctl_ioctl_main(
- struct file *file, unsigned int cmd, void __user *arg,
- u8 compat)
-{
- struct LEAPIORAID_ADAPTER *ioc;
- struct leapio_ioctl_header ioctl_header;
- enum leapioraid_block_state state;
- long ret = -ENOIOCTLCMD;
-
- if (copy_from_user(&ioctl_header, (char __user *)arg,
- sizeof(struct leapio_ioctl_header))) {
- pr_err("failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- return -EFAULT;
- }
- if (leapioraid_ctl_verify_adapter(ioctl_header.ioc_number,
- &ioc) == -1 || !ioc)
- return -ENODEV;
- mutex_lock(&ioc->pci_access_mutex);
- if (ioc->shost_recovery ||
- ioc->pci_error_recovery || ioc->is_driver_loading ||
- ioc->remove_host) {
- ret = -EAGAIN;
- goto unlock_pci_access;
- }
- state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
- if (state == NON_BLOCKING) {
- if (!mutex_trylock(&ioc->ctl_cmds.mutex)) {
- ret = -EAGAIN;
- goto unlock_pci_access;
- }
- } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
- ret = -ERESTARTSYS;
- goto unlock_pci_access;
- }
- switch (cmd) {
- case LEAPIORAID_IOCINFO:
- if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_iocinfo))
- ret = leapioraid_ctl_getiocinfo(ioc, arg);
- break;
-#ifdef CONFIG_COMPAT
- case LEAPIORAID_COMMAND32:
-#endif
- case LEAPIORAID_COMMAND:
- {
- struct leapio_ioctl_command __user *uarg;
- struct leapio_ioctl_command karg;
-
-#ifdef CONFIG_COMPAT
- if (compat) {
- ret =
- leapioraid_ctl_compat_command(ioc, cmd, arg);
- break;
- }
-#endif
- if (copy_from_user(&karg, arg, sizeof(karg))) {
- pr_err("failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- ret = -EFAULT;
- break;
- }
- if (karg.hdr.ioc_number != ioctl_header.ioc_number) {
- ret = -EINVAL;
- break;
- }
- if (_IOC_SIZE(cmd) ==
- sizeof(struct leapio_ioctl_command)) {
- uarg = arg;
- ret =
- leapioraid_ctl_do_command(ioc, karg,
- &uarg->mf);
- }
- break;
- }
- case LEAPIORAID_EVENTQUERY:
- if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_eventquery))
- ret = leapioraid_ctl_eventquery(ioc, arg);
- break;
- case LEAPIORAID_EVENTENABLE:
- if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_eventenable))
- ret = leapioraid_ctl_eventenable(ioc, arg);
- break;
- case LEAPIORAID_EVENTREPORT:
- ret = leapioraid_ctl_eventreport(ioc, arg);
- break;
- case LEAPIORAID_HARDRESET:
- if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_diag_reset))
- ret = leapioraid_ctl_do_reset(ioc, arg);
- break;
- case LEAPIORAID_BTDHMAPPING:
- if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_btdh_mapping))
- ret = leapioraid_ctl_btdh_mapping(ioc, arg);
- break;
- default:
- dctlprintk(ioc, pr_err(
- "%s unsupported ioctl opcode(0x%08x)\n",
- ioc->name, cmd));
- break;
- }
- mutex_unlock(&ioc->ctl_cmds.mutex);
-unlock_pci_access:
- mutex_unlock(&ioc->pci_access_mutex);
- return ret;
-}
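
The dispatcher above pairs every opcode with a fixed-size argument check before handing off to its handler. For reference, a hypothetical userspace caller of the LEAPIORAID_IOCINFO path could look like the following; the device node name and the UAPI header name are assumptions, not taken from this patch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include "leapioraid_ctl.h"	/* hypothetical header carrying the ioctl ABI */

	int main(void)
	{
		struct leapio_ioctl_iocinfo info = { 0 };
		int fd = open("/dev/leapioraid", O_RDONLY);	/* assumed node name */

		if (fd < 0)
			return 1;
		info.hdr.ioc_number = 0;	/* first adapter */
		if (ioctl(fd, LEAPIORAID_IOCINFO, &info) == 0)
			printf("fw version 0x%08x\n", info.firmware_version);
		close(fd);
		return 0;
	}
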
-
-static long
-leapioraid_ctl_ioctl(
- struct file *file, unsigned int cmd, unsigned long arg)
-{
- long ret;
-
- ret = leapioraid_ctl_ioctl_main(file, cmd, (void __user *)arg, 0);
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-static long
-leapioraid_ctl_ioctl_compat(
- struct file *file, unsigned int cmd, unsigned long arg)
-{
- long ret;
-
- ret = leapioraid_ctl_ioctl_main(file, cmd, (void __user *)arg, 1);
- return ret;
-}
-#endif
-
-static ssize_t
-version_fw_show(
- struct device *cdev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
-
- return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
- (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
- (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
- (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
- ioc->facts.FWVersion.Word & 0x000000FF);
-}
-static DEVICE_ATTR_RO(version_fw);
-
-static ssize_t
-version_bios_show(
- struct device *cdev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
- u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
-
- return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
- (version & 0xFF000000) >> 24,
- (version & 0x00FF0000) >> 16,
- (version & 0x0000FF00) >> 8, version & 0x000000FF);
-}
-static DEVICE_ATTR_RO(version_bios);
-
-static ssize_t
-version_leapioraid_show(struct device *cdev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
-
- return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
- ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
-}
-static DEVICE_ATTR_RO(version_leapioraid);
-
-static ssize_t
-version_product_show(
- struct device *cdev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
-
- return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
-}
-static DEVICE_ATTR_RO(version_product);
-
-static ssize_t
-version_nvdata_persistent_show(struct device *cdev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
-
- return snprintf(buf, PAGE_SIZE, "%08xh\n",
- le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
-}
-static DEVICE_ATTR_RO(version_nvdata_persistent);
-
-static ssize_t
-version_nvdata_default_show(struct device *cdev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
-
- return snprintf(buf, PAGE_SIZE, "%08xh\n",
- le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
-}
-static DEVICE_ATTR_RO(version_nvdata_default);
-
-static ssize_t
-board_name_show(
- struct device *cdev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
-
- return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
-}
-static DEVICE_ATTR_RO(board_name);
-
-static ssize_t
-board_assembly_show(
- struct device *cdev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
-
- return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
-}
-static DEVICE_ATTR_RO(board_assembly);
-
-static ssize_t
-board_tracer_show(
- struct device *cdev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
-
- return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
-}
-static DEVICE_ATTR_RO(board_tracer);
-
-static ssize_t
-io_delay_show(
- struct device *cdev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
-
- return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
-}
-static DEVICE_ATTR_RO(io_delay);
-
-static ssize_t
-device_delay_show(
- struct device *cdev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
-
- return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
-}
-static DEVICE_ATTR_RO(device_delay);
-
-static ssize_t
-fw_queue_depth_show(
- struct device *cdev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
-
- return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
-}
-static DEVICE_ATTR_RO(fw_queue_depth);
-
-static ssize_t
-host_sas_address_show(
- struct device *cdev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
-
- return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
- (unsigned long long)ioc->sas_hba.sas_address);
-}
-static DEVICE_ATTR_RO(host_sas_address);
-
-static ssize_t
-logging_level_show(
- struct device *cdev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
-
- return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
-}
-
-static ssize_t
-logging_level_store(
- struct device *cdev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
- int val = 0;
-
- if (kstrtoint(buf, 0, &val))
- return -EINVAL;
- ioc->logging_level = val;
- pr_info("%s logging_level=%08xh\n", ioc->name,
- ioc->logging_level);
- return strlen(buf);
-}
-static DEVICE_ATTR_RW(logging_level);
-
-static ssize_t
-fwfault_debug_show(
- struct device *cdev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
-}
-
-static ssize_t
-fwfault_debug_store(
- struct device *cdev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
- int val = 0;
-
- if (kstrtoint(buf, 0, &val))
- return -EINVAL;
- ioc->fwfault_debug = val;
- pr_info("%s fwfault_debug=%d\n", ioc->name,
- ioc->fwfault_debug);
- return strlen(buf);
-}
-static DEVICE_ATTR_RW(fwfault_debug);
-
-static
-struct leapioraid_raid_device *leapioraid_ctl_raid_device_find_by_handle(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle)
-{
- struct leapioraid_raid_device *raid_device, *r;
-
- r = NULL;
- list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
- if (raid_device->handle != handle)
- continue;
- r = raid_device;
- goto out;
- }
-out:
- return r;
-}
-
-u8
-leapioraid_ctl_tm_done(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index,
- u32 reply)
-{
- u8 rc;
- unsigned long flags;
- struct leapioraid_sas_device *sas_device;
- struct leapioraid_raid_device *raid_device;
- u16 smid_task_abort;
- u16 handle;
- struct LeapioraidSCSITmgReq_t *mpi_request;
- struct LeapioraidSCSITmgRep_t *mpi_reply =
- leapioraid_base_get_reply_virt_addr(ioc, reply);
-
- rc = 1;
- if (unlikely(!mpi_reply)) {
- pr_err(
- "%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
- return rc;
- }
- handle = le16_to_cpu(mpi_reply->DevHandle);
- sas_device = leapioraid_get_sdev_by_handle(ioc, handle);
- if (sas_device) {
- smid_task_abort = 0;
- if (mpi_reply->TaskType ==
- LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- smid_task_abort = le16_to_cpu(mpi_request->TaskMID);
- }
- pr_info("\tcomplete: sas_addr(0x%016llx), handle(0x%04x), smid(%d), term(%d)\n",
- (unsigned long long)sas_device->sas_address, handle,
- (smid_task_abort ? smid_task_abort : smid),
- le32_to_cpu(mpi_reply->TerminationCount));
- leapioraid_sas_device_put(sas_device);
- }
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- raid_device = leapioraid_ctl_raid_device_find_by_handle(ioc, handle);
- if (raid_device)
- pr_info("\tcomplete: wwid(0x%016llx), handle(0x%04x), smid(%d), term(%d)\n",
- (unsigned long long)raid_device->wwid, handle,
- smid, le32_to_cpu(mpi_reply->TerminationCount));
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- ioc->terminated_tm_count += le32_to_cpu(mpi_reply->TerminationCount);
- if (ioc->out_of_frames) {
- rc = 0;
- leapioraid_base_free_smid(ioc, smid);
- ioc->out_of_frames = 0;
- wake_up(&ioc->no_frames_tm_wq);
- }
- ioc->pending_tm_count--;
- if (!ioc->pending_tm_count)
- wake_up(&ioc->pending_tm_wq);
- return rc;
-}
-
-static void
-leapioraid_ctl_tm_sysfs(struct LEAPIORAID_ADAPTER *ioc, u8 task_type)
-{
- struct leapioraid_sas_device *sas_device;
- struct leapioraid_raid_device *raid_device;
- struct LeapioraidSCSITmgReq_t *mpi_request;
- u16 smid, handle, hpr_smid;
- struct LEAPIORAID_DEVICE *device_priv_data;
- struct LEAPIORAID_TARGET *target_priv_data;
- struct scsi_cmnd *scmd;
- struct scsi_device *sdev;
- unsigned long flags;
- int tm_count;
- int lun;
- u32 doorbell;
- struct leapioraid_scsiio_tracker *st;
- u8 tr_method = 0x00;
-
- if (list_empty(&ioc->sas_device_list))
- return;
- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- if (ioc->shost_recovery || ioc->remove_host) {
- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
- pr_err(
-			"%s %s: busy: host reset in progress, try again later\n",
- ioc->name, __func__);
- return;
- }
- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
- scsi_block_requests(ioc->shost);
- init_waitqueue_head(&ioc->pending_tm_wq);
- ioc->ignore_loginfos = 1;
- ioc->pending_tm_count = 0;
- ioc->terminated_tm_count = 0;
- ioc->out_of_frames = 0;
- tm_count = 0;
- switch (task_type) {
- case LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
- for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
- if (list_empty(&ioc->hpr_free_list)) {
- ioc->out_of_frames = 1;
- init_waitqueue_head(&ioc->no_frames_tm_wq);
- wait_event_timeout(ioc->no_frames_tm_wq,
- !ioc->out_of_frames, HZ);
- }
- scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid);
- if (!scmd)
- continue;
- st = leapioraid_base_scsi_cmd_priv(scmd);
- if ((!st) || (st->cb_idx == 0xFF) || (st->smid == 0))
- continue;
- lun = scmd->device->lun;
- device_priv_data = scmd->device->hostdata;
- if (!device_priv_data || !device_priv_data->sas_target)
- continue;
- target_priv_data = device_priv_data->sas_target;
- if (!target_priv_data)
- continue;
- if (target_priv_data->flags &
- LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT ||
- target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME)
- continue;
- handle = device_priv_data->sas_target->handle;
- hpr_smid = leapioraid_base_get_smid_hpr(ioc,
- ioc->ctl_tm_cb_idx);
- if (!hpr_smid) {
- pr_err(
- "%s %s: out of hi-priority requests!!\n",
- ioc->name, __func__);
- goto out_of_frames;
- }
- mpi_request =
- leapioraid_base_get_msg_frame(ioc, hpr_smid);
- memset(mpi_request, 0,
- sizeof(struct LeapioraidSCSITmgReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT;
- mpi_request->DevHandle = cpu_to_le16(handle);
- mpi_request->TaskType =
- LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
- mpi_request->TaskMID = cpu_to_le16(st->smid);
- int_to_scsilun(lun,
- (struct scsi_lun *)mpi_request->LUN);
- starget_printk(KERN_INFO,
- device_priv_data->sas_target->starget,
- "sending tm: sas_addr(0x%016llx), handle(0x%04x), smid(%d)\n",
- (unsigned long long)
- device_priv_data->sas_target->sas_address, handle, st->smid);
- ioc->pending_tm_count++;
- tm_count++;
- doorbell = leapioraid_base_get_iocstate(ioc, 0);
- if ((doorbell &
- LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT
- || (doorbell & LEAPIORAID_IOC_STATE_MASK) ==
- LEAPIORAID_IOC_STATE_COREDUMP)
- goto fault_in_progress;
- ioc->put_smid_hi_priority(ioc, hpr_smid, 0);
- }
- break;
- case LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
- if (list_empty(&ioc->hpr_free_list)) {
- spin_unlock_irqrestore(&ioc->sas_device_lock,
- flags);
- ioc->out_of_frames = 1;
- init_waitqueue_head(&ioc->no_frames_tm_wq);
- wait_event_timeout(ioc->no_frames_tm_wq,
- !ioc->out_of_frames, HZ);
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- }
- if (!sas_device->starget)
- continue;
- if (test_bit(sas_device->handle, ioc->pd_handles))
- continue;
- hpr_smid = leapioraid_base_get_smid_hpr(ioc,
- ioc->ctl_tm_cb_idx);
- if (!hpr_smid) {
- pr_err(
- "%s %s: out of hi-priority requests!!\n",
- ioc->name, __func__);
- spin_unlock_irqrestore(&ioc->sas_device_lock,
- flags);
- goto out_of_frames;
- }
- mpi_request =
- leapioraid_base_get_msg_frame(ioc, hpr_smid);
- memset(mpi_request, 0,
- sizeof(struct LeapioraidSCSITmgReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT;
- mpi_request->DevHandle =
- cpu_to_le16(sas_device->handle);
- mpi_request->TaskType =
- LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- starget_printk(KERN_INFO,
- sas_device->starget,
- "sending tm: sas_addr(0x%016llx), handle(0x%04x), smid(%d)\n",
- (unsigned long long)sas_device->sas_address,
- sas_device->handle,
- hpr_smid);
- ioc->pending_tm_count++;
- tm_count++;
- doorbell = leapioraid_base_get_iocstate(ioc, 0);
- if ((doorbell &
- LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT
- || (doorbell & LEAPIORAID_IOC_STATE_MASK) ==
- LEAPIORAID_IOC_STATE_COREDUMP) {
- spin_unlock_irqrestore(&ioc->sas_device_lock,
- flags);
- goto fault_in_progress;
- }
- ioc->put_smid_hi_priority(ioc, hpr_smid, 0);
- }
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
- if (list_empty(&ioc->hpr_free_list)) {
- spin_unlock_irqrestore(&ioc->raid_device_lock,
- flags);
- ioc->out_of_frames = 1;
- init_waitqueue_head(&ioc->no_frames_tm_wq);
- wait_event_timeout(ioc->no_frames_tm_wq,
- !ioc->out_of_frames, HZ);
- spin_lock_irqsave(&ioc->raid_device_lock,
- flags);
- }
- if (!raid_device->starget)
- continue;
- hpr_smid = leapioraid_base_get_smid_hpr(ioc,
- ioc->ctl_tm_cb_idx);
- if (!hpr_smid) {
- pr_err("%s %s: out of hi-priority requests!!\n",
- ioc->name, __func__);
- spin_unlock_irqrestore(&ioc->raid_device_lock,
- flags);
- goto out_of_frames;
- }
- mpi_request =
- leapioraid_base_get_msg_frame(ioc, hpr_smid);
- memset(mpi_request, 0,
- sizeof(struct LeapioraidSCSITmgReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT;
- mpi_request->DevHandle =
- cpu_to_le16(raid_device->handle);
- mpi_request->TaskType =
- LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- starget_printk(KERN_INFO,
- raid_device->starget,
- "sending tm: wwid(0x%016llx), handle(0x%04x), smid(%d)\n",
- (unsigned long long)raid_device->wwid,
- raid_device->handle, hpr_smid);
- ioc->pending_tm_count++;
- tm_count++;
- doorbell = leapioraid_base_get_iocstate(ioc, 0);
- if ((doorbell &
- LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT
- || (doorbell & LEAPIORAID_IOC_STATE_MASK) ==
- LEAPIORAID_IOC_STATE_COREDUMP) {
- spin_unlock_irqrestore(&ioc->raid_device_lock,
- flags);
- goto fault_in_progress;
- }
- ioc->put_smid_hi_priority(ioc, hpr_smid, 0);
- }
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- break;
- case LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
- case LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
- shost_for_each_device(sdev, ioc->shost) {
- if (list_empty(&ioc->hpr_free_list)) {
- ioc->out_of_frames = 1;
- init_waitqueue_head(&ioc->no_frames_tm_wq);
- wait_event_timeout(ioc->no_frames_tm_wq,
- !ioc->out_of_frames, HZ);
- }
- device_priv_data = sdev->hostdata;
- if (!device_priv_data || !device_priv_data->sas_target)
- continue;
- target_priv_data = device_priv_data->sas_target;
- if (!target_priv_data)
- continue;
- if (target_priv_data->flags &
- LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT)
- continue;
- if ((target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME)
- && (task_type ==
- LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET))
- continue;
- handle = device_priv_data->sas_target->handle;
- hpr_smid = leapioraid_base_get_smid_hpr(ioc,
- ioc->ctl_tm_cb_idx);
- if (!hpr_smid) {
- pr_err("%s %s: out of hi-priority requests!!\n",
- ioc->name, __func__);
- scsi_device_put(sdev);
- goto out_of_frames;
- }
- mpi_request =
- leapioraid_base_get_msg_frame(ioc, hpr_smid);
- memset(mpi_request, 0,
- sizeof(struct LeapioraidSCSITmgReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT;
- mpi_request->DevHandle = cpu_to_le16(handle);
- mpi_request->TaskType = task_type;
- mpi_request->MsgFlags = tr_method;
- int_to_scsilun(sdev->lun, (struct scsi_lun *)
- mpi_request->LUN);
- sdev_printk(KERN_INFO, sdev,
- "sending tm: sas_addr(0x%016llx), handle(0x%04x), smid(%d)\n",
- (unsigned long long)target_priv_data->sas_address,
- handle, hpr_smid);
- ioc->pending_tm_count++;
- tm_count++;
- doorbell = leapioraid_base_get_iocstate(ioc, 0);
- if ((doorbell &
- LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT
- || (doorbell & LEAPIORAID_IOC_STATE_MASK) ==
- LEAPIORAID_IOC_STATE_COREDUMP) {
- scsi_device_put(sdev);
- goto fault_in_progress;
- }
- ioc->put_smid_hi_priority(ioc, hpr_smid, 0);
- }
- break;
- }
-out_of_frames:
- if (ioc->pending_tm_count)
- wait_event_timeout(ioc->pending_tm_wq,
- !ioc->pending_tm_count, 30 * HZ);
- pr_info("%s task management requests issued(%d)\n",
- ioc->name, tm_count);
-	pr_info("%s number of IOs terminated(%d)\n",
- ioc->name, ioc->terminated_tm_count);
-fault_in_progress:
- scsi_unblock_requests(ioc->shost);
- ioc->ignore_loginfos = 0;
-}
-
-static ssize_t
-task_management_store(
- struct device *cdev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
- int opcode = 0;
-
- if (kstrtoint(buf, 0, &opcode))
- return -EINVAL;
- switch (opcode) {
- case 1:
- ioc->reset_from_user = 1;
- scsi_block_requests(ioc->shost);
- pr_err("%s sysfs: diag reset issued: %s\n", ioc->name,
- ((!leapioraid_base_hard_reset_handler(ioc,
- FORCE_BIG_HAMMER))
- ? "SUCCESS" : "FAILED"));
- scsi_unblock_requests(ioc->shost);
- break;
- case 2:
- ioc->reset_from_user = 1;
- scsi_block_requests(ioc->shost);
- pr_err("%s sysfs: message unit reset issued: %s\n", ioc->name,
- ((!leapioraid_base_hard_reset_handler(ioc,
- SOFT_RESET)) ?
- "SUCCESS" : "FAILED"));
- scsi_unblock_requests(ioc->shost);
- break;
- case 3:
- pr_err("%s sysfs: TASKTYPE_ABORT_TASK :\n", ioc->name);
- ioc->got_task_abort_from_sysfs = 1;
- leapioraid_ctl_tm_sysfs(ioc,
- LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK);
- ioc->got_task_abort_from_sysfs = 0;
- break;
- case 4:
- pr_err("%s sysfs: TASKTYPE_TARGET_RESET:\n", ioc->name);
- leapioraid_ctl_tm_sysfs(ioc,
- LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
- break;
- case 5:
- pr_err("%s sysfs: TASKTYPE_LOGICAL_UNIT_RESET:\n", ioc->name);
- leapioraid_ctl_tm_sysfs(ioc,
- LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
- break;
- case 6:
- pr_info("%s sysfs: TASKTYPE_ABRT_TASK_SET\n", ioc->name);
- leapioraid_ctl_tm_sysfs(ioc,
- LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET);
- break;
- default:
- pr_info("%s unsupported opcode(%d)\n",
- ioc->name, opcode);
- break;
-	}
- return strlen(buf);
-}
-static DEVICE_ATTR_WO(task_management);
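
The opcodes accepted by the store handler above are: 1 diag reset, 2 message unit reset, 3 abort task, 4 target reset, 5 logical unit reset, 6 abort task set. A hypothetical userspace trigger (the host number and sysfs path are assumptions based on how scsi_host attributes are normally exposed):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* opcode 4 maps to TASKTYPE_TARGET_RESET in the switch above */
		int fd = open("/sys/class/scsi_host/host0/task_management",
			      O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, "4", 1) != 1) {
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}
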
-
-static ssize_t
-ioc_reset_count_show(
- struct device *cdev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
-}
-static DEVICE_ATTR_RO(ioc_reset_count);
-
-static ssize_t
-reply_queue_count_show(struct device *cdev,
- struct device_attribute *attr, char *buf)
-{
- u8 reply_queue_count;
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
-
- if ((ioc->facts.IOCCapabilities &
- LEAPIORAID_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
- reply_queue_count = ioc->reply_queue_count;
- else
- reply_queue_count = 1;
- return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
-}
-static DEVICE_ATTR_RO(reply_queue_count);
-
-static ssize_t
-drv_support_bitmap_show(struct device *cdev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
-
- return snprintf(buf, PAGE_SIZE, "0x%08x\n", ioc->drv_support_bitmap);
-}
-static DEVICE_ATTR_RO(drv_support_bitmap);
-
-static ssize_t
-enable_sdev_max_qd_show(struct device *cdev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ioc->enable_sdev_max_qd);
-}
-
-static ssize_t
-enable_sdev_max_qd_store(struct device *cdev,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(cdev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- struct LEAPIORAID_TARGET *sas_target_priv_data;
- int val = 0;
- struct scsi_device *sdev;
- struct leapioraid_raid_device *raid_device;
- int qdepth;
-
- if (kstrtoint(buf, 0, &val))
- return -EINVAL;
- switch (val) {
- case 0:
- ioc->enable_sdev_max_qd = 0;
- shost_for_each_device(sdev, ioc->shost) {
- sas_device_priv_data = sdev->hostdata;
- if (!sas_device_priv_data)
- continue;
- sas_target_priv_data = sas_device_priv_data->sas_target;
- if (!sas_target_priv_data)
- continue;
- if (sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) {
- raid_device =
- leapioraid_raid_device_find_by_handle(ioc,
- sas_target_priv_data->handle);
- switch (raid_device->volume_type) {
- case LEAPIORAID_RAID_VOL_TYPE_RAID0:
- if (raid_device->device_info &
- LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET)
- qdepth =
- LEAPIORAID_SAS_QUEUE_DEPTH;
- else
- qdepth =
- LEAPIORAID_SATA_QUEUE_DEPTH;
- break;
- case LEAPIORAID_RAID_VOL_TYPE_RAID1E:
- case LEAPIORAID_RAID_VOL_TYPE_RAID1:
- case LEAPIORAID_RAID_VOL_TYPE_RAID10:
- case LEAPIORAID_RAID_VOL_TYPE_UNKNOWN:
- default:
- qdepth = LEAPIORAID_RAID_QUEUE_DEPTH;
- }
- } else
- qdepth =
- (sas_target_priv_data->sas_dev->port_type >
- 1) ? ioc->max_wideport_qd : ioc->max_narrowport_qd;
- leapioraid__scsihost_change_queue_depth(sdev, qdepth);
- }
- break;
- case 1:
- ioc->enable_sdev_max_qd = 1;
- shost_for_each_device(sdev, ioc->shost) {
- leapioraid__scsihost_change_queue_depth(sdev,
- shost->can_queue);
- }
- break;
- default:
- return -EINVAL;
- }
- return strlen(buf);
-}
-static DEVICE_ATTR_RW(enable_sdev_max_qd);
-
-static struct attribute *leapioraid_host_attrs[] = {
- &dev_attr_version_fw.attr,
- &dev_attr_version_bios.attr,
- &dev_attr_version_leapioraid.attr,
- &dev_attr_version_product.attr,
- &dev_attr_version_nvdata_persistent.attr,
- &dev_attr_version_nvdata_default.attr,
- &dev_attr_board_name.attr,
- &dev_attr_board_assembly.attr,
- &dev_attr_board_tracer.attr,
- &dev_attr_io_delay.attr,
- &dev_attr_device_delay.attr,
- &dev_attr_logging_level.attr,
- &dev_attr_fwfault_debug.attr,
- &dev_attr_fw_queue_depth.attr,
- &dev_attr_host_sas_address.attr,
- &dev_attr_task_management.attr,
- &dev_attr_ioc_reset_count.attr,
- &dev_attr_reply_queue_count.attr,
- &dev_attr_drv_support_bitmap.attr,
- &dev_attr_enable_sdev_max_qd.attr,
- NULL,
-};
-
-static const struct attribute_group leapioraid_host_attr_group = {
- .attrs = leapioraid_host_attrs
-};
-
-const struct attribute_group *leapioraid_host_groups[] = {
- &leapioraid_host_attr_group,
- NULL
-};
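
Everything registered through leapioraid_host_groups surfaces as a read-only or read-write file under the SCSI host's sysfs directory. A minimal reader for one of them, assuming the usual scsi_host path layout (the host number here is illustrative):

	#include <stdio.h>

	int main(void)
	{
		char line[64];
		FILE *f = fopen("/sys/class/scsi_host/host0/version_fw", "r");

		if (!f)
			return 1;
		if (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* e.g. "01.02.03.04" */
		fclose(f);
		return 0;
	}
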
-
-static ssize_t
-sas_address_show(
- struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct LEAPIORAID_DEVICE *sas_device_priv_data = sdev->hostdata;
-
- return snprintf(
- buf, PAGE_SIZE, "0x%016llx\n",
- (unsigned long long)sas_device_priv_data->sas_target->sas_address);
-}
-static DEVICE_ATTR_RO(sas_address);
-
-static ssize_t
-sas_device_handle_show(
- struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct LEAPIORAID_DEVICE *sas_device_priv_data = sdev->hostdata;
-
- return snprintf(buf, PAGE_SIZE, "0x%04x\n",
- sas_device_priv_data->sas_target->handle);
-}
-static DEVICE_ATTR_RO(sas_device_handle);
-
-static ssize_t
-sas_ncq_prio_enable_show(
- struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct LEAPIORAID_DEVICE *sas_device_priv_data = sdev->hostdata;
-
- return snprintf(buf, PAGE_SIZE, "%d\n",
- sas_device_priv_data->ncq_prio_enable);
-}
-
-static ssize_t
-sas_ncq_prio_enable_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct LEAPIORAID_DEVICE *sas_device_priv_data = sdev->hostdata;
- int ncq_prio_enable = 0;
-
- if (kstrtoint(buf, 0, &ncq_prio_enable))
- return -EINVAL;
- if (!leapioraid_scsihost_ncq_prio_supp(sdev))
- return -EINVAL;
- sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
- return strlen(buf);
-}
-static DEVICE_ATTR_RW(sas_ncq_prio_enable);
-
-static struct attribute *leapioraid_dev_attrs[] = {
- &dev_attr_sas_address.attr,
- &dev_attr_sas_device_handle.attr,
- &dev_attr_sas_ncq_prio_enable.attr,
- NULL,
-};
-static const struct attribute_group leapioraid_dev_attr_group = {
- .attrs = leapioraid_dev_attrs
-};
-const struct attribute_group *leapioraid_dev_groups[] = {
- &leapioraid_dev_attr_group,
- NULL
-};
-
-static int leapioraid_ctl_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-	struct LEAPIORAID_ADAPTER *ioc;
-	unsigned long pfn;
-	unsigned long length = vma->vm_end - vma->vm_start;
-
-	if (list_empty(&leapioraid_ioc_list))
-		return -ENODEV;
-	ioc = list_first_entry(&leapioraid_ioc_list,
-			       struct LEAPIORAID_ADAPTER, list);
- if (length > (SYS_LOG_BUF_SIZE + SYS_LOG_BUF_RESERVE)) {
- pr_err("Requested mapping size is too large\n");
- return -EINVAL;
- }
- if (ioc->log_buffer == NULL) {
- pr_err("no log buffer\n");
- return -EINVAL;
- }
-
- pfn = virt_to_phys(ioc->log_buffer) >> PAGE_SHIFT;
-
- if (remap_pfn_range(vma, vma->vm_start, pfn, length, vma->vm_page_prot)) {
- pr_err("Failed to map memory to user space\n");
- return -EAGAIN;
- }
-
- return 0;
-}
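
The handler above maps the first adapter's log buffer into the caller's address space with remap_pfn_range, rejecting requests larger than the buffer. A hypothetical userspace consumer (device node name assumed, and the mapping length must stay within SYS_LOG_BUF_SIZE + SYS_LOG_BUF_RESERVE):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = 4096;	/* must not exceed the driver's log buffer */
		int fd = open("/dev/leapioraid", O_RDWR);	/* assumed node name */
		void *log;

		if (fd < 0)
			return 1;
		log = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
		if (log == MAP_FAILED)
			return 1;
		fwrite(log, 1, len, stdout);	/* dump the shared log buffer */
		munmap(log, len);
		close(fd);
		return 0;
	}
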
-
-static const struct
-file_operations leapioraid_ctl_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = leapioraid_ctl_ioctl,
- .poll = leapioraid_ctl_poll,
- .fasync = leapioraid_ctl_fasync,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = leapioraid_ctl_ioctl_compat,
-#endif
-	.mmap = leapioraid_ctl_mmap,
-};
-
-static struct miscdevice leapioraid_ctl_dev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = LEAPIORAID_DEV_NAME,
- .fops = &leapioraid_ctl_fops,
-};
-
-void leapioraid_ctl_init(void)
-{
- leapioraid_async_queue = NULL;
- if (misc_register(&leapioraid_ctl_dev) < 0)
- pr_err("%s can't register misc device\n",
- LEAPIORAID_DRIVER_NAME);
- init_waitqueue_head(&leapioraid_ctl_poll_wait);
-}
-
-void leapioraid_ctl_exit(void)
-{
- struct LEAPIORAID_ADAPTER *ioc;
-
- list_for_each_entry(ioc, &leapioraid_ioc_list, list) {
- kfree(ioc->event_log);
- }
- misc_deregister(&leapioraid_ctl_dev);
-}
diff --git a/drivers/scsi/leapioraid/leapioraid_func.c b/drivers/scsi/leapioraid/leapioraid_func.c
deleted file mode 100644
index 19fe5e96a9ad..000000000000
--- a/drivers/scsi/leapioraid/leapioraid_func.c
+++ /dev/null
@@ -1,7056 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This is the Fusion MPT base driver providing common API layer interface
- * for access to MPT (Message Passing Technology) firmware.
- *
- * Copyright (C) 2013-2021 LSI Corporation
- * Copyright (C) 2013-2021 Avago Technologies
- * Copyright (C) 2013-2021 Broadcom Inc.
- * (mailto:MPT-FusionLinux.pdl@broadcom.com)
- *
- * Copyright (C) 2024 LeapIO Tech Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-#include <linux/time.h>
-#include <linux/ktime.h>
-#include <linux/kthread.h>
-#include <asm/page.h>
-#include <linux/aer.h>
-#include "leapioraid_func.h"
-#include <linux/net.h>
-#include <net/sock.h>
-#include <linux/inet.h>
-
-static char *dest_ip = "127.0.0.1";
-module_param(dest_ip, charp, 0000);
-MODULE_PARM_DESC(dest_ip, "Destination IP address");
-
-static u16 port_no = 6666;
-module_param(port_no, ushort, 0000);
-MODULE_PARM_DESC(port_no, "Destination Port number");
-static struct sockaddr_in dest_addr;
-static struct socket *sock;
-static struct msghdr msg;
-
-#define LEAPIORAID_LOG_POLLING_INTERVAL 1
-static LEAPIORAID_CALLBACK leapioraid_callbacks[LEAPIORAID_MAX_CALLBACKS];
-#define LEAPIORAID_FAULT_POLLING_INTERVAL 1000
-#define LEAPIORAID_MAX_HBA_QUEUE_DEPTH 1024
-
-static int smp_affinity_enable = 1;
-module_param(smp_affinity_enable, int, 0444);
-MODULE_PARM_DESC(smp_affinity_enable,
-		 "SMP affinity feature enable/disable, default: enable(1)");
-
-static int max_msix_vectors = -1;
-module_param(max_msix_vectors, int, 0444);
-MODULE_PARM_DESC(max_msix_vectors, " max msix vectors");
-
-static int irqpoll_weight = -1;
-module_param(irqpoll_weight, int, 0444);
-MODULE_PARM_DESC(irqpoll_weight,
- "irq poll weight (default= one fourth of HBA queue depth)");
-
-static int leapioraid_fwfault_debug;
-
-static int perf_mode = -1;
-
-static int poll_queues;
-module_param(poll_queues, int, 0444);
-MODULE_PARM_DESC(poll_queues,
-		 "Number of queues to be used for io_uring poll mode.\n\t\t"
-		 "This parameter is effective only if host_tagset_enable=1.\n\t\t"
-		 "When poll_queues is enabled,\n\t\t"
-		 "perf_mode is set to latency mode.");
-
-enum leapioraid_perf_mode {
- LEAPIORAID_PERF_MODE_DEFAULT = -1,
- LEAPIORAID_PERF_MODE_BALANCED = 0,
- LEAPIORAID_PERF_MODE_IOPS = 1,
- LEAPIORAID_PERF_MODE_LATENCY = 2,
-};
-
-static void
-leapioraid_base_clear_outstanding_leapioraid_commands(
- struct LEAPIORAID_ADAPTER *ioc);
-static
-int leapioraid_base_wait_on_iocstate(struct LEAPIORAID_ADAPTER *ioc,
- u32 ioc_state, int timeout);
-
-static int
-leapioraid_scsihost_set_fwfault_debug(
- const char *val, const struct kernel_param *kp)
-{
- int ret = param_set_int(val, kp);
- struct LEAPIORAID_ADAPTER *ioc;
-
- if (ret)
- return ret;
- pr_info("setting fwfault_debug(%d)\n",
- leapioraid_fwfault_debug);
- spin_lock(&leapioraid_gioc_lock);
- list_for_each_entry(ioc, &leapioraid_ioc_list, list)
- ioc->fwfault_debug = leapioraid_fwfault_debug;
- spin_unlock(&leapioraid_gioc_lock);
- return 0;
-}
-
-module_param_call(
- leapioraid_fwfault_debug,
- leapioraid_scsihost_set_fwfault_debug,
- param_get_int, &leapioraid_fwfault_debug, 0644);
-
-static inline u32
-leapioraid_base_readl_aero(
- const void __iomem *addr, u8 retry_count)
-{
- u32 i = 0, ret_val;
-
- do {
- ret_val = readl(addr);
- i++;
- } while (ret_val == 0 && i < retry_count);
- return ret_val;
-}
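
This wrapper re-reads a register while it returns zero, up to retry_count times; similar helpers in other MPT-derived drivers guard against spurious all-zero MMIO reads on some controllers, and the same motivation is assumed here. Typical call shape:

	/* re-read up to 3 times if the first read comes back as 0 */
	u32 pos = leapioraid_base_readl_aero(&ioc->chip->HostLogBufPosition, 3);
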
-
-u8
-leapioraid_base_check_cmd_timeout(
- struct LEAPIORAID_ADAPTER *ioc,
- U8 status, void *mpi_request, int sz)
-{
- u8 issue_reset = 0;
-
- if (!(status & LEAPIORAID_CMD_RESET))
- issue_reset = 1;
- pr_err("%s Command %s\n", ioc->name,
- ((issue_reset ==
- 0) ? "terminated due to Host Reset" : "Timeout"));
- leapioraid_debug_dump_mf(mpi_request, sz);
- return issue_reset;
-}
-
-static int
-leapioraid_remove_dead_ioc_func(void *arg)
-{
- struct LEAPIORAID_ADAPTER *ioc = (struct LEAPIORAID_ADAPTER *)arg;
- struct pci_dev *pdev;
-
- if (ioc == NULL)
- return -1;
- pdev = ioc->pdev;
- if (pdev == NULL)
- return -1;
-#if defined(DISABLE_RESET_SUPPORT)
- ssleep(2);
-#endif
-
- pci_stop_and_remove_bus_device(pdev);
- return 0;
-}
-
-u8
-leapioraid_base_pci_device_is_unplugged(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct pci_dev *pdev = ioc->pdev;
- struct pci_bus *bus = pdev->bus;
- int devfn = pdev->devfn;
- u32 vendor_id;
-
- if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendor_id))
- return 1;
- if (vendor_id == 0xffffffff || vendor_id == 0x00000000 ||
- vendor_id == 0x0000ffff || vendor_id == 0xffff0000)
- return 1;
- if ((vendor_id & 0xffff) == 0x0001)
- return 1;
- return 0;
-}
-
-u8
-leapioraid_base_pci_device_is_available(struct LEAPIORAID_ADAPTER *ioc)
-{
- if (ioc->pci_error_recovery
- || leapioraid_base_pci_device_is_unplugged(ioc))
- return 0;
- return 1;
-}
-
-static void
-leapioraid_base_sync_drv_fw_timestamp(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct LeapioraidIoUnitControlReq_t *mpi_request;
- struct LeapioraidIoUnitControlRep_t *mpi_reply;
- u16 smid;
- ktime_t current_time;
- u64 TimeStamp = 0;
- u8 issue_reset = 0;
-
- mutex_lock(&ioc->scsih_cmds.mutex);
- if (ioc->scsih_cmds.status != LEAPIORAID_CMD_NOT_USED) {
- pr_err("%s: scsih_cmd in use %s\n", ioc->name, __func__);
- goto out;
- }
- ioc->scsih_cmds.status = LEAPIORAID_CMD_PENDING;
- smid = leapioraid_base_get_smid(ioc, ioc->scsih_cb_idx);
- if (!smid) {
- pr_err("%s: failed obtaining a smid %s\n", ioc->name, __func__);
- ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED;
- goto out;
- }
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- ioc->scsih_cmds.smid = smid;
- memset(mpi_request, 0, sizeof(struct LeapioraidIoUnitControlReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_IO_UNIT_CONTROL;
- mpi_request->Operation = 0x0F;
- mpi_request->IOCParameter = 0x81;
- current_time = ktime_get_real();
- TimeStamp = ktime_to_ms(current_time);
- mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
- mpi_request->IOCParameterValue2 = cpu_to_le32(TimeStamp >> 32);
- init_completion(&ioc->scsih_cmds.done);
- ioc->put_smid_default(ioc, smid);
- dinitprintk(ioc, pr_err(
- "%s Io Unit Control Sync TimeStamp (sending), @time %lld ms\n",
- ioc->name, TimeStamp));
- wait_for_completion_timeout(&ioc->scsih_cmds.done,
- 10 * HZ);
- if (!(ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
-		leapioraid_check_cmd_timeout(ioc,
-					     ioc->scsih_cmds.status,
-					     mpi_request,
-					     sizeof
-					     (struct LeapioraidIoUnitControlReq_t)
-					     / 4, issue_reset);
- goto issue_host_reset;
- }
- if (ioc->scsih_cmds.status & LEAPIORAID_CMD_REPLY_VALID) {
- mpi_reply = ioc->scsih_cmds.reply;
- dinitprintk(ioc, pr_err(
- "%s Io Unit Control sync timestamp (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name,
- le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo)));
- }
-issue_host_reset:
- if (issue_reset)
- leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED;
-out:
- mutex_unlock(&ioc->scsih_cmds.mutex);
-}
-
-static int
-leapioraid_udp_init(void)
-{
- int ret;
- u32 ip;
-
- if (sock)
- return 0;
-	if (!in4_pton(dest_ip, -1, (u8 *) &ip, -1, NULL)) {
-		pr_err("Invalid IP address: %s, set to default: 127.0.0.1\n",
-		       dest_ip);
-		dest_ip = "127.0.0.1";
-		ip = htonl(INADDR_LOOPBACK);
-	}
-	ret = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, IPPROTO_UDP,
-			       &sock);
-	if (ret)
-		return ret;
- memset(&dest_addr, 0, sizeof(dest_addr));
- dest_addr.sin_family = AF_INET;
- dest_addr.sin_addr.s_addr = ip;
- dest_addr.sin_port = htons(port_no);
- memset(&msg, 0, sizeof(msg));
- msg.msg_name = &dest_addr;
- msg.msg_namelen = sizeof(struct sockaddr_in);
- return ret;
-}
-
-static void
-leapioraid_udp_exit(void)
-{
- if (sock)
- sock_release(sock);
-}
-
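leapioraid_udp_init() only prepares the socket and the reusable msghdr; nothing
in this hunk actually transmits. A hedged sketch of how a log datagram could be
pushed through it with kernel_sendmsg() (the helper name is hypothetical, and
it assumes the file-scope sock/msg/dest_addr set up above):

	static int leapioraid_udp_send(const void *buf, size_t len)
	{
		struct kvec vec = {
			.iov_base = (void *)buf,	/* payload to ship */
			.iov_len  = len,
		};

		if (!sock)	/* init not run, or sock_create_kern() failed */
			return -ENOTCONN;
		/* msg already carries the destination sockaddr_in */
		return kernel_sendmsg(sock, &msg, &vec, 1, len);
	}
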
-struct info {
-	u32 user_position;
-	u32 ioc_position;
-};
-static int log_positions_primed;
-
-static void
-leapioraid_base_pcie_log_work(struct work_struct *work)
-{
- struct LEAPIORAID_ADAPTER *ioc =
- container_of(work,
- struct LEAPIORAID_ADAPTER, pcie_log_work.work);
- unsigned long flags;
- struct info *infom = (struct info *)(ioc->log_buffer + SYS_LOG_BUF_SIZE);
-
-	if (!log_positions_primed) {
-		infom->user_position =
-		    ioc->base_readl(&ioc->chip->HostLogBufPosition, 0);
-		infom->ioc_position =
-		    ioc->base_readl(&ioc->chip->IocLogBufPosition, 0);
-		log_positions_primed = 1;
-	}
-
- writel(infom->user_position, &ioc->chip->HostLogBufPosition);
- infom->ioc_position = ioc->base_readl(&ioc->chip->IocLogBufPosition, 0);
-
- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- if (ioc->pcie_log_work_q)
- queue_delayed_work(ioc->pcie_log_work_q,
- &ioc->pcie_log_work,
- msecs_to_jiffies(LEAPIORAID_LOG_POLLING_INTERVAL));
- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
-}
-
-void
-leapioraid_base_start_log_watchdog(struct LEAPIORAID_ADAPTER *ioc)
-{
- unsigned long flags;
-
- if (ioc->pcie_log_work_q)
- return;
- leapioraid_udp_init();
- INIT_DELAYED_WORK(&ioc->pcie_log_work, leapioraid_base_pcie_log_work);
- snprintf(ioc->pcie_log_work_q_name,
- sizeof(ioc->pcie_log_work_q_name), "poll_%s%u_status",
- ioc->driver_name, ioc->id);
- ioc->pcie_log_work_q =
- create_singlethread_workqueue(ioc->pcie_log_work_q_name);
- if (!ioc->pcie_log_work_q) {
- pr_err("%s %s: failed (line=%d)\n", ioc->name,
- __func__, __LINE__);
- return;
- }
- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- if (ioc->pcie_log_work_q)
- queue_delayed_work(ioc->pcie_log_work_q,
- &ioc->pcie_log_work,
- msecs_to_jiffies(LEAPIORAID_LOG_POLLING_INTERVAL));
- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
-}
-
-void
-leapioraid_base_stop_log_watchdog(struct LEAPIORAID_ADAPTER *ioc)
-{
- unsigned long flags;
- struct workqueue_struct *wq;
-
- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- wq = ioc->pcie_log_work_q;
- ioc->pcie_log_work_q = NULL;
- leapioraid_udp_exit();
- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
- if (wq) {
- if (!cancel_delayed_work_sync(&ioc->pcie_log_work))
- flush_workqueue(wq);
- destroy_workqueue(wq);
- }
-}
-
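The log watchdog is the usual self-rearming delayed-work shape: the work
function does one poll pass and, while the queue pointer is still set, queues
itself again; the stop path clears the pointer under the lock and only then
cancels and destroys the queue. The pattern in miniature (struct my_dev, the
field names, and the interval are placeholders):

	static void my_poll_work(struct work_struct *work)
	{
		struct my_dev *dev = container_of(work, struct my_dev,
						  poll_work.work);

		/* ... one polling pass over the hardware ... */

		if (dev->poll_wq)	/* cleared by the stop path */
			queue_delayed_work(dev->poll_wq, &dev->poll_work,
					   msecs_to_jiffies(1000));
	}
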
-static void
-leapioraid_base_fault_reset_work(struct work_struct *work)
-{
- struct LEAPIORAID_ADAPTER *ioc =
- container_of(work, struct LEAPIORAID_ADAPTER,
- fault_reset_work.work);
- unsigned long flags;
- u32 doorbell;
- int rc;
- struct task_struct *p;
-
- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
- ioc->pci_error_recovery || ioc->remove_host)
- goto rearm_timer;
- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
- doorbell = leapioraid_base_get_iocstate(ioc, 0);
- if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_MASK) {
- pr_err(
- "%s SAS host is non-operational !!!!\n", ioc->name);
- if (ioc->non_operational_loop++ < 5) {
- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
- flags);
- goto rearm_timer;
- }
- ioc->remove_host = 1;
- leapioraid_base_pause_mq_polling(ioc);
- ioc->schedule_dead_ioc_flush_running_cmds(ioc);
- p = kthread_run(leapioraid_remove_dead_ioc_func, ioc,
- "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
- if (IS_ERR(p))
- pr_err(
- "%s %s: Running leapioraid_dead_ioc thread failed !!!!\n",
- ioc->name, __func__);
- else
- pr_err(
- "%s %s: Running leapioraid_dead_ioc thread success !!!!\n",
- ioc->name, __func__);
- return;
- }
- if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_COREDUMP) {
- u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
- ioc->manu_pg11.CoreDumpTOSec :
- 15;
- timeout /= (LEAPIORAID_FAULT_POLLING_INTERVAL / 1000);
- if (ioc->ioc_coredump_loop == 0) {
- leapioraid_base_coredump_info(ioc, doorbell &
- LEAPIORAID_DOORBELL_DATA_MASK);
- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
- flags);
- ioc->shost_recovery = 1;
- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
- flags);
- leapioraid_base_pause_mq_polling(ioc);
- leapioraid_scsihost_clear_outstanding_scsi_tm_commands
- (ioc);
- leapioraid_base_mask_interrupts(ioc);
- leapioraid_base_clear_outstanding_leapioraid_commands(ioc);
- leapioraid_ctl_clear_outstanding_ioctls(ioc);
- }
-		drsprintk(ioc,
-			  pr_info("%s %s: CoreDump loop %d\n",
-				  ioc->name, __func__, ioc->ioc_coredump_loop));
- if (ioc->ioc_coredump_loop++ < timeout) {
- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
- flags);
- goto rearm_timer;
- }
- }
- if (ioc->ioc_coredump_loop) {
- if ((doorbell & LEAPIORAID_IOC_STATE_MASK) !=
- LEAPIORAID_IOC_STATE_COREDUMP)
-			pr_err(
-				"%s %s: CoreDump completed. LoopCount: %d\n",
-				ioc->name, __func__, ioc->ioc_coredump_loop);
-		else
-			pr_err(
-				"%s %s: CoreDump Timed out. LoopCount: %d\n",
-				ioc->name, __func__, ioc->ioc_coredump_loop);
- ioc->ioc_coredump_loop = 0xFF;
- }
- ioc->non_operational_loop = 0;
- if ((doorbell & LEAPIORAID_IOC_STATE_MASK) !=
- LEAPIORAID_IOC_STATE_OPERATIONAL) {
- rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- pr_warn("%s %s: hard reset: %s\n", ioc->name,
- __func__, (rc == 0) ? "success" : "failed");
- doorbell = leapioraid_base_get_iocstate(ioc, 0);
- if ((doorbell & LEAPIORAID_IOC_STATE_MASK) ==
- LEAPIORAID_IOC_STATE_FAULT) {
- leapioraid_print_fault_code(ioc,
- doorbell &
- LEAPIORAID_DOORBELL_DATA_MASK);
- } else if ((doorbell & LEAPIORAID_IOC_STATE_MASK) ==
- LEAPIORAID_IOC_STATE_COREDUMP)
- leapioraid_base_coredump_info(ioc,
- doorbell &
- LEAPIORAID_DOORBELL_DATA_MASK);
- if (rc
- && (doorbell & LEAPIORAID_IOC_STATE_MASK) !=
- LEAPIORAID_IOC_STATE_OPERATIONAL)
- return;
- }
- ioc->ioc_coredump_loop = 0;
- if (ioc->time_sync_interval &&
- ++ioc->timestamp_update_count >= ioc->time_sync_interval) {
- ioc->timestamp_update_count = 0;
- leapioraid_base_sync_drv_fw_timestamp(ioc);
- }
- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
-rearm_timer:
- if (ioc->fault_reset_work_q)
- queue_delayed_work(ioc->fault_reset_work_q,
- &ioc->fault_reset_work,
- msecs_to_jiffies(LEAPIORAID_FAULT_POLLING_INTERVAL));
- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
-}
-
-static void
-leapioraid_base_hba_hot_unplug_work(struct work_struct *work)
-{
- struct LEAPIORAID_ADAPTER *ioc =
- container_of(work, struct LEAPIORAID_ADAPTER,
- hba_hot_unplug_work.work);
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags);
- if (ioc->shost_recovery || ioc->pci_error_recovery)
- goto rearm_timer;
- if (leapioraid_base_pci_device_is_unplugged(ioc)) {
- if (ioc->remove_host) {
- pr_err("%s The host is removeing!!!\n",
- ioc->name);
- goto rearm_timer;
- }
- ioc->remove_host = 1;
- leapioraid_base_clear_outstanding_leapioraid_commands(ioc);
- leapioraid_base_pause_mq_polling(ioc);
- leapioraid_scsihost_clear_outstanding_scsi_tm_commands(ioc);
- leapioraid_ctl_clear_outstanding_ioctls(ioc);
- }
-rearm_timer:
- if (ioc->hba_hot_unplug_work_q)
- queue_delayed_work(ioc->hba_hot_unplug_work_q,
- &ioc->hba_hot_unplug_work,
- msecs_to_jiffies
- (1000));
- spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags);
-}
-
-void
-leapioraid_base_start_watchdog(struct LEAPIORAID_ADAPTER *ioc)
-{
- unsigned long flags;
-
- if (ioc->fault_reset_work_q)
- return;
- ioc->timestamp_update_count = 0;
- INIT_DELAYED_WORK(&ioc->fault_reset_work,
- leapioraid_base_fault_reset_work);
- snprintf(ioc->fault_reset_work_q_name,
- sizeof(ioc->fault_reset_work_q_name), "poll_%s%u_status",
- ioc->driver_name, ioc->id);
- ioc->fault_reset_work_q =
- create_singlethread_workqueue(ioc->fault_reset_work_q_name);
- if (!ioc->fault_reset_work_q) {
- pr_err("%s %s: failed (line=%d)\n",
- ioc->name, __func__, __LINE__);
- return;
- }
- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- if (ioc->fault_reset_work_q)
- queue_delayed_work(ioc->fault_reset_work_q,
- &ioc->fault_reset_work,
- msecs_to_jiffies(LEAPIORAID_FAULT_POLLING_INTERVAL));
- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
- if (ioc->open_pcie_trace)
- leapioraid_base_start_log_watchdog(ioc);
-}
-
-void
-leapioraid_base_stop_watchdog(struct LEAPIORAID_ADAPTER *ioc)
-{
- unsigned long flags;
- struct workqueue_struct *wq;
-
- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- wq = ioc->fault_reset_work_q;
- ioc->fault_reset_work_q = NULL;
- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
- if (wq) {
- if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
- flush_workqueue(wq);
- destroy_workqueue(wq);
- }
- if (ioc->open_pcie_trace)
- leapioraid_base_stop_log_watchdog(ioc);
-}
-
-void
-leapioraid_base_start_hba_unplug_watchdog(struct LEAPIORAID_ADAPTER *ioc)
-{
- unsigned long flags;
-
- if (ioc->hba_hot_unplug_work_q)
- return;
- INIT_DELAYED_WORK(&ioc->hba_hot_unplug_work,
- leapioraid_base_hba_hot_unplug_work);
- snprintf(ioc->hba_hot_unplug_work_q_name,
- sizeof(ioc->hba_hot_unplug_work_q_name),
- "poll_%s%u_hba_unplug", ioc->driver_name, ioc->id);
- ioc->hba_hot_unplug_work_q =
- create_singlethread_workqueue(ioc->hba_hot_unplug_work_q_name);
- if (!ioc->hba_hot_unplug_work_q) {
- pr_err("%s %s: failed (line=%d)\n",
- ioc->name, __func__, __LINE__);
- return;
- }
- spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags);
- if (ioc->hba_hot_unplug_work_q)
- queue_delayed_work(ioc->hba_hot_unplug_work_q,
- &ioc->hba_hot_unplug_work,
- msecs_to_jiffies(LEAPIORAID_FAULT_POLLING_INTERVAL));
- spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags);
-}
-
-void
-leapioraid_base_stop_hba_unplug_watchdog(struct LEAPIORAID_ADAPTER *ioc)
-{
- unsigned long flags;
- struct workqueue_struct *wq;
-
- spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags);
- wq = ioc->hba_hot_unplug_work_q;
- ioc->hba_hot_unplug_work_q = NULL;
- spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags);
- if (wq) {
- if (!cancel_delayed_work_sync(&ioc->hba_hot_unplug_work))
- flush_workqueue(wq);
- destroy_workqueue(wq);
- }
-}
-
-static void
-leapioraid_base_stop_smart_polling(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct workqueue_struct *wq;
-
- wq = ioc->smart_poll_work_q;
- ioc->smart_poll_work_q = NULL;
- if (wq) {
- if (!cancel_delayed_work(&ioc->smart_poll_work))
- flush_workqueue(wq);
- destroy_workqueue(wq);
- }
-}
-
-void
-leapioraid_base_fault_info(struct LEAPIORAID_ADAPTER *ioc, u16 fault_code)
-{
- pr_err("%s fault_state(0x%04x)!\n",
- ioc->name, fault_code);
-}
-
-void
-leapioraid_base_coredump_info(struct LEAPIORAID_ADAPTER *ioc, u16 fault_code)
-{
- pr_err("%s coredump_state(0x%04x)!\n",
- ioc->name, fault_code);
-}
-
-int
-leapioraid_base_wait_for_coredump_completion(struct LEAPIORAID_ADAPTER *ioc,
- const char *caller)
-{
- u8 timeout =
- (ioc->manu_pg11.CoreDumpTOSec) ? ioc->manu_pg11.CoreDumpTOSec : 15;
- int ioc_state =
- leapioraid_base_wait_on_iocstate(ioc, LEAPIORAID_IOC_STATE_FAULT,
- timeout);
-
- if (ioc_state)
- pr_err("%s %s: CoreDump timed out. (ioc_state=0x%x)\n",
- ioc->name, caller, ioc_state);
- else
- pr_info("%s %s: CoreDump completed. (ioc_state=0x%x)\n",
- ioc->name, caller, ioc_state);
- return ioc_state;
-}
-
-void
-leapioraid_halt_firmware(struct LEAPIORAID_ADAPTER *ioc, u8 set_fault)
-{
- u32 doorbell;
-
- if ((!ioc->fwfault_debug) && (!set_fault))
- return;
- if (!set_fault)
- dump_stack();
- doorbell =
- ioc->base_readl(&ioc->chip->Doorbell,
- LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY);
- if ((doorbell & LEAPIORAID_IOC_STATE_MASK)
- == LEAPIORAID_IOC_STATE_FAULT) {
- leapioraid_print_fault_code(ioc, doorbell);
- } else if ((doorbell & LEAPIORAID_IOC_STATE_MASK) ==
- LEAPIORAID_IOC_STATE_COREDUMP)
- leapioraid_base_coredump_info(ioc,
- doorbell &
- LEAPIORAID_DOORBELL_DATA_MASK);
- else {
- writel(0xC0FFEE00, &ioc->chip->Doorbell);
- if (!set_fault)
- pr_err("%s Firmware is halted due to command timeout\n",
- ioc->name);
- }
- if (set_fault)
- return;
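-	/* fwfault_debug == 2: spin in place so the fault state stays inspectable */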
- if (ioc->fwfault_debug == 2) {
- for (;;)
- ;
- } else
- panic("panic in %s\n", __func__);
-}
-
-static void
-leapioraid_base_group_cpus_on_irq(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct leapioraid_adapter_reply_queue *reply_q;
-	unsigned int i, cpu, nr_cpus, nr_msix, index = 0;
-	int iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index;
-	int unmanaged_q_count = ioc->high_iops_queues + iopoll_q_count;
-
-	cpu = cpumask_first(cpu_online_mask);
-	nr_msix = ioc->reply_queue_count - unmanaged_q_count;
-	nr_cpus = num_online_cpus();
-	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
-		unsigned int group = nr_cpus / nr_msix;
-
-		if (reply_q->msix_index < ioc->high_iops_queues ||
-		    reply_q->msix_index >= ioc->iopoll_q_start_index)
-			continue;
-		if (cpu >= nr_cpus)
-			break;
-		if (index < nr_cpus % nr_msix)
-			group++;
-		for (i = 0; i < group; i++) {
- ioc->cpu_msix_table[cpu] = reply_q->msix_index;
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
- index++;
- }
-}
-
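A worked example of the grouping arithmetic: with 10 online CPUs and 4 managed
MSI-X vectors, each queue gets 10 / 4 = 2 CPUs, the first 10 % 4 = 2 queues
take one extra, and the mapping comes out 3 + 3 + 2 + 2 CPUs per reply-queue
index, covering every online CPU exactly once.
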
-static void
-leapioraid_base_sas_ioc_info(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidDefaultRep_t *mpi_reply,
- struct LeapioraidReqHeader_t *request_hdr)
-{
- u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
- LEAPIORAID_IOCSTATUS_MASK;
- char *desc = NULL;
- u16 frame_sz;
- char *func_str = NULL;
-
- if (request_hdr->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST ||
- request_hdr->Function == LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH
- || request_hdr->Function == LEAPIORAID_FUNC_EVENT_NOTIFICATION)
- return;
- if (ioc_status == LEAPIORAID_IOCSTATUS_CONFIG_INVALID_PAGE)
- return;
- switch (ioc_status) {
- case LEAPIORAID_IOCSTATUS_INVALID_FUNCTION:
- desc = "invalid function";
- break;
- case LEAPIORAID_IOCSTATUS_BUSY:
- desc = "busy";
- break;
- case LEAPIORAID_IOCSTATUS_INVALID_SGL:
- desc = "invalid sgl";
- break;
- case LEAPIORAID_IOCSTATUS_INTERNAL_ERROR:
- desc = "internal error";
- break;
- case LEAPIORAID_IOCSTATUS_INVALID_VPID:
- desc = "invalid vpid";
- break;
- case LEAPIORAID_IOCSTATUS_INSUFFICIENT_RESOURCES:
- desc = "insufficient resources";
- break;
- case LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER:
- desc = "insufficient power";
- break;
- case LEAPIORAID_IOCSTATUS_INVALID_FIELD:
- desc = "invalid field";
- break;
- case LEAPIORAID_IOCSTATUS_INVALID_STATE:
- desc = "invalid state";
- break;
- case LEAPIORAID_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
- desc = "op state not supported";
- break;
- case LEAPIORAID_IOCSTATUS_CONFIG_INVALID_ACTION:
- desc = "config invalid action";
- break;
- case LEAPIORAID_IOCSTATUS_CONFIG_INVALID_TYPE:
- desc = "config invalid type";
- break;
- case LEAPIORAID_IOCSTATUS_CONFIG_INVALID_DATA:
- desc = "config invalid data";
- break;
- case LEAPIORAID_IOCSTATUS_CONFIG_NO_DEFAULTS:
- desc = "config no defaults";
- break;
- case LEAPIORAID_IOCSTATUS_CONFIG_CANT_COMMIT:
- desc = "config can not commit";
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR:
- case LEAPIORAID_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
- case LEAPIORAID_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
- case LEAPIORAID_IOCSTATUS_SCSI_DATA_OVERRUN:
- case LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN:
- case LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR:
- case LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR:
- case LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED:
- case LEAPIORAID_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
- case LEAPIORAID_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
- case LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED:
- case LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED:
- break;
- case LEAPIORAID_IOCSTATUS_EEDP_GUARD_ERROR:
- if (!ioc->disable_eedp_support)
- desc = "eedp guard error";
- break;
- case LEAPIORAID_IOCSTATUS_EEDP_REF_TAG_ERROR:
- if (!ioc->disable_eedp_support)
- desc = "eedp ref tag error";
- break;
- case LEAPIORAID_IOCSTATUS_EEDP_APP_TAG_ERROR:
- if (!ioc->disable_eedp_support)
- desc = "eedp app tag error";
- break;
- case LEAPIORAID_IOCSTATUS_TARGET_INVALID_IO_INDEX:
- desc = "target invalid io index";
- break;
- case LEAPIORAID_IOCSTATUS_TARGET_ABORTED:
- desc = "target aborted";
- break;
- case LEAPIORAID_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
- desc = "target no conn retryable";
- break;
- case LEAPIORAID_IOCSTATUS_TARGET_NO_CONNECTION:
- desc = "target no connection";
- break;
- case LEAPIORAID_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
- desc = "target xfer count mismatch";
- break;
- case LEAPIORAID_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
- desc = "target data offset error";
- break;
- case LEAPIORAID_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
- desc = "target too much write data";
- break;
- case LEAPIORAID_IOCSTATUS_TARGET_IU_TOO_SHORT:
- desc = "target iu too short";
- break;
- case LEAPIORAID_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
- desc = "target ack nak timeout";
- break;
- case LEAPIORAID_IOCSTATUS_TARGET_NAK_RECEIVED:
- desc = "target nak received";
- break;
- case LEAPIORAID_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
- desc = "smp request failed";
- break;
- case LEAPIORAID_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
- desc = "smp data overrun";
- break;
- default:
- break;
- }
- if (!desc)
- return;
- switch (request_hdr->Function) {
- case LEAPIORAID_FUNC_CONFIG:
- frame_sz = sizeof(struct LeapioraidCfgReq_t) + ioc->sge_size;
- func_str = "config_page";
- break;
- case LEAPIORAID_FUNC_SCSI_TASK_MGMT:
- frame_sz = sizeof(struct LeapioraidSCSITmgReq_t);
- func_str = "task_mgmt";
- break;
- case LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL:
- frame_sz = sizeof(struct LeapioraidSasIoUnitControlReq_t);
- func_str = "sas_iounit_ctl";
- break;
- case LEAPIORAID_FUNC_SCSI_ENCLOSURE_PROCESSOR:
- frame_sz = sizeof(struct LeapioraidSepReq_t);
- func_str = "enclosure";
- break;
- case LEAPIORAID_FUNC_IOC_INIT:
- frame_sz = sizeof(struct LeapioraidIOCInitReq_t);
- func_str = "ioc_init";
- break;
- case LEAPIORAID_FUNC_PORT_ENABLE:
- frame_sz = sizeof(struct LeapioraidPortEnableReq_t);
- func_str = "port_enable";
- break;
- case LEAPIORAID_FUNC_SMP_PASSTHROUGH:
- frame_sz =
- sizeof(struct LeapioraidSmpPassthroughReq_t) + ioc->sge_size;
- func_str = "smp_passthru";
- break;
- default:
- frame_sz = 32;
- func_str = "unknown";
- break;
- }
- pr_warn("%s ioc_status: %s(0x%04x), request(0x%p), (%s)\n",
- ioc->name, desc, ioc_status, request_hdr, func_str);
- leapioraid_debug_dump_mf(request_hdr, frame_sz / 4);
-}
-
-static void
-leapioraid_base_display_event_data(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidEventNotificationRep_t *mpi_reply)
-{
- char *desc = NULL;
- u16 event;
-
- if (!(ioc->logging_level & LEAPIORAID_DEBUG_EVENTS))
- return;
- event = le16_to_cpu(mpi_reply->Event);
- if (ioc->warpdrive_msg) {
- switch (event) {
- case LEAPIORAID_EVENT_IR_OPERATION_STATUS:
- case LEAPIORAID_EVENT_IR_VOLUME:
- case LEAPIORAID_EVENT_IR_PHYSICAL_DISK:
- case LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST:
- case LEAPIORAID_EVENT_LOG_ENTRY_ADDED:
- return;
- }
- }
- switch (event) {
- case LEAPIORAID_EVENT_LOG_DATA:
- desc = "Log Data";
- break;
- case LEAPIORAID_EVENT_STATE_CHANGE:
- desc = "Status Change";
- break;
- case LEAPIORAID_EVENT_HARD_RESET_RECEIVED:
- desc = "Hard Reset Received";
- break;
- case LEAPIORAID_EVENT_EVENT_CHANGE:
- desc = "Event Change";
- break;
- case LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE:
- desc = "Device Status Change";
- break;
- case LEAPIORAID_EVENT_IR_OPERATION_STATUS:
- desc = "IR Operation Status";
- break;
- case LEAPIORAID_EVENT_SAS_DISCOVERY:
- {
- struct LeapioraidEventDataSasDiscovery_t *event_data =
- (struct LeapioraidEventDataSasDiscovery_t *) mpi_reply->EventData;
- pr_info("%s SAS Discovery: (%s)",
- ioc->name,
- (event_data->ReasonCode ==
- LEAPIORAID_EVENT_SAS_DISC_RC_STARTED) ? "start" :
- "stop");
-		if (event_data->DiscoveryStatus)
-			pr_cont(" discovery_status(0x%08x)",
-				le32_to_cpu(event_data->DiscoveryStatus));
-		pr_cont("\n");
- return;
- }
- case LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE:
- desc = "SAS Broadcast Primitive";
- break;
- case LEAPIORAID_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
- desc = "SAS Init Device Status Change";
- break;
- case LEAPIORAID_EVENT_SAS_INIT_TABLE_OVERFLOW:
- desc = "SAS Init Table Overflow";
- break;
- case LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
- desc = "SAS Topology Change List";
- break;
- case LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
- desc = "SAS Enclosure Device Status Change";
- break;
- case LEAPIORAID_EVENT_IR_VOLUME:
- desc = "IR Volume";
- break;
- case LEAPIORAID_EVENT_IR_PHYSICAL_DISK:
- desc = "IR Physical Disk";
- break;
- case LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST:
- desc = "IR Configuration Change List";
- break;
- case LEAPIORAID_EVENT_LOG_ENTRY_ADDED:
- desc = "Log Entry Added";
- break;
- case LEAPIORAID_EVENT_TEMP_THRESHOLD:
- desc = "Temperature Threshold";
- break;
- case LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
- desc = "SAS Device Discovery Error";
- break;
- }
- if (!desc)
- return;
- pr_info("%s %s\n", ioc->name, desc);
-}
-
-static void
-leapioraid_base_sas_log_info(struct LEAPIORAID_ADAPTER *ioc, u32 log_info)
-{
- union loginfo_type {
- u32 loginfo;
- struct {
- u32 subcode:16;
- u32 code:8;
- u32 originator:4;
- u32 bus_type:4;
- } dw;
- };
- union loginfo_type sas_loginfo;
- char *originator_str = NULL;
-
- sas_loginfo.loginfo = log_info;
- if (sas_loginfo.dw.bus_type != 3)
- return;
- if (log_info == 0x31170000)
- return;
- if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
- 0x31140000 || log_info == 0x31130000))
- return;
- switch (sas_loginfo.dw.originator) {
- case 0:
- originator_str = "IOP";
- break;
- case 1:
- originator_str = "PL";
- break;
- case 2:
- if (ioc->warpdrive_msg)
- originator_str = "WarpDrive";
- else
- originator_str = "IR";
- break;
- }
- pr_warn("%s log_info(0x%08x):\n\t\t"
- "originator(%s), code(0x%02x), sub_code(0x%04x)\n",
- ioc->name,
- log_info,
- originator_str,
- sas_loginfo.dw.code,
- sas_loginfo.dw.subcode);
-}
-
-static void
-leapioraid_base_display_reply_info(struct LEAPIORAID_ADAPTER *ioc, u16 smid,
- u8 msix_index, u32 reply)
-{
- struct LeapioraidDefaultRep_t *mpi_reply;
- u16 ioc_status;
- u32 loginfo = 0;
-
- mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply);
- if (unlikely(!mpi_reply)) {
- pr_err(
- "%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
- return;
- }
- ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
- if ((ioc_status & LEAPIORAID_IOCSTATUS_MASK) &&
- (ioc->logging_level & LEAPIORAID_DEBUG_REPLY)) {
- leapioraid_base_sas_ioc_info(ioc, mpi_reply,
- leapioraid_base_get_msg_frame(ioc,
- smid));
- }
- if (ioc_status & LEAPIORAID_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
- loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
- leapioraid_base_sas_log_info(ioc, loginfo);
- }
-}
-
-u8
-leapioraid_base_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index,
- u32 reply)
-{
- struct LeapioraidDefaultRep_t *mpi_reply;
-
- mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply);
- if (mpi_reply && mpi_reply->Function == LEAPIORAID_FUNC_EVENT_ACK)
- return leapioraid_check_for_pending_internal_cmds(ioc, smid);
- if (ioc->base_cmds.status == LEAPIORAID_CMD_NOT_USED)
- return 1;
- ioc->base_cmds.status |= LEAPIORAID_CMD_COMPLETE;
- if (mpi_reply) {
- ioc->base_cmds.status |= LEAPIORAID_CMD_REPLY_VALID;
- memcpy(ioc->base_cmds.reply, mpi_reply,
- mpi_reply->MsgLength * 4);
- }
- ioc->base_cmds.status &= ~LEAPIORAID_CMD_PENDING;
- complete(&ioc->base_cmds.done);
- return 1;
-}
-
-static u8
-leapioraid_base_async_event(
- struct LEAPIORAID_ADAPTER *ioc, u8 msix_index, u32 reply)
-{
- struct LeapioraidEventNotificationRep_t *mpi_reply;
- struct LeapioraidEventAckReq_t *ack_request;
- u16 smid;
- struct leapioraid_event_ack_list *delayed_event_ack;
-
- mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply);
- if (!mpi_reply)
- return 1;
- if (mpi_reply->Function != LEAPIORAID_FUNC_EVENT_NOTIFICATION)
- return 1;
- leapioraid_base_display_event_data(ioc, mpi_reply);
- if (!(mpi_reply->AckRequired & LEAPIORAID_EVENT_NOTIFICATION_ACK_REQUIRED))
- goto out;
- smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx);
- if (!smid) {
- delayed_event_ack =
- kzalloc(sizeof(*delayed_event_ack), GFP_ATOMIC);
- if (!delayed_event_ack)
- goto out;
- INIT_LIST_HEAD(&delayed_event_ack->list);
- delayed_event_ack->Event = mpi_reply->Event;
- delayed_event_ack->EventContext = mpi_reply->EventContext;
- list_add_tail(&delayed_event_ack->list,
- &ioc->delayed_event_ack_list);
- dewtprintk(ioc, pr_err(
- "%s DELAYED: EVENT ACK: event (0x%04x)\n",
- ioc->name,
- le16_to_cpu(mpi_reply->Event)));
- goto out;
- }
- ack_request = leapioraid_base_get_msg_frame(ioc, smid);
- memset(ack_request, 0, sizeof(struct LeapioraidEventAckReq_t));
- ack_request->Function = LEAPIORAID_FUNC_EVENT_ACK;
- ack_request->Event = mpi_reply->Event;
- ack_request->EventContext = mpi_reply->EventContext;
- ack_request->VF_ID = 0;
- ack_request->VP_ID = 0;
- ioc->put_smid_default(ioc, smid);
-out:
- leapioraid_scsihost_event_callback(ioc, msix_index, reply);
- leapioraid_ctl_event_callback(ioc, msix_index, reply);
- return 1;
-}
-
-inline
-struct leapioraid_scsiio_tracker *leapioraid_base_scsi_cmd_priv(
- struct scsi_cmnd *scmd)
-{
- return scsi_cmd_priv(scmd);
-}
-
-struct leapioraid_scsiio_tracker *leapioraid_get_st_from_smid(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid)
-{
- struct scsi_cmnd *cmd;
-
- if (WARN_ON(!smid) || WARN_ON(smid >= ioc->hi_priority_smid))
- return NULL;
- cmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid);
- if (cmd)
- return leapioraid_base_scsi_cmd_priv(cmd);
- return NULL;
-}
-
-static u8
-leapioraid_base_get_cb_idx(struct LEAPIORAID_ADAPTER *ioc, u16 smid)
-{
- int i;
- u16 ctl_smid = ioc->shost->can_queue + LEAPIORAID_INTERNAL_SCSIIO_FOR_IOCTL;
- u16 discovery_smid =
- ioc->shost->can_queue + LEAPIORAID_INTERNAL_SCSIIO_FOR_DISCOVERY;
- u8 cb_idx = 0xFF;
-
- if (smid < ioc->hi_priority_smid) {
- struct leapioraid_scsiio_tracker *st;
-
- if (smid < ctl_smid) {
- st = leapioraid_get_st_from_smid(ioc, smid);
- if (st)
- cb_idx = st->cb_idx;
- } else if (smid < discovery_smid)
- cb_idx = ioc->ctl_cb_idx;
- else
- cb_idx = ioc->scsih_cb_idx;
- } else if (smid < ioc->internal_smid) {
- i = smid - ioc->hi_priority_smid;
- cb_idx = ioc->hpr_lookup[i].cb_idx;
- } else if (smid <= ioc->hba_queue_depth) {
- i = smid - ioc->internal_smid;
- cb_idx = ioc->internal_lookup[i].cb_idx;
- }
- return cb_idx;
-}
-
-void
-leapioraid_base_pause_mq_polling(struct LEAPIORAID_ADAPTER *ioc)
-{
- int iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index;
- int qid;
-
- for (qid = 0; qid < iopoll_q_count; qid++)
- atomic_set(&ioc->blk_mq_poll_queues[qid].pause, 1);
- for (qid = 0; qid < iopoll_q_count; qid++) {
- while (atomic_read(&ioc->blk_mq_poll_queues[qid].busy)) {
- cpu_relax();
- udelay(500);
- }
- }
-}
-
-void
-leapioraid_base_resume_mq_polling(struct LEAPIORAID_ADAPTER *ioc)
-{
- int iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index;
- int qid;
-
- for (qid = 0; qid < iopoll_q_count; qid++)
- atomic_set(&ioc->blk_mq_poll_queues[qid].pause, 0);
-}
-
-void
-leapioraid_base_mask_interrupts(struct LEAPIORAID_ADAPTER *ioc)
-{
- u32 him_register;
-
- ioc->mask_interrupts = 1;
- him_register =
- ioc->base_readl(&ioc->chip->HostInterruptMask,
- LEAPIORAID_READL_RETRY_COUNT_OF_THREE);
- him_register |=
- 0x00000001 + 0x00000008 + 0x40000000;
- writel(him_register, &ioc->chip->HostInterruptMask);
- ioc->base_readl(&ioc->chip->HostInterruptMask,
- LEAPIORAID_READL_RETRY_COUNT_OF_THREE);
-}
-
-void
-leapioraid_base_unmask_interrupts(struct LEAPIORAID_ADAPTER *ioc)
-{
- u32 him_register;
-
- him_register =
- ioc->base_readl(&ioc->chip->HostInterruptMask,
- LEAPIORAID_READL_RETRY_COUNT_OF_THREE);
- him_register &= ~0x00000008;
- writel(him_register, &ioc->chip->HostInterruptMask);
- ioc->mask_interrupts = 0;
-}
-
-union leapioraid_reply_descriptor {
- u64 word;
- struct {
- u32 low;
- u32 high;
- } u;
-};
-
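Unused slots in the reply-post ring read as all ones: the drain loop below
re-stamps every consumed descriptor with cpu_to_le64(ULLONG_MAX), so either
32-bit half still equal to UINT_MAX marks an entry the firmware has not yet
overwritten:

	rd.word = le64_to_cpu(rpf->Words);
	if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
		break;	/* nothing new from the firmware */
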
-static int
-leapioraid_base_process_reply_queue(
- struct leapioraid_adapter_reply_queue *reply_q)
-{
- union leapioraid_reply_descriptor rd;
- u64 completed_cmds;
- u8 request_descript_type;
- u16 smid;
- u8 cb_idx;
- u32 reply;
- u8 msix_index = reply_q->msix_index;
- struct LEAPIORAID_ADAPTER *ioc = reply_q->ioc;
- union LeapioraidRepDescUnion_t *rpf;
- u8 rc;
-
- completed_cmds = 0;
- if (!atomic_add_unless(&reply_q->busy, 1, 1))
- return completed_cmds;
- rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
- request_descript_type = rpf->Default.ReplyFlags
- & LEAPIORAID_RPY_DESCRIPT_FLAGS_TYPE_MASK;
- if (request_descript_type == LEAPIORAID_RPY_DESCRIPT_FLAGS_UNUSED) {
- atomic_dec(&reply_q->busy);
- return 1;
- }
- cb_idx = 0xFF;
- do {
- rd.word = le64_to_cpu(rpf->Words);
- if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
- goto out;
- reply = 0;
- smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
- if (request_descript_type ==
- LEAPIORAID_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
- request_descript_type ==
- LEAPIORAID_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
- cb_idx = leapioraid_base_get_cb_idx(ioc, smid);
- if ((likely(cb_idx < LEAPIORAID_MAX_CALLBACKS)) &&
- (likely(leapioraid_callbacks[cb_idx] != NULL))) {
- rc = leapioraid_callbacks[cb_idx] (ioc, smid,
- msix_index, 0);
- if (rc)
- leapioraid_base_free_smid(ioc, smid);
- }
- } else if (request_descript_type ==
- LEAPIORAID_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
- reply =
- le32_to_cpu(rpf->AddressReply.ReplyFrameAddress);
- if (reply > ioc->reply_dma_max_address
- || reply < ioc->reply_dma_min_address)
- reply = 0;
- if (smid) {
- cb_idx = leapioraid_base_get_cb_idx(ioc, smid);
- if ((likely(cb_idx < LEAPIORAID_MAX_CALLBACKS)) &&
- (likely(leapioraid_callbacks[cb_idx] != NULL))) {
- rc = leapioraid_callbacks[cb_idx] (ioc,
- smid,
- msix_index,
- reply);
- if (reply)
- leapioraid_base_display_reply_info
- (ioc, smid, msix_index,
- reply);
- if (rc)
- leapioraid_base_free_smid(ioc,
- smid);
- }
- } else {
- leapioraid_base_async_event(ioc, msix_index, reply);
- }
- if (reply) {
- ioc->reply_free_host_index =
- (ioc->reply_free_host_index ==
- (ioc->reply_free_queue_depth - 1)) ?
- 0 : ioc->reply_free_host_index + 1;
- ioc->reply_free[ioc->reply_free_host_index] =
- cpu_to_le32(reply);
- wmb(); /* Make sure that all write ops are in order */
- writel(ioc->reply_free_host_index,
- &ioc->chip->ReplyFreeHostIndex);
- }
- }
- rpf->Words = cpu_to_le64(ULLONG_MAX);
- reply_q->reply_post_host_index =
- (reply_q->reply_post_host_index ==
- (ioc->reply_post_queue_depth - 1)) ? 0 :
- reply_q->reply_post_host_index + 1;
- request_descript_type =
- reply_q->reply_post_free[reply_q->reply_post_host_index].Default.ReplyFlags
- & LEAPIORAID_RPY_DESCRIPT_FLAGS_TYPE_MASK;
- completed_cmds++;
- if (completed_cmds >= ioc->thresh_hold) {
- if (ioc->combined_reply_queue) {
- writel(reply_q->reply_post_host_index |
- ((msix_index & 7) <<
- LEAPIORAID_RPHI_MSIX_INDEX_SHIFT),
- ioc->replyPostRegisterIndex[msix_index /
- 8]);
- } else {
- writel(reply_q->reply_post_host_index |
- (msix_index <<
- LEAPIORAID_RPHI_MSIX_INDEX_SHIFT),
- &ioc->chip->ReplyPostHostIndex);
- }
- if (!reply_q->is_blk_mq_poll_q &&
- !reply_q->irq_poll_scheduled) {
- reply_q->irq_poll_scheduled = true;
- irq_poll_sched(&reply_q->irqpoll);
- }
- atomic_dec(&reply_q->busy);
- return completed_cmds;
- }
- if (request_descript_type == LEAPIORAID_RPY_DESCRIPT_FLAGS_UNUSED)
- goto out;
- if (!reply_q->reply_post_host_index)
- rpf = reply_q->reply_post_free;
- else
- rpf++;
- } while (1);
-out:
- if (!completed_cmds) {
- atomic_dec(&reply_q->busy);
- return completed_cmds;
- }
- wmb(); /* Make sure that all write ops are in order */
- if (ioc->combined_reply_queue) {
- writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
- LEAPIORAID_RPHI_MSIX_INDEX_SHIFT),
- ioc->replyPostRegisterIndex[msix_index / 8]);
- } else {
- writel(reply_q->reply_post_host_index | (msix_index <<
- LEAPIORAID_RPHI_MSIX_INDEX_SHIFT),
- &ioc->chip->ReplyPostHostIndex);
- }
- atomic_dec(&reply_q->busy);
- return completed_cmds;
-}
-
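Both host indices above (reply_free_host_index and reply_post_host_index)
advance with the same wrap-on-depth step; spelled out, with depth standing in
for reply_free_queue_depth or reply_post_queue_depth:

	next = (idx == depth - 1) ? 0 : idx + 1;	/* == (idx + 1) % depth */
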
-int leapioraid_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
-{
- struct LEAPIORAID_ADAPTER *ioc =
- (struct LEAPIORAID_ADAPTER *)shost->hostdata;
- struct leapioraid_adapter_reply_queue *reply_q;
- int num_entries = 0;
- int qid = queue_num - ioc->iopoll_q_start_index;
-
- if (atomic_read(&ioc->blk_mq_poll_queues[qid].pause) ||
- !atomic_add_unless(&ioc->blk_mq_poll_queues[qid].busy, 1, 1))
- return 0;
- reply_q = ioc->blk_mq_poll_queues[qid].reply_q;
- num_entries = leapioraid_base_process_reply_queue(reply_q);
- atomic_dec(&ioc->blk_mq_poll_queues[qid].busy);
- return num_entries;
-}
-
-static irqreturn_t
-leapioraid_base_interrupt(int irq, void *bus_id)
-{
- struct leapioraid_adapter_reply_queue *reply_q = bus_id;
- struct LEAPIORAID_ADAPTER *ioc = reply_q->ioc;
-
- if (ioc->mask_interrupts)
- return IRQ_NONE;
- if (reply_q->irq_poll_scheduled)
- return IRQ_HANDLED;
- return ((leapioraid_base_process_reply_queue(reply_q) > 0) ?
- IRQ_HANDLED : IRQ_NONE);
-}
-
-static
-int leapioraid_base_irqpoll(struct irq_poll *irqpoll, int budget)
-{
- struct leapioraid_adapter_reply_queue *reply_q;
- int num_entries = 0;
-
- reply_q = container_of(irqpoll,
- struct leapioraid_adapter_reply_queue, irqpoll);
- if (reply_q->irq_line_enable) {
- disable_irq_nosync(reply_q->os_irq);
- reply_q->irq_line_enable = false;
- }
- num_entries = leapioraid_base_process_reply_queue(reply_q);
- if (num_entries < budget) {
- irq_poll_complete(irqpoll);
- reply_q->irq_poll_scheduled = false;
- reply_q->irq_line_enable = true;
- enable_irq(reply_q->os_irq);
- }
- return num_entries;
-}
-
-static void
-leapioraid_base_init_irqpolls(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct leapioraid_adapter_reply_queue *reply_q, *next;
-
- if (list_empty(&ioc->reply_queue_list))
- return;
- list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
- if (reply_q->is_blk_mq_poll_q)
- continue;
- irq_poll_init(&reply_q->irqpoll, ioc->thresh_hold,
- leapioraid_base_irqpoll);
- reply_q->irq_poll_scheduled = false;
- reply_q->irq_line_enable = true;
- reply_q->os_irq = pci_irq_vector(ioc->pdev,
- reply_q->msix_index);
- }
-}
-
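irq_poll gives the driver NAPI-style interrupt mitigation for its reply
queues: the hard handler disables the line and schedules, the poll callback
drains at most the budget, and only a pass that falls short of the budget
re-enables the interrupt path via irq_poll_complete(). The contract in
miniature (my_irqpoll() and my_drain() are stand-ins for
leapioraid_base_irqpoll() and leapioraid_base_process_reply_queue()):

	static int my_irqpoll(struct irq_poll *ip, int budget)
	{
		int done = my_drain(budget);

		if (done < budget)
			irq_poll_complete(ip);	/* re-arm the interrupt path */
		return done;
	}

	irq_poll_init(&q->irqpoll, budget, my_irqpoll);
	irq_poll_sched(&q->irqpoll);		/* from the hard IRQ handler */
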
-static inline int
-leapioraid_base_is_controller_msix_enabled(struct LEAPIORAID_ADAPTER *ioc)
-{
- return (ioc->facts.IOCCapabilities &
- LEAPIORAID_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
-}
-
-void
-leapioraid_base_sync_reply_irqs(struct LEAPIORAID_ADAPTER *ioc, u8 poll)
-{
- struct leapioraid_adapter_reply_queue *reply_q;
-
- if (!leapioraid_base_is_controller_msix_enabled(ioc))
- return;
- list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
- if (ioc->shost_recovery || ioc->remove_host ||
- ioc->pci_error_recovery)
- return;
- if (reply_q->msix_index == 0)
- continue;
- if (reply_q->is_blk_mq_poll_q) {
- leapioraid_base_process_reply_queue(reply_q);
- continue;
- }
- synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
- if (reply_q->irq_poll_scheduled) {
- irq_poll_disable(&reply_q->irqpoll);
- irq_poll_enable(&reply_q->irqpoll);
- if (reply_q->irq_poll_scheduled) {
- reply_q->irq_poll_scheduled = false;
- reply_q->irq_line_enable = true;
- enable_irq(reply_q->os_irq);
- }
- }
- if (poll)
- leapioraid_base_process_reply_queue(reply_q);
- }
-}
-
-void
-leapioraid_base_release_callback_handler(u8 cb_idx)
-{
- leapioraid_callbacks[cb_idx] = NULL;
-}
-
-u8
-leapioraid_base_register_callback_handler(LEAPIORAID_CALLBACK cb_func)
-{
- u8 cb_idx;
-
- for (cb_idx = LEAPIORAID_MAX_CALLBACKS - 1; cb_idx; cb_idx--)
- if (leapioraid_callbacks[cb_idx] == NULL)
- break;
- leapioraid_callbacks[cb_idx] = cb_func;
- return cb_idx;
-}
-
-void
-leapioraid_base_initialize_callback_handler(void)
-{
- u8 cb_idx;
-
- for (cb_idx = 0; cb_idx < LEAPIORAID_MAX_CALLBACKS; cb_idx++)
- leapioraid_base_release_callback_handler(cb_idx);
-}
-
-static void
-leapioraid_base_build_zero_len_sge(
- struct LEAPIORAID_ADAPTER *ioc, void *paddr)
-{
- u32 flags_length = (u32) ((LEAPIORAID_SGE_FLAGS_LAST_ELEMENT |
- LEAPIORAID_SGE_FLAGS_END_OF_BUFFER |
- LEAPIORAID_SGE_FLAGS_END_OF_LIST |
- LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT) <<
- LEAPIORAID_SGE_FLAGS_SHIFT);
-
- ioc->base_add_sg_single(paddr, flags_length, -1);
-}
-
-static void
-leapioraid_base_add_sg_single_32(void *paddr, u32 flags_length,
- dma_addr_t dma_addr)
-{
- struct LeapioSGESimple32_t *sgel = paddr;
-
- flags_length |= (LEAPIORAID_SGE_FLAGS_32_BIT_ADDRESSING |
- LEAPIORAID_SGE_FLAGS_SYSTEM_ADDRESS) <<
- LEAPIORAID_SGE_FLAGS_SHIFT;
- sgel->FlagsLength = cpu_to_le32(flags_length);
- sgel->Address = cpu_to_le32(dma_addr);
-}
-
-static void
-leapioraid_base_add_sg_single_64(void *paddr, u32 flags_length,
- dma_addr_t dma_addr)
-{
- struct LeapioSGESimple64_t *sgel = paddr;
-
- flags_length |= (LEAPIORAID_SGE_FLAGS_64_BIT_ADDRESSING |
- LEAPIORAID_SGE_FLAGS_SYSTEM_ADDRESS) <<
- LEAPIORAID_SGE_FLAGS_SHIFT;
- sgel->FlagsLength = cpu_to_le32(flags_length);
- sgel->Address = cpu_to_le64(dma_addr);
-}
-
-static
-struct leapioraid_chain_tracker *leapioraid_base_get_chain_buffer_tracker(
- struct LEAPIORAID_ADAPTER *ioc,
- struct scsi_cmnd *scmd)
-{
- struct leapioraid_chain_tracker *chain_req;
- struct leapioraid_scsiio_tracker *st = leapioraid_base_scsi_cmd_priv(scmd);
- u16 smid = st->smid;
- u8 chain_offset =
- atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);
-
- if (chain_offset == ioc->chains_needed_per_io)
- return NULL;
- chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
- atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
- return chain_req;
-}
-
-static void
-leapioraid_base_build_sg(struct LEAPIORAID_ADAPTER *ioc, void *psge,
- dma_addr_t data_out_dma, size_t data_out_sz,
- dma_addr_t data_in_dma, size_t data_in_sz)
-{
- u32 sgl_flags;
-
- if (!data_out_sz && !data_in_sz) {
- leapioraid_base_build_zero_len_sge(ioc, psge);
- return;
- }
- if (data_out_sz && data_in_sz) {
- sgl_flags = (LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT |
- LEAPIORAID_SGE_FLAGS_END_OF_BUFFER |
- LEAPIORAID_SGE_FLAGS_HOST_TO_IOC);
- sgl_flags = sgl_flags << LEAPIORAID_SGE_FLAGS_SHIFT;
- ioc->base_add_sg_single(psge, sgl_flags |
- data_out_sz, data_out_dma);
- psge += ioc->sge_size;
- sgl_flags = (LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT |
- LEAPIORAID_SGE_FLAGS_LAST_ELEMENT |
- LEAPIORAID_SGE_FLAGS_END_OF_BUFFER |
- LEAPIORAID_SGE_FLAGS_END_OF_LIST);
- sgl_flags = sgl_flags << LEAPIORAID_SGE_FLAGS_SHIFT;
- ioc->base_add_sg_single(psge, sgl_flags |
- data_in_sz, data_in_dma);
- } else if (data_out_sz) {
- sgl_flags = (LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT |
- LEAPIORAID_SGE_FLAGS_LAST_ELEMENT |
- LEAPIORAID_SGE_FLAGS_END_OF_BUFFER |
- LEAPIORAID_SGE_FLAGS_END_OF_LIST |
- LEAPIORAID_SGE_FLAGS_HOST_TO_IOC);
- sgl_flags = sgl_flags << LEAPIORAID_SGE_FLAGS_SHIFT;
- ioc->base_add_sg_single(psge, sgl_flags |
- data_out_sz, data_out_dma);
- } else if (data_in_sz) {
- sgl_flags = (LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT |
- LEAPIORAID_SGE_FLAGS_LAST_ELEMENT |
- LEAPIORAID_SGE_FLAGS_END_OF_BUFFER |
- LEAPIORAID_SGE_FLAGS_END_OF_LIST);
- sgl_flags = sgl_flags << LEAPIORAID_SGE_FLAGS_SHIFT;
- ioc->base_add_sg_single(psge, sgl_flags |
- data_in_sz, data_in_dma);
- }
-}
-
-u32
-leapioraid_base_mod64(u64 dividend, u32 divisor)
-{
- u32 remainder;
-
- if (!divisor) {
- pr_err("leapioraid : DIVISOR is zero, in div fn\n");
- return 0;
- }
- remainder = do_div(dividend, divisor);
- return remainder;
-}
-
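The wrapper exists because do_div() is destructive: it divides the 64-bit
dividend in place and hands back only the 32-bit remainder, e.g.:

	u64 bytes = 10000000001ULL;
	u32 rem = do_div(bytes, 4096);	/* bytes now holds the quotient... */
					/* ...and rem the remainder */
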
-static void
-leapioraid_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset,
- u32 length, dma_addr_t dma_addr)
-{
- struct LEAPIORAID_IEEE_SGE_CHAIN64 *sgel = paddr;
-
- sgel->Flags = flags;
- sgel->NextChainOffset = chain_offset;
- sgel->Length = cpu_to_le32(length);
- sgel->Address = cpu_to_le64(dma_addr);
-}
-
-static void
-leapioraid_base_build_zero_len_sge_ieee(struct LEAPIORAID_ADAPTER *ioc,
- void *paddr)
-{
- u8 sgl_flags = (LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
- LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR |
- LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST);
-
- leapioraid_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
-}
-
-static int
-leapioraid_base_build_sg_scmd_ieee(struct LEAPIORAID_ADAPTER *ioc,
- struct scsi_cmnd *scmd, u16 smid)
-{
- struct LeapioraidSCSIIOReq_t *mpi_request;
- dma_addr_t chain_dma;
- struct scatterlist *sg_scmd;
- void *sg_local, *chain;
- u32 chain_offset;
- u32 chain_length;
- int sges_left;
- u32 sges_in_segment;
- u8 simple_sgl_flags;
- u8 simple_sgl_flags_last;
- u8 chain_sgl_flags;
- struct leapioraid_chain_tracker *chain_req;
-
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- simple_sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
- LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR;
- simple_sgl_flags_last = simple_sgl_flags |
- LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST;
- chain_sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
- LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR;
-
- sg_scmd = scsi_sglist(scmd);
- sges_left = scsi_dma_map(scmd);
- if (sges_left < 0) {
- pr_err_ratelimited
- ("sd %s: scsi_dma_map failed: request for %d bytes!\n",
- dev_name(&scmd->device->sdev_gendev), scsi_bufflen(scmd));
- return -ENOMEM;
- }
- sg_local = &mpi_request->SGL;
- sges_in_segment = (ioc->request_sz -
- offsetof(struct LeapioraidSCSIIOReq_t,
- SGL)) / ioc->sge_size_ieee;
- if (sges_left <= sges_in_segment)
- goto fill_in_last_segment;
- mpi_request->ChainOffset = (sges_in_segment - 1) +
- (offsetof(struct LeapioraidSCSIIOReq_t, SGL) / ioc->sge_size_ieee);
- while (sges_in_segment > 1) {
- leapioraid_base_add_sg_single_ieee(sg_local, simple_sgl_flags,
- 0, sg_dma_len(sg_scmd),
- sg_dma_address(sg_scmd));
-
- sg_scmd = sg_next(sg_scmd);
- sg_local += ioc->sge_size_ieee;
- sges_left--;
- sges_in_segment--;
- }
- chain_req = leapioraid_base_get_chain_buffer_tracker(ioc, scmd);
- if (!chain_req)
- return -1;
- chain = chain_req->chain_buffer;
- chain_dma = chain_req->chain_buffer_dma;
- do {
- sges_in_segment = (sges_left <=
- ioc->max_sges_in_chain_message) ? sges_left :
- ioc->max_sges_in_chain_message;
- chain_offset = (sges_left == sges_in_segment) ?
- 0 : sges_in_segment;
- chain_length = sges_in_segment * ioc->sge_size_ieee;
- if (chain_offset)
- chain_length += ioc->sge_size_ieee;
- leapioraid_base_add_sg_single_ieee(sg_local, chain_sgl_flags,
- chain_offset, chain_length,
- chain_dma);
- sg_local = chain;
- if (!chain_offset)
- goto fill_in_last_segment;
- while (sges_in_segment) {
- leapioraid_base_add_sg_single_ieee(sg_local,
- simple_sgl_flags, 0,
- sg_dma_len(sg_scmd),
- sg_dma_address
- (sg_scmd));
-
- sg_scmd = sg_next(sg_scmd);
- sg_local += ioc->sge_size_ieee;
- sges_left--;
- sges_in_segment--;
- }
- chain_req = leapioraid_base_get_chain_buffer_tracker(ioc, scmd);
- if (!chain_req)
- return -1;
- chain = chain_req->chain_buffer;
- chain_dma = chain_req->chain_buffer_dma;
- } while (1);
-fill_in_last_segment:
- while (sges_left > 0) {
- if (sges_left == 1)
- leapioraid_base_add_sg_single_ieee(sg_local,
- simple_sgl_flags_last,
- 0,
- sg_dma_len(sg_scmd),
- sg_dma_address
- (sg_scmd));
- else
- leapioraid_base_add_sg_single_ieee(sg_local,
- simple_sgl_flags, 0,
- sg_dma_len(sg_scmd),
- sg_dma_address
- (sg_scmd));
-
- sg_scmd = sg_next(sg_scmd);
- sg_local += ioc->sge_size_ieee;
- sges_left--;
- }
- return 0;
-}
-
-static void
-leapioraid_base_build_sg_ieee(struct LEAPIORAID_ADAPTER *ioc, void *psge,
- dma_addr_t data_out_dma, size_t data_out_sz,
- dma_addr_t data_in_dma, size_t data_in_sz)
-{
- u8 sgl_flags;
-
- if (!data_out_sz && !data_in_sz) {
- leapioraid_base_build_zero_len_sge_ieee(ioc, psge);
- return;
- }
- if (data_out_sz && data_in_sz) {
- sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
- LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR;
- leapioraid_base_add_sg_single_ieee(psge, sgl_flags, 0,
- data_out_sz, data_out_dma);
- psge += ioc->sge_size_ieee;
- sgl_flags |= LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST;
- leapioraid_base_add_sg_single_ieee(psge, sgl_flags, 0,
- data_in_sz, data_in_dma);
- } else if (data_out_sz) {
- sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
- LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST |
- LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR;
- leapioraid_base_add_sg_single_ieee(psge, sgl_flags, 0,
- data_out_sz, data_out_dma);
- } else if (data_in_sz) {
- sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
- LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST |
- LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR;
- leapioraid_base_add_sg_single_ieee(psge, sgl_flags, 0,
- data_in_sz, data_in_dma);
- }
-}
-
-#define leapioraid_convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
-static int
-leapioraid_base_config_dma_addressing(struct LEAPIORAID_ADAPTER *ioc,
- struct pci_dev *pdev)
-{
-	struct sysinfo s;
-	char *desc = "63";
-	u64 consistent_dma_mask = DMA_BIT_MASK(63);
-	u64 dma_mask = DMA_BIT_MASK(63);
-
-	ioc->dma_mask = 63;
-	if (ioc->use_32bit_dma)
-		consistent_dma_mask = DMA_BIT_MASK(32);
-	if (sizeof(dma_addr_t) > 4) {
-		if (!dma_set_mask(&pdev->dev, dma_mask) &&
-		    !dma_set_coherent_mask(&pdev->dev, consistent_dma_mask)) {
- ioc->base_add_sg_single =
- &leapioraid_base_add_sg_single_64;
- ioc->sge_size = sizeof(struct LeapioSGESimple64_t);
- if (!ioc->use_32bit_dma)
- goto out;
- return 0;
- }
- }
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))
- && !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- ioc->base_add_sg_single = &leapioraid_base_add_sg_single_32;
- ioc->sge_size = sizeof(struct LeapioSGESimple32_t);
- desc = "32";
- ioc->dma_mask = 32;
- } else
- return -ENODEV;
-out:
- si_meminfo(&s);
- pr_info("%s %s BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
- ioc->name, desc, leapioraid_convert_to_kb(s.totalram));
- return 0;
-}
-
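For reference, the paired dma_set_mask()/dma_set_coherent_mask() calls per
width can be collapsed with the core helper; a sketch of the same
63-bit-then-32-bit fallback (not a claim about what this driver must do, since
it deliberately keeps distinct streaming and coherent masks):

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(63)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;
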
-int
-leapioraid_base_check_and_get_msix_vectors(struct pci_dev *pdev)
-{
- int base;
- u16 message_control, msix_vector_count;
-
- base = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (!base)
- return -EINVAL;
- pci_read_config_word(pdev, base + 2, &message_control);
- msix_vector_count = (message_control & 0x3FF) + 1;
- return msix_vector_count;
-}
-
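The open-coded capability walk above matches what the PCI core already
exports; assuming only the vector count is wanted, it reduces to:

	int nvec = pci_msix_vec_count(pdev);	/* negative if MSI-X absent */
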
-enum leapioraid_pci_bus_speed {
- LEAPIORAID_PCIE_SPEED_2_5GT = 0x14,
- LEAPIORAID_PCIE_SPEED_5_0GT = 0x15,
- LEAPIORAID_PCIE_SPEED_8_0GT = 0x16,
- LEAPIORAID_PCIE_SPEED_16_0GT = 0x17,
- LEAPIORAID_PCI_SPEED_UNKNOWN = 0xff,
-};
-
-const unsigned char leapioraid_pcie_link_speed[] = {
- LEAPIORAID_PCI_SPEED_UNKNOWN,
- LEAPIORAID_PCIE_SPEED_2_5GT,
- LEAPIORAID_PCIE_SPEED_5_0GT,
- LEAPIORAID_PCIE_SPEED_8_0GT,
- LEAPIORAID_PCIE_SPEED_16_0GT,
- LEAPIORAID_PCI_SPEED_UNKNOWN,
- LEAPIORAID_PCI_SPEED_UNKNOWN,
- LEAPIORAID_PCI_SPEED_UNKNOWN,
- LEAPIORAID_PCI_SPEED_UNKNOWN,
- LEAPIORAID_PCI_SPEED_UNKNOWN,
- LEAPIORAID_PCI_SPEED_UNKNOWN,
- LEAPIORAID_PCI_SPEED_UNKNOWN,
- LEAPIORAID_PCI_SPEED_UNKNOWN,
- LEAPIORAID_PCI_SPEED_UNKNOWN,
- LEAPIORAID_PCI_SPEED_UNKNOWN,
- LEAPIORAID_PCI_SPEED_UNKNOWN
-};
-
-static void
-leapioraid_base_check_and_enable_high_iops_queues(
- struct LEAPIORAID_ADAPTER *ioc,
- int hba_msix_vector_count,
- int iopoll_q_count)
-{
- u16 lnksta;
- enum leapioraid_pci_bus_speed speed;
-
- if (perf_mode == LEAPIORAID_PERF_MODE_IOPS ||
- perf_mode == LEAPIORAID_PERF_MODE_LATENCY || iopoll_q_count) {
- ioc->high_iops_queues = 0;
- return;
- }
- if (perf_mode == LEAPIORAID_PERF_MODE_DEFAULT) {
- pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
- speed = leapioraid_pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
-		dev_info(&ioc->pdev->dev, "PCIe device speed is %s\n",
-			 speed == LEAPIORAID_PCIE_SPEED_2_5GT ? "2.5GT/s" :
-			 speed == LEAPIORAID_PCIE_SPEED_5_0GT ? "5.0GT/s" :
-			 speed == LEAPIORAID_PCIE_SPEED_8_0GT ? "8.0GT/s" :
-			 speed == LEAPIORAID_PCIE_SPEED_16_0GT ? "16.0GT/s" :
-			 "Unknown");
- if (speed < LEAPIORAID_PCIE_SPEED_16_0GT) {
- ioc->high_iops_queues = 0;
- return;
- }
- }
- if (!reset_devices &&
- hba_msix_vector_count == LEAPIORAID_GEN35_MAX_MSIX_QUEUES &&
- num_online_cpus() >= LEAPIORAID_HIGH_IOPS_REPLY_QUEUES &&
- max_msix_vectors == -1)
- ioc->high_iops_queues = LEAPIORAID_HIGH_IOPS_REPLY_QUEUES;
- else
- ioc->high_iops_queues = 0;
-}
-
-void
-leapioraid_base_disable_msix(struct LEAPIORAID_ADAPTER *ioc)
-{
- if (!ioc->msix_enable)
- return;
- pci_free_irq_vectors(ioc->pdev);
- kfree(ioc->blk_mq_poll_queues);
- ioc->msix_enable = 0;
-}
-
-void
-leapioraid_base_free_irq(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct leapioraid_adapter_reply_queue *reply_q, *next;
-
- if (list_empty(&ioc->reply_queue_list))
- return;
- list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
- list_del(&reply_q->list);
- if (reply_q->is_blk_mq_poll_q) {
- kfree(reply_q);
- continue;
- }
- irq_poll_disable(&reply_q->irqpoll);
- if (ioc->smp_affinity_enable)
- irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
- reply_q->msix_index), NULL);
- free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
- reply_q);
- kfree(reply_q);
- }
-}
-
-static int
-leapioraid_base_request_irq(struct LEAPIORAID_ADAPTER *ioc, u8 index)
-{
- struct leapioraid_adapter_reply_queue *reply_q;
- int r;
- u8 qid;
-
- reply_q = kzalloc(sizeof(struct leapioraid_adapter_reply_queue),
- GFP_KERNEL);
- if (!reply_q)
- return -ENOMEM;
-
- reply_q->ioc = ioc;
- reply_q->msix_index = index;
- atomic_set(&reply_q->busy, 0);
- if (index >= ioc->iopoll_q_start_index) {
- qid = index - ioc->iopoll_q_start_index;
- snprintf(reply_q->name, LEAPIORAID_NAME_LENGTH, "%s%u-mq-poll%u",
- ioc->driver_name, ioc->id, qid);
- reply_q->is_blk_mq_poll_q = 1;
- ioc->blk_mq_poll_queues[qid].reply_q = reply_q;
- INIT_LIST_HEAD(&reply_q->list);
- list_add_tail(&reply_q->list, &ioc->reply_queue_list);
- return 0;
- }
- if (ioc->msix_enable)
- snprintf(reply_q->name, LEAPIORAID_NAME_LENGTH, "%s%u-msix%u",
- ioc->driver_name, ioc->id, index);
- else
- snprintf(reply_q->name, LEAPIORAID_NAME_LENGTH, "%s%d",
- ioc->driver_name, ioc->id);
- r = request_irq(pci_irq_vector(ioc->pdev, index), leapioraid_base_interrupt,
- IRQF_SHARED, reply_q->name, reply_q);
- if (r) {
- pr_err("%s unable to allocate interrupt %d!\n", reply_q->name,
- pci_irq_vector(ioc->pdev, index));
- kfree(reply_q);
- return -EBUSY;
- }
-
- INIT_LIST_HEAD(&reply_q->list);
- list_add_tail(&reply_q->list, &ioc->reply_queue_list);
- return 0;
-}
-
-static int leapioraid_base_alloc_irq_vectors(struct LEAPIORAID_ADAPTER *ioc)
-{
- int i, irq_flags = PCI_IRQ_MSIX;
- struct irq_affinity desc = {.pre_vectors = ioc->high_iops_queues };
- struct irq_affinity *descp = &desc;
- int nr_msix_vectors = ioc->iopoll_q_start_index;
-
- if (ioc->smp_affinity_enable)
- irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
- else
- descp = NULL;
- dinitprintk(ioc, pr_err(
- "%s high_iops_queues: %d,\n\t\t"
- "reply_queue_count: %d, nr_msix_vectors: %d\n",
- ioc->name,
- ioc->high_iops_queues,
- ioc->reply_queue_count,
- nr_msix_vectors));
- i = pci_alloc_irq_vectors_affinity(
- ioc->pdev,
- ioc->high_iops_queues,
- nr_msix_vectors, irq_flags, descp);
- return i;
-}
-
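pci_alloc_irq_vectors_affinity() spreads only the vectors past .pre_vectors
across the online CPUs, which is how the high-IOPS queues above keep their
default affinity. An illustrative call with made-up counts:

	struct irq_affinity desc = { .pre_vectors = 2 };
	int nvec = pci_alloc_irq_vectors_affinity(pdev, 2, 16,
			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
	/* vectors 0-1 keep default affinity; 2..nvec-1 are spread */
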
-static int
-leapioraid_base_enable_msix(struct LEAPIORAID_ADAPTER *ioc)
-{
- int r, i, msix_vector_count, local_max_msix_vectors;
- int iopoll_q_count = 0;
-
- ioc->msix_load_balance = false;
- msix_vector_count =
- leapioraid_base_check_and_get_msix_vectors(ioc->pdev);
- if (msix_vector_count <= 0) {
- dfailprintk(ioc, pr_info("%s msix not supported\n", ioc->name));
- goto try_ioapic;
- }
- dinitprintk(ioc, pr_err(
- "%s MSI-X vectors supported: %d, no of cores: %d\n",
- ioc->name, msix_vector_count, ioc->cpu_count));
- ioc->reply_queue_count = min_t(int, ioc->cpu_count, msix_vector_count);
- if (!ioc->rdpq_array_enable && max_msix_vectors == -1) {
- if (reset_devices)
- local_max_msix_vectors = 1;
- else
- local_max_msix_vectors = 8;
- } else
- local_max_msix_vectors = max_msix_vectors;
- if (local_max_msix_vectors == 0)
- goto try_ioapic;
- if (!ioc->combined_reply_queue) {
- pr_err(
- "%s combined reply queue is off, so enabling msix load balance\n",
- ioc->name);
- ioc->msix_load_balance = true;
- }
- if (ioc->msix_load_balance)
- ioc->smp_affinity_enable = 0;
- if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1)
- ioc->shost->host_tagset = 0;
- if (ioc->shost->host_tagset)
- iopoll_q_count = poll_queues;
- if (iopoll_q_count) {
- ioc->blk_mq_poll_queues = kcalloc(iopoll_q_count,
- sizeof(struct
- leapioraid_blk_mq_poll_queue),
- GFP_KERNEL);
- if (!ioc->blk_mq_poll_queues)
- iopoll_q_count = 0;
- }
- leapioraid_base_check_and_enable_high_iops_queues(ioc,
- msix_vector_count,
- iopoll_q_count);
- ioc->reply_queue_count =
- min_t(int, ioc->reply_queue_count + ioc->high_iops_queues,
- msix_vector_count);
- if (local_max_msix_vectors > 0)
- ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
- ioc->reply_queue_count);
- if (iopoll_q_count) {
- if (ioc->reply_queue_count < (iopoll_q_count + 1))
- iopoll_q_count = 0;
- ioc->reply_queue_count =
- min(ioc->reply_queue_count + iopoll_q_count,
- msix_vector_count);
- }
- ioc->iopoll_q_start_index = ioc->reply_queue_count - iopoll_q_count;
- r = leapioraid_base_alloc_irq_vectors(ioc);
- if (r < 0) {
- pr_warn(
- "%s pci_alloc_irq_vectors failed (r=%d) !!!\n",
- ioc->name, r);
- goto try_ioapic;
- }
- ioc->msix_enable = 1;
- for (i = 0; i < ioc->reply_queue_count; i++) {
- r = leapioraid_base_request_irq(ioc, i);
- if (r) {
- leapioraid_base_free_irq(ioc);
- leapioraid_base_disable_msix(ioc);
- goto try_ioapic;
- }
- }
- dinitprintk(ioc,
- pr_info("%s High IOPs queues : %s\n",
- ioc->name,
- ioc->high_iops_queues ? "enabled" : "disabled"));
- return 0;
-try_ioapic:
- ioc->high_iops_queues = 0;
- dinitprintk(ioc, pr_err(
- "%s High IOPs queues : disabled\n", ioc->name));
- ioc->reply_queue_count = 1;
-	ioc->iopoll_q_start_index = ioc->reply_queue_count;
- r = leapioraid_base_request_irq(ioc, 0);
- return r;
-}
-
-static void
-leapioraid_base_import_managed_irqs_affinity(
- struct LEAPIORAID_ADAPTER *ioc)
-{
- struct leapioraid_adapter_reply_queue *reply_q;
- unsigned int cpu, nr_msix;
- int local_numa_node;
- unsigned int index = 0;
-
- nr_msix = ioc->reply_queue_count;
- if (!nr_msix)
- return;
- if (ioc->smp_affinity_enable) {
- if (ioc->high_iops_queues) {
- local_numa_node = dev_to_node(&ioc->pdev->dev);
- for (index = 0; index < ioc->high_iops_queues; index++) {
- irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
- index),
- cpumask_of_node
- (local_numa_node));
- }
- }
- list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
- const cpumask_t *mask;
-
- if (reply_q->msix_index < ioc->high_iops_queues ||
- reply_q->msix_index >= ioc->iopoll_q_start_index)
- continue;
- mask = pci_irq_get_affinity(ioc->pdev,
- reply_q->msix_index);
- if (!mask) {
- dinitprintk(ioc, pr_warn(
- "%s no affinity for msi %x\n",
- ioc->name,
- reply_q->msix_index));
- goto fall_back;
- }
- for_each_cpu_and(cpu, mask, cpu_online_mask) {
- if (cpu >= ioc->cpu_msix_table_sz)
- break;
- ioc->cpu_msix_table[cpu] = reply_q->msix_index;
- }
- }
- return;
- }
-fall_back:
- leapioraid_base_group_cpus_on_irq(ioc);
-}
-
-static void
-leapioraid_base_assign_reply_queues(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct leapioraid_adapter_reply_queue *reply_q;
- int reply_queue;
-
- if (!leapioraid_base_is_controller_msix_enabled(ioc))
- return;
- if (ioc->msix_load_balance)
- return;
- memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
- if (ioc->reply_queue_count > ioc->facts.MaxMSIxVectors) {
- ioc->reply_queue_count = ioc->facts.MaxMSIxVectors;
- reply_queue = 0;
- list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
- reply_q->msix_index = reply_queue;
- if (++reply_queue == ioc->reply_queue_count)
- reply_queue = 0;
- }
- }
- leapioraid_base_import_managed_irqs_affinity(ioc);
-}
-
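-/*
- * Sleeping poll of HostInterruptStatus, roughly once per millisecond
- * for 'timeout' seconds, until the IOC raises its doorbell interrupt.
- */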
-static int
-leapioraid_base_wait_for_doorbell_int(
- struct LEAPIORAID_ADAPTER *ioc, int timeout)
-{
- u32 cntdn, count;
- u32 int_status;
-
- count = 0;
- cntdn = 1000 * timeout;
- do {
- int_status =
- ioc->base_readl(&ioc->chip->HostInterruptStatus,
- LEAPIORAID_READL_RETRY_COUNT_OF_THREE);
- if (int_status & LEAPIORAID_HIS_IOC2SYS_DB_STATUS) {
- dhsprintk(ioc, pr_info(
- "%s %s: successful count(%d), timeout(%d)\n",
- ioc->name, __func__, count,
- timeout));
- return 0;
- }
- usleep_range(1000, 1100);
- count++;
- } while (--cntdn);
- pr_err("%s %s: failed due to timeout count(%d), int_status(%x)!\n",
- ioc->name, __func__, count, int_status);
- return -EFAULT;
-}
-
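-/*
- * Busy-wait flavour of the doorbell-interrupt poll for contexts that
- * cannot sleep: udelay(500) per iteration, 2000 iterations per
- * timeout second.
- */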
-static int
-leapioraid_base_spin_on_doorbell_int(struct LEAPIORAID_ADAPTER *ioc,
- int timeout)
-{
- u32 cntdn, count;
- u32 int_status;
-
- count = 0;
- cntdn = 2000 * timeout;
- do {
- int_status =
- ioc->base_readl(&ioc->chip->HostInterruptStatus,
- LEAPIORAID_READL_RETRY_COUNT_OF_THREE);
- if (int_status & LEAPIORAID_HIS_IOC2SYS_DB_STATUS) {
- dhsprintk(ioc, pr_info(
- "%s %s: successful count(%d), timeout(%d)\n",
- ioc->name, __func__, count,
- timeout));
- return 0;
- }
- udelay(500);
- count++;
- } while (--cntdn);
- pr_err("%s %s: failed due to timeout count(%d), int_status(%x)!\n",
- ioc->name, __func__, count, int_status);
- return -EFAULT;
-}
-
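-/*
- * Wait for the IOC to consume (clear) the system-to-IOC doorbell bit,
- * bailing out early on a FAULT or COREDUMP state or on an all-ones
- * register read.
- */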
-static int
-leapioraid_base_wait_for_doorbell_ack(struct LEAPIORAID_ADAPTER *ioc,
- int timeout)
-{
- u32 cntdn, count;
- u32 int_status;
- u32 doorbell;
-
- count = 0;
- cntdn = 1000 * timeout;
- do {
- int_status =
- ioc->base_readl(&ioc->chip->HostInterruptStatus,
- LEAPIORAID_READL_RETRY_COUNT_OF_THREE);
- if (!(int_status & LEAPIORAID_HIS_SYS2IOC_DB_STATUS)) {
- dhsprintk(ioc, pr_info(
- "%s %s: successful count(%d), timeout(%d)\n",
- ioc->name, __func__, count,
- timeout));
- return 0;
- } else if (int_status & LEAPIORAID_HIS_IOC2SYS_DB_STATUS) {
- doorbell =
- ioc->base_readl(&ioc->chip->Doorbell,
- LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY);
- if ((doorbell & LEAPIORAID_IOC_STATE_MASK) ==
- LEAPIORAID_IOC_STATE_FAULT) {
- leapioraid_print_fault_code(ioc, doorbell);
- return -EFAULT;
- }
- if ((doorbell & LEAPIORAID_IOC_STATE_MASK) ==
- LEAPIORAID_IOC_STATE_COREDUMP) {
- leapioraid_base_coredump_info(ioc, doorbell);
- return -EFAULT;
- }
- } else if (int_status == 0xFFFFFFFF)
- goto out;
- usleep_range(1000, 1100);
- count++;
- } while (--cntdn);
-out:
- pr_err("%s %s: failed due to timeout count(%d), int_status(%x)!\n",
- ioc->name, __func__, count, int_status);
- return -EFAULT;
-}
-
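-/*
- * Wait for the firmware to drop LEAPIORAID_DOORBELL_USED once a
- * handshake has completed.
- */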
-static int
-leapioraid_base_wait_for_doorbell_not_used(struct LEAPIORAID_ADAPTER *ioc,
- int timeout)
-{
- u32 cntdn, count;
- u32 doorbell_reg;
-
- count = 0;
- cntdn = 1000 * timeout;
- do {
- doorbell_reg =
- ioc->base_readl(&ioc->chip->Doorbell,
- LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY);
- if (!(doorbell_reg & LEAPIORAID_DOORBELL_USED)) {
- dhsprintk(ioc, pr_info(
- "%s %s: successful count(%d), timeout(%d)\n",
- ioc->name, __func__, count,
- timeout));
- return 0;
- }
- usleep_range(1000, 1100);
- count++;
- } while (--cntdn);
- pr_err("%s %s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
- ioc->name, __func__, count, doorbell_reg);
- return -EFAULT;
-}
-
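-/*
- * Doorbell handshake: announce the dword count, feed the request one
- * dword at a time, then pull the reply back 16 bits per doorbell
- * interrupt.  This is the transport used before the message queues
- * exist, e.g. for IOC facts.
- */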
-static int
-leapioraid_base_handshake_req_reply_wait(struct LEAPIORAID_ADAPTER *ioc,
- int request_bytes, u32 *request,
- int reply_bytes, u16 *reply,
- int timeout)
-{
- struct LeapioraidDefaultRep_t *default_reply
- = (struct LeapioraidDefaultRep_t *) reply;
- int i;
- u8 failed;
- __le32 *mfp;
-
- if ((ioc->base_readl(&ioc->chip->Doorbell,
- LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY) & LEAPIORAID_DOORBELL_USED)) {
- pr_err("%s doorbell is in use (line=%d)\n", ioc->name, __LINE__);
- return -EFAULT;
- }
- if (ioc->base_readl(&ioc->chip->HostInterruptStatus,
- LEAPIORAID_READL_RETRY_COUNT_OF_THREE) &
- LEAPIORAID_HIS_IOC2SYS_DB_STATUS)
- writel(0, &ioc->chip->HostInterruptStatus);
- writel(((LEAPIORAID_FUNC_HANDSHAKE << LEAPIORAID_DOORBELL_FUNCTION_SHIFT)
- | ((request_bytes / 4) << LEAPIORAID_DOORBELL_ADD_DWORDS_SHIFT)),
- &ioc->chip->Doorbell);
- if ((leapioraid_base_spin_on_doorbell_int(ioc, 5))) {
- pr_err("%s doorbell handshake int failed (line=%d)\n",
- ioc->name, __LINE__);
- return -EFAULT;
- }
- writel(0, &ioc->chip->HostInterruptStatus);
- if ((leapioraid_base_wait_for_doorbell_ack(ioc, 5))) {
- pr_err("%s doorbell handshake ack failed (line=%d)\n",
- ioc->name, __LINE__);
- return -EFAULT;
- }
- for (i = 0, failed = 0; i < request_bytes / 4 && !failed; i++) {
- writel((u32) (request[i]), &ioc->chip->Doorbell);
- if ((leapioraid_base_wait_for_doorbell_ack(ioc, 5)))
- failed = 1;
- }
- if (failed) {
- pr_err("%s doorbell handshake sending request failed (line=%d)\n",
- ioc->name, __LINE__);
- return -EFAULT;
- }
- if ((leapioraid_base_wait_for_doorbell_int(ioc, timeout))) {
- pr_err("%s doorbell handshake int failed (line=%d)\n",
- ioc->name, __LINE__);
- return -EFAULT;
- }
- reply[0] =
- (u16) (ioc->base_readl(&ioc->chip->Doorbell,
- LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY)
- & LEAPIORAID_DOORBELL_DATA_MASK);
- writel(0, &ioc->chip->HostInterruptStatus);
- if ((leapioraid_base_wait_for_doorbell_int(ioc, 5))) {
- pr_err("%s doorbell handshake int failed (line=%d)\n",
- ioc->name, __LINE__);
- return -EFAULT;
- }
- reply[1] =
- (u16) (ioc->base_readl(&ioc->chip->Doorbell,
- LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY)
- & LEAPIORAID_DOORBELL_DATA_MASK);
- writel(0, &ioc->chip->HostInterruptStatus);
- for (i = 2; i < default_reply->MsgLength * 2; i++) {
- if ((leapioraid_base_wait_for_doorbell_int(ioc, 5))) {
- pr_err("%s doorbell handshake int failed (line=%d)\n",
- ioc->name, __LINE__);
- return -EFAULT;
- }
- if (i >= reply_bytes / 2)
- ioc->base_readl(&ioc->chip->Doorbell,
- LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY);
- else
- reply[i] =
- (u16) (ioc->base_readl(&ioc->chip->Doorbell,
- LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY)
- & LEAPIORAID_DOORBELL_DATA_MASK);
- writel(0, &ioc->chip->HostInterruptStatus);
- }
- if (leapioraid_base_wait_for_doorbell_int(ioc, 5)) {
- pr_err("%s doorbell handshake int failed (line=%d)\n",
- ioc->name, __LINE__);
- return -EFAULT;
- }
- if (leapioraid_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
- dhsprintk(ioc,
- pr_info("%s doorbell is in use (line=%d)\n",
- ioc->name, __LINE__));
- }
- writel(0, &ioc->chip->HostInterruptStatus);
- if (ioc->logging_level & LEAPIORAID_DEBUG_INIT) {
- mfp = (__le32 *) reply;
- pr_info("%s \toffset:data\n", ioc->name);
- for (i = 0; i < reply_bytes / 4; i++)
- pr_info("%s \t[0x%02x]:%08x\n",
- ioc->name, i * 4, le32_to_cpu(mfp[i]));
- }
- return 0;
-}
-
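-/*
- * Poll the IOC state about once per millisecond until it matches the
- * requested state; returns 0 on a match, otherwise the last observed
- * state.
- */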
-static int
-leapioraid_base_wait_on_iocstate(
- struct LEAPIORAID_ADAPTER *ioc, u32 ioc_state,
- int timeout)
-{
- u32 count, cntdn;
- u32 current_state;
-
- count = 0;
- cntdn = 1000 * timeout;
- do {
- current_state = leapioraid_base_get_iocstate(ioc, 1);
- if (current_state == ioc_state)
- return 0;
- if (count && current_state == LEAPIORAID_IOC_STATE_FAULT)
- break;
- usleep_range(1000, 1100);
- count++;
- } while (--cntdn);
- return current_state;
-}
-
-static inline void
-leapioraid_base_dump_reg_set(struct LEAPIORAID_ADAPTER *ioc)
-{
- unsigned int i, sz = 256;
- u32 __iomem *reg = (u32 __iomem *) ioc->chip;
-
- pr_info("%s System Register set:\n", ioc->name);
- for (i = 0; i < (sz / sizeof(u32)); i++)
- pr_info("%08x: %08x\n", (i * 4), readl(®[i]));
-}
-
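-/*
- * Unlock the host diagnostic register by replaying the magic key
- * sequence into WriteSequence (up to 20 attempts, 100 ms apart) and
- * hand back its current contents.
- */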
-int
-leapioraid_base_unlock_and_get_host_diagnostic(
- struct LEAPIORAID_ADAPTER *ioc,
- u32 *host_diagnostic)
-{
- u32 count;
-
- *host_diagnostic = 0;
- count = 0;
- do {
- drsprintk(ioc, pr_info("%s write magic sequence\n", ioc->name));
- writel(0x0, &ioc->chip->WriteSequence);
- writel(0xF, &ioc->chip->WriteSequence);
- writel(0x4, &ioc->chip->WriteSequence);
- writel(0xB, &ioc->chip->WriteSequence);
- writel(0x2, &ioc->chip->WriteSequence);
- writel(0x7, &ioc->chip->WriteSequence);
- writel(0xD, &ioc->chip->WriteSequence);
- msleep(100);
- if (count++ > 20) {
- pr_err("%s Giving up writing magic sequence after 20 retries\n",
- ioc->name);
- leapioraid_base_dump_reg_set(ioc);
- return -EFAULT;
- }
- *host_diagnostic =
- ioc->base_readl(&ioc->chip->HostDiagnostic,
- LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY);
- drsprintk(ioc, pr_info(
- "%s wrote magic sequence: cnt(%d), host_diagnostic(0x%08x)\n",
- ioc->name, count, *host_diagnostic));
- } while ((*host_diagnostic & 0x00000080) == 0);
- return 0;
-}
-
-void
-leapioraid_base_lock_host_diagnostic(struct LEAPIORAID_ADAPTER *ioc)
-{
- drsprintk(ioc, pr_info("%s disable writes to the diagnostic register\n",
- ioc->name));
- writel(0x0, &ioc->chip->WriteSequence);
-}
-
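-/*
- * Hard-reset the adapter through the host diagnostic register, then
- * wait for the firmware to return to the READY state.  PCI config
- * space access stays locked for the duration of the reset.
- */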
-static int
-leapioraid_base_diag_reset(struct LEAPIORAID_ADAPTER *ioc)
-{
- u32 host_diagnostic;
- u32 ioc_state;
- u32 count;
- u32 hcb_size;
-
- pr_info("%s sending diag reset !!\n", ioc->name);
- drsprintk(ioc,
- pr_info("%s Locking pci cfg space access\n",
- ioc->name));
- pci_cfg_access_lock(ioc->pdev);
- drsprintk(ioc, pr_info("%s clear interrupts\n",
- ioc->name));
- mutex_lock(&ioc->hostdiag_unlock_mutex);
- if (leapioraid_base_unlock_and_get_host_diagnostic
- (ioc, &host_diagnostic))
- goto out;
- hcb_size =
- ioc->base_readl(&ioc->chip->HCBSize, LEAPIORAID_READL_RETRY_COUNT_OF_THREE);
- drsprintk(ioc,
- pr_info("%s diag reset: issued\n",
- ioc->name));
- writel(host_diagnostic | LEAPIORAID_DIAG_RESET_ADAPTER,
- &ioc->chip->HostDiagnostic);
-#if defined(DISABLE_RESET_SUPPORT)
- count = 0;
- do {
- msleep(50);
- host_diagnostic =
- ioc->base_readl(&ioc->chip->HostDiagnostic,
- LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY);
- if (host_diagnostic == 0xFFFFFFFF)
- goto out;
- else if (count++ >= 300)
- goto out;
- if (!(count % 20))
- pr_info("waiting on diag reset bit to clear, count = %d\n",
- (count / 20));
- } while (host_diagnostic & LEAPIORAID_DIAG_RESET_ADAPTER);
-#else
- msleep(50);
- for (count = 0; count < (300000 / 256); count++) {
- host_diagnostic =
- ioc->base_readl(&ioc->chip->HostDiagnostic,
- LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY);
- if (host_diagnostic == 0xFFFFFFFF) {
- pr_err("%s Invalid host diagnostic register value\n",
- ioc->name);
- leapioraid_base_dump_reg_set(ioc);
- goto out;
- }
- if (!(host_diagnostic & LEAPIORAID_DIAG_RESET_ADAPTER))
- break;
-
- msleep(256);
- }
-#endif
- if (host_diagnostic & 0x00000100) {
- drsprintk(ioc, pr_info(
- "%s restart IOC assuming HCB Address points to good F/W\n",
- ioc->name));
- host_diagnostic &= ~0x00001800;
- host_diagnostic |= 0x00000800;
- writel(host_diagnostic, &ioc->chip->HostDiagnostic);
- drsprintk(ioc, pr_err(
- "%s re-enable the HCDW\n", ioc->name));
- writel(hcb_size | 0x00000001,
- &ioc->chip->HCBSize);
- }
- drsprintk(ioc, pr_info("%s restart the adapter\n",
- ioc->name));
- writel(host_diagnostic & ~0x00000002,
- &ioc->chip->HostDiagnostic);
- leapioraid_base_lock_host_diagnostic(ioc);
- mutex_unlock(&ioc->hostdiag_unlock_mutex);
- drsprintk(ioc, pr_info("%s Wait for FW to go to the READY state\n",
- ioc->name));
- ioc_state =
- leapioraid_base_wait_on_iocstate(
- ioc, LEAPIORAID_IOC_STATE_READY, 20);
- if (ioc_state) {
- pr_err("%s %s: failed going to ready state (ioc_state=0x%x)\n",
- ioc->name, __func__, ioc_state);
- leapioraid_base_dump_reg_set(ioc);
- goto out_unlocked;
- }
- drsprintk(ioc, pr_err(
- "%s Unlocking pci cfg space access\n", ioc->name));
- pci_cfg_access_unlock(ioc->pdev);
- if (ioc->open_pcie_trace)
- leapioraid_base_trace_log_init(ioc);
- pr_info("%s diag reset: SUCCESS\n", ioc->name);
- return 0;
-out:
- mutex_unlock(&ioc->hostdiag_unlock_mutex);
-out_unlocked:
- drsprintk(ioc, pr_err(
- "%s Unlocking pci cfg space access\n", ioc->name));
- pci_cfg_access_unlock(ioc->pdev);
- pr_err("%s diag reset: FAILED\n", ioc->name);
- return -EFAULT;
-}
-
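-/*
- * Drive the IOC toward READY at start-up: READY or OPERATIONAL needs
- * no work, FAULT and a stuck doorbell are recovered with a diag reset,
- * and COREDUMP is reported as a failure without resetting.
- */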
-static int
-leapioraid_base_wait_for_iocstate(
- struct LEAPIORAID_ADAPTER *ioc, int timeout)
-{
- u32 ioc_state;
- int rc;
-
- dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
- __func__));
- if (!leapioraid_base_pci_device_is_available(ioc))
- return 0;
- ioc_state = leapioraid_base_get_iocstate(ioc, 0);
- dhsprintk(ioc, pr_info("%s %s: ioc_state(0x%08x)\n",
- ioc->name, __func__, ioc_state));
- if (((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_READY) ||
- (ioc_state & LEAPIORAID_IOC_STATE_MASK) ==
- LEAPIORAID_IOC_STATE_OPERATIONAL)
- return 0;
- if (ioc_state & LEAPIORAID_DOORBELL_USED) {
- dhsprintk(ioc,
- pr_info("%s unexpected doorbell active!\n", ioc->name));
- goto issue_diag_reset;
- }
- if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT) {
- leapioraid_print_fault_code(ioc, ioc_state &
- LEAPIORAID_DOORBELL_DATA_MASK);
- goto issue_diag_reset;
- } else if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) ==
- LEAPIORAID_IOC_STATE_COREDUMP) {
- pr_err("%s %s: Skipping the diag reset here. (ioc_state=0x%x)\n",
- ioc->name, __func__, ioc_state);
- return -EFAULT;
- }
- ioc_state =
- leapioraid_base_wait_on_iocstate(ioc, LEAPIORAID_IOC_STATE_READY,
- timeout);
- if (ioc_state) {
- pr_err("%s %s: failed going to ready state (ioc_state=0x%x)\n",
- ioc->name, __func__, ioc_state);
- return -EFAULT;
- }
-issue_diag_reset:
- rc = leapioraid_base_diag_reset(ioc);
- return rc;
-}
-
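-/*
- * If the IOC sits in the FAULT or COREDUMP state, log the code, mask
- * interrupts and recover it with a diag reset.
- */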
-int
-leapioraid_base_check_for_fault_and_issue_reset(
- struct LEAPIORAID_ADAPTER *ioc)
-{
- u32 ioc_state;
- int rc = -EFAULT;
-
- dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
- __func__));
- if (!leapioraid_base_pci_device_is_available(ioc))
- return rc;
- ioc_state = leapioraid_base_get_iocstate(ioc, 0);
- dhsprintk(ioc, pr_info("%s %s: ioc_state(0x%08x)\n",
- ioc->name, __func__, ioc_state));
- if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT) {
- leapioraid_print_fault_code(ioc, ioc_state &
- LEAPIORAID_DOORBELL_DATA_MASK);
- leapioraid_base_mask_interrupts(ioc);
- rc = leapioraid_base_diag_reset(ioc);
- } else if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) ==
- LEAPIORAID_IOC_STATE_COREDUMP) {
- leapioraid_base_coredump_info(ioc,
- ioc_state &
- LEAPIORAID_DOORBELL_DATA_MASK);
- leapioraid_base_wait_for_coredump_completion(ioc, __func__);
- leapioraid_base_mask_interrupts(ioc);
- rc = leapioraid_base_diag_reset(ioc);
- }
- return rc;
-}
-
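-/*
- * Fetch the IOC facts over the doorbell handshake and cache the
- * capability and limit fields that the rest of the driver sizes
- * itself from.
- */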
-static int
-leapioraid_base_get_ioc_facts(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct LeapioraidIOCFactsReq_t mpi_request;
- struct LeapioraidIOCFactsRep_t mpi_reply;
- struct leapioraid_facts *facts;
- int mpi_reply_sz, mpi_request_sz, r;
-
- dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
- __func__));
- r = leapioraid_base_wait_for_iocstate(ioc, 10);
- if (r) {
- pr_err(
- "%s %s: failed getting to correct state\n", ioc->name,
- __func__);
- return r;
- }
- mpi_reply_sz = sizeof(struct LeapioraidIOCFactsRep_t);
- mpi_request_sz = sizeof(struct LeapioraidIOCFactsReq_t);
- memset(&mpi_request, 0, mpi_request_sz);
- mpi_request.Function = LEAPIORAID_FUNC_IOC_FACTS;
- r = leapioraid_base_handshake_req_reply_wait(ioc, mpi_request_sz,
- (u32 *) &mpi_request,
- mpi_reply_sz,
- (u16 *) &mpi_reply, 5);
- if (r != 0) {
- pr_err("%s %s: handshake failed (r=%d)\n",
- ioc->name, __func__, r);
- return r;
- }
- facts = &ioc->facts;
- memset(facts, 0, sizeof(struct leapioraid_facts));
- facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
- facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
- facts->IOCNumber = mpi_reply.IOCNumber;
- pr_info("%s IOC Number : %d\n", ioc->name, facts->IOCNumber);
- ioc->IOCNumber = facts->IOCNumber;
- facts->VP_ID = mpi_reply.VP_ID;
- facts->VF_ID = mpi_reply.VF_ID;
- facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
- facts->MaxChainDepth = mpi_reply.MaxChainDepth;
- facts->WhoInit = mpi_reply.WhoInit;
- facts->NumberOfPorts = mpi_reply.NumberOfPorts;
- facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
- if (ioc->msix_enable && (facts->MaxMSIxVectors <= 16))
- ioc->combined_reply_queue = 0;
- facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
- facts->MaxReplyDescriptorPostQueueDepth =
- le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
- facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
- facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
- if ((facts->IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
- ioc->ir_firmware = 1;
- if ((facts->IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE)
- && (!reset_devices))
- ioc->rdpq_array_capable = 1;
- else
- ioc->rdpq_array_capable = 0;
- if (facts->IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_ATOMIC_REQ)
- ioc->atomic_desc_capable = 1;
- else
- ioc->atomic_desc_capable = 0;
-
- facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
- facts->IOCRequestFrameSize = le16_to_cpu(mpi_reply.IOCRequestFrameSize);
- facts->IOCMaxChainSegmentSize =
- le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
- facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
- facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
- ioc->shost->max_id = -1;
- facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
- facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
- facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
- facts->HighPriorityCredit = le16_to_cpu(mpi_reply.HighPriorityCredit);
- facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
- facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
- facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
- ioc->page_size = 1 << facts->CurrentHostPageSize;
- if (ioc->page_size == 1) {
- pr_err(
- "%s CurrentHostPageSize is 0: Setting host page to 4k\n",
- ioc->name);
- ioc->page_size = 1 << 12;
- }
- dinitprintk(ioc,
- pr_info("%s CurrentHostPageSize(%d)\n",
- ioc->name, facts->CurrentHostPageSize));
- dinitprintk(ioc,
- pr_info("%s hba queue depth(%d), max chains per io(%d)\n",
- ioc->name, facts->RequestCredit, facts->MaxChainDepth));
- dinitprintk(ioc,
- pr_info("%s request frame size(%d), reply frame size(%d)\n",
- ioc->name,
- facts->IOCRequestFrameSize * 4,
- facts->ReplyFrameSize * 4));
- return 0;
-}
-
-static void
-leapioraid_base_unmap_resources(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct pci_dev *pdev = ioc->pdev;
-
- pr_info("%s %s\n", ioc->name, __func__);
- leapioraid_base_free_irq(ioc);
- leapioraid_base_disable_msix(ioc);
- kfree(ioc->replyPostRegisterIndex);
- mutex_lock(&ioc->pci_access_mutex);
- if (ioc->chip_phys) {
- iounmap(ioc->chip);
- ioc->chip_phys = 0;
- }
-
- pci_release_selected_regions(ioc->pdev, ioc->bars);
- pci_disable_device(pdev);
- mutex_unlock(&ioc->pci_access_mutex);
-}
-
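-/*
- * Enable the PCI device, ioremap the memory BAR, configure the DMA
- * mask and bring up MSI-X plus the per-queue IRQs; any failure is
- * unwound through leapioraid_base_unmap_resources().
- */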
-int
-leapioraid_base_map_resources(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct pci_dev *pdev = ioc->pdev;
- u32 memap_sz;
- u32 pio_sz;
- int i, r = 0, rc;
- u64 pio_chip = 0;
- phys_addr_t chip_phys = 0;
- struct leapioraid_adapter_reply_queue *reply_q;
- int iopoll_q_count = 0;
-
- dinitprintk(ioc, pr_info("%s %s\n",
- ioc->name, __func__));
-
- ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
- if (pci_enable_device_mem(pdev)) {
- pr_warn("%s pci_enable_device_mem: failed\n", ioc->name);
- return -ENODEV;
- }
- if (pci_request_selected_regions(pdev, ioc->bars, ioc->driver_name)) {
- pr_warn("%s pci_request_selected_regions: failed\n", ioc->name);
- r = -ENODEV;
- goto out_fail;
- }
-
- pci_set_master(pdev);
-
- if (leapioraid_base_config_dma_addressing(ioc, pdev) != 0) {
- pr_warn("%s no suitable DMA mask for %s\n",
- ioc->name, pci_name(pdev));
- r = -ENODEV;
- goto out_fail;
- }
- for (i = 0, memap_sz = 0, pio_sz = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
- if (pio_sz)
- continue;
- pio_chip = (u64) pci_resource_start(pdev, i);
- pio_sz = pci_resource_len(pdev, i);
- } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
- if (memap_sz)
- continue;
- ioc->chip_phys = pci_resource_start(pdev, i);
- chip_phys = ioc->chip_phys;
- memap_sz = pci_resource_len(pdev, i);
- ioc->chip = ioremap(ioc->chip_phys, memap_sz);
- if (ioc->chip == NULL) {
- pr_err("%s unable to map adapter memory!\n",
- ioc->name);
- r = -EINVAL;
- goto out_fail;
- }
- }
- }
- leapioraid_base_mask_interrupts(ioc);
- r = leapioraid_base_get_ioc_facts(ioc);
- if (r) {
- rc = leapioraid_base_check_for_fault_and_issue_reset(ioc);
- if (rc || (leapioraid_base_get_ioc_facts(ioc)))
- goto out_fail;
- }
- if (!ioc->rdpq_array_enable_assigned) {
- ioc->rdpq_array_enable = ioc->rdpq_array_capable;
- ioc->rdpq_array_enable_assigned = 1;
- }
- r = leapioraid_base_enable_msix(ioc);
- if (r)
- goto out_fail;
- iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index;
- for (i = 0; i < iopoll_q_count; i++) {
- atomic_set(&ioc->blk_mq_poll_queues[i].busy, 0);
- atomic_set(&ioc->blk_mq_poll_queues[i].pause, 0);
- }
- if (!ioc->is_driver_loading)
- leapioraid_base_init_irqpolls(ioc);
- if (ioc->combined_reply_queue) {
- ioc->replyPostRegisterIndex = kcalloc(ioc->nc_reply_index_count,
- sizeof(resource_size_t *),
- GFP_KERNEL);
- if (!ioc->replyPostRegisterIndex) {
- pr_err("%s allocation for reply Post Register Index failed!!!\n",
- ioc->name);
- r = -ENOMEM;
- goto out_fail;
- }
-
- for (i = 0; i < ioc->nc_reply_index_count; i++) {
- ioc->replyPostRegisterIndex[i] = (resource_size_t *)
- ((u8 *) &ioc->chip->Doorbell +
- 0x0000030C +
- (i * 0x10));
- }
- }
- list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
- if (reply_q->msix_index >= ioc->iopoll_q_start_index) {
- pr_info("%s enabled: index: %d\n",
- reply_q->name, reply_q->msix_index);
- continue;
- }
- pr_info("%s %s: IRQ %d\n",
- reply_q->name,
- ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
- "IO-APIC enabled"), pci_irq_vector(ioc->pdev,
- reply_q->msix_index));
- }
- pr_info("%s iomem(%pap), mapped(0x%p), size(%d)\n",
- ioc->name, &chip_phys, ioc->chip, memap_sz);
- pr_info("%s ioport(0x%016llx), size(%d)\n",
- ioc->name, (unsigned long long)pio_chip, pio_sz);
-
- pci_save_state(pdev);
- return 0;
-out_fail:
- leapioraid_base_unmap_resources(ioc);
- return r;
-}
-
-void *leapioraid_base_get_msg_frame(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid)
-{
- return (void *)(ioc->request + (smid * ioc->request_sz));
-}
-
-void *leapioraid_base_get_sense_buffer(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid)
-{
- return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
-}
-
-__le32
-leapioraid_base_get_sense_buffer_dma(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid)
-{
- return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
- SCSI_SENSE_BUFFERSIZE));
-}
-
-__le64
-leapioraid_base_get_sense_buffer_dma_64(struct LEAPIORAID_ADAPTER *ioc,
- u16 smid)
-{
- return cpu_to_le64(ioc->sense_dma + ((smid - 1) *
- SCSI_SENSE_BUFFERSIZE));
-}
-
-void *leapioraid_base_get_reply_virt_addr(struct LEAPIORAID_ADAPTER *ioc,
- u32 phys_addr)
-{
- if (!phys_addr)
- return NULL;
- return ioc->reply + (phys_addr - (u32) ioc->reply_dma);
-}
-
-static inline u8
-leapioraid_base_get_msix_index(
- struct LEAPIORAID_ADAPTER *ioc, struct scsi_cmnd *scmd)
-{
- if (ioc->msix_load_balance)
- return ioc->reply_queue_count ?
- leapioraid_base_mod64(atomic64_add_return(1, &ioc->total_io_cnt),
- ioc->reply_queue_count) : 0;
- if (scmd && ioc->shost->nr_hw_queues > 1) {
- u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
-
- return blk_mq_unique_tag_to_hwq(tag) + ioc->high_iops_queues;
- }
- return ioc->cpu_msix_table[raw_smp_processor_id()];
-}
-
-inline unsigned long
-leapioraid_base_sdev_nr_inflight_request(struct LEAPIORAID_ADAPTER *ioc,
- struct scsi_cmnd *scmd)
-{
- return scsi_device_busy(scmd->device);
-}
-
-static inline u8
-leapioraid_base_get_high_iops_msix_index(struct LEAPIORAID_ADAPTER *ioc,
- struct scsi_cmnd *scmd)
-{
- if (leapioraid_base_sdev_nr_inflight_request(ioc, scmd) >
- LEAPIORAID_DEVICE_HIGH_IOPS_DEPTH)
- return
- leapioraid_base_mod64((atomic64_add_return
- (1,
- &ioc->high_iops_outstanding) /
- LEAPIORAID_HIGH_IOPS_BATCH_COUNT),
- LEAPIORAID_HIGH_IOPS_REPLY_QUEUES);
- return leapioraid_base_get_msix_index(ioc, scmd);
-}
-
-u16
-leapioraid_base_get_smid(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx)
-{
- unsigned long flags;
- struct leapioraid_request_tracker *request;
- u16 smid;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- if (list_empty(&ioc->internal_free_list)) {
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- pr_err("%s %s: smid not available\n",
- ioc->name, __func__);
- return 0;
- }
- request = list_entry(ioc->internal_free_list.next,
- struct leapioraid_request_tracker, tracker_list);
- request->cb_idx = cb_idx;
- smid = request->smid;
- list_del(&request->tracker_list);
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return smid;
-}
-
-u16
-leapioraid_base_get_smid_scsiio(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx,
- struct scsi_cmnd *scmd)
-{
- struct leapioraid_scsiio_tracker *request;
- u16 smid;
- u32 tag;
- u32 unique_tag;
-
- unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
- tag = blk_mq_unique_tag_to_tag(unique_tag);
- ioc->io_queue_num[tag] = blk_mq_unique_tag_to_hwq(unique_tag);
- request = leapioraid_base_scsi_cmd_priv(scmd);
- smid = tag + 1;
- request->cb_idx = cb_idx;
- request->smid = smid;
- request->scmd = scmd;
- return smid;
-}
-
-u16
-leapioraid_base_get_smid_hpr(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx)
-{
- unsigned long flags;
- struct leapioraid_request_tracker *request;
- u16 smid;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- if (list_empty(&ioc->hpr_free_list)) {
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return 0;
- }
- request = list_entry(ioc->hpr_free_list.next,
- struct leapioraid_request_tracker, tracker_list);
- request->cb_idx = cb_idx;
- smid = request->smid;
- list_del(&request->tracker_list);
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return smid;
-}
-
-static void
-leapioraid_base_recovery_check(struct LEAPIORAID_ADAPTER *ioc)
-{
- if (ioc->shost_recovery && ioc->pending_io_count) {
- if (ioc->pending_io_count == 1)
- wake_up(&ioc->reset_wq);
- ioc->pending_io_count--;
- }
-}
-
-void
-leapioraid_base_clear_st(struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_scsiio_tracker *st)
-{
- if (!st)
- return;
- if (WARN_ON(st->smid == 0))
- return;
- st->cb_idx = 0xFF;
- st->direct_io = 0;
- st->scmd = NULL;
- atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
-}
-
-void
-leapioraid_base_free_smid(struct LEAPIORAID_ADAPTER *ioc, u16 smid)
-{
- unsigned long flags;
- int i;
- struct leapioraid_scsiio_tracker *st;
- void *request;
-
- if (smid < ioc->hi_priority_smid) {
- st = leapioraid_get_st_from_smid(ioc, smid);
- if (!st) {
- leapioraid_base_recovery_check(ioc);
- return;
- }
- request = leapioraid_base_get_msg_frame(ioc, smid);
- memset(request, 0, ioc->request_sz);
- leapioraid_base_clear_st(ioc, st);
- leapioraid_base_recovery_check(ioc);
- ioc->io_queue_num[smid - 1] = 0xFFFF;
- return;
- }
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- if (smid < ioc->internal_smid) {
- i = smid - ioc->hi_priority_smid;
- ioc->hpr_lookup[i].cb_idx = 0xFF;
- list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
- } else if (smid <= ioc->hba_queue_depth) {
- i = smid - ioc->internal_smid;
- ioc->internal_lookup[i].cb_idx = 0xFF;
- list_add(&ioc->internal_lookup[i].tracker_list,
- &ioc->internal_free_list);
- }
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-}
-
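-/*
- * Request descriptors are 64 bits wide and must reach the chip
- * atomically.  Without a native writeq() the two 32-bit halves are
- * written under a spinlock so concurrent posts cannot interleave.
- */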
-#if defined(writeq) && defined(CONFIG_64BIT)
-static inline void
-leapioraid_base_writeq(
- __u64 b, void __iomem *addr, spinlock_t *writeq_lock)
-{
- writeq(b, addr);
-}
-#else
-static inline void
-leapioraid_base_writeq(
- __u64 b, void __iomem *addr, spinlock_t *writeq_lock)
-{
- unsigned long flags;
- __u64 data_out = b;
-
- spin_lock_irqsave(writeq_lock, flags);
- writel((u32) (data_out), addr);
- writel((u32) (data_out >> 32), (addr + 4));
- spin_unlock_irqrestore(writeq_lock, flags);
-}
-#endif
-
-static u8
-leapioraid_base_set_and_get_msix_index(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid)
-{
- struct leapioraid_scsiio_tracker *st;
-
- st = (smid < ioc->hi_priority_smid) ?
- leapioraid_get_st_from_smid(ioc, smid) : NULL;
- if (st == NULL)
- return leapioraid_base_get_msix_index(ioc, NULL);
- st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
- return st->msix_io;
-}
-
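-/*
- * Descriptor posting helpers: the 64-bit descriptors below go through
- * RequestDescriptorPostLow, while the *_atomic variants post a packed
- * 32-bit descriptor to AtomicRequestDescriptorPost instead.
- */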
-static void
-leapioraid_base_put_smid_scsi_io(struct LEAPIORAID_ADAPTER *ioc, u16 smid,
- u16 handle)
-{
- union LeapioraidReqDescUnion_t descriptor;
- u64 *request = (u64 *) &descriptor;
-
- descriptor.SCSIIO.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_SCSI_IO;
- descriptor.SCSIIO.MSIxIndex
- = leapioraid_base_set_and_get_msix_index(ioc, smid);
- descriptor.SCSIIO.SMID = cpu_to_le16(smid);
- descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
- descriptor.SCSIIO.LMID = 0;
- leapioraid_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
- &ioc->scsi_lookup_lock);
-}
-
-static void
-leapioraid_base_put_smid_fast_path(struct LEAPIORAID_ADAPTER *ioc, u16 smid,
- u16 handle)
-{
- union LeapioraidReqDescUnion_t descriptor;
- u64 *request = (u64 *) &descriptor;
-
- descriptor.SCSIIO.RequestFlags =
- LEAPIORAID_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
- descriptor.SCSIIO.MSIxIndex
- = leapioraid_base_set_and_get_msix_index(ioc, smid);
- descriptor.SCSIIO.SMID = cpu_to_le16(smid);
- descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
- descriptor.SCSIIO.LMID = 0;
- leapioraid_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
- &ioc->scsi_lookup_lock);
-}
-
-static void
-leapioraid_base_put_smid_hi_priority(struct LEAPIORAID_ADAPTER *ioc, u16 smid,
- u16 msix_task)
-{
- union LeapioraidReqDescUnion_t descriptor;
- u64 *request;
-
- request = (u64 *) &descriptor;
- descriptor.HighPriority.RequestFlags =
- LEAPIORAID_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
- descriptor.HighPriority.MSIxIndex = msix_task;
- descriptor.HighPriority.SMID = cpu_to_le16(smid);
- descriptor.HighPriority.LMID = 0;
- descriptor.HighPriority.Reserved1 = 0;
- leapioraid_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
- &ioc->scsi_lookup_lock);
-}
-
-static void
-leapioraid_base_put_smid_default(struct LEAPIORAID_ADAPTER *ioc, u16 smid)
-{
- union LeapioraidReqDescUnion_t descriptor;
- u64 *request;
-
- request = (u64 *) &descriptor;
- descriptor.Default.RequestFlags =
- LEAPIORAID_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
- descriptor.Default.MSIxIndex
- = leapioraid_base_set_and_get_msix_index(ioc, smid);
- descriptor.Default.SMID = cpu_to_le16(smid);
- descriptor.Default.LMID = 0;
- descriptor.Default.DescriptorTypeDependent = 0;
- leapioraid_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
- &ioc->scsi_lookup_lock);
-}
-
-static void
-leapioraid_base_put_smid_scsi_io_atomic(struct LEAPIORAID_ADAPTER *ioc,
- u16 smid, u16 handle)
-{
- struct LeapioraidAtomicReqDesc_t descriptor;
- u32 *request = (u32 *) &descriptor;
-
- descriptor.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_SCSI_IO;
- descriptor.MSIxIndex = leapioraid_base_set_and_get_msix_index(ioc, smid);
- descriptor.SMID = cpu_to_le16(smid);
- writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
-}
-
-static void
-leapioraid_base_put_smid_fast_path_atomic(struct LEAPIORAID_ADAPTER *ioc,
- u16 smid, u16 handle)
-{
- struct LeapioraidAtomicReqDesc_t descriptor;
- u32 *request = (u32 *) &descriptor;
-
- descriptor.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
- descriptor.MSIxIndex = leapioraid_base_set_and_get_msix_index(ioc, smid);
- descriptor.SMID = cpu_to_le16(smid);
- writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
-}
-
-static void
-leapioraid_base_put_smid_hi_priority_atomic(struct LEAPIORAID_ADAPTER *ioc,
- u16 smid, u16 msix_task)
-{
- struct LeapioraidAtomicReqDesc_t descriptor;
- u32 *request = (u32 *) &descriptor;
-
- descriptor.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
- descriptor.MSIxIndex = msix_task;
- descriptor.SMID = cpu_to_le16(smid);
- writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
-}
-
-static void
-leapioraid_base_put_smid_default_atomic(struct LEAPIORAID_ADAPTER *ioc,
- u16 smid)
-{
- struct LeapioraidAtomicReqDesc_t descriptor;
- u32 *request = (u32 *)(&descriptor);
-
- descriptor.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
- descriptor.MSIxIndex = leapioraid_base_set_and_get_msix_index(ioc, smid);
- descriptor.SMID = cpu_to_le16(smid);
- writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
-}
-
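-/*
- * Upload just the firmware image header (FW_UPLOAD) in order to
- * decode and log the package version.
- */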
-static int
-leapioraid_base_display_fwpkg_version(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct LeapioraidFWImgHeader_t *fw_img_hdr;
- struct LeapioraidComptImgHeader_t *cmp_img_hdr;
- struct LeapioraidFWUploadReq_t *mpi_request;
- struct LeapioraidFWUploadRep_t mpi_reply;
- int r = 0, issue_diag_reset = 0;
- u32 package_version = 0;
- void *fwpkg_data = NULL;
- dma_addr_t fwpkg_data_dma;
- u16 smid, ioc_status;
- size_t data_length;
-
- dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
- __func__));
- if (ioc->base_cmds.status & LEAPIORAID_CMD_PENDING) {
- pr_err("%s %s: internal command already in use\n", ioc->name,
- __func__);
- return -EAGAIN;
- }
- data_length = sizeof(struct LeapioraidFWImgHeader_t);
- fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
- &fwpkg_data_dma, GFP_ATOMIC);
- if (!fwpkg_data)
- return -ENOMEM;
-
- smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx);
- if (!smid) {
- pr_err("%s %s: failed obtaining a smid\n",
- ioc->name, __func__);
- r = -EAGAIN;
- goto out;
- }
- ioc->base_cmds.status = LEAPIORAID_CMD_PENDING;
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- ioc->base_cmds.smid = smid;
- memset(mpi_request, 0, sizeof(struct LeapioraidFWUploadReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_FW_UPLOAD;
- mpi_request->ImageType = 0x01;
- mpi_request->ImageSize = cpu_to_le32(data_length); /* wire format is little-endian, as with the other multi-byte fields */
- ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
- data_length);
- init_completion(&ioc->base_cmds.done);
- ioc->put_smid_default(ioc, smid);
- wait_for_completion_timeout(&ioc->base_cmds.done, 15 * HZ);
- dinitprintk(ioc, pr_info("%s %s: complete\n",
- ioc->name, __func__));
- if (!(ioc->base_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
- pr_err("%s %s: timeout\n",
- ioc->name, __func__);
- leapioraid_debug_dump_mf(mpi_request,
- sizeof(struct LeapioraidFWUploadReq_t) / 4);
- issue_diag_reset = 1;
- } else {
- memset(&mpi_reply, 0, sizeof(struct LeapioraidFWUploadRep_t));
- if (ioc->base_cmds.status & LEAPIORAID_CMD_REPLY_VALID) {
- memcpy(&mpi_reply, ioc->base_cmds.reply,
- sizeof(struct LeapioraidFWUploadRep_t));
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
- LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS) {
- fw_img_hdr =
- (struct LeapioraidFWImgHeader_t *) fwpkg_data;
- if (le32_to_cpu(fw_img_hdr->Signature) ==
- 0xEB000042) {
- cmp_img_hdr =
- (struct LeapioraidComptImgHeader_t
- *) (fwpkg_data);
- package_version =
- le32_to_cpu(cmp_img_hdr->ApplicationSpecific);
- } else
- package_version =
- le32_to_cpu(fw_img_hdr->PackageVersion.Word);
- if (package_version)
- pr_err(
- "%s FW Package Version(%02d.%02d.%02d.%02d)\n",
- ioc->name,
- ((package_version) & 0xFF000000)
- >> 24,
- ((package_version) & 0x00FF0000)
- >> 16,
- ((package_version) & 0x0000FF00)
- >> 8,
- (package_version) & 0x000000FF);
- } else {
- leapioraid_debug_dump_mf(&mpi_reply,
- sizeof(struct LeapioraidFWUploadRep_t) /
- 4);
- }
- }
- }
- ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED;
-out:
- if (fwpkg_data)
- dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
- fwpkg_data_dma);
- if (issue_diag_reset) {
- if (ioc->drv_internal_flags & LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED)
- return -EFAULT;
- if (leapioraid_base_check_for_fault_and_issue_reset(ioc))
- return -EFAULT;
- r = -EAGAIN;
- }
- return r;
-}
-
-static void
-leapioraid_base_display_ioc_capabilities(struct LEAPIORAID_ADAPTER *ioc)
-{
- int i = 0;
- char desc[17] = { 0 };
- u8 revision;
- u32 iounit_pg1_flags;
-
- pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
- strscpy(desc, ioc->manu_pg0.ChipName, sizeof(desc));
- pr_info("%s %s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x)\n",
- ioc->name, desc,
- (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
- (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
- (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
- ioc->facts.FWVersion.Word & 0x000000FF, revision);
- pr_info("%s Protocol=(", ioc->name);
- if (ioc->facts.ProtocolFlags & LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
- pr_info("Initiator");
- i++;
- }
- if (ioc->facts.ProtocolFlags & LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_TARGET) {
- pr_info("%sTarget", i ? "," : "");
- i++;
- }
- i = 0;
- pr_info("), ");
- pr_info("Capabilities=(");
- if ((!ioc->warpdrive_msg) && (ioc->facts.IOCCapabilities &
- LEAPIORAID_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) {
- pr_info("Raid");
- i++;
- }
- if (ioc->facts.IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_TLR) {
- pr_info("%sTLR", i ? "," : "");
- i++;
- }
- if (ioc->facts.IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_MULTICAST) {
- pr_info("%sMulticast", i ? "," : "");
- i++;
- }
- if (ioc->facts.IOCCapabilities &
- LEAPIORAID_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
- pr_info("%sBIDI Target", i ? "," : "");
- i++;
- }
- if (ioc->facts.IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_EEDP) {
- pr_info("%sEEDP", i ? "," : "");
- i++;
- }
- if (ioc->facts.IOCCapabilities &
- LEAPIORAID_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
- pr_info("%sTask Set Full", i ? "," : "");
- i++;
- }
- iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
- if (!(iounit_pg1_flags & LEAPIORAID_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
- pr_info("%sNCQ", i ? "," : "");
- i++;
- }
- pr_info(")\n");
-}
-
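-/*
- * Apply the selected perf_mode to IOC page 1 interrupt coalescing:
- * balanced coalesces only the high-IOPS queues, latency enables
- * coalescing with a short fixed timeout, iops keeps the
- * firmware-reported timeout.
- */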
-static int
-leapioraid_base_update_ioc_page1_inlinewith_perf_mode(
- struct LEAPIORAID_ADAPTER *ioc)
-{
- struct LeapioraidIOCP1_t ioc_pg1;
- struct LeapioraidCfgRep_t mpi_reply;
- int rc;
-
- rc = leapioraid_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
- if (rc)
- return rc;
- memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(struct LeapioraidIOCP1_t));
- switch (perf_mode) {
- case LEAPIORAID_PERF_MODE_DEFAULT:
- case LEAPIORAID_PERF_MODE_BALANCED:
- if (ioc->high_iops_queues) {
- pr_err(
- "%s Enable int coalescing only for first %d reply queues\n",
- ioc->name, LEAPIORAID_HIGH_IOPS_REPLY_QUEUES);
- ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
- ((1 <<
- LEAPIORAID_HIGH_IOPS_REPLY_QUEUES
- / 8) - 1));
- rc = leapioraid_config_set_ioc_pg1(ioc, &mpi_reply,
- &ioc_pg1);
- if (rc)
- return rc;
- pr_err("%s performance mode: balanced\n", ioc->name);
- return 0;
- }
- fallthrough;
- case LEAPIORAID_PERF_MODE_LATENCY:
- ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
- ioc_pg1.Flags |= cpu_to_le32(0x00000001);
- ioc_pg1.ProductSpecific = 0;
- rc = leapioraid_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
- if (rc)
- return rc;
- pr_err("%s performance mode: latency\n", ioc->name);
- break;
- case LEAPIORAID_PERF_MODE_IOPS:
- pr_err(
- "%s performance mode: iops with coalescing timeout: 0x%x\n",
- ioc->name, le32_to_cpu(ioc_pg1.CoalescingTimeout));
- ioc_pg1.Flags |= cpu_to_le32(0x00000001);
- ioc_pg1.ProductSpecific = 0;
- rc = leapioraid_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
- if (rc)
- return rc;
- break;
- }
- return 0;
-}
-
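-/*
- * Adopt the wide-port, narrow-port and SATA queue depths reported in
- * SAS IO unit page 1, keeping the driver defaults when the page reads
- * back zero or cannot be fetched.
- */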
-static int
-leapioraid_base_assign_fw_reported_qd(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct LeapioraidCfgRep_t mpi_reply;
- struct LeapioraidSasIOUnitP1_t *sas_iounit_pg1 = NULL;
- int sz;
- int rc = 0;
-
- ioc->max_wideport_qd = LEAPIORAID_SAS_QUEUE_DEPTH;
- ioc->max_narrowport_qd = LEAPIORAID_SAS_QUEUE_DEPTH;
- ioc->max_sata_qd = LEAPIORAID_SATA_QUEUE_DEPTH;
-
- sz = offsetof(struct LeapioraidSasIOUnitP1_t, PhyData);
- sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
- if (!sas_iounit_pg1) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return rc;
- }
- rc = leapioraid_config_get_sas_iounit_pg1(ioc, &mpi_reply,
- sas_iounit_pg1, sz);
- if (rc) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out;
- }
- ioc->max_wideport_qd =
- (le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth)) ?
- le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth) :
- LEAPIORAID_SAS_QUEUE_DEPTH;
- ioc->max_narrowport_qd =
- (le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth)) ?
- le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth) :
- LEAPIORAID_SAS_QUEUE_DEPTH;
- ioc->max_sata_qd = (sas_iounit_pg1->SATAMaxQDepth) ?
- sas_iounit_pg1->SATAMaxQDepth : LEAPIORAID_SATA_QUEUE_DEPTH;
-out:
- dinitprintk(ioc, pr_err(
- "%s MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x\n",
- ioc->name, ioc->max_wideport_qd,
- ioc->max_narrowport_qd, ioc->max_sata_qd));
- kfree(sas_iounit_pg1);
- return rc;
-}
-
-static int
-leapioraid_base_static_config_pages(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct LeapioraidCfgRep_t mpi_reply;
- u32 iounit_pg1_flags;
- int rc;
-
- rc = leapioraid_config_get_manufacturing_pg0(ioc, &mpi_reply,
- &ioc->manu_pg0);
- if (rc)
- return rc;
- if (ioc->ir_firmware) {
- rc = leapioraid_config_get_manufacturing_pg10(ioc, &mpi_reply,
- &ioc->manu_pg10);
- if (rc)
- return rc;
- }
- rc = leapioraid_config_get_manufacturing_pg11(ioc, &mpi_reply,
- &ioc->manu_pg11);
- if (rc)
- return rc;
-
- ioc->time_sync_interval =
- ioc->manu_pg11.TimeSyncInterval & 0x7F;
- if (ioc->time_sync_interval) {
- if (ioc->manu_pg11.TimeSyncInterval & 0x80)
- ioc->time_sync_interval =
- ioc->time_sync_interval * 3600;
- else
- ioc->time_sync_interval =
- ioc->time_sync_interval * 60;
- dinitprintk(ioc, pr_info(
- "%s Driver-FW TimeSync interval is %d seconds.\n\t\t"
- "ManuPg11 TimeSync Unit is in %s's",
- ioc->name,
- ioc->time_sync_interval,
- ((ioc->manu_pg11.TimeSyncInterval & 0x80)
- ? "Hour" : "Minute")));
- }
- rc = leapioraid_base_assign_fw_reported_qd(ioc);
- if (rc)
- return rc;
- rc = leapioraid_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
- if (rc)
- return rc;
- rc = leapioraid_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
- if (rc)
- return rc;
- rc = leapioraid_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
- if (rc)
- return rc;
- rc = leapioraid_config_get_iounit_pg0(ioc, &mpi_reply,
- &ioc->iounit_pg0);
- if (rc)
- return rc;
- rc = leapioraid_config_get_iounit_pg1(ioc, &mpi_reply,
- &ioc->iounit_pg1);
- if (rc)
- return rc;
- rc = leapioraid_config_get_iounit_pg8(ioc, &mpi_reply,
- &ioc->iounit_pg8);
- if (rc)
- return rc;
- leapioraid_base_display_ioc_capabilities(ioc);
- iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
- if ((ioc->facts.IOCCapabilities &
- LEAPIORAID_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
- iounit_pg1_flags &=
- ~LEAPIORAID_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
- else
- iounit_pg1_flags |=
- LEAPIORAID_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
- ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
- rc = leapioraid_config_set_iounit_pg1(ioc, &mpi_reply,
- &ioc->iounit_pg1);
- if (rc)
- return rc;
- if (ioc->iounit_pg8.NumSensors)
- ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
-
- rc = leapioraid_base_update_ioc_page1_inlinewith_perf_mode(ioc);
- if (rc)
- return rc;
-
- return 0;
-}
-
-void
-leapioraid_free_enclosure_list(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct leapioraid_enclosure_node *enclosure_dev, *enclosure_dev_next;
-
- list_for_each_entry_safe(enclosure_dev,
- enclosure_dev_next, &ioc->enclosure_list,
- list) {
- list_del(&enclosure_dev->list);
- kfree(enclosure_dev);
- }
-}
-
-static void
-leapioraid_base_release_memory_pools(struct LEAPIORAID_ADAPTER *ioc)
-{
- int i, j;
- int dma_alloc_count = 0;
- struct leapioraid_chain_tracker *ct;
- int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
-
- dexitprintk(ioc, pr_info("%s %s\n", ioc->name,
- __func__));
- if (ioc->request) {
- dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
- ioc->request, ioc->request_dma);
- dexitprintk(ioc,
- pr_info("%s request_pool(0x%p): free\n",
- ioc->name, ioc->request));
- ioc->request = NULL;
- }
- if (ioc->sense) {
- dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
- dma_pool_destroy(ioc->sense_dma_pool);
- dexitprintk(ioc, pr_info("%s sense_pool(0x%p): free\n",
- ioc->name, ioc->sense));
- ioc->sense = NULL;
- }
- if (ioc->reply) {
- dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
- dma_pool_destroy(ioc->reply_dma_pool);
- dexitprintk(ioc, pr_info("%s reply_pool(0x%p): free\n",
- ioc->name, ioc->reply));
- ioc->reply = NULL;
- }
- if (ioc->reply_free) {
- dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
- ioc->reply_free_dma);
- dma_pool_destroy(ioc->reply_free_dma_pool);
- dexitprintk(ioc, pr_info("%s reply_free_pool(0x%p): free\n",
- ioc->name, ioc->reply_free));
- ioc->reply_free = NULL;
- }
- if (ioc->reply_post) {
- dma_alloc_count = DIV_ROUND_UP(count,
- LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK);
- for (i = 0; i < count; i++) {
- if (i % LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
- && dma_alloc_count) {
- if (ioc->reply_post[i].reply_post_free) {
- dma_pool_free(ioc->reply_post_free_dma_pool,
- ioc->reply_post[i].reply_post_free,
- ioc->reply_post[i].reply_post_free_dma);
- pr_err(
- "%s reply_post_free_pool(0x%p): free\n",
- ioc->name,
- ioc->reply_post[i].reply_post_free);
- ioc->reply_post[i].reply_post_free =
- NULL;
- }
- --dma_alloc_count;
- }
- }
- dma_pool_destroy(ioc->reply_post_free_dma_pool);
- if (ioc->reply_post_free_array && ioc->rdpq_array_enable) {
- dma_pool_free(ioc->reply_post_free_array_dma_pool,
- ioc->reply_post_free_array,
- ioc->reply_post_free_array_dma);
- ioc->reply_post_free_array = NULL;
- }
- dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
- kfree(ioc->reply_post);
- }
- if (ioc->config_page) {
- dexitprintk(ioc, pr_err(
- "%s config_page(0x%p): free\n", ioc->name,
- ioc->config_page));
- dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
- ioc->config_page, ioc->config_page_dma);
- }
- kfree(ioc->hpr_lookup);
- kfree(ioc->internal_lookup);
- if (ioc->chain_lookup) {
- for (i = 0; i < ioc->scsiio_depth; i++) {
- for (j = ioc->chains_per_prp_buffer;
- j < ioc->chains_needed_per_io; j++) {
- ct = &ioc->chain_lookup[i].chains_per_smid[j];
- if (ct && ct->chain_buffer)
- dma_pool_free(ioc->chain_dma_pool,
- ct->chain_buffer,
- ct->chain_buffer_dma);
- }
- kfree(ioc->chain_lookup[i].chains_per_smid);
- }
- dma_pool_destroy(ioc->chain_dma_pool);
- kfree(ioc->chain_lookup);
- ioc->chain_lookup = NULL;
- }
- kfree(ioc->io_queue_num);
- ioc->io_queue_num = NULL;
-}
-
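-/*
- * Pools handed to the firmware must not straddle a 4 GB boundary:
- * the upper 32 address bits have to match for the first and last
- * byte of the pool.
- */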
-static int
-leapioraid_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz)
-{
- dma_addr_t end_address;
-
- end_address = start_address + pool_sz - 1;
- if (upper_32_bits(start_address) == upper_32_bits(end_address))
- return 1;
- else
- return 0;
-}
-
-static inline int
-leapioraid_base_reduce_hba_queue_depth(struct LEAPIORAID_ADAPTER *ioc)
-{
- int reduce_sz = 64;
-
- if ((ioc->hba_queue_depth - reduce_sz) >
- (ioc->internal_depth + LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) {
- ioc->hba_queue_depth -= reduce_sz;
- return 0;
- } else
- return -ENOMEM;
-}
-
-static int
-leapioraid_base_allocate_reply_post_free_array(struct LEAPIORAID_ADAPTER *ioc,
- int reply_post_free_array_sz)
-{
- ioc->reply_post_free_array_dma_pool =
- dma_pool_create("reply_post_free_array pool",
- &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
- if (!ioc->reply_post_free_array_dma_pool) {
- dinitprintk(ioc,
- pr_err
- ("reply_post_free_array pool: dma_pool_create failed\n"));
- return -ENOMEM;
- }
- ioc->reply_post_free_array =
- dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
- GFP_KERNEL, &ioc->reply_post_free_array_dma);
- if (!ioc->reply_post_free_array) {
- dinitprintk(ioc,
- pr_err
- ("reply_post_free_array pool: dma_pool_alloc failed\n"));
- return -EAGAIN;
- }
- if (!leapioraid_check_same_4gb_region(ioc->reply_post_free_array_dma,
- reply_post_free_array_sz)) {
- dinitprintk(ioc, pr_err(
- "Bad reply_post_free_array! (0x%p)\n\t\t"
- "reply_post_free_array dma = (0x%llx)\n",
- ioc->reply_post_free_array,
- (unsigned long long)ioc->reply_post_free_array_dma));
- ioc->use_32bit_dma = 1;
- return -EAGAIN;
- }
- return 0;
-}
-
-static int
-base_alloc_rdpq_dma_pool(struct LEAPIORAID_ADAPTER *ioc, int sz)
-{
- int i = 0;
- u32 dma_alloc_count = 0;
- int reply_post_free_sz = ioc->reply_post_queue_depth *
- sizeof(struct LeapioraidDefaultRepDesc_t);
- int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
-
- ioc->reply_post =
- kcalloc(count, sizeof(struct leapioraid_reply_post_struct), GFP_KERNEL);
- if (!ioc->reply_post) {
- pr_err("%s reply_post_free pool: kcalloc failed\n", ioc->name);
- return -ENOMEM;
- }
- dma_alloc_count = DIV_ROUND_UP(
- count, LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK);
- ioc->reply_post_free_dma_pool =
- dma_pool_create("reply_post_free pool", &ioc->pdev->dev, sz, 16, 0);
- if (!ioc->reply_post_free_dma_pool) {
- pr_err("reply_post_free pool: dma_pool_create failed\n");
- return -ENOMEM;
- }
- for (i = 0; i < count; i++) {
- if ((i % LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
- ioc->reply_post[i].reply_post_free =
- dma_pool_zalloc(ioc->reply_post_free_dma_pool,
- GFP_KERNEL,
- &ioc->reply_post[i].reply_post_free_dma);
- if (!ioc->reply_post[i].reply_post_free) {
- pr_err("reply_post_free pool: dma_pool_alloc failed\n");
- return -EAGAIN;
- }
- if (!leapioraid_check_same_4gb_region
- (ioc->reply_post[i].reply_post_free_dma, sz)) {
- dinitprintk(ioc, pr_err(
- "%s bad Replypost free pool(0x%p) dma = (0x%llx)\n",
- ioc->name,
- ioc->reply_post[i].reply_post_free,
- (unsigned long long)
- ioc->reply_post[i].reply_post_free_dma));
- ioc->use_32bit_dma = 1;
- return -EAGAIN;
- }
- dma_alloc_count--;
- } else {
- ioc->reply_post[i].reply_post_free =
- (union LeapioraidRepDescUnion_t *)
- ((long)ioc->reply_post[i - 1].reply_post_free
- + reply_post_free_sz);
- ioc->reply_post[i].reply_post_free_dma = (dma_addr_t)
- (ioc->reply_post[i - 1].reply_post_free_dma +
- reply_post_free_sz);
- }
- }
- return 0;
-}
-
-static int
-leapioraid_base_allocate_chain_dma_pool(struct LEAPIORAID_ADAPTER *ioc, int sz)
-{
- int i = 0, j = 0;
- struct leapioraid_chain_tracker *ctr;
-
- ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
- ioc->chain_segment_sz, 16, 0);
- if (!ioc->chain_dma_pool) {
- pr_err("%s chain_dma_pool: dma_pool_create failed\n", ioc->name);
- return -ENOMEM;
- }
- for (i = 0; i < ioc->scsiio_depth; i++) {
- for (j = ioc->chains_per_prp_buffer;
- j < ioc->chains_needed_per_io; j++) {
- ctr = &ioc->chain_lookup[i].chains_per_smid[j];
- ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool,
- GFP_KERNEL,
- &ctr->chain_buffer_dma);
- if (!ctr->chain_buffer)
- return -EAGAIN;
- if (!leapioraid_check_same_4gb_region
- (ctr->chain_buffer_dma, ioc->chain_segment_sz)) {
- pr_err(
- "%s buffers not in same 4G! buff=(0x%p) dma=(0x%llx)\n",
- ioc->name,
- ctr->chain_buffer,
- (unsigned long long)ctr->chain_buffer_dma);
- ioc->use_32bit_dma = 1;
- return -EAGAIN;
- }
- }
- }
- dinitprintk(ioc, pr_info(
- "%s chain_lookup depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->name, ioc->scsiio_depth,
- ioc->chain_segment_sz,
- ((ioc->scsiio_depth *
- (ioc->chains_needed_per_io -
- ioc->chains_per_prp_buffer) *
- ioc->chain_segment_sz)) / 1024));
- return 0;
-}
-
-static int
-leapioraid_base_allocate_sense_dma_pool(struct LEAPIORAID_ADAPTER *ioc, int sz)
-{
- ioc->sense_dma_pool =
- dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0);
- if (!ioc->sense_dma_pool) {
- pr_err("%s sense pool: dma_pool_create failed\n", ioc->name);
- return -ENOMEM;
- }
- ioc->sense = dma_pool_alloc(ioc->sense_dma_pool,
- GFP_KERNEL, &ioc->sense_dma);
- if (!ioc->sense) {
- pr_err("%s sense pool: dma_pool_alloc failed\n", ioc->name);
- return -EAGAIN;
- }
- if (!leapioraid_check_same_4gb_region(ioc->sense_dma, sz)) {
- dinitprintk(ioc,
- pr_err("Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n",
- ioc->sense,
- (unsigned long long)ioc->sense_dma));
- ioc->use_32bit_dma = 1;
- return -EAGAIN;
- }
- pr_err(
- "%s sense pool(0x%p) - dma(0x%llx): depth(%d),\n\t\t"
- "element_size(%d), pool_size (%d kB)\n",
- ioc->name,
- ioc->sense,
- (unsigned long long)ioc->sense_dma,
- ioc->scsiio_depth,
- SCSI_SENSE_BUFFERSIZE, sz / 1024);
- return 0;
-}
-
-static int
-leapioraid_base_allocate_reply_free_dma_pool(struct LEAPIORAID_ADAPTER *ioc,
- int sz)
-{
- ioc->reply_free_dma_pool =
- dma_pool_create("reply_free pool", &ioc->pdev->dev, sz, 16, 0);
- if (!ioc->reply_free_dma_pool) {
- pr_err("%s reply_free pool: dma_pool_create failed\n", ioc->name);
- return -ENOMEM;
- }
- ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool,
- GFP_KERNEL, &ioc->reply_free_dma);
- if (!ioc->reply_free) {
- pr_err("%s reply_free pool: dma_pool_alloc failed\n", ioc->name);
- return -EAGAIN;
- }
- if (!leapioraid_check_same_4gb_region(ioc->reply_free_dma, sz)) {
- dinitprintk(ioc, pr_err(
- "Bad Reply Free Pool! Reply Free (0x%p)\n\t\t"
- "Reply Free dma = (0x%llx)\n",
- ioc->reply_free,
- (unsigned long long)ioc->reply_free_dma));
- ioc->use_32bit_dma = 1;
- return -EAGAIN;
- }
- memset(ioc->reply_free, 0, sz);
- dinitprintk(ioc, pr_info(
- "%s reply_free pool(0x%p): depth(%d),\n\t\t"
- "element_size(%d), pool_size(%d kB)\n",
- ioc->name,
- ioc->reply_free,
- ioc->reply_free_queue_depth, 4,
- sz / 1024));
- dinitprintk(ioc,
- pr_info("%s reply_free_dma (0x%llx)\n",
- ioc->name, (unsigned long long)ioc->reply_free_dma));
- return 0;
-}
-
-static int
-leapioraid_base_allocate_reply_pool(struct LEAPIORAID_ADAPTER *ioc, int sz)
-{
- ioc->reply_dma_pool = dma_pool_create("reply pool",
- &ioc->pdev->dev, sz, 4, 0);
- if (!ioc->reply_dma_pool) {
- pr_err("%s reply pool: dma_pool_create failed\n", ioc->name);
- return -ENOMEM;
- }
- ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
- &ioc->reply_dma);
- if (!ioc->reply) {
- pr_err("%s reply pool: dma_pool_alloc failed\n", ioc->name);
- return -EAGAIN;
- }
- if (!leapioraid_check_same_4gb_region(ioc->reply_dma, sz)) {
- dinitprintk(ioc,
- pr_err("Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n",
- ioc->reply,
- (unsigned long long)ioc->reply_dma));
- ioc->use_32bit_dma = 1;
- return -EAGAIN;
- }
- ioc->reply_dma_min_address = (u32) (ioc->reply_dma);
- ioc->reply_dma_max_address = (u32) (ioc->reply_dma) + sz;
- pr_err(
- "%s reply pool(0x%p) - dma(0x%llx): depth(%d)\n\t\t"
- "frame_size(%d), pool_size(%d kB)\n",
- ioc->name,
- ioc->reply,
- (unsigned long long)ioc->reply_dma,
- ioc->reply_free_queue_depth,
- ioc->reply_sz,
- sz / 1024);
- return 0;
-}
-
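-/*
- * Size and allocate every host-side pool (request frames, chains,
- * sense buffers, reply frames and the reply free/post queues) from
- * the IOC facts, shrinking the HBA queue depth and retrying when the
- * large request-frame allocation fails.
- */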
-static int
-leapioraid_base_allocate_memory_pools(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct leapioraid_facts *facts;
- u16 max_sge_elements;
- u16 chains_needed_per_io;
- u32 sz, total_sz, reply_post_free_sz, rc = 0;
- u32 retry_sz;
- u32 rdpq_sz = 0, sense_sz = 0, reply_post_free_array_sz = 0;
- u16 max_request_credit;
- unsigned short sg_tablesize;
- u16 sge_size;
- int i = 0;
-
- dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
- __func__));
- retry_sz = 0;
- facts = &ioc->facts;
- sg_tablesize = LEAPIORAID_SG_DEPTH;
- if (reset_devices)
- sg_tablesize = min_t(unsigned short, sg_tablesize,
- LEAPIORAID_KDUMP_MIN_PHYS_SEGMENTS);
- if (sg_tablesize < LEAPIORAID_MIN_PHYS_SEGMENTS)
- sg_tablesize = LEAPIORAID_MIN_PHYS_SEGMENTS;
- else if (sg_tablesize > LEAPIORAID_MAX_PHYS_SEGMENTS) {
- sg_tablesize = min_t(unsigned short, sg_tablesize,
- LEAPIORAID_MAX_SG_SEGMENTS);
- pr_warn(
- "%s sg_tablesize(%u) is bigger than kernel defined %s(%u)\n",
- ioc->name,
- sg_tablesize, LEAPIORAID_MAX_PHYS_SEGMENTS_STRING,
- LEAPIORAID_MAX_PHYS_SEGMENTS);
- }
- ioc->shost->sg_tablesize = sg_tablesize;
- ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
- (facts->RequestCredit / 4));
- if (ioc->internal_depth < LEAPIORAID_INTERNAL_CMDS_COUNT) {
- if (facts->RequestCredit <= (LEAPIORAID_INTERNAL_CMDS_COUNT +
- LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) {
- pr_err(
- "%s RequestCredits not enough, it has %d credits\n",
- ioc->name,
- facts->RequestCredit);
- return -ENOMEM;
- }
- ioc->internal_depth = 10;
- }
- ioc->hi_priority_depth = ioc->internal_depth - (5);
- if (reset_devices)
- max_request_credit = min_t(u16, facts->RequestCredit,
- (LEAPIORAID_KDUMP_SCSI_IO_DEPTH +
- ioc->internal_depth));
- else
- max_request_credit = min_t(u16, facts->RequestCredit,
- LEAPIORAID_MAX_HBA_QUEUE_DEPTH);
-retry:
- ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
- ioc->request_sz = facts->IOCRequestFrameSize * 4;
- ioc->reply_sz = facts->ReplyFrameSize * 4;
- if (facts->IOCMaxChainSegmentSize)
- ioc->chain_segment_sz =
- facts->IOCMaxChainSegmentSize * LEAPIORAID_MAX_CHAIN_ELEMT_SZ;
- else
- ioc->chain_segment_sz =
- LEAPIORAID_DEFAULT_NUM_FWCHAIN_ELEMTS * LEAPIORAID_MAX_CHAIN_ELEMT_SZ;
- sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
-retry_allocation:
- total_sz = 0;
- max_sge_elements =
- ioc->request_sz -
- ((sizeof(struct LeapioraidSCSIIOReq_t) -
- sizeof(union LEAPIORAID_IEEE_SGE_IO_UNION)) + 2 * sge_size);
- ioc->max_sges_in_main_message = max_sge_elements / sge_size;
- max_sge_elements = ioc->chain_segment_sz - sge_size;
- ioc->max_sges_in_chain_message = max_sge_elements / sge_size;
- chains_needed_per_io = ((ioc->shost->sg_tablesize -
- ioc->max_sges_in_main_message) /
- ioc->max_sges_in_chain_message)
- + 1;
- if (chains_needed_per_io > facts->MaxChainDepth) {
- chains_needed_per_io = facts->MaxChainDepth;
- ioc->shost->sg_tablesize = min_t(u16,
- ioc->max_sges_in_main_message +
- (ioc->max_sges_in_chain_message *
- chains_needed_per_io),
- ioc->shost->sg_tablesize);
- }
- ioc->chains_needed_per_io = chains_needed_per_io;
- ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
- ioc->reply_post_queue_depth = ioc->hba_queue_depth +
- ioc->reply_free_queue_depth + 1;
- if (ioc->reply_post_queue_depth % 16)
- ioc->reply_post_queue_depth +=
- 16 - (ioc->reply_post_queue_depth % 16);
- if (ioc->reply_post_queue_depth >
- facts->MaxReplyDescriptorPostQueueDepth) {
- ioc->reply_post_queue_depth =
- facts->MaxReplyDescriptorPostQueueDepth -
- (facts->MaxReplyDescriptorPostQueueDepth % 16);
- ioc->hba_queue_depth =
- ((ioc->reply_post_queue_depth - 64) / 2) - 1;
- ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
- }
- pr_info(
- "%s scatter gather: sge_in_main_msg(%d),\n\t\t"
- "sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n",
- ioc->name,
- ioc->max_sges_in_main_message,
- ioc->max_sges_in_chain_message,
- ioc->shost->sg_tablesize,
- ioc->chains_needed_per_io);
- ioc->scsiio_depth = ioc->hba_queue_depth -
- ioc->hi_priority_depth - ioc->internal_depth;
- ioc->shost->can_queue =
- ioc->scsiio_depth - LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT;
- dinitprintk(ioc, pr_info("%s scsi host: can_queue depth (%d)\n", ioc->name,
- ioc->shost->can_queue));
- sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
- sz += (ioc->hi_priority_depth * ioc->request_sz);
- sz += (ioc->internal_depth * ioc->request_sz);
- ioc->request_dma_sz = sz;
- ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
- &ioc->request_dma, GFP_KERNEL);
- if (!ioc->request) {
- if (ioc->scsiio_depth < LEAPIORAID_SAS_QUEUE_DEPTH) {
- rc = -ENOMEM;
- goto out;
- }
- retry_sz = 64;
- if ((ioc->hba_queue_depth - retry_sz) >
- (ioc->internal_depth + LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) {
- ioc->hba_queue_depth -= retry_sz;
- goto retry_allocation;
- } else {
- rc = -ENOMEM;
- goto out;
- }
- }
- memset(ioc->request, 0, sz);
- if (retry_sz)
- pr_err(
- "%s request pool: dma_alloc_consistent succeed:\n\t\t"
- "hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
- ioc->name,
- ioc->hba_queue_depth,
- ioc->chains_needed_per_io,
- ioc->request_sz,
- sz / 1024);
- ioc->hi_priority =
- ioc->request + ((ioc->scsiio_depth + 1) * ioc->request_sz);
- ioc->hi_priority_dma =
- ioc->request_dma + ((ioc->scsiio_depth + 1) * ioc->request_sz);
- ioc->internal =
- ioc->hi_priority + (ioc->hi_priority_depth * ioc->request_sz);
- ioc->internal_dma =
- ioc->hi_priority_dma + (ioc->hi_priority_depth * ioc->request_sz);
- pr_info(
- "%s request pool(0x%p) - dma(0x%llx):\n\t\t"
- "depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->name,
- ioc->request,
- (unsigned long long)ioc->request_dma,
- ioc->hba_queue_depth,
- ioc->request_sz,
- (ioc->hba_queue_depth * ioc->request_sz) / 1024);
- total_sz += sz;
- ioc->io_queue_num = kcalloc(ioc->scsiio_depth, sizeof(u16), GFP_KERNEL);
- if (!ioc->io_queue_num) {
- rc = -ENOMEM;
- goto out;
- }
- dinitprintk(ioc, pr_info("%s scsiio(0x%p): depth(%d)\n",
- ioc->name, ioc->request, ioc->scsiio_depth));
- ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
- sizeof(struct leapioraid_request_tracker), GFP_KERNEL);
- if (!ioc->hpr_lookup) {
- rc = -ENOMEM;
- goto out;
- }
- ioc->hi_priority_smid = ioc->scsiio_depth + 1;
- dinitprintk(ioc, pr_info(
- "%s hi_priority(0x%p): depth(%d), start smid(%d)\n",
- ioc->name, ioc->hi_priority, ioc->hi_priority_depth,
- ioc->hi_priority_smid));
- ioc->internal_lookup =
- kcalloc(ioc->internal_depth, sizeof(struct leapioraid_request_tracker),
- GFP_KERNEL);
- if (!ioc->internal_lookup) {
- pr_err("%s internal_lookup: kcalloc failed\n",
- ioc->name);
- rc = -ENOMEM;
- goto out;
- }
- ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
- dinitprintk(ioc, pr_info(
- "%s internal(0x%p): depth(%d), start smid(%d)\n",
- ioc->name, ioc->internal, ioc->internal_depth,
- ioc->internal_smid));
- sz = ioc->scsiio_depth * sizeof(struct leapioraid_chain_lookup);
- ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
- if (!ioc->chain_lookup) {
- if ((max_request_credit - 64) >
- (ioc->internal_depth + LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) {
- max_request_credit -= 64;
- leapioraid_base_release_memory_pools(ioc);
- goto retry;
- } else {
- pr_err(
- "%s chain_lookup: __get_free_pages failed\n",
- ioc->name);
- rc = -ENOMEM;
- goto out;
- }
- }
- sz = ioc->chains_needed_per_io * sizeof(struct leapioraid_chain_tracker);
- for (i = 0; i < ioc->scsiio_depth; i++) {
- ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
- if (!ioc->chain_lookup[i].chains_per_smid) {
- if ((max_request_credit - 64) >
- (ioc->internal_depth +
- LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) {
- max_request_credit -= 64;
- leapioraid_base_release_memory_pools(ioc);
- goto retry;
- } else {
- pr_err("%s chain_lookup: kzalloc failed\n", ioc->name);
- rc = -ENOMEM;
- goto out;
- }
- }
- }
- ioc->chains_per_prp_buffer = 0;
- rc = leapioraid_base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz);
- if (rc == -ENOMEM)
- return -ENOMEM;
- else if (rc == -EAGAIN) {
- if (ioc->use_32bit_dma && ioc->dma_mask > 32)
- goto try_32bit_dma;
- else {
- if ((max_request_credit - 64) >
- (ioc->internal_depth +
- LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) {
- max_request_credit -= 64;
- leapioraid_base_release_memory_pools(ioc);
- goto retry_allocation;
- } else {
- pr_err("%s chain_lookup: dma_pool_alloc failed\n", ioc->name);
- return -ENOMEM;
- }
- }
- }
- total_sz += ioc->chain_segment_sz *
- ((ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) *
- ioc->scsiio_depth);
- sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
- rc = leapioraid_base_allocate_sense_dma_pool(ioc, sense_sz);
- if (rc == -ENOMEM)
- return -ENOMEM;
- else if (rc == -EAGAIN)
- goto try_32bit_dma;
- total_sz += sense_sz;
- sz = ioc->reply_free_queue_depth * ioc->reply_sz;
- rc = leapioraid_base_allocate_reply_pool(ioc, sz);
- if (rc == -ENOMEM)
- return -ENOMEM;
- else if (rc == -EAGAIN)
- goto try_32bit_dma;
- total_sz += sz;
- sz = ioc->reply_free_queue_depth * 4;
- rc = leapioraid_base_allocate_reply_free_dma_pool(ioc, sz);
- if (rc == -ENOMEM)
- return -ENOMEM;
- else if (rc == -EAGAIN)
- goto try_32bit_dma;
- total_sz += sz;
- reply_post_free_sz = ioc->reply_post_queue_depth *
- sizeof(struct LeapioraidDefaultRepDesc_t);
- rdpq_sz = reply_post_free_sz * LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK;
- if ((leapioraid_base_is_controller_msix_enabled(ioc)
- && !ioc->rdpq_array_enable)
- || (ioc->reply_queue_count < LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK))
- rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
- rc = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
- if (rc == -ENOMEM)
- return -ENOMEM;
- else if (rc == -EAGAIN)
- goto try_32bit_dma;
- else {
- if (ioc->rdpq_array_enable && rc == 0) {
- reply_post_free_array_sz = ioc->reply_queue_count *
- sizeof(struct LeapioraidIOCInitRDPQArrayEntry);
- rc = leapioraid_base_allocate_reply_post_free_array(
- ioc, reply_post_free_array_sz);
- if (rc == -ENOMEM)
- return -ENOMEM;
- else if (rc == -EAGAIN)
- goto try_32bit_dma;
- }
- }
- total_sz += rdpq_sz;
- ioc->config_page_sz = 512;
- ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
- ioc->config_page_sz,
- &ioc->config_page_dma,
- GFP_KERNEL);
- if (!ioc->config_page) {
- pr_err("%s config page: dma_pool_alloc failed\n", ioc->name);
- rc = -ENOMEM;
- goto out;
- }
- pr_err("%s config page(0x%p) - dma(0x%llx): size(%d)\n",
- ioc->name, ioc->config_page,
- (unsigned long long)ioc->config_page_dma,
- ioc->config_page_sz);
- total_sz += ioc->config_page_sz;
- pr_info("%s Allocated physical memory: size(%d kB)\n",
- ioc->name, total_sz / 1024);
- pr_info(
- "%s Current IOC Queue Depth(%d), Max Queue Depth(%d)\n",
- ioc->name,
- ioc->shost->can_queue,
- facts->RequestCredit);
- return 0;
-try_32bit_dma:
- leapioraid_base_release_memory_pools(ioc);
- if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) {
- if (leapioraid_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
- pr_err("Setting 32 bit coherent DMA mask Failed %s\n",
- pci_name(ioc->pdev));
- return -ENODEV;
- }
- } else if (leapioraid_base_reduce_hba_queue_depth(ioc) != 0)
- return -ENOMEM;
- goto retry_allocation;
-out:
- return rc;
-}
-
-static void
-leapioraid_base_flush_ios_and_panic(
- struct LEAPIORAID_ADAPTER *ioc, u16 fault_code)
-{
- ioc->adapter_over_temp = 1;
- leapioraid_base_stop_smart_polling(ioc);
- leapioraid_base_stop_watchdog(ioc);
- leapioraid_base_stop_hba_unplug_watchdog(ioc);
- leapioraid_base_pause_mq_polling(ioc);
- leapioraid_scsihost_flush_running_cmds(ioc);
- leapioraid_print_fault_code(ioc, fault_code);
-}
-
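-/*
- * Read the doorbell and return either the raw register value or, when
- * "cooked" is set, only the IOC state bits.  An over-temperature fault
- * is fatal: outstanding I/O is flushed and the system panics.
- */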
-u32
-leapioraid_base_get_iocstate(struct LEAPIORAID_ADAPTER *ioc, int cooked)
-{
- u32 s, sc;
-
- s = ioc->base_readl(
- &ioc->chip->Doorbell, LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY);
- sc = s & LEAPIORAID_IOC_STATE_MASK;
- if (sc != LEAPIORAID_IOC_STATE_MASK) {
- if ((sc == LEAPIORAID_IOC_STATE_FAULT) &&
- ((s & LEAPIORAID_DOORBELL_DATA_MASK) ==
- LEAPIORAID_IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED)) {
- leapioraid_base_flush_ios_and_panic(ioc,
- s &
- LEAPIORAID_DOORBELL_DATA_MASK);
- panic("TEMPERATURE FAULT: STOPPING; panic in %s\n",
- __func__);
- }
- }
- return cooked ? sc : s;
-}
-
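-/*
- * Request a message unit reset through the doorbell and wait for the
- * IOC to return to the READY state; only
- * LEAPIORAID_FUNC_IOC_MESSAGE_UNIT_RESET is a valid reset_type here.
- */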
-static int
-leapioraid_base_send_ioc_reset(
- struct LEAPIORAID_ADAPTER *ioc, u8 reset_type, int timeout)
-{
- u32 ioc_state;
- int r = 0;
- unsigned long flags;
-
- if (reset_type != LEAPIORAID_FUNC_IOC_MESSAGE_UNIT_RESET) {
- pr_err("%s %s: unknown reset_type\n",
- ioc->name, __func__);
- return -EFAULT;
- }
- if (!(ioc->facts.IOCCapabilities &
- LEAPIORAID_IOCFACTS_CAPABILITY_EVENT_REPLAY))
- return -EFAULT;
- pr_info("%s sending message unit reset !!\n",
- ioc->name);
- writel(reset_type << LEAPIORAID_DOORBELL_FUNCTION_SHIFT,
- &ioc->chip->Doorbell);
-	if (leapioraid_base_wait_for_doorbell_ack(ioc, 15))
- r = -EFAULT;
- ioc_state = leapioraid_base_get_iocstate(ioc, 0);
- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_COREDUMP
- && (ioc->is_driver_loading == 1
- || ioc->fault_reset_work_q == NULL)) {
- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
- leapioraid_base_coredump_info(ioc, ioc_state);
- leapioraid_base_wait_for_coredump_completion(ioc, __func__);
- r = -EFAULT;
- goto out;
- }
- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
- if (r != 0)
- goto out;
- ioc_state =
- leapioraid_base_wait_on_iocstate(ioc, LEAPIORAID_IOC_STATE_READY,
- timeout);
- if (ioc_state) {
- pr_err("%s %s: failed going to ready state (ioc_state=0x%x)\n",
- ioc->name, __func__, ioc_state);
- r = -EFAULT;
- goto out;
- }
-out:
- pr_info("%s message unit reset: %s\n",
- ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
- return r;
-}
-
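-/*
- * Poll the IOC state once per second until it reports OPERATIONAL,
- * giving up after wait_count polls or when the device is unplugged.
- */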
-int
-leapioraid_wait_for_ioc_to_operational(struct LEAPIORAID_ADAPTER *ioc,
- int wait_count)
-{
- int wait_state_count = 0;
- u32 ioc_state;
-
- if (leapioraid_base_pci_device_is_unplugged(ioc))
- return -EFAULT;
- ioc_state = leapioraid_base_get_iocstate(ioc, 1);
- while (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) {
- if (leapioraid_base_pci_device_is_unplugged(ioc))
- return -EFAULT;
- if (ioc->is_driver_loading)
- return -ETIME;
- if (wait_state_count++ == wait_count) {
- pr_err(
- "%s %s: failed due to ioc not operational\n",
- ioc->name, __func__);
- return -EFAULT;
- }
- ssleep(1);
- ioc_state = leapioraid_base_get_iocstate(ioc, 1);
- pr_info("%s %s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
- }
- if (wait_state_count)
- pr_info("%s %s: ioc is operational\n",
- ioc->name, __func__);
- return 0;
-}
-
-int
-leapioraid_base_sas_iounit_control(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidSasIoUnitControlRep_t *mpi_reply,
- struct LeapioraidSasIoUnitControlReq_t *mpi_request)
-{
- u16 smid;
- u8 issue_reset;
- int rc;
- void *request;
-
- dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
- __func__));
- mutex_lock(&ioc->base_cmds.mutex);
- if (ioc->base_cmds.status != LEAPIORAID_CMD_NOT_USED) {
- pr_err("%s %s: base_cmd in use\n",
- ioc->name, __func__);
- rc = -EAGAIN;
- goto out;
- }
- rc = leapioraid_wait_for_ioc_to_operational(ioc, 10);
- if (rc)
- goto out;
- smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx);
- if (!smid) {
- pr_err("%s %s: failed obtaining a smid\n",
- ioc->name, __func__);
- rc = -EAGAIN;
- goto out;
- }
- rc = 0;
- ioc->base_cmds.status = LEAPIORAID_CMD_PENDING;
- request = leapioraid_base_get_msg_frame(ioc, smid);
- ioc->base_cmds.smid = smid;
-	memset(request, 0, ioc->request_sz);
-	memcpy(request, mpi_request, sizeof(struct LeapioraidSasIoUnitControlReq_t));
- if (mpi_request->Operation == LEAPIORAID_SAS_OP_PHY_HARD_RESET ||
- mpi_request->Operation == LEAPIORAID_SAS_OP_PHY_LINK_RESET)
- ioc->ioc_link_reset_in_progress = 1;
- init_completion(&ioc->base_cmds.done);
- ioc->put_smid_default(ioc, smid);
- wait_for_completion_timeout(&ioc->base_cmds.done,
- msecs_to_jiffies(10000));
- if ((mpi_request->Operation == LEAPIORAID_SAS_OP_PHY_HARD_RESET ||
- mpi_request->Operation == LEAPIORAID_SAS_OP_PHY_LINK_RESET) &&
- ioc->ioc_link_reset_in_progress)
- ioc->ioc_link_reset_in_progress = 0;
- if (!(ioc->base_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
- leapioraid_check_cmd_timeout(ioc,
- ioc->base_cmds.status, mpi_request,
- sizeof
- (struct LeapioraidSasIoUnitControlReq_t)
- / 4, issue_reset);
- goto issue_host_reset;
- }
- if (ioc->base_cmds.status & LEAPIORAID_CMD_REPLY_VALID)
- memcpy(mpi_reply, ioc->base_cmds.reply,
- sizeof(struct LeapioraidSasIoUnitControlRep_t));
- else
- memset(mpi_reply, 0, sizeof(struct LeapioraidSasIoUnitControlRep_t));
- ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED;
- goto out;
-issue_host_reset:
- if (issue_reset)
- leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED;
- rc = -EFAULT;
-out:
- mutex_unlock(&ioc->base_cmds.mutex);
- return rc;
-}
-
-int
-leapioraid_base_scsi_enclosure_processor(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidSepRep_t *mpi_reply,
- struct LeapioraidSepReq_t *mpi_request)
-{
- u16 smid;
- u8 issue_reset;
- int rc;
- void *request;
-
- dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
- __func__));
- mutex_lock(&ioc->base_cmds.mutex);
- if (ioc->base_cmds.status != LEAPIORAID_CMD_NOT_USED) {
- pr_err("%s %s: base_cmd in use\n",
- ioc->name, __func__);
- rc = -EAGAIN;
- goto out;
- }
- rc = leapioraid_wait_for_ioc_to_operational(ioc, 10);
- if (rc)
- goto out;
- smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx);
- if (!smid) {
- pr_err("%s %s: failed obtaining a smid\n",
- ioc->name, __func__);
- rc = -EAGAIN;
- goto out;
- }
- rc = 0;
- ioc->base_cmds.status = LEAPIORAID_CMD_PENDING;
- request = leapioraid_base_get_msg_frame(ioc, smid);
- memset(request, 0, ioc->request_sz);
- ioc->base_cmds.smid = smid;
- memcpy(request, mpi_request, sizeof(struct LeapioraidSepReq_t));
- init_completion(&ioc->base_cmds.done);
- ioc->put_smid_default(ioc, smid);
- wait_for_completion_timeout(&ioc->base_cmds.done,
- msecs_to_jiffies(10000));
- if (!(ioc->base_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
- leapioraid_check_cmd_timeout(ioc,
- ioc->base_cmds.status, mpi_request,
- sizeof(struct LeapioraidSepReq_t) / 4,
- issue_reset);
- goto issue_host_reset;
- }
- if (ioc->base_cmds.status & LEAPIORAID_CMD_REPLY_VALID)
- memcpy(mpi_reply, ioc->base_cmds.reply,
- sizeof(struct LeapioraidSepRep_t));
- else
- memset(mpi_reply, 0, sizeof(struct LeapioraidSepRep_t));
- ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED;
- goto out;
-issue_host_reset:
- if (issue_reset)
- leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED;
- rc = -EFAULT;
-out:
- mutex_unlock(&ioc->base_cmds.mutex);
- return rc;
-}
-
-static int
-leapioraid_base_get_port_facts(struct LEAPIORAID_ADAPTER *ioc, int port)
-{
- struct LeapioraidPortFactsReq_t mpi_request;
- struct LeapioraidPortFactsRep_t mpi_reply;
- struct leapioraid_port_facts *pfacts;
- int mpi_reply_sz, mpi_request_sz, r;
-
- dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
- __func__));
- mpi_reply_sz = sizeof(struct LeapioraidPortFactsRep_t);
- mpi_request_sz = sizeof(struct LeapioraidPortFactsReq_t);
- memset(&mpi_request, 0, mpi_request_sz);
- mpi_request.Function = LEAPIORAID_FUNC_PORT_FACTS;
- mpi_request.PortNumber = port;
- r = leapioraid_base_handshake_req_reply_wait(ioc, mpi_request_sz,
- (u32 *) &mpi_request,
- mpi_reply_sz,
- (u16 *) &mpi_reply, 5);
- if (r != 0) {
- pr_err("%s %s: handshake failed (r=%d)\n",
- ioc->name, __func__, r);
- return r;
- }
- pfacts = &ioc->pfacts[port];
- memset(pfacts, 0, sizeof(struct leapioraid_port_facts));
- pfacts->PortNumber = mpi_reply.PortNumber;
- pfacts->VP_ID = mpi_reply.VP_ID;
- pfacts->VF_ID = mpi_reply.VF_ID;
- pfacts->MaxPostedCmdBuffers =
- le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
- return 0;
-}
-
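-/*
- * Build and handshake the IOCInit request.  The request frame size is
- * expressed in 4-byte words, and the request, reply, sense and
- * reply-free DMA bases are handed to the firmware; in RDPQ mode the
- * per-queue post addresses are passed indirectly via
- * reply_post_free_array.
- */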
-static int
-leapioraid_base_send_ioc_init(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct LeapioraidIOCInitReq_t mpi_request;
- struct LeapioraidIOCInitRep_t mpi_reply;
- int i, r = 0;
- ktime_t current_time;
- u16 ioc_status;
- u32 reply_post_free_ary_sz;
-
- dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
- __func__));
- memset(&mpi_request, 0, sizeof(struct LeapioraidIOCInitReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_IOC_INIT;
- mpi_request.WhoInit = LEAPIORAID_WHOINIT_HOST_DRIVER;
- mpi_request.VF_ID = 0;
- mpi_request.VP_ID = 0;
- mpi_request.MsgVersion = cpu_to_le16(0x0206);
- mpi_request.HeaderVersion = cpu_to_le16(0x3A00);
- mpi_request.HostPageSize = 12;
- if (leapioraid_base_is_controller_msix_enabled(ioc))
- mpi_request.HostMSIxVectors = ioc->reply_queue_count;
- mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz / 4);
- mpi_request.ReplyDescriptorPostQueueDepth =
- cpu_to_le16(ioc->reply_post_queue_depth);
- mpi_request.ReplyFreeQueueDepth =
- cpu_to_le16(ioc->reply_free_queue_depth);
- mpi_request.SenseBufferAddressHigh =
- cpu_to_le32((u64) ioc->sense_dma >> 32);
- mpi_request.SystemReplyAddressHigh =
- cpu_to_le32((u64) ioc->reply_dma >> 32);
- mpi_request.SystemRequestFrameBaseAddress =
- cpu_to_le64((u64) ioc->request_dma);
- mpi_request.ReplyFreeQueueAddress =
- cpu_to_le64((u64) ioc->reply_free_dma);
- if (ioc->rdpq_array_enable) {
- reply_post_free_ary_sz = ioc->reply_queue_count *
- sizeof(struct LeapioraidIOCInitRDPQArrayEntry);
- memset(ioc->reply_post_free_array, 0, reply_post_free_ary_sz);
- for (i = 0; i < ioc->reply_queue_count; i++)
- ioc->reply_post_free_array[i].RDPQBaseAddress =
- cpu_to_le64((u64) ioc->reply_post[i].reply_post_free_dma);
- mpi_request.MsgFlags = LEAPIORAID_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
- mpi_request.ReplyDescriptorPostQueueAddress =
- cpu_to_le64((u64) ioc->reply_post_free_array_dma);
- } else {
- mpi_request.ReplyDescriptorPostQueueAddress =
- cpu_to_le64((u64) ioc->reply_post[0].reply_post_free_dma);
- }
- mpi_request.ConfigurationFlags |= 0x0002;
- current_time = ktime_get_real();
- mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
-	if (ioc->logging_level & LEAPIORAID_DEBUG_INIT) {
-		pr_info("%s \toffset:data\n", ioc->name);
-		leapioraid_debug_dump_mf(&mpi_request,
-			sizeof(struct LeapioraidIOCInitReq_t) / 4);
-	}
- r = leapioraid_base_handshake_req_reply_wait(ioc,
- sizeof
- (struct LeapioraidIOCInitReq_t),
- (u32 *) &mpi_request,
- sizeof
- (struct LeapioraidIOCInitRep_t),
- (u16 *) &mpi_reply, 30);
- if (r != 0) {
- pr_err("%s %s: handshake failed (r=%d)\n",
- ioc->name, __func__, r);
- return r;
- }
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS || mpi_reply.IOCLogInfo) {
- pr_err("%s %s: failed\n", ioc->name,
- __func__);
- r = -EIO;
- }
- ioc->timestamp_update_count = 0;
- return r;
-}
-
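-/*
- * Allocate (once) a DMA log buffer and register its address and size
- * with the firmware through the LOG_INIT handshake, apparently so the
- * controller can write trace data into host memory.  Only used when
- * open_pcie_trace is enabled.
- */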
-int
-leapioraid_base_trace_log_init(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct LeapioraidIOCLogReq_t mpi_request;
- struct LeapioraidIOCLogRep_t mpi_reply;
- u16 ioc_status;
-	int r;
-
- dinitprintk(ioc,
- pr_info("%s %s\n", ioc->name, __func__));
- if (ioc->log_buffer == NULL) {
- ioc->log_buffer =
- dma_alloc_coherent(&ioc->pdev->dev,
- (SYS_LOG_BUF_SIZE + SYS_LOG_BUF_RESERVE),
- &ioc->log_buffer_dma, GFP_KERNEL);
- if (!ioc->log_buffer) {
- pr_err("%s: Failed to allocate log_buffer at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return -ENOMEM;
- }
- }
- memset(ioc->log_buffer, 0, (SYS_LOG_BUF_SIZE + SYS_LOG_BUF_RESERVE));
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidIOCLogReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_LOG_INIT;
- mpi_request.BufAddr = ioc->log_buffer_dma;
- mpi_request.BufSize = SYS_LOG_BUF_SIZE;
- r = leapioraid_base_handshake_req_reply_wait(ioc,
- sizeof
- (struct LeapioraidIOCLogReq_t),
- (u32 *) &mpi_request,
- sizeof
- (struct LeapioraidIOCLogRep_t),
- (u16 *) &mpi_reply, 30);
- if (r != 0) {
- pr_err("%s %s: handshake failed (r=%d)\n",
- ioc->name, __func__, r);
- return r;
- }
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS || mpi_reply.IOCLogInfo) {
- pr_err("%s %s: failed\n", ioc->name,
- __func__);
- r = -EIO;
- }
- return r;
-}
-
-static int
-leapioraid_base_trace_log_exit(struct LEAPIORAID_ADAPTER *ioc)
-{
-	if (ioc->log_buffer)
-		dma_free_coherent(&ioc->pdev->dev,
-			(SYS_LOG_BUF_SIZE + SYS_LOG_BUF_RESERVE),
-			ioc->log_buffer, ioc->log_buffer_dma);
- return 0;
-}
-
-u8
-leapioraid_port_enable_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid,
- u8 msix_index, u32 reply)
-{
- struct LeapioraidDefaultRep_t *mpi_reply;
- u16 ioc_status;
-
- if (ioc->port_enable_cmds.status == LEAPIORAID_CMD_NOT_USED)
- return 1;
- mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply);
- if (!mpi_reply)
- return 1;
- if (mpi_reply->Function != LEAPIORAID_FUNC_PORT_ENABLE)
- return 1;
- ioc->port_enable_cmds.status &= ~LEAPIORAID_CMD_PENDING;
- ioc->port_enable_cmds.status |= LEAPIORAID_CMD_COMPLETE;
- ioc->port_enable_cmds.status |= LEAPIORAID_CMD_REPLY_VALID;
- memcpy(ioc->port_enable_cmds.reply, mpi_reply,
- mpi_reply->MsgLength * 4);
- ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS)
- ioc->port_enable_failed = 1;
- if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_COMPLETE_ASYNC) {
- ioc->port_enable_cmds.status &= ~LEAPIORAID_CMD_COMPLETE_ASYNC;
- if (ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS) {
- leapioraid_port_enable_complete(ioc);
- return 1;
- }
-
- ioc->start_scan_failed = ioc_status;
- ioc->start_scan = 0;
- return 1;
- }
- complete(&ioc->port_enable_cmds.done);
- return 1;
-}
-
-static int
-leapioraid_base_send_port_enable(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct LeapioraidPortEnableReq_t *mpi_request;
- struct LeapioraidPortEnableRep_t *mpi_reply;
- int r = 0;
- u16 smid;
- u16 ioc_status;
-
- pr_info("%s sending port enable !!\n", ioc->name);
- if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_PENDING) {
- pr_err(
- "%s %s: internal command already in use\n", ioc->name,
- __func__);
- return -EAGAIN;
- }
- smid = leapioraid_base_get_smid(ioc, ioc->port_enable_cb_idx);
- if (!smid) {
- pr_err("%s %s: failed obtaining a smid\n",
- ioc->name, __func__);
- return -EAGAIN;
- }
- ioc->port_enable_cmds.status = LEAPIORAID_CMD_PENDING;
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- ioc->port_enable_cmds.smid = smid;
- memset(mpi_request, 0, sizeof(struct LeapioraidPortEnableReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_PORT_ENABLE;
- init_completion(&ioc->port_enable_cmds.done);
- ioc->put_smid_default(ioc, smid);
- wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300 * HZ);
- if (!(ioc->port_enable_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
- pr_err("%s %s: timeout\n",
- ioc->name, __func__);
- leapioraid_debug_dump_mf(mpi_request,
- sizeof(struct LeapioraidPortEnableReq_t) / 4);
- if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_RESET)
- r = -EFAULT;
- else
- r = -ETIME;
- goto out;
- }
- mpi_reply = ioc->port_enable_cmds.reply;
- ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err(
- "%s %s: failed with (ioc_status=0x%08x)\n", ioc->name,
- __func__, ioc_status);
- r = -EFAULT;
- goto out;
- }
-out:
- ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED;
- pr_info("%s port enable: %s\n", ioc->name, ((r == 0) ?
- "SUCCESS"
- :
- "FAILED"));
- return r;
-}
-
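-/*
- * Asynchronous variant of port enable: the request is posted without
- * waiting, and LEAPIORAID_CMD_COMPLETE_ASYNC tells
- * leapioraid_port_enable_done() to finish the scan itself instead of
- * completing a waiter.
- */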
-int
-leapioraid_port_enable(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct LeapioraidPortEnableReq_t *mpi_request;
- u16 smid;
-
- pr_info("%s sending port enable !!\n", ioc->name);
- if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_PENDING) {
- pr_err(
- "%s %s: internal command already in use\n", ioc->name,
- __func__);
- return -EAGAIN;
- }
- smid = leapioraid_base_get_smid(ioc, ioc->port_enable_cb_idx);
- if (!smid) {
- pr_err("%s %s: failed obtaining a smid\n",
- ioc->name, __func__);
- return -EAGAIN;
- }
- ioc->drv_internal_flags |= LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED;
- ioc->port_enable_cmds.status = LEAPIORAID_CMD_PENDING;
- ioc->port_enable_cmds.status |= LEAPIORAID_CMD_COMPLETE_ASYNC;
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- ioc->port_enable_cmds.smid = smid;
- memset(mpi_request, 0, sizeof(struct LeapioraidPortEnableReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_PORT_ENABLE;
- ioc->put_smid_default(ioc, smid);
- return 0;
-}
-
-static int
-leapioraid_base_determine_wait_on_discovery(struct LEAPIORAID_ADAPTER *ioc)
-{
- if (ioc->ir_firmware)
- return 1;
- if (!ioc->bios_pg3.BiosVersion)
- return 0;
- if ((ioc->bios_pg2.CurrentBootDeviceForm &
- LEAPIORAID_BIOSPAGE2_FORM_MASK) ==
- LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
- (ioc->bios_pg2.ReqBootDeviceForm &
- LEAPIORAID_BIOSPAGE2_FORM_MASK) ==
- LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
- (ioc->bios_pg2.ReqAltBootDeviceForm &
- LEAPIORAID_BIOSPAGE2_FORM_MASK) ==
- LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
- return 0;
- return 1;
-}
-
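-/*
- * Clear the mask bit for a single event code.  Events live in four
- * 32-bit mask words, so e.g. event 0x21 (33) maps to word 1, bit 1.
- */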
-static void
-leapioraid_base_unmask_events(struct LEAPIORAID_ADAPTER *ioc, u16 event)
-{
- u32 desired_event;
-
- if (event >= 128)
- return;
- desired_event = (1 << (event % 32));
- if (event < 32)
- ioc->event_masks[0] &= ~desired_event;
- else if (event < 64)
- ioc->event_masks[1] &= ~desired_event;
- else if (event < 96)
- ioc->event_masks[2] &= ~desired_event;
- else if (event < 128)
- ioc->event_masks[3] &= ~desired_event;
-}
-
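-/*
- * Send the EventNotification request carrying the current event mask
- * words.  A timeout escalates to a diag reset, unless the first port
- * enable has already been issued, in which case the failure is fatal.
- */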
-static int
-leapioraid_base_event_notification(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct LeapioraidEventNotificationReq_t *mpi_request;
- u16 smid;
- int r = 0;
- int i, issue_diag_reset = 0;
-
- dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
- __func__));
- if (ioc->base_cmds.status & LEAPIORAID_CMD_PENDING) {
- pr_err(
- "%s %s: internal command already in use\n", ioc->name,
- __func__);
- return -EAGAIN;
- }
- smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx);
- if (!smid) {
- pr_err("%s %s: failed obtaining a smid\n",
- ioc->name, __func__);
- return -EAGAIN;
- }
- ioc->base_cmds.status = LEAPIORAID_CMD_PENDING;
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- ioc->base_cmds.smid = smid;
- memset(mpi_request, 0, sizeof(struct LeapioraidEventNotificationReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_EVENT_NOTIFICATION;
- mpi_request->VF_ID = 0;
- mpi_request->VP_ID = 0;
- for (i = 0; i < LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
- mpi_request->EventMasks[i] = cpu_to_le32(ioc->event_masks[i]);
- init_completion(&ioc->base_cmds.done);
- ioc->put_smid_default(ioc, smid);
- wait_for_completion_timeout(&ioc->base_cmds.done, 30 * HZ);
- if (!(ioc->base_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
- pr_err("%s %s: timeout\n",
- ioc->name, __func__);
- leapioraid_debug_dump_mf(mpi_request,
- sizeof(struct LeapioraidEventNotificationReq_t) / 4);
- if (ioc->base_cmds.status & LEAPIORAID_CMD_RESET)
- r = -EFAULT;
- else
- issue_diag_reset = 1;
- } else
- dinitprintk(ioc, pr_info("%s %s: complete\n",
- ioc->name, __func__));
- ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED;
- if (issue_diag_reset) {
- if (ioc->drv_internal_flags & LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED)
- return -EFAULT;
- if (leapioraid_base_check_for_fault_and_issue_reset(ioc))
- return -EFAULT;
- r = -EAGAIN;
- }
- return r;
-}
-
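-/*
- * Unmask any events newly requested in event_type and, if anything
- * changed, push the updated masks to the firmware under the base_cmds
- * mutex.
- */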
-void
-leapioraid_base_validate_event_type(struct LEAPIORAID_ADAPTER *ioc,
- u32 *event_type)
-{
- int i, j;
- u32 event_mask, desired_event;
- u8 send_update_to_fw;
-
- for (i = 0, send_update_to_fw = 0; i <
- LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
- event_mask = ~event_type[i];
- desired_event = 1;
- for (j = 0; j < 32; j++) {
- if (!(event_mask & desired_event) &&
- (ioc->event_masks[i] & desired_event)) {
- ioc->event_masks[i] &= ~desired_event;
- send_update_to_fw = 1;
- }
- desired_event = (desired_event << 1);
- }
- }
- if (!send_update_to_fw)
- return;
- mutex_lock(&ioc->base_cmds.mutex);
- leapioraid_base_event_notification(ioc);
- mutex_unlock(&ioc->base_cmds.mutex);
-}
-
-int
-leapioraid_base_make_ioc_ready(struct LEAPIORAID_ADAPTER *ioc,
- enum reset_type type)
-{
- u32 ioc_state;
- int rc;
- int count;
-
- dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
- __func__));
- if (!leapioraid_base_pci_device_is_available(ioc))
- return 0;
- ioc_state = leapioraid_base_get_iocstate(ioc, 0);
- dhsprintk(ioc, pr_info("%s %s: ioc_state(0x%08x)\n",
- ioc->name, __func__, ioc_state));
- count = 0;
- if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_RESET) {
- while ((ioc_state & LEAPIORAID_IOC_STATE_MASK) !=
- LEAPIORAID_IOC_STATE_READY) {
- if (count++ == 10) {
- pr_err(
- "%s %s: failed going to ready state (ioc_state=0x%x)\n",
- ioc->name, __func__, ioc_state);
- return -EFAULT;
- }
- ssleep(1);
- ioc_state = leapioraid_base_get_iocstate(ioc, 0);
- }
- }
- if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_READY)
- return 0;
- if (ioc_state & LEAPIORAID_DOORBELL_USED) {
- pr_info("%s unexpected doorbell active!\n",
- ioc->name);
- goto issue_diag_reset;
- }
- if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT) {
- leapioraid_print_fault_code(ioc, ioc_state &
- LEAPIORAID_DOORBELL_DATA_MASK);
- goto issue_diag_reset;
- }
- if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_COREDUMP) {
- if (ioc->ioc_coredump_loop != 0xFF) {
- leapioraid_base_coredump_info(ioc, ioc_state &
- LEAPIORAID_DOORBELL_DATA_MASK);
- leapioraid_base_wait_for_coredump_completion(ioc,
- __func__);
- }
- goto issue_diag_reset;
- }
- if (type == FORCE_BIG_HAMMER)
- goto issue_diag_reset;
-	if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) ==
-	    LEAPIORAID_IOC_STATE_OPERATIONAL &&
-	    !leapioraid_base_send_ioc_reset(ioc,
-		LEAPIORAID_FUNC_IOC_MESSAGE_UNIT_RESET, 15))
-		return 0;
-issue_diag_reset:
- rc = leapioraid_base_diag_reset(ioc);
- return rc;
-}
-
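-/*
- * Take a READY IOC to OPERATIONAL: discard delayed work left over from
- * a reset, rebuild the hi-priority and internal SMID free lists, seed
- * the reply free/post queues, send IOCInit, re-arm event notification
- * and, during recovery, re-issue port enable.
- */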
-static int
-leapioraid_base_make_ioc_operational(struct LEAPIORAID_ADAPTER *ioc)
-{
- int r, rc, i, index;
- unsigned long flags;
- u32 reply_address;
- u16 smid;
- struct leapioraid_tr_list *delayed_tr, *delayed_tr_next;
- struct leapioraid_sc_list *delayed_sc, *delayed_sc_next;
- struct leapioraid_event_ack_list *delayed_event_ack, *delayed_event_ack_next;
- struct leapioraid_adapter_reply_queue *reply_q;
- union LeapioraidRepDescUnion_t *reply_post_free_contig;
-
- dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
- __func__));
- list_for_each_entry_safe(delayed_tr, delayed_tr_next,
- &ioc->delayed_tr_list, list) {
- list_del(&delayed_tr->list);
- kfree(delayed_tr);
- }
- list_for_each_entry_safe(delayed_tr, delayed_tr_next,
- &ioc->delayed_tr_volume_list, list) {
- list_del(&delayed_tr->list);
- kfree(delayed_tr);
- }
- list_for_each_entry_safe(delayed_tr, delayed_tr_next,
- &ioc->delayed_internal_tm_list, list) {
- list_del(&delayed_tr->list);
- kfree(delayed_tr);
- }
- list_for_each_entry_safe(delayed_sc, delayed_sc_next,
- &ioc->delayed_sc_list, list) {
- list_del(&delayed_sc->list);
- kfree(delayed_sc);
- }
- list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
- &ioc->delayed_event_ack_list, list) {
- list_del(&delayed_event_ack->list);
- kfree(delayed_event_ack);
- }
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- INIT_LIST_HEAD(&ioc->hpr_free_list);
- smid = ioc->hi_priority_smid;
- for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
- ioc->hpr_lookup[i].cb_idx = 0xFF;
- ioc->hpr_lookup[i].smid = smid;
- list_add_tail(&ioc->hpr_lookup[i].tracker_list,
- &ioc->hpr_free_list);
- }
- INIT_LIST_HEAD(&ioc->internal_free_list);
- smid = ioc->internal_smid;
- for (i = 0; i < ioc->internal_depth; i++, smid++) {
- ioc->internal_lookup[i].cb_idx = 0xFF;
- ioc->internal_lookup[i].smid = smid;
- list_add_tail(&ioc->internal_lookup[i].tracker_list,
- &ioc->internal_free_list);
- }
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- for (i = 0, reply_address = (u32) ioc->reply_dma;
- i < ioc->reply_free_queue_depth; i++, reply_address +=
- ioc->reply_sz) {
- ioc->reply_free[i] = cpu_to_le32(reply_address);
- }
- if (ioc->is_driver_loading)
- leapioraid_base_assign_reply_queues(ioc);
- index = 0;
- reply_post_free_contig = ioc->reply_post[0].reply_post_free;
- list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
- if (ioc->rdpq_array_enable) {
- reply_q->reply_post_free =
- ioc->reply_post[index++].reply_post_free;
- } else {
- reply_q->reply_post_free = reply_post_free_contig;
- reply_post_free_contig += ioc->reply_post_queue_depth;
- }
- reply_q->reply_post_host_index = 0;
- for (i = 0; i < ioc->reply_post_queue_depth; i++)
- reply_q->reply_post_free[i].Words =
- cpu_to_le64(ULLONG_MAX);
- if (!leapioraid_base_is_controller_msix_enabled(ioc))
- goto skip_init_reply_post_free_queue;
- }
-skip_init_reply_post_free_queue:
- r = leapioraid_base_send_ioc_init(ioc);
- if (r) {
- if (!ioc->is_driver_loading)
- return r;
- rc = leapioraid_base_check_for_fault_and_issue_reset(ioc);
- if (rc || (leapioraid_base_send_ioc_init(ioc)))
- return r;
- }
- ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
- writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
- list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
- if (ioc->combined_reply_queue) {
- for (i = 0; i < ioc->nc_reply_index_count; i++)
- writel((reply_q->msix_index & 7) <<
- LEAPIORAID_RPHI_MSIX_INDEX_SHIFT,
- ioc->replyPostRegisterIndex[i]);
- } else {
- writel(reply_q->msix_index << LEAPIORAID_RPHI_MSIX_INDEX_SHIFT,
- &ioc->chip->ReplyPostHostIndex);
- }
- if (!leapioraid_base_is_controller_msix_enabled(ioc))
- goto skip_init_reply_post_host_index;
- }
-skip_init_reply_post_host_index:
- leapioraid_base_unmask_interrupts(ioc);
- r = leapioraid_base_display_fwpkg_version(ioc);
- if (r)
- return r;
- r = leapioraid_base_static_config_pages(ioc);
- if (r)
- return r;
- r = leapioraid_base_event_notification(ioc);
- if (r)
- return r;
- leapioraid_base_start_hba_unplug_watchdog(ioc);
- if (!ioc->shost_recovery) {
- ioc->wait_for_discovery_to_complete =
- leapioraid_base_determine_wait_on_discovery(ioc);
- return r;
- }
-	return leapioraid_base_send_port_enable(ioc);
-}
-
-void
-leapioraid_base_free_resources(struct LEAPIORAID_ADAPTER *ioc)
-{
- dexitprintk(ioc, pr_info("%s %s\n", ioc->name,
- __func__));
- if (!ioc->chip_phys)
- return;
- leapioraid_base_mask_interrupts(ioc);
- ioc->shost_recovery = 1;
- leapioraid_base_make_ioc_ready(ioc, SOFT_RESET);
- ioc->shost_recovery = 0;
- leapioraid_base_unmap_resources(ioc);
-}
-
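-/*
- * One-time adapter bring-up: map PCI resources, read IOC and port
- * facts, select the SGE build and SMID post methods, allocate the
- * memory pools and internal command buffers, unmask the events of
- * interest and make the IOC operational (retried once on -EAGAIN).
- */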
-int
-leapioraid_base_attach(struct LEAPIORAID_ADAPTER *ioc)
-{
- int r, rc, i;
- int cpu_id, last_cpu_id = 0;
-
- dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
- __func__));
- ioc->cpu_count = num_online_cpus();
- for_each_online_cpu(cpu_id)
- last_cpu_id = cpu_id;
- ioc->cpu_msix_table_sz = last_cpu_id + 1;
- ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
- ioc->reply_queue_count = 1;
- if (!ioc->cpu_msix_table) {
- r = -ENOMEM;
- goto out_free_resources;
- }
- ioc->rdpq_array_enable_assigned = 0;
- ioc->use_32bit_dma = 0;
- ioc->dma_mask = 64;
- ioc->base_readl = &leapioraid_base_readl_aero;
- ioc->smp_affinity_enable = smp_affinity_enable;
- r = leapioraid_base_map_resources(ioc);
- if (r)
- goto out_free_resources;
- pci_set_drvdata(ioc->pdev, ioc->shost);
- r = leapioraid_base_get_ioc_facts(ioc);
- if (r) {
- rc = leapioraid_base_check_for_fault_and_issue_reset(ioc);
- if (rc || (leapioraid_base_get_ioc_facts(ioc)))
- goto out_free_resources;
- }
-
- ioc->build_sg_scmd = &leapioraid_base_build_sg_scmd_ieee;
- ioc->build_sg = &leapioraid_base_build_sg_ieee;
- ioc->build_zero_len_sge =
- &leapioraid_base_build_zero_len_sge_ieee;
- ioc->sge_size_ieee = sizeof(struct LEAPIORAID_IEEE_SGE_SIMPLE64);
- if (ioc->high_iops_queues)
- ioc->get_msix_index_for_smlio =
- &leapioraid_base_get_high_iops_msix_index;
- else
- ioc->get_msix_index_for_smlio = &leapioraid_base_get_msix_index;
-
- if (ioc->atomic_desc_capable) {
- ioc->put_smid_default =
- &leapioraid_base_put_smid_default_atomic;
- ioc->put_smid_scsi_io =
- &leapioraid_base_put_smid_scsi_io_atomic;
- ioc->put_smid_fast_path =
- &leapioraid_base_put_smid_fast_path_atomic;
- ioc->put_smid_hi_priority =
- &leapioraid_base_put_smid_hi_priority_atomic;
- } else {
- ioc->put_smid_default = &leapioraid_base_put_smid_default;
- ioc->put_smid_scsi_io = &leapioraid_base_put_smid_scsi_io;
- ioc->put_smid_fast_path = &leapioraid_base_put_smid_fast_path;
- ioc->put_smid_hi_priority =
- &leapioraid_base_put_smid_hi_priority;
- }
- ioc->build_sg_mpi = &leapioraid_base_build_sg;
- ioc->build_zero_len_sge_mpi = &leapioraid_base_build_zero_len_sge;
- r = leapioraid_base_make_ioc_ready(ioc, SOFT_RESET);
- if (r)
- goto out_free_resources;
- if (ioc->open_pcie_trace) {
- r = leapioraid_base_trace_log_init(ioc);
- if (r) {
- pr_err("log init failed\n");
- goto out_free_resources;
- }
- }
- ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
- sizeof(struct leapioraid_port_facts), GFP_KERNEL);
- if (!ioc->pfacts) {
- r = -ENOMEM;
- goto out_free_resources;
- }
- for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
- r = leapioraid_base_get_port_facts(ioc, i);
- if (r) {
- rc = leapioraid_base_check_for_fault_and_issue_reset
- (ioc);
- if (rc || (leapioraid_base_get_port_facts(ioc, i)))
- goto out_free_resources;
- }
- }
- r = leapioraid_base_allocate_memory_pools(ioc);
- if (r)
- goto out_free_resources;
- if (irqpoll_weight > 0)
- ioc->thresh_hold = irqpoll_weight;
- else
- ioc->thresh_hold = ioc->hba_queue_depth / 4;
- leapioraid_base_init_irqpolls(ioc);
- init_waitqueue_head(&ioc->reset_wq);
- ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
- if (ioc->facts.MaxDevHandle % 8)
- ioc->pd_handles_sz++;
- ioc->pd_handles = kzalloc(ioc->pd_handles_sz, GFP_KERNEL);
- if (!ioc->pd_handles) {
- r = -ENOMEM;
- goto out_free_resources;
- }
- ioc->blocking_handles = kzalloc(ioc->pd_handles_sz, GFP_KERNEL);
- if (!ioc->blocking_handles) {
- r = -ENOMEM;
- goto out_free_resources;
- }
- ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
- if (ioc->facts.MaxDevHandle % 8)
- ioc->pend_os_device_add_sz++;
- ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
- GFP_KERNEL);
-	if (!ioc->pend_os_device_add) {
-		r = -ENOMEM;
-		goto out_free_resources;
-	}
- ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
- ioc->device_remove_in_progress =
- kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
-	if (!ioc->device_remove_in_progress) {
-		r = -ENOMEM;
-		goto out_free_resources;
-	}
- ioc->tm_tr_retry_sz = ioc->facts.MaxDevHandle * sizeof(u8);
- ioc->tm_tr_retry = kzalloc(ioc->tm_tr_retry_sz, GFP_KERNEL);
-	if (!ioc->tm_tr_retry) {
-		r = -ENOMEM;
-		goto out_free_resources;
-	}
- ioc->fwfault_debug = leapioraid_fwfault_debug;
- mutex_init(&ioc->base_cmds.mutex);
- ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
- ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED;
- ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
- ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED;
- ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
- ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED;
- mutex_init(&ioc->transport_cmds.mutex);
- ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
- ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED;
- mutex_init(&ioc->scsih_cmds.mutex);
- ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
- ioc->tm_cmds.status = LEAPIORAID_CMD_NOT_USED;
- mutex_init(&ioc->tm_cmds.mutex);
- ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
- ioc->config_cmds.status = LEAPIORAID_CMD_NOT_USED;
- mutex_init(&ioc->config_cmds.mutex);
- ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
- ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
- ioc->ctl_cmds.status = LEAPIORAID_CMD_NOT_USED;
- mutex_init(&ioc->ctl_cmds.mutex);
-
- if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
- !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
- !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
- !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
- r = -ENOMEM;
- goto out_free_resources;
- }
- for (i = 0; i < LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
- ioc->event_masks[i] = -1;
- leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_SAS_DISCOVERY);
- leapioraid_base_unmask_events(ioc,
- LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE);
- leapioraid_base_unmask_events(ioc,
- LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
- leapioraid_base_unmask_events(ioc,
- LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE);
- leapioraid_base_unmask_events(ioc,
- LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
- leapioraid_base_unmask_events(ioc,
- LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST);
- leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_IR_VOLUME);
- leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_IR_PHYSICAL_DISK);
- leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_IR_OPERATION_STATUS);
- leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_LOG_ENTRY_ADDED);
- leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_TEMP_THRESHOLD);
- leapioraid_base_unmask_events(ioc,
- LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
- r = leapioraid_base_make_ioc_operational(ioc);
- if (r == -EAGAIN)
- r = leapioraid_base_make_ioc_operational(ioc);
- if (r)
- goto out_free_resources;
- memcpy(&ioc->prev_fw_facts, &ioc->facts,
- sizeof(struct leapioraid_facts));
- ioc->non_operational_loop = 0;
- ioc->ioc_coredump_loop = 0;
- ioc->got_task_abort_from_ioctl = 0;
- ioc->got_task_abort_from_sysfs = 0;
- return 0;
-out_free_resources:
- ioc->remove_host = 1;
- leapioraid_base_free_resources(ioc);
- leapioraid_base_release_memory_pools(ioc);
- pci_set_drvdata(ioc->pdev, NULL);
- kfree(ioc->cpu_msix_table);
- kfree(ioc->pd_handles);
- kfree(ioc->blocking_handles);
- kfree(ioc->tm_tr_retry);
- kfree(ioc->device_remove_in_progress);
- kfree(ioc->pend_os_device_add);
- kfree(ioc->tm_cmds.reply);
- kfree(ioc->transport_cmds.reply);
- kfree(ioc->scsih_cmds.reply);
- kfree(ioc->config_cmds.reply);
- kfree(ioc->base_cmds.reply);
- kfree(ioc->port_enable_cmds.reply);
- kfree(ioc->ctl_cmds.reply);
- kfree(ioc->ctl_cmds.sense);
- kfree(ioc->pfacts);
- ioc->ctl_cmds.reply = NULL;
- ioc->base_cmds.reply = NULL;
- ioc->tm_cmds.reply = NULL;
- ioc->scsih_cmds.reply = NULL;
- ioc->transport_cmds.reply = NULL;
- ioc->config_cmds.reply = NULL;
- ioc->pfacts = NULL;
- return r;
-}
-
-void
-leapioraid_base_detach(struct LEAPIORAID_ADAPTER *ioc)
-{
- dexitprintk(ioc, pr_info("%s %s\n", ioc->name,
- __func__));
- if (ioc->open_pcie_trace)
- leapioraid_base_trace_log_exit(ioc);
- leapioraid_base_stop_watchdog(ioc);
- leapioraid_base_stop_hba_unplug_watchdog(ioc);
- leapioraid_base_free_resources(ioc);
- leapioraid_base_release_memory_pools(ioc);
- leapioraid_free_enclosure_list(ioc);
- pci_set_drvdata(ioc->pdev, NULL);
- kfree(ioc->cpu_msix_table);
- kfree(ioc->pd_handles);
- kfree(ioc->blocking_handles);
- kfree(ioc->tm_tr_retry);
- kfree(ioc->device_remove_in_progress);
- kfree(ioc->pend_os_device_add);
- kfree(ioc->pfacts);
- kfree(ioc->ctl_cmds.reply);
- kfree(ioc->ctl_cmds.sense);
- kfree(ioc->base_cmds.reply);
- kfree(ioc->port_enable_cmds.reply);
- kfree(ioc->tm_cmds.reply);
- kfree(ioc->transport_cmds.reply);
- kfree(ioc->scsih_cmds.reply);
- kfree(ioc->config_cmds.reply);
-}
-
-static void
-leapioraid_base_clear_outstanding_leapioraid_commands(
-	struct LEAPIORAID_ADAPTER *ioc)
-{
- struct leapioraid_internal_qcmd *scsih_qcmd, *scsih_qcmd_next;
- unsigned long flags;
-
- if (ioc->transport_cmds.status & LEAPIORAID_CMD_PENDING) {
- ioc->transport_cmds.status |= LEAPIORAID_CMD_RESET;
- leapioraid_base_free_smid(ioc, ioc->transport_cmds.smid);
- complete(&ioc->transport_cmds.done);
- }
- if (ioc->base_cmds.status & LEAPIORAID_CMD_PENDING) {
- ioc->base_cmds.status |= LEAPIORAID_CMD_RESET;
- leapioraid_base_free_smid(ioc, ioc->base_cmds.smid);
- complete(&ioc->base_cmds.done);
- }
- if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_PENDING) {
- ioc->port_enable_failed = 1;
- ioc->port_enable_cmds.status |= LEAPIORAID_CMD_RESET;
- leapioraid_base_free_smid(ioc, ioc->port_enable_cmds.smid);
- if (ioc->is_driver_loading) {
- ioc->start_scan_failed =
- LEAPIORAID_IOCSTATUS_INTERNAL_ERROR;
- ioc->start_scan = 0;
- } else
- complete(&ioc->port_enable_cmds.done);
- }
- if (ioc->config_cmds.status & LEAPIORAID_CMD_PENDING) {
- ioc->config_cmds.status |= LEAPIORAID_CMD_RESET;
- leapioraid_base_free_smid(ioc, ioc->config_cmds.smid);
- ioc->config_cmds.smid = USHORT_MAX;
- complete(&ioc->config_cmds.done);
- }
- spin_lock_irqsave(&ioc->scsih_q_internal_lock, flags);
- list_for_each_entry_safe(scsih_qcmd, scsih_qcmd_next,
- &ioc->scsih_q_intenal_cmds, list) {
- if ((scsih_qcmd->status) & LEAPIORAID_CMD_PENDING) {
- scsih_qcmd->status |= LEAPIORAID_CMD_RESET;
- leapioraid_base_free_smid(ioc, scsih_qcmd->smid);
- }
- }
- spin_unlock_irqrestore(&ioc->scsih_q_internal_lock, flags);
-}
-
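-/*
- * Fan each reset phase out to the scsih and ctl layers; after the reset
- * itself (AFTER_RESET_PHASE) any still-pending internal commands are
- * failed back with LEAPIORAID_CMD_RESET.
- */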
-static void
-leapioraid_base_reset_handler(struct LEAPIORAID_ADAPTER *ioc, int reset_phase)
-{
- leapioraid_scsihost_reset_handler(ioc, reset_phase);
- leapioraid_ctl_reset_handler(ioc, reset_phase);
- switch (reset_phase) {
- case LEAPIORAID_IOC_PRE_RESET_PHASE:
- dtmprintk(ioc, pr_info("%s %s: LEAPIORAID_IOC_PRE_RESET_PHASE\n",
- ioc->name, __func__));
- break;
- case LEAPIORAID_IOC_AFTER_RESET_PHASE:
- dtmprintk(ioc, pr_info("%s %s: LEAPIORAID_IOC_AFTER_RESET_PHASE\n",
- ioc->name, __func__));
- leapioraid_base_clear_outstanding_leapioraid_commands(ioc);
- break;
- case LEAPIORAID_IOC_DONE_RESET_PHASE:
- dtmprintk(ioc, pr_info("%s %s: LEAPIORAID_IOC_DONE_RESET_PHASE\n",
- ioc->name, __func__));
- break;
- }
-}
-
-void
-leapioraid_wait_for_commands_to_complete(struct LEAPIORAID_ADAPTER *ioc)
-{
- u32 ioc_state;
- unsigned long flags;
- u16 i;
- struct leapioraid_scsiio_tracker *st;
-
- ioc->pending_io_count = 0;
- if (!leapioraid_base_pci_device_is_available(ioc)) {
- pr_err("%s %s: pci error recovery reset or pci device unplug occurred\n",
- ioc->name, __func__);
- return;
- }
- ioc_state = leapioraid_base_get_iocstate(ioc, 0);
- if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) !=
- LEAPIORAID_IOC_STATE_OPERATIONAL)
- return;
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- for (i = 1; i <= ioc->scsiio_depth; i++) {
- st = leapioraid_get_st_from_smid(ioc, i);
- if (st && st->smid != 0) {
- if (st->cb_idx != 0xFF)
- ioc->pending_io_count++;
- }
- }
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- if (!ioc->pending_io_count)
- return;
- wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
-}
-
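-/*
- * After a reset, if the new firmware reports a larger MaxDevHandle,
- * grow the per-device-handle bitmaps and the tm_tr_retry array with
- * krealloc, zeroing only the newly added tail of each buffer.
- */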
-static int
-leapioraid_base_check_ioc_facts_changes(struct LEAPIORAID_ADAPTER *ioc)
-{
- u16 pd_handles_sz, tm_tr_retry_sz;
- void *pd_handles = NULL, *blocking_handles = NULL;
- void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
- u8 *tm_tr_retry = NULL;
- struct leapioraid_facts *old_facts = &ioc->prev_fw_facts;
-
- if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
- pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
- if (ioc->facts.MaxDevHandle % 8)
- pd_handles_sz++;
- pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
- GFP_KERNEL);
- if (!pd_handles) {
- pr_err(
- "%s Unable to allocate the memory for pd_handles of sz: %d\n",
- ioc->name, pd_handles_sz);
- return -ENOMEM;
- }
- memset(pd_handles + ioc->pd_handles_sz, 0,
- (pd_handles_sz - ioc->pd_handles_sz));
- ioc->pd_handles = pd_handles;
- blocking_handles =
- krealloc(ioc->blocking_handles, pd_handles_sz, GFP_KERNEL);
- if (!blocking_handles) {
- pr_err(
- "%s Unable to allocate the memory for blocking_handles of sz: %d\n",
- ioc->name, pd_handles_sz);
- return -ENOMEM;
- }
- memset(blocking_handles + ioc->pd_handles_sz, 0,
- (pd_handles_sz - ioc->pd_handles_sz));
- ioc->blocking_handles = blocking_handles;
- ioc->pd_handles_sz = pd_handles_sz;
- pend_os_device_add =
- krealloc(ioc->pend_os_device_add, pd_handles_sz,
- GFP_KERNEL);
- if (!pend_os_device_add) {
- pr_err(
- "%s Unable to allocate the memory for pend_os_device_add of sz: %d\n",
- ioc->name, pd_handles_sz);
- return -ENOMEM;
- }
- memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
- (pd_handles_sz - ioc->pend_os_device_add_sz));
- ioc->pend_os_device_add = pend_os_device_add;
- ioc->pend_os_device_add_sz = pd_handles_sz;
- device_remove_in_progress =
- krealloc(ioc->device_remove_in_progress, pd_handles_sz,
- GFP_KERNEL);
- if (!device_remove_in_progress) {
- pr_err(
- "%s Unable to allocate the memory for device_remove_in_progress of sz: %d\n",
- ioc->name, pd_handles_sz);
- return -ENOMEM;
- }
- memset(device_remove_in_progress +
- ioc->device_remove_in_progress_sz, 0,
- (pd_handles_sz - ioc->device_remove_in_progress_sz));
- ioc->device_remove_in_progress = device_remove_in_progress;
- ioc->device_remove_in_progress_sz = pd_handles_sz;
- tm_tr_retry_sz = ioc->facts.MaxDevHandle * sizeof(u8);
- tm_tr_retry = krealloc(ioc->tm_tr_retry, tm_tr_retry_sz,
- GFP_KERNEL);
- if (!tm_tr_retry) {
- pr_err(
- "%s Unable to allocate the memory for tm_tr_retry of sz: %d\n",
- ioc->name, tm_tr_retry_sz);
- return -ENOMEM;
- }
- memset(tm_tr_retry + ioc->tm_tr_retry_sz, 0,
- (tm_tr_retry_sz - ioc->tm_tr_retry_sz));
- ioc->tm_tr_retry = tm_tr_retry;
- ioc->tm_tr_retry_sz = tm_tr_retry_sz;
- }
- memcpy(&ioc->prev_fw_facts, &ioc->facts,
- sizeof(struct leapioraid_facts));
- return 0;
-}
-
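-/*
- * Serialised hard-reset entry point: a concurrent caller just sleeps
- * until shost_recovery clears and returns the first caller's status.
- * The sequence is pre-reset notify, quiesce and mask interrupts,
- * make_ioc_ready(), after-reset notify, re-read facts, then
- * make_ioc_operational().
- */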
-int
-leapioraid_base_hard_reset_handler(
- struct LEAPIORAID_ADAPTER *ioc,
- enum reset_type type)
-{
- int r;
- unsigned long flags;
-
- dtmprintk(ioc, pr_info("%s %s: enter\n", ioc->name,
- __func__));
- if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
- do {
- ssleep(1);
- } while (ioc->shost_recovery == 1);
- dtmprintk(ioc,
- pr_info("%s %s: exit\n", ioc->name,
- __func__));
- return ioc->ioc_reset_status;
- }
- if (!leapioraid_base_pci_device_is_available(ioc)) {
- pr_err(
- "%s %s: pci error recovery reset or pci device unplug occurred\n",
- ioc->name, __func__);
- if (leapioraid_base_pci_device_is_unplugged(ioc)) {
- leapioraid_base_pause_mq_polling(ioc);
- ioc->schedule_dead_ioc_flush_running_cmds(ioc);
- leapioraid_base_resume_mq_polling(ioc);
- }
- r = 0;
- goto out_unlocked;
- }
- leapioraid_halt_firmware(ioc, 0);
- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- ioc->shost_recovery = 1;
- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
- leapioraid_base_get_iocstate(ioc, 0);
- leapioraid_base_reset_handler(ioc, LEAPIORAID_IOC_PRE_RESET_PHASE);
- leapioraid_wait_for_commands_to_complete(ioc);
- leapioraid_base_mask_interrupts(ioc);
- leapioraid_base_pause_mq_polling(ioc);
- r = leapioraid_base_make_ioc_ready(ioc, type);
- if (r)
- goto out;
- leapioraid_base_reset_handler(ioc, LEAPIORAID_IOC_AFTER_RESET_PHASE);
- if (ioc->is_driver_loading && ioc->port_enable_failed) {
- ioc->remove_host = 1;
- r = -EFAULT;
- goto out;
- }
- r = leapioraid_base_get_ioc_facts(ioc);
- if (r)
- goto out;
- r = leapioraid_base_check_ioc_facts_changes(ioc);
- if (r) {
- pr_err(
- "%s Some of the parameters got changed in this\n\t\t"
- "new firmware image and it requires system reboot\n",
- ioc->name);
- goto out;
- }
- if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
- panic(
- "%s: Issue occurred with flashing controller firmware.\n\t\t"
- "Please reboot the system and ensure that the correct\n\t\t"
- "firmware version is running\n",
- ioc->name);
- r = leapioraid_base_make_ioc_operational(ioc);
- if (!r)
- leapioraid_base_reset_handler(ioc, LEAPIORAID_IOC_DONE_RESET_PHASE);
-out:
- pr_info("%s %s: %s\n",
- ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED"));
- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- ioc->ioc_reset_status = r;
- ioc->shost_recovery = 0;
- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
- ioc->ioc_reset_count++;
- mutex_unlock(&ioc->reset_in_progress_mutex);
-#if defined(DISABLE_RESET_SUPPORT)
- if (r != 0) {
- struct task_struct *p;
-
- ioc->remove_host = 1;
- ioc->schedule_dead_ioc_flush_running_cmds(ioc);
- p = kthread_run(leapioraid_remove_dead_ioc_func, ioc,
- "leapioraid_dead_ioc_%d", ioc->id);
- if (IS_ERR(p))
- pr_err(
- "%s %s: Running leapioraid_dead_ioc thread failed !!!!\n",
- ioc->name, __func__);
- else
- pr_err(
- "%s %s: Running leapioraid_dead_ioc thread success !!!!\n",
- ioc->name, __func__);
- }
-#else
- if (r != 0)
- ioc->schedule_dead_ioc_flush_running_cmds(ioc);
-#endif
- leapioraid_base_resume_mq_polling(ioc);
-out_unlocked:
- dtmprintk(ioc, pr_info("%s %s: exit\n", ioc->name,
- __func__));
- return r;
-}
-
-struct config_request {
- u16 sz;
- void *page;
- dma_addr_t page_dma;
-};
-
-static void
-leapioraid_config_display_some_debug(struct LEAPIORAID_ADAPTER *ioc, u16 smid,
- char *calling_function_name,
- struct LeapioraidDefaultRep_t *mpi_reply)
-{
- struct LeapioraidCfgReq_t *mpi_request;
- char *desc = NULL;
-
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- switch (mpi_request->Header.PageType & LEAPIORAID_CONFIG_PAGETYPE_MASK) {
- case LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT:
- desc = "io_unit";
- break;
- case LEAPIORAID_CONFIG_PAGETYPE_IOC:
- desc = "ioc";
- break;
- case LEAPIORAID_CONFIG_PAGETYPE_BIOS:
- desc = "bios";
- break;
- case LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME:
- desc = "raid_volume";
- break;
- case LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING:
- desc = "manufacturing";
- break;
- case LEAPIORAID_CONFIG_PAGETYPE_RAID_PHYSDISK:
- desc = "physdisk";
- break;
- case LEAPIORAID_CONFIG_PAGETYPE_EXTENDED:
- switch (mpi_request->ExtPageType) {
- case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT:
- desc = "sas_io_unit";
- break;
- case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_EXPANDER:
- desc = "sas_expander";
- break;
- case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_DEVICE:
- desc = "sas_device";
- break;
- case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PHY:
- desc = "sas_phy";
- break;
- case LEAPIORAID_CONFIG_EXTPAGETYPE_LOG:
- desc = "log";
- break;
- case LEAPIORAID_CONFIG_EXTPAGETYPE_ENCLOSURE:
- desc = "enclosure";
- break;
- case LEAPIORAID_CONFIG_EXTPAGETYPE_RAID_CONFIG:
- desc = "raid_config";
- break;
- case LEAPIORAID_CONFIG_EXTPAGETYPE_DRIVER_MAPPING:
- desc = "driver_mapping";
- break;
- case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PORT:
- desc = "sas_port";
- break;
- case LEAPIORAID_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING:
- desc = "ext_manufacturing";
- break;
- }
- break;
- }
- if (!desc)
- return;
- pr_info("%s %s: %s(%d), action(%d), form(0x%08x), smid(%d)\n",
- ioc->name, calling_function_name, desc,
- mpi_request->Header.PageNumber, mpi_request->Action,
- le32_to_cpu(mpi_request->PageAddress), smid);
- if (!mpi_reply)
- return;
- if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
- pr_err(
- "%s \tiocstatus(0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo));
-}
-
-static int
-leapioraid_config_alloc_config_dma_memory(struct LEAPIORAID_ADAPTER *ioc,
- struct config_request *mem)
-{
- int r = 0;
-
- if (mem->sz > ioc->config_page_sz) {
- mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz,
- &mem->page_dma, GFP_KERNEL);
- if (!mem->page)
- r = -ENOMEM;
- } else {
- mem->page = ioc->config_page;
- mem->page_dma = ioc->config_page_dma;
- }
- ioc->config_vaddr = mem->page;
- return r;
-}
-
-static void
-leapioraid_config_free_config_dma_memory(struct LEAPIORAID_ADAPTER *ioc,
- struct config_request *mem)
-{
- if (mem->sz > ioc->config_page_sz)
- dma_free_coherent(&ioc->pdev->dev, mem->sz, mem->page,
- mem->page_dma);
-}
-
-u8
-leapioraid_config_done(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index,
- u32 reply)
-{
- struct LeapioraidDefaultRep_t *mpi_reply;
-
- if (ioc->config_cmds.status == LEAPIORAID_CMD_NOT_USED)
- return 1;
- if (ioc->config_cmds.smid != smid)
- return 1;
- ioc->config_cmds.status |= LEAPIORAID_CMD_COMPLETE;
- mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply);
- if (mpi_reply) {
- ioc->config_cmds.status |= LEAPIORAID_CMD_REPLY_VALID;
- memcpy(ioc->config_cmds.reply, mpi_reply,
- mpi_reply->MsgLength * 4);
- }
- ioc->config_cmds.status &= ~LEAPIORAID_CMD_PENDING;
- if (ioc->logging_level & LEAPIORAID_DEBUG_CONFIG)
- leapioraid_config_display_some_debug(
- ioc, smid, "config_done", mpi_reply);
- ioc->config_cmds.smid = USHORT_MAX;
- complete(&ioc->config_cmds.done);
- return 1;
-}
-
-static int
-leapioraid_config_request(
- struct LEAPIORAID_ADAPTER *ioc, struct LeapioraidCfgReq_t *mpi_request,
- struct LeapioraidCfgRep_t *mpi_reply, int timeout,
- void *config_page, u16 config_page_sz)
-{
- u16 smid;
- struct LeapioraidCfgReq_t *config_request;
- int r;
- u8 retry_count, issue_host_reset = 0;
- struct config_request mem;
- u32 ioc_status = UINT_MAX;
- u8 issue_reset;
-
- mutex_lock(&ioc->config_cmds.mutex);
- if (ioc->config_cmds.status != LEAPIORAID_CMD_NOT_USED) {
- pr_err("%s %s: config_cmd in use\n",
- ioc->name, __func__);
- mutex_unlock(&ioc->config_cmds.mutex);
- return -EAGAIN;
- }
- retry_count = 0;
- memset(&mem, 0, sizeof(struct config_request));
- mpi_request->VF_ID = 0;
- mpi_request->VP_ID = 0;
- if (config_page) {
- mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion;
- mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber;
- mpi_request->Header.PageType = mpi_reply->Header.PageType;
- mpi_request->Header.PageLength = mpi_reply->Header.PageLength;
- mpi_request->ExtPageLength = mpi_reply->ExtPageLength;
- mpi_request->ExtPageType = mpi_reply->ExtPageType;
- if (mpi_request->Header.PageLength)
- mem.sz = mpi_request->Header.PageLength * 4;
- else
- mem.sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
- r = leapioraid_config_alloc_config_dma_memory(ioc, &mem);
- if (r != 0)
- goto out;
- if (mpi_request->Action ==
- LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT ||
- mpi_request->Action ==
- LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_NVRAM) {
- ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
- LEAPIORAID_CONFIG_COMMON_WRITE_SGLFLAGS
- | mem.sz, mem.page_dma);
- memcpy(mem.page, config_page,
- min_t(u16, mem.sz, config_page_sz));
- } else {
- memset(config_page, 0, config_page_sz);
- ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
- LEAPIORAID_CONFIG_COMMON_SGLFLAGS
- | mem.sz, mem.page_dma);
- memset(mem.page, 0, min_t(u16, mem.sz, config_page_sz));
- }
- }
-retry_config:
- if (retry_count) {
- if (retry_count > 2) {
- r = -EFAULT;
- goto free_mem;
- }
- pr_info("%s %s: attempting retry (%d)\n",
- ioc->name, __func__, retry_count);
- }
- r = leapioraid_wait_for_ioc_to_operational(ioc,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT);
- if (r) {
- if (r == -ETIME)
- issue_host_reset = 1;
- goto free_mem;
- }
- smid = leapioraid_base_get_smid(ioc, ioc->config_cb_idx);
- if (!smid) {
- pr_err("%s %s: failed obtaining a smid\n",
- ioc->name, __func__);
- ioc->config_cmds.status = LEAPIORAID_CMD_NOT_USED;
- r = -EAGAIN;
- goto free_mem;
- }
- r = 0;
- memset(mpi_reply, 0, sizeof(struct LeapioraidCfgRep_t));
- memset(ioc->config_cmds.reply, 0, sizeof(struct LeapioraidCfgRep_t));
- ioc->config_cmds.status = LEAPIORAID_CMD_PENDING;
- config_request = leapioraid_base_get_msg_frame(ioc, smid);
- ioc->config_cmds.smid = smid;
- memcpy(config_request, mpi_request, sizeof(struct LeapioraidCfgReq_t));
- if (ioc->logging_level & LEAPIORAID_DEBUG_CONFIG)
- leapioraid_config_display_some_debug(ioc, smid, "config_request", NULL);
- init_completion(&ioc->config_cmds.done);
- ioc->put_smid_default(ioc, smid);
- wait_for_completion_timeout(&ioc->config_cmds.done, timeout * HZ);
- if (!(ioc->config_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
- if (!(ioc->logging_level & LEAPIORAID_DEBUG_CONFIG))
- leapioraid_config_display_some_debug(ioc, smid,
- "config_request no reply",
- NULL);
- leapioraid_check_cmd_timeout(ioc, ioc->config_cmds.status,
- mpi_request,
- sizeof(struct LeapioraidCfgReq_t) / 4,
- issue_reset);
- pr_info("%s issue_reset=%d\n", __func__, issue_reset);
- retry_count++;
- if (ioc->config_cmds.smid == smid)
- leapioraid_base_free_smid(ioc, smid);
- if (ioc->config_cmds.status & LEAPIORAID_CMD_RESET)
- goto retry_config;
- if (ioc->shost_recovery || ioc->pci_error_recovery) {
- issue_host_reset = 0;
- r = -EFAULT;
- } else
- issue_host_reset = 1;
- goto free_mem;
- }
- if (ioc->config_cmds.status & LEAPIORAID_CMD_REPLY_VALID) {
- memcpy(mpi_reply, ioc->config_cmds.reply,
- sizeof(struct LeapioraidCfgRep_t));
- if ((mpi_request->Header.PageType & 0xF) !=
- (mpi_reply->Header.PageType & 0xF)) {
- if (!(ioc->logging_level & LEAPIORAID_DEBUG_CONFIG))
- leapioraid_config_display_some_debug(ioc, smid,
- "config_request",
- NULL);
- leapioraid_debug_dump_mf(mpi_request, ioc->request_sz / 4);
- leapioraid_debug_dump_reply(mpi_reply, ioc->reply_sz / 4);
- panic(
- "%s %s: Firmware BUG: mpi_reply mismatch:\n\t\t"
- "Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
- ioc->name,
- __func__,
- (mpi_request->Header.PageType & 0xF),
- (mpi_reply->Header.PageType & 0xF));
- }
- if (((mpi_request->Header.PageType & 0xF) ==
- LEAPIORAID_CONFIG_PAGETYPE_EXTENDED) &&
- mpi_request->ExtPageType != mpi_reply->ExtPageType) {
- if (!(ioc->logging_level & LEAPIORAID_DEBUG_CONFIG))
- leapioraid_config_display_some_debug(ioc, smid,
- "config_request",
- NULL);
- leapioraid_debug_dump_mf(mpi_request, ioc->request_sz / 4);
- leapioraid_debug_dump_reply(mpi_reply, ioc->reply_sz / 4);
- panic(
- "%s %s: Firmware BUG: mpi_reply mismatch:\n\t\t"
- "Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
- ioc->name,
- __func__,
- mpi_request->ExtPageType,
- mpi_reply->ExtPageType);
- }
- ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
- & LEAPIORAID_IOCSTATUS_MASK;
- }
- if (retry_count)
- pr_info("%s %s: retry (%d) completed!!\n",
- ioc->name, __func__, retry_count);
- if ((ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS) &&
- config_page && mpi_request->Action ==
- LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT) {
- u8 *p = (u8 *) mem.page;
-
- if (p) {
- if ((mpi_request->Header.PageType & 0xF) !=
- (p[3] & 0xF)) {
-				if (!(ioc->logging_level & LEAPIORAID_DEBUG_CONFIG))
- leapioraid_config_display_some_debug(ioc, smid,
- "config_request",
- NULL);
- leapioraid_debug_dump_mf(mpi_request,
- ioc->request_sz / 4);
- leapioraid_debug_dump_reply(mpi_reply, ioc->reply_sz / 4);
-				leapioraid_debug_dump_config(p,
-					min_t(u16, mem.sz, config_page_sz) / 4);
- panic(
- "%s %s: Firmware BUG: config page mismatch:\n\t\t"
- "Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
- ioc->name,
- __func__,
- (mpi_request->Header.PageType & 0xF),
- (p[3] & 0xF));
- }
- if (((mpi_request->Header.PageType & 0xF) ==
- LEAPIORAID_CONFIG_PAGETYPE_EXTENDED) &&
- (mpi_request->ExtPageType != p[6])) {
-				if (!(ioc->logging_level & LEAPIORAID_DEBUG_CONFIG))
- leapioraid_config_display_some_debug(ioc, smid,
- "config_request",
- NULL);
- leapioraid_debug_dump_mf(mpi_request,
- ioc->request_sz / 4);
- leapioraid_debug_dump_reply(mpi_reply, ioc->reply_sz / 4);
-				leapioraid_debug_dump_config(p,
-					min_t(u16, mem.sz, config_page_sz) / 4);
- panic(
- "%s %s: Firmware BUG: config page mismatch:\n\t\t"
- "Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
- ioc->name,
- __func__,
- mpi_request->ExtPageType,
- p[6]);
- }
- }
- memcpy(config_page, mem.page, min_t(u16, mem.sz,
- config_page_sz));
- }
-free_mem:
- if (config_page)
- leapioraid_config_free_config_dma_memory(ioc, &mem);
-out:
- ioc->config_cmds.status = LEAPIORAID_CMD_NOT_USED;
- mutex_unlock(&ioc->config_cmds.mutex);
- if (issue_host_reset) {
- if (ioc->drv_internal_flags & LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED) {
- leapioraid_base_hard_reset_handler(ioc,
- FORCE_BIG_HAMMER);
- r = -EFAULT;
- } else {
- if (leapioraid_base_check_for_fault_and_issue_reset
- (ioc))
- return -EFAULT;
- r = -EAGAIN;
- }
- }
- return r;
-}
-
-int
-leapioraid_config_get_manufacturing_pg0(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidManP0_t *
- config_page)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING;
- mpi_request.Header.PageNumber = 0;
- mpi_request.Header.PageVersion = 0x00;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_get_manufacturing_pg10(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidManuP10_t *config_page)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING;
- mpi_request.Header.PageNumber = 10;
- mpi_request.Header.PageVersion = 0x00;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_get_manufacturing_pg11(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidManuP11_t
- *config_page)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING;
- mpi_request.Header.PageNumber = 11;
- mpi_request.Header.PageVersion = 0x00;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_set_manufacturing_pg11(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidManuP11_t
- *config_page)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING;
- mpi_request.Header.PageNumber = 11;
- mpi_request.Header.PageVersion = 0x00;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_get_bios_pg2(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidBiosP2_t *config_page)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_BIOS;
- mpi_request.Header.PageNumber = 2;
- mpi_request.Header.PageVersion = 0x04;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_get_bios_pg3(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidBiosP3_t *config_page)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_BIOS;
- mpi_request.Header.PageNumber = 3;
- mpi_request.Header.PageVersion = 0x01;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_get_iounit_pg0(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidIOUnitP0_t *config_page)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT;
- mpi_request.Header.PageNumber = 0;
- mpi_request.Header.PageVersion = 0x02;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_get_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidIOUnitP1_t *config_page)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT;
- mpi_request.Header.PageNumber = 1;
- mpi_request.Header.PageVersion = 0x04;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_set_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidIOUnitP1_t *config_page)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT;
- mpi_request.Header.PageNumber = 1;
- mpi_request.Header.PageVersion = 0x04;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_get_iounit_pg8(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidIOUnitP8_t *config_page)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT;
- mpi_request.Header.PageNumber = 8;
- mpi_request.Header.PageVersion = 0x00;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_get_ioc_pg1(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidIOCP1_t *config_page)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IOC;
- mpi_request.Header.PageNumber = 1;
- mpi_request.Header.PageVersion = 0x00;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_set_ioc_pg1(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidIOCP1_t *config_page)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IOC;
- mpi_request.Header.PageNumber = 1;
- mpi_request.Header.PageVersion = 0x00;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_get_ioc_pg8(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidIOCP8_t *config_page)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IOC;
- mpi_request.Header.PageNumber = 8;
- mpi_request.Header.PageVersion = 0x00;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_get_sas_device_pg0(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidSasDevP0_t *config_page,
- u32 form, u32 handle)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
- mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_DEVICE;
- mpi_request.Header.PageVersion = 0x09;
- mpi_request.Header.PageNumber = 0;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.PageAddress = cpu_to_le32(form | handle);
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_get_number_hba_phys(struct LEAPIORAID_ADAPTER *ioc,
- u8 *num_phys)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
- u16 ioc_status;
- struct LeapioraidCfgRep_t mpi_reply;
- struct LeapioraidSasIOUnitP0_t config_page;
-
- *num_phys = 0;
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
- mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
- mpi_request.Header.PageNumber = 0;
- mpi_request.Header.PageVersion = 0x05;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
- sizeof(struct LeapioraidSasIOUnitP0_t));
- if (!r) {
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
- LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS)
- *num_phys = config_page.NumPhys;
- }
-out:
- return r;
-}
-
-int
-leapioraid_config_get_sas_iounit_pg0(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidSasIOUnitP0_t *config_page,
- u16 sz)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
- mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
- mpi_request.Header.PageNumber = 0;
- mpi_request.Header.PageVersion = 0x05;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sz);
-out:
- return r;
-}
-
-int
-leapioraid_config_get_sas_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidSasIOUnitP1_t *config_page,
- u16 sz)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
- mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
- mpi_request.Header.PageNumber = 1;
- mpi_request.Header.PageVersion = 0x09;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sz);
-out:
- return r;
-}
-
-int
-leapioraid_config_set_sas_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidSasIOUnitP1_t *config_page,
- u16 sz)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
- mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
- mpi_request.Header.PageNumber = 1;
- mpi_request.Header.PageVersion = 0x09;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT;
- leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_NVRAM;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sz);
-out:
- return r;
-}
-
-int
-leapioraid_config_get_expander_pg0(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidExpanderP0_t *config_page,
- u32 form, u32 handle)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
- mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
- mpi_request.Header.PageNumber = 0;
- mpi_request.Header.PageVersion = 0x06;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.PageAddress = cpu_to_le32(form | handle);
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_get_expander_pg1(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidExpanderP1_t *config_page,
- u32 phy_number, u16 handle)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
- mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
- mpi_request.Header.PageNumber = 1;
- mpi_request.Header.PageVersion = 0x02;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.PageAddress =
- cpu_to_le32(LEAPIORAID_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM |
- (phy_number << LEAPIORAID_SAS_EXPAND_PGAD_PHYNUM_SHIFT) |
- handle);
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_get_enclosure_pg0(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidSasEncP0_t *config_page,
- u32 form, u32 handle)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
- mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_ENCLOSURE;
- mpi_request.Header.PageNumber = 0;
- mpi_request.Header.PageVersion = 0x04;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.PageAddress = cpu_to_le32(form | handle);
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_get_phy_pg0(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidSasPhyP0_t *config_page,
- u32 phy_number)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
- mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PHY;
- mpi_request.Header.PageNumber = 0;
- mpi_request.Header.PageVersion = 0x03;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.PageAddress =
- cpu_to_le32(LEAPIORAID_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_get_phy_pg1(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidSasPhyP1_t *config_page,
- u32 phy_number)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
- mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PHY;
- mpi_request.Header.PageNumber = 1;
- mpi_request.Header.PageVersion = 0x01;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.PageAddress =
- cpu_to_le32(LEAPIORAID_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_get_raid_volume_pg1(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidRaidVolP1_t *config_page,
- u32 form, u32 handle)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME;
- mpi_request.Header.PageNumber = 1;
- mpi_request.Header.PageVersion = 0x03;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.PageAddress = cpu_to_le32(form | handle);
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_get_number_pds(struct LEAPIORAID_ADAPTER *ioc,
- u16 handle, u8 *num_pds)
-{
- struct LeapioraidCfgReq_t mpi_request;
- struct LeapioraidRaidVolP0_t config_page;
- struct LeapioraidCfgRep_t mpi_reply;
- int r;
- u16 ioc_status;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- *num_pds = 0;
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME;
- mpi_request.Header.PageNumber = 0;
- mpi_request.Header.PageVersion = 0x0A;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.PageAddress =
- cpu_to_le32(LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE | handle);
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
- sizeof(struct LeapioraidRaidVolP0_t));
- if (!r) {
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
- LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS)
- *num_pds = config_page.NumPhysDisks;
- }
-out:
- return r;
-}
-
-int
-leapioraid_config_get_raid_volume_pg0(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidRaidVolP0_t *config_page,
- u32 form, u32 handle, u16 sz)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME;
- mpi_request.Header.PageNumber = 0;
- mpi_request.Header.PageVersion = 0x0A;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.PageAddress = cpu_to_le32(form | handle);
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sz);
-out:
- return r;
-}
-
-int
-leapioraid_config_get_phys_disk_pg0(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidRaidPDP0_t *config_page,
- u32 form, u32 form_specific)
-{
- struct LeapioraidCfgReq_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_RAID_PHYSDISK;
- mpi_request.Header.PageNumber = 0;
- mpi_request.Header.PageVersion = 0x05;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.PageAddress = cpu_to_le32(form | form_specific);
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
-out:
- return r;
-}
-
-int
-leapioraid_config_get_volume_handle(struct LEAPIORAID_ADAPTER *ioc,
- u16 pd_handle, u16 *volume_handle)
-{
- struct LeapioraidRaidCfgP0_t *config_page = NULL;
- struct LeapioraidCfgReq_t mpi_request;
- struct LeapioraidCfgRep_t mpi_reply;
- int r, i, config_page_sz;
- u16 ioc_status;
- int config_num;
- u16 element_type;
- u16 phys_disk_dev_handle;
-
- *volume_handle = 0;
- memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
- mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_RAID_CONFIG;
- mpi_request.Header.PageVersion = 0x00;
- mpi_request.Header.PageNumber = 0;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
- mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
- config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4);
- config_page = kmalloc(config_page_sz, GFP_KERNEL);
- if (!config_page) {
- r = -1;
- goto out;
- }
- config_num = 0xff;
- while (1) {
- mpi_request.PageAddress = cpu_to_le32(config_num +
- LEAPIORAID_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM);
- r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply,
- LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT,
- config_page, config_page_sz);
- if (r)
- goto out;
- r = -1;
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
- LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status == LEAPIORAID_IOCSTATUS_CONFIG_INVALID_PAGE) {
- *volume_handle = 0;
- r = 0;
- goto out;
- } else if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS)
- goto out;
- for (i = 0; i < config_page->NumElements; i++) {
- element_type =
- le16_to_cpu(config_page->ConfigElement[i].ElementFlags) &
- LEAPIORAID_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;
- if (element_type ==
- LEAPIORAID_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT
- || element_type ==
- LEAPIORAID_RAIDCONFIG0_EFLAGS_OCE_ELEMENT) {
- phys_disk_dev_handle =
- le16_to_cpu(config_page->ConfigElement[i].PhysDiskDevHandle);
- if (phys_disk_dev_handle == pd_handle) {
-					*volume_handle = le16_to_cpu(config_page->ConfigElement[i].VolDevHandle);
- r = 0;
- goto out;
- }
- } else if (element_type ==
- LEAPIORAID_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT) {
- *volume_handle = 0;
- r = 0;
- goto out;
- }
- }
- config_num = config_page->ConfigNum;
- }
-out:
- kfree(config_page);
- return r;
-}
-
-int
-leapioraid_config_get_volume_wwid(struct LEAPIORAID_ADAPTER *ioc,
- u16 volume_handle, u64 *wwid)
-{
- struct LeapioraidCfgRep_t mpi_reply;
- struct LeapioraidRaidVolP1_t raid_vol_pg1;
-
- *wwid = 0;
- if (!(leapioraid_config_get_raid_volume_pg1(ioc, &mpi_reply,
- &raid_vol_pg1,
- LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE,
- volume_handle))) {
- *wwid = le64_to_cpu(raid_vol_pg1.WWID);
- return 0;
- } else
- return -1;
-}
diff --git a/drivers/scsi/leapioraid/leapioraid_func.h b/drivers/scsi/leapioraid/leapioraid_func.h
deleted file mode 100644
index a4beb1412d66..000000000000
--- a/drivers/scsi/leapioraid/leapioraid_func.h
+++ /dev/null
@@ -1,1262 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This is the Fusion MPT base driver providing common API layer interface
- * for access to MPT (Message Passing Technology) firmware.
- *
- * Copyright (C) 2013-2021 LSI Corporation
- * Copyright (C) 2013-2021 Avago Technologies
- * Copyright (C) 2013-2021 Broadcom Inc.
- * (mailto:MPT-FusionLinux.pdl@broadcom.com)
- *
- * Copyright (C) 2024 LeapIO Tech Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
- */
-
-#ifndef LEAPIORAID_FUNC_H_INCLUDED
-#define LEAPIORAID_FUNC_H_INCLUDED
-
-#include "leapioraid.h"
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsi_transport_sas.h>
-#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_eh.h>
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/irq_poll.h>
-
-#ifndef fallthrough
-#define fallthrough
-#endif
-
-#define SYS_LOG_BUF_SIZE (0x200000) //2M
-#define SYS_LOG_BUF_RESERVE (0x1000) //4K
-
-#define MAX_UPD_PAYLOAD_SZ (0x4000)
-
-#define LEAPIORAID_DRIVER_NAME "LeapIoRaid"
-#define LEAPIORAID_AUTHOR "LeapIO Inc."
-#define LEAPIORAID_DESCRIPTION "LEAPIO RAID Driver"
-#define LEAPIORAID_DRIVER_VERSION "1.02.02.00"
-#define LEAPIORAID_MAJOR_VERSION (1)
-#define LEAPIORAID_MINOR_VERSION (02)
-#define LEAPIORAID_BUILD_VERSION (02)
-#define LEAPIORAID_RELEASE_VERSION (00)
-
-#define LEAPIORAID_VENDOR_ID (0xD405)
-#define LEAPIORAID_DEVICE_ID_1 (0x1000)
-#define LEAPIORAID_DEVICE_ID_2 (0x1001)
-#define LEAPIORAID_HBA (0x8200)
-#define LEAPIORAID_RAID (0x8201)
-
-#define LEAPIORAID_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE
-
-#define LEAPIORAID_MIN_PHYS_SEGMENTS (16)
-#define LEAPIORAID_KDUMP_MIN_PHYS_SEGMENTS (32)
-
-#define LEAPIORAID_MAX_SG_SEGMENTS SG_MAX_SEGMENTS
-#define LEAPIORAID_MAX_PHYS_SEGMENTS_STRING "SG_CHUNK_SIZE"
-
-#define LEAPIORAID_SG_DEPTH LEAPIORAID_MAX_PHYS_SEGMENTS
-
-
-#define LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT 15
-#define LEAPIORAID_CONFIG_COMMON_SGLFLAGS ((LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT | \
- LEAPIORAID_SGE_FLAGS_LAST_ELEMENT | LEAPIORAID_SGE_FLAGS_END_OF_BUFFER \
- | LEAPIORAID_SGE_FLAGS_END_OF_LIST) << LEAPIORAID_SGE_FLAGS_SHIFT)
-#define LEAPIORAID_CONFIG_COMMON_WRITE_SGLFLAGS ((LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT | \
- LEAPIORAID_SGE_FLAGS_LAST_ELEMENT | LEAPIORAID_SGE_FLAGS_END_OF_BUFFER \
- | LEAPIORAID_SGE_FLAGS_END_OF_LIST | LEAPIORAID_SGE_FLAGS_HOST_TO_IOC) \
- << LEAPIORAID_SGE_FLAGS_SHIFT)
-
-#define LEAPIORAID_SATA_QUEUE_DEPTH (32)
-#define LEAPIORAID_SAS_QUEUE_DEPTH (64)
-#define LEAPIORAID_RAID_QUEUE_DEPTH (64)
-#define LEAPIORAID_KDUMP_SCSI_IO_DEPTH (64)
-#define LEAPIORAID_RAID_MAX_SECTORS (128)
-
-#define LEAPIORAID_NAME_LENGTH (48)
-#define LEAPIORAID_DRIVER_NAME_LENGTH (24)
-#define LEAPIORAID_STRING_LENGTH (64)
-
-#define LEAPIORAID_FRAME_START_OFFSET (256)
-#define LEAPIORAID_REPLY_FREE_POOL_SIZE (512)
-#define LEAPIORAID_MAX_CALLBACKS (32)
-#define LEAPIORAID_MAX_HBA_NUM_PHYS (16)
-
-#define LEAPIORAID_INTERNAL_CMDS_COUNT (10)
-#define LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT (3)
-#define LEAPIORAID_INTERNAL_SCSIIO_FOR_IOCTL (1)
-#define LEAPIORAID_INTERNAL_SCSIIO_FOR_DISCOVERY (2)
-
-#define LEAPIORAID_INVALID_DEVICE_HANDLE (0xFFFF)
-#define LEAPIORAID_MAX_CHAIN_ELEMT_SZ (16)
-#define LEAPIORAID_DEFAULT_NUM_FWCHAIN_ELEMTS (8)
-#define LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY (30)
-#define LEAPIORAID_READL_RETRY_COUNT_OF_THREE (3)
-
-#define LEAPIORAID_IOC_PRE_RESET_PHASE (1)
-#define LEAPIORAID_IOC_AFTER_RESET_PHASE (2)
-#define LEAPIORAID_IOC_DONE_RESET_PHASE (3)
-
-#define LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT (0x01)
-#define LEAPIORAID_TARGET_FLAGS_VOLUME (0x02)
-#define LEAPIORAID_TARGET_FASTPATH_IO (0x08)
-
-#define LEAPIORAID_DEVICE_HIGH_IOPS_DEPTH (8)
-#define LEAPIORAID_HIGH_IOPS_REPLY_QUEUES (8)
-#define LEAPIORAID_HIGH_IOPS_BATCH_COUNT (16)
-#define LEAPIORAID_GEN35_MAX_MSIX_QUEUES (128)
-#define LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK (16)
-
-#define LEAPIORAID_IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED (0x2810)
-
-#ifndef DID_TRANSPORT_DISRUPTED
-#define DID_TRANSPORT_DISRUPTED DID_BUS_BUSY
-#endif
-#ifndef ULLONG_MAX
-#define ULLONG_MAX (~0ULL)
-#endif
-#ifndef USHORT_MAX
-#define USHORT_MAX ((u16)(~0U))
-#endif
-#ifndef UINT_MAX
-#define UINT_MAX (~0U)
-#endif
-
-static inline void *leapioraid_shost_private(struct Scsi_Host *shost)
-{
- return (void *)shost->hostdata;
-}
-
-struct LeapioraidManuP10_t {
- struct LEAPIORAID_CONFIG_PAGE_HEADER Header;
- U8 OEMIdentifier;
- U8 Reserved1;
- U16 Reserved2;
- U32 Reserved3;
- U32 GenericFlags0;
- U32 GenericFlags1;
- U32 Reserved4;
- U32 OEMSpecificFlags0;
- U32 OEMSpecificFlags1;
- U32 Reserved5[18];
-};
-
-struct LeapioraidManuP11_t {
- struct LEAPIORAID_CONFIG_PAGE_HEADER Header;
- __le32 Reserved1;
- u8 Reserved2;
- u8 EEDPTagMode;
- u8 Reserved3;
- u8 Reserved4;
- __le32 Reserved5[8];
- u16 AddlFlags2;
- u8 AddlFlags3;
- u8 Reserved6;
- __le32 Reserved7[7];
- u8 AbortTO;
- u8 NumPerDevEvents;
- u8 HostTraceBufferDecrementSizeKB;
- u8 HostTraceBufferFlags;
- u16 HostTraceBufferMaxSizeKB;
- u16 HostTraceBufferMinSizeKB;
- u8 CoreDumpTOSec;
- u8 TimeSyncInterval;
- u16 Reserved9;
- __le32 Reserved10;
-};
-
-struct LEAPIORAID_TARGET {
- struct scsi_target *starget;
- u64 sas_address;
- struct leapioraid_raid_device *raid_device;
- u16 handle;
- int num_luns;
- u32 flags;
- u8 deleted;
- u8 tm_busy;
- struct leapioraid_hba_port *port;
- struct leapioraid_sas_device *sas_dev;
-};
-
-#define LEAPIORAID_DEVICE_FLAGS_INIT (0x01)
-#define LEAPIORAID_DEVICE_TLR_ON (0x02)
-
-struct LEAPIORAID_DEVICE {
- struct LEAPIORAID_TARGET *sas_target;
- unsigned int lun;
- u32 flags;
- u8 configured_lun;
- u8 block;
- u8 deleted;
- u8 tlr_snoop_check;
- u8 ignore_delay_remove;
- u8 ncq_prio_enable;
- unsigned long ata_command_pending;
-};
-
-#define LEAPIORAID_CMND_PENDING_BIT (0)
-#define LEAPIORAID_CMD_NOT_USED (0x8000)
-#define LEAPIORAID_CMD_COMPLETE (0x0001)
-#define LEAPIORAID_CMD_PENDING (0x0002)
-#define LEAPIORAID_CMD_REPLY_VALID (0x0004)
-#define LEAPIORAID_CMD_RESET (0x0008)
-#define LEAPIORAID_CMD_COMPLETE_ASYNC (0x0010)
-
-struct leapioraid_internal_cmd {
- struct mutex mutex;
- struct completion done;
- void *reply;
- void *sense;
- u16 status;
- u16 smid;
-};
-
-struct leapioraid_scsi_io_transfer {
- u16 handle;
- u8 is_raid;
- enum dma_data_direction dir;
- u32 data_length;
- dma_addr_t data_dma;
- u8 sense[SCSI_SENSE_BUFFERSIZE];
- u32 lun;
- u8 cdb_length;
- u8 cdb[32];
- u8 timeout;
- u8 VF_ID;
- u8 VP_ID;
- u8 valid_reply;
- u32 sense_length;
- u16 ioc_status;
- u8 scsi_state;
- u8 scsi_status;
- u32 log_info;
- u32 transfer_length;
-};
-
-struct leapioraid_internal_qcmd {
- struct list_head list;
- void *request;
- void *reply;
- void *sense;
- u16 status;
- u16 smid;
- struct leapioraid_scsi_io_transfer *transfer_packet;
-};
-
-#define LEAPIORAID_WIDE_PORT_API (1)
-#define LEAPIORAID_WIDE_PORT_API_PLUS (1)
-
-struct leapioraid_sas_device {
- struct list_head list;
- struct scsi_target *starget;
- u64 sas_address;
- u64 device_name;
- u16 handle;
- u64 sas_address_parent;
- u16 enclosure_handle;
- u64 enclosure_logical_id;
- u16 volume_handle;
- u64 volume_wwid;
- u32 device_info;
- int id;
- int channel;
- u16 slot;
- u8 phy;
- u8 responding;
- u8 fast_path;
- u8 pfa_led_on;
- struct kref refcount;
- u8 *serial_number;
- u8 pend_sas_rphy_add;
- u8 enclosure_level;
- u8 chassis_slot;
- u8 is_chassis_slot_valid;
- u8 connector_name[5];
- u8 ssd_device;
- u8 supports_sata_smart;
- u8 port_type;
- struct leapioraid_hba_port *port;
- struct sas_rphy *rphy;
-};
-
-static inline
-void leapioraid_sas_device_get(struct leapioraid_sas_device *s)
-{
- kref_get(&s->refcount);
-}
-
-static inline
-void leapioraid_sas_device_free(struct kref *r)
-{
- kfree(container_of(r, struct leapioraid_sas_device, refcount));
-}
-
-static inline
-void leapioraid_sas_device_put(struct leapioraid_sas_device *s)
-{
- kref_put(&s->refcount, leapioraid_sas_device_free);
-}
-
-struct leapioraid_raid_device {
- struct list_head list;
- struct scsi_target *starget;
- struct scsi_device *sdev;
- u64 wwid;
- u16 handle;
- u16 block_sz;
- int id;
- int channel;
- u8 volume_type;
- u8 num_pds;
- u8 responding;
- u8 percent_complete;
- u8 direct_io_enabled;
- u8 stripe_exponent;
- u8 block_exponent;
- u64 max_lba;
- u32 stripe_sz;
- u32 device_info;
- u16 pd_handle[8];
-};
-
-struct leapioraid_boot_device {
- int channel;
- void *device;
-};
-
-struct leapioraid_sas_port {
- struct list_head port_list;
- u8 num_phys;
- struct leapioraid_hba_port *hba_port;
- struct sas_identify remote_identify;
- struct sas_rphy *rphy;
-#if defined(LEAPIORAID_WIDE_PORT_API)
- struct sas_port *port;
-#endif
- struct list_head phy_list;
-};
-
-struct leapioraid_sas_phy {
- struct list_head port_siblings;
- struct sas_identify identify;
- struct sas_identify remote_identify;
- struct sas_phy *phy;
- u8 phy_id;
- u16 handle;
- u16 attached_handle;
- u8 phy_belongs_to_port;
- u8 hba_vphy;
- struct leapioraid_hba_port *port;
-};
-
-struct leapioraid_raid_sas_node {
- struct list_head list;
- struct device *parent_dev;
- u8 num_phys;
- u64 sas_address;
- u16 handle;
- u64 sas_address_parent;
- u16 enclosure_handle;
- u64 enclosure_logical_id;
- u8 responding;
- u8 nr_phys_allocated;
- struct leapioraid_hba_port *port;
- struct leapioraid_sas_phy *phy;
- struct list_head sas_port_list;
- struct sas_rphy *rphy;
-};
-
-struct leapioraid_enclosure_node {
- struct list_head list;
- struct LeapioraidSasEncP0_t pg0;
-};
-
-enum reset_type {
- FORCE_BIG_HAMMER,
- SOFT_RESET,
-};
-
-struct leapioraid_chain_tracker {
- void *chain_buffer;
- dma_addr_t chain_buffer_dma;
-};
-
-struct leapioraid_chain_lookup {
- struct leapioraid_chain_tracker *chains_per_smid;
- atomic_t chain_offset;
-};
-
-struct leapioraid_scsiio_tracker {
- u16 smid;
- struct scsi_cmnd *scmd;
- u8 cb_idx;
- u8 direct_io;
- struct list_head chain_list;
- u16 msix_io;
-};
-
-struct leapioraid_request_tracker {
- u16 smid;
- u8 cb_idx;
- struct list_head tracker_list;
-};
-
-struct leapioraid_tr_list {
- struct list_head list;
- u16 handle;
- u16 state;
-};
-
-struct leapioraid_sc_list {
- struct list_head list;
- u16 handle;
-};
-
-struct leapioraid_event_ack_list {
- struct list_head list;
- U16 Event;
- U32 EventContext;
-};
-
-struct leapioraid_adapter_reply_queue {
- struct LEAPIORAID_ADAPTER *ioc;
- u8 msix_index;
- u32 reply_post_host_index;
- union LeapioraidRepDescUnion_t *reply_post_free;
- char name[LEAPIORAID_NAME_LENGTH];
- atomic_t busy;
- cpumask_var_t affinity_hint;
- u32 os_irq;
- struct irq_poll irqpoll;
- bool irq_poll_scheduled;
- bool irq_line_enable;
- bool is_blk_mq_poll_q;
- struct list_head list;
-};
-
-struct leapioraid_blk_mq_poll_queue {
- atomic_t busy;
- atomic_t pause;
- struct leapioraid_adapter_reply_queue *reply_q;
-};
-
-union leapioraid_version_union {
- struct LEAPIORAID_VERSION_STRUCT Struct;
- u32 Word;
-};
-
-typedef void (*LEAPIORAID_ADD_SGE)(void *paddr, u32 flags_length,
- dma_addr_t dma_addr);
-typedef int (*LEAPIORAID_BUILD_SG_SCMD)(struct LEAPIORAID_ADAPTER *ioc,
- struct scsi_cmnd *scmd, u16 smid);
-typedef void (*LEAPIORAID_BUILD_SG)(struct LEAPIORAID_ADAPTER *ioc, void *psge,
- dma_addr_t data_out_dma, size_t data_out_sz,
- dma_addr_t data_in_dma, size_t data_in_sz);
-typedef void (*LEAPIORAID_BUILD_ZERO_LEN_SGE)(struct LEAPIORAID_ADAPTER *ioc,
- void *paddr);
-typedef void (*PUT_SMID_IO_FP_HIP_TA)(struct LEAPIORAID_ADAPTER *ioc, u16 smid,
- u16 funcdep);
-typedef void (*PUT_SMID_DEFAULT)(struct LEAPIORAID_ADAPTER *ioc, u16 smid);
-typedef u32(*BASE_READ_REG) (const void __iomem *addr,
- u8 retry_count);
-typedef u8(*GET_MSIX_INDEX) (struct LEAPIORAID_ADAPTER *ioc,
- struct scsi_cmnd *scmd);
-
-struct leapioraid_facts {
- u16 MsgVersion;
- u16 HeaderVersion;
- u8 IOCNumber;
- u8 VP_ID;
- u8 VF_ID;
- u16 IOCExceptions;
- u16 IOCStatus;
- u32 IOCLogInfo;
- u8 MaxChainDepth;
- u8 WhoInit;
- u8 NumberOfPorts;
- u8 MaxMSIxVectors;
- u16 RequestCredit;
- u16 ProductID;
- u32 IOCCapabilities;
- union leapioraid_version_union FWVersion;
- u16 IOCRequestFrameSize;
- u16 IOCMaxChainSegmentSize;
- u16 MaxInitiators;
- u16 MaxTargets;
- u16 MaxSasExpanders;
- u16 MaxEnclosures;
- u16 ProtocolFlags;
- u16 HighPriorityCredit;
- u16 MaxReplyDescriptorPostQueueDepth;
- u8 ReplyFrameSize;
- u8 MaxVolumes;
- u16 MaxDevHandle;
- u16 MaxPersistentEntries;
- u16 MinDevHandle;
- u8 CurrentHostPageSize;
-};
-
-struct leapioraid_port_facts {
- u8 PortNumber;
- u8 VP_ID;
- u8 VF_ID;
- u8 PortType;
- u16 MaxPostedCmdBuffers;
-};
-
-struct leapioraid_reply_post_struct {
- union LeapioraidRepDescUnion_t *reply_post_free;
- dma_addr_t reply_post_free_dma;
-};
-
-struct leapioraid_virtual_phy {
- struct list_head list;
- u64 sas_address;
- u32 phy_mask;
- u8 flags;
-};
-
-#define LEAPIORAID_VPHY_FLAG_DIRTY_PHY (0x01)
-struct leapioraid_hba_port {
- struct list_head list;
- u64 sas_address;
- u32 phy_mask;
- u8 port_id;
- u8 flags;
- u32 vphys_mask;
- struct list_head vphys_list;
-};
-
-#define LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT (0x01)
-#define LEAPIORAID_HBA_PORT_FLAG_NEW_PORT (0x02)
-#define LEAPIORAID_MULTIPATH_DISABLED_PORT_ID (0xFF)
-
-typedef void (*LEAPIORAID_FLUSH_RUNNING_CMDS)(struct LEAPIORAID_ADAPTER *ioc);
-
-struct LEAPIORAID_ADAPTER {
- struct list_head list;
- struct Scsi_Host *shost;
- u8 id;
- u8 IOCNumber;
- int cpu_count;
- char name[LEAPIORAID_NAME_LENGTH];
- char driver_name[LEAPIORAID_DRIVER_NAME_LENGTH];
- char tmp_string[LEAPIORAID_STRING_LENGTH];
- struct pci_dev *pdev;
- struct LeapioraidSysInterfaceRegs_t __iomem *chip;
- phys_addr_t chip_phys;
- int logging_level;
- int fwfault_debug;
- u8 ir_firmware;
- int bars;
- u8 mask_interrupts;
- struct mutex pci_access_mutex;
- char fault_reset_work_q_name[48];
- char hba_hot_unplug_work_q_name[48];
- struct workqueue_struct *fault_reset_work_q;
- struct workqueue_struct *hba_hot_unplug_work_q;
- struct delayed_work fault_reset_work;
- struct delayed_work hba_hot_unplug_work;
- struct workqueue_struct *smart_poll_work_q;
- struct delayed_work smart_poll_work;
- u8 adapter_over_temp;
- char firmware_event_name[48];
- struct workqueue_struct *firmware_event_thread;
- spinlock_t fw_event_lock;
- struct list_head fw_event_list;
- struct leapioraid_fw_event_work *current_event;
- u8 fw_events_cleanup;
- int aen_event_read_flag;
- u8 broadcast_aen_busy;
- u16 broadcast_aen_pending;
- u8 shost_recovery;
- u8 got_task_abort_from_ioctl;
- u8 got_task_abort_from_sysfs;
- struct mutex reset_in_progress_mutex;
- struct mutex hostdiag_unlock_mutex;
- spinlock_t ioc_reset_in_progress_lock;
- spinlock_t hba_hot_unplug_lock;
- u8 ioc_link_reset_in_progress;
- int ioc_reset_status;
- u8 ignore_loginfos;
- u8 remove_host;
- u8 pci_error_recovery;
- u8 wait_for_discovery_to_complete;
- u8 is_driver_loading;
- u8 port_enable_failed;
- u8 start_scan;
- u16 start_scan_failed;
- u8 msix_enable;
- u8 *cpu_msix_table;
- resource_size_t **reply_post_host_index;
- u16 cpu_msix_table_sz;
- u32 ioc_reset_count;
- LEAPIORAID_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
- u32 non_operational_loop;
- u8 ioc_coredump_loop;
- u32 timestamp_update_count;
- u32 time_sync_interval;
- u8 multipath_on_hba;
- atomic64_t total_io_cnt;
- atomic64_t high_iops_outstanding;
- bool msix_load_balance;
- u16 thresh_hold;
- u8 high_iops_queues;
- u8 iopoll_q_start_index;
- u32 drv_internal_flags;
- u32 drv_support_bitmap;
- u32 dma_mask;
- bool enable_sdev_max_qd;
- bool use_32bit_dma;
- struct leapioraid_blk_mq_poll_queue *blk_mq_poll_queues;
- u8 scsi_io_cb_idx;
- u8 tm_cb_idx;
- u8 transport_cb_idx;
- u8 scsih_cb_idx;
- u8 ctl_cb_idx;
- u8 ctl_tm_cb_idx;
- u8 base_cb_idx;
- u8 port_enable_cb_idx;
- u8 config_cb_idx;
- u8 tm_tr_cb_idx;
- u8 tm_tr_volume_cb_idx;
- u8 tm_tr_internal_cb_idx;
- u8 tm_sas_control_cb_idx;
- struct leapioraid_internal_cmd base_cmds;
- struct leapioraid_internal_cmd port_enable_cmds;
- struct leapioraid_internal_cmd transport_cmds;
- struct leapioraid_internal_cmd scsih_cmds;
- struct leapioraid_internal_cmd tm_cmds;
- struct leapioraid_internal_cmd ctl_cmds;
- struct leapioraid_internal_cmd config_cmds;
- struct list_head scsih_q_intenal_cmds;
- spinlock_t scsih_q_internal_lock;
- LEAPIORAID_ADD_SGE base_add_sg_single;
- LEAPIORAID_BUILD_SG_SCMD build_sg_scmd;
- LEAPIORAID_BUILD_SG build_sg;
- LEAPIORAID_BUILD_ZERO_LEN_SGE build_zero_len_sge;
- u16 sge_size_ieee;
- LEAPIORAID_BUILD_SG build_sg_mpi;
- LEAPIORAID_BUILD_ZERO_LEN_SGE build_zero_len_sge_mpi;
- u32 event_type[LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS];
- u32 event_context;
- void *event_log;
- u32 event_masks[LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS];
- u8 disable_eedp_support;
- u8 tm_custom_handling;
- u16 max_shutdown_latency;
- u16 max_wideport_qd;
- u16 max_narrowport_qd;
- u8 max_sata_qd;
- struct leapioraid_facts facts;
- struct leapioraid_facts prev_fw_facts;
- struct leapioraid_port_facts *pfacts;
- struct LeapioraidManP0_t manu_pg0;
- struct LeapioraidManuP10_t manu_pg10;
- struct LeapioraidManuP11_t manu_pg11;
- struct LeapioraidBiosP2_t bios_pg2;
- struct LeapioraidBiosP3_t bios_pg3;
- struct LeapioraidIOCP8_t ioc_pg8;
- struct LeapioraidIOUnitP0_t iounit_pg0;
- struct LeapioraidIOUnitP1_t iounit_pg1;
- struct LeapioraidIOUnitP8_t iounit_pg8;
- struct LeapioraidIOCP1_t ioc_pg1_copy;
- struct leapioraid_boot_device req_boot_device;
- struct leapioraid_boot_device req_alt_boot_device;
- struct leapioraid_boot_device current_boot_device;
- struct leapioraid_raid_sas_node sas_hba;
- struct list_head sas_expander_list;
- struct list_head enclosure_list;
- spinlock_t sas_node_lock;
- struct list_head sas_device_list;
- struct list_head sas_device_init_list;
- spinlock_t sas_device_lock;
- struct list_head pcie_device_list;
- struct list_head pcie_device_init_list;
- spinlock_t pcie_device_lock;
- struct list_head raid_device_list;
- spinlock_t raid_device_lock;
- u8 io_missing_delay;
- u16 device_missing_delay;
- int sas_id;
- int pcie_target_id;
- void *blocking_handles;
- void *pd_handles;
- u16 pd_handles_sz;
- void *pend_os_device_add;
- u16 pend_os_device_add_sz;
- u16 config_page_sz;
- void *config_page;
- dma_addr_t config_page_dma;
- void *config_vaddr;
- u16 hba_queue_depth;
- u16 sge_size;
- u16 scsiio_depth;
- u16 request_sz;
- u8 *request;
- dma_addr_t request_dma;
- u32 request_dma_sz;
- spinlock_t scsi_lookup_lock;
- int pending_io_count;
- wait_queue_head_t reset_wq;
- int pending_tm_count;
- u32 terminated_tm_count;
- wait_queue_head_t pending_tm_wq;
- u8 out_of_frames;
- wait_queue_head_t no_frames_tm_wq;
- u16 *io_queue_num;
- u32 page_size;
- struct leapioraid_chain_lookup *chain_lookup;
- struct list_head free_chain_list;
- struct dma_pool *chain_dma_pool;
- u16 max_sges_in_main_message;
- u16 max_sges_in_chain_message;
- u16 chains_needed_per_io;
- u16 chain_segment_sz;
- u16 chains_per_prp_buffer;
- u16 hi_priority_smid;
- u8 *hi_priority;
- dma_addr_t hi_priority_dma;
- u16 hi_priority_depth;
- struct leapioraid_request_tracker *hpr_lookup;
- struct list_head hpr_free_list;
- u16 internal_smid;
- u8 *internal;
- dma_addr_t internal_dma;
- u16 internal_depth;
- struct leapioraid_request_tracker *internal_lookup;
- struct list_head internal_free_list;
- u8 *sense;
- dma_addr_t sense_dma;
- struct dma_pool *sense_dma_pool;
- u16 reply_sz;
- u8 *reply;
- dma_addr_t reply_dma;
- u32 reply_dma_max_address;
- u32 reply_dma_min_address;
- struct dma_pool *reply_dma_pool;
- u16 reply_free_queue_depth;
- __le32 *reply_free;
- dma_addr_t reply_free_dma;
- struct dma_pool *reply_free_dma_pool;
- u32 reply_free_host_index;
- u16 reply_post_queue_depth;
- struct leapioraid_reply_post_struct *reply_post;
- struct dma_pool *reply_post_free_dma_pool;
- struct dma_pool *reply_post_free_array_dma_pool;
- struct LeapioraidIOCInitRDPQArrayEntry *reply_post_free_array;
- dma_addr_t reply_post_free_array_dma;
- u8 reply_queue_count;
- struct list_head reply_queue_list;
- u8 rdpq_array_capable;
- u8 rdpq_array_enable;
- u8 rdpq_array_enable_assigned;
- u8 combined_reply_queue;
- u8 nc_reply_index_count;
- u8 smp_affinity_enable;
- resource_size_t **replyPostRegisterIndex;
- struct list_head delayed_tr_list;
- struct list_head delayed_tr_volume_list;
- struct list_head delayed_internal_tm_list;
- struct list_head delayed_sc_list;
- struct list_head delayed_event_ack_list;
- u32 ring_buffer_offset;
- u32 ring_buffer_sz;
- u8 reset_from_user;
- u8 hide_ir_msg;
- u8 warpdrive_msg;
- u8 mfg_pg10_hide_flag;
- u8 hide_drives;
- u8 atomic_desc_capable;
- BASE_READ_REG base_readl;
- PUT_SMID_IO_FP_HIP_TA put_smid_scsi_io;
- PUT_SMID_IO_FP_HIP_TA put_smid_fast_path;
- PUT_SMID_IO_FP_HIP_TA put_smid_hi_priority;
- PUT_SMID_DEFAULT put_smid_default;
- GET_MSIX_INDEX get_msix_index_for_smlio;
- void *device_remove_in_progress;
- u16 device_remove_in_progress_sz;
- u8 *tm_tr_retry;
- u32 tm_tr_retry_sz;
- u8 temp_sensors_count;
- struct list_head port_table_list;
- u8 *log_buffer;
- dma_addr_t log_buffer_dma;
- char pcie_log_work_q_name[48];
- struct workqueue_struct *pcie_log_work_q;
- struct delayed_work pcie_log_work;
- u32 open_pcie_trace;
-};
-
-#define LEAPIORAID_DEBUG (0x00000001)
-#define LEAPIORAID_DEBUG_MSG_FRAME (0x00000002)
-#define LEAPIORAID_DEBUG_SG (0x00000004)
-#define LEAPIORAID_DEBUG_EVENTS (0x00000008)
-#define LEAPIORAID_DEBUG_EVENT_WORK_TASK (0x00000010)
-#define LEAPIORAID_DEBUG_INIT (0x00000020)
-#define LEAPIORAID_DEBUG_EXIT (0x00000040)
-#define LEAPIORAID_DEBUG_FAIL (0x00000080)
-#define LEAPIORAID_DEBUG_TM (0x00000100)
-#define LEAPIORAID_DEBUG_REPLY (0x00000200)
-#define LEAPIORAID_DEBUG_HANDSHAKE (0x00000400)
-#define LEAPIORAID_DEBUG_CONFIG (0x00000800)
-#define LEAPIORAID_DEBUG_DL (0x00001000)
-#define LEAPIORAID_DEBUG_RESET (0x00002000)
-#define LEAPIORAID_DEBUG_SCSI (0x00004000)
-#define LEAPIORAID_DEBUG_IOCTL (0x00008000)
-#define LEAPIORAID_DEBUG_CSMISAS (0x00010000)
-#define LEAPIORAID_DEBUG_SAS (0x00020000)
-#define LEAPIORAID_DEBUG_TRANSPORT (0x00040000)
-#define LEAPIORAID_DEBUG_TASK_SET_FULL (0x00080000)
-
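-/*
- * Conditional debug logging: CMD is executed only when the matching
- * LEAPIORAID_DEBUG_* bit is set in IOC->logging_level.
- */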
-#define LEAPIORAID_CHECK_LOGGING(IOC, CMD, BITS) \
-{ \
- if (IOC->logging_level & BITS) \
- CMD; \
-}
-
-#define dprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG)
-#define dsgprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_SG)
-#define devtprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_EVENTS)
-#define dewtprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_EVENT_WORK_TASK)
-#define dinitprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_INIT)
-#define dexitprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_EXIT)
-#define dfailprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_FAIL)
-#define dtmprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_TM)
-#define dreplyprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_REPLY)
-#define dhsprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_HANDSHAKE)
-#define dcprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_CONFIG)
-#define ddlprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_DL)
-#define drsprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_RESET)
-#define dsprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_SCSI)
-#define dctlprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_IOCTL)
-#define dcsmisasprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_CSMISAS)
-#define dsasprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_SAS)
-#define dsastransport(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_SAS_WIDE)
-#define dmfprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_MSG_FRAME)
-#define dtsfprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_TASK_SET_FULL)
-#define dtransportprintk(IOC, CMD) \
- LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_TRANSPORT)
-
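-/*
- * The three helpers below hex-dump a message/reply/config frame as
- * little-endian 32-bit words, eight words per line.
- */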
-static inline void
-leapioraid_debug_dump_mf(void *mpi_request, int sz)
-{
- int i;
- __le32 *mfp = (__le32 *) mpi_request;
-
- pr_info("mf:\n\t");
- for (i = 0; i < sz; i++) {
- if (i && ((i % 8) == 0))
- pr_info("\n\t");
- pr_info("%08x ", le32_to_cpu(mfp[i]));
- }
- pr_info("\n");
-}
-
-static inline void
-leapioraid_debug_dump_reply(void *mpi_request, int sz)
-{
- int i;
- __le32 *mfp = (__le32 *) mpi_request;
-
- pr_info("reply:\n\t");
- for (i = 0; i < sz; i++) {
- if (i && ((i % 8) == 0))
- pr_info("\n\t");
- pr_info("%08x ", le32_to_cpu(mfp[i]));
- }
- pr_info("\n");
-}
-
-static inline void
-leapioraid_debug_dump_config(void *mpi_request, int sz)
-{
- int i;
- __le32 *mfp = (__le32 *) mpi_request;
-
- pr_info("config:\n\t");
- for (i = 0; i < sz; i++) {
- if (i && ((i % 8) == 0))
- pr_info("\n\t");
- pr_info("%08x ", le32_to_cpu(mfp[i]));
- }
- pr_info("\n");
-}
-
-#define LEAPIORAID_DRV_INTERNAL_BITMAP_BLK_MQ (0x00000001)
-#define LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED (0x00000002)
-
-typedef u8(*LEAPIORAID_CALLBACK) (struct LEAPIORAID_ADAPTER *ioc, u16 smid,
- u8 msix_index, u32 reply);
-
-#define SCSIH_MAP_QUEUE(shost) static void leapioraid_scsihost_map_queues(shost)
-
-extern struct list_head leapioraid_ioc_list;
-extern spinlock_t leapioraid_gioc_lock;
-void leapioraid_base_start_watchdog(struct LEAPIORAID_ADAPTER *ioc);
-void leapioraid_base_stop_watchdog(struct LEAPIORAID_ADAPTER *ioc);
-void leapioraid_base_start_log_watchdog(struct LEAPIORAID_ADAPTER *ioc);
-void leapioraid_base_stop_log_watchdog(struct LEAPIORAID_ADAPTER *ioc);
-int leapioraid_base_trace_log_init(struct LEAPIORAID_ADAPTER *ioc);
-int leapioraid_base_attach(struct LEAPIORAID_ADAPTER *ioc);
-void leapioraid_base_detach(struct LEAPIORAID_ADAPTER *ioc);
-int leapioraid_base_map_resources(struct LEAPIORAID_ADAPTER *ioc);
-void leapioraid_base_free_resources(struct LEAPIORAID_ADAPTER *ioc);
-void leapioraid_free_enclosure_list(struct LEAPIORAID_ADAPTER *ioc);
-int leapioraid_base_hard_reset_handler(struct LEAPIORAID_ADAPTER *ioc,
- enum reset_type type);
-void *leapioraid_base_get_msg_frame(struct LEAPIORAID_ADAPTER *ioc, u16 smid);
-void *leapioraid_base_get_sense_buffer(struct LEAPIORAID_ADAPTER *ioc,
- u16 smid);
-__le32 leapioraid_base_get_sense_buffer_dma(struct LEAPIORAID_ADAPTER *ioc,
- u16 smid);
-__le64 leapioraid_base_get_sense_buffer_dma_64(struct LEAPIORAID_ADAPTER *ioc,
- u16 smid);
-void leapioraid_base_sync_reply_irqs(struct LEAPIORAID_ADAPTER *ioc, u8 poll);
-u16 leapioraid_base_get_smid_hpr(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx);
-u16 leapioraid_base_get_smid_scsiio(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx,
- struct scsi_cmnd *scmd);
-u16 leapioraid_base_get_smid(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx);
-void leapioraid_base_free_smid(struct LEAPIORAID_ADAPTER *ioc, u16 smid);
-void leapioraid_base_initialize_callback_handler(void);
-u8 leapioraid_base_register_callback_handler(LEAPIORAID_CALLBACK cb_func);
-void leapioraid_base_release_callback_handler(u8 cb_idx);
-u8 leapioraid_base_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index,
- u32 reply);
-u8 leapioraid_port_enable_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid,
- u8 msix_index, u32 reply);
-void *leapioraid_base_get_reply_virt_addr(struct LEAPIORAID_ADAPTER *ioc,
- u32 phys_addr);
-u32 leapioraid_base_get_iocstate(struct LEAPIORAID_ADAPTER *ioc, int cooked);
-int leapioraid_base_check_and_get_msix_vectors(struct pci_dev *pdev);
-void leapioraid_base_fault_info(struct LEAPIORAID_ADAPTER *ioc, u16 fault_code);
-#define leapioraid_print_fault_code(ioc, fault_code) \
- do { \
- pr_err("%s fault info from func: %s\n", ioc->name, __func__); \
- leapioraid_base_fault_info(ioc, fault_code); \
- } while (0)
-void leapioraid_base_coredump_info(struct LEAPIORAID_ADAPTER *ioc,
- u16 fault_code);
-int leapioraid_base_wait_for_coredump_completion(struct LEAPIORAID_ADAPTER *ioc,
- const char *caller);
-int leapioraid_base_sas_iounit_control(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidSasIoUnitControlRep_t *
- mpi_reply,
- struct LeapioraidSasIoUnitControlReq_t *
- mpi_request);
-int leapioraid_base_scsi_enclosure_processor(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidSepRep_t *mpi_reply,
- struct LeapioraidSepReq_t *mpi_request);
-void leapioraid_base_validate_event_type(struct LEAPIORAID_ADAPTER *ioc,
- u32 *event_type);
-void leapioraid_halt_firmware(struct LEAPIORAID_ADAPTER *ioc, u8 set_fault);
-struct leapioraid_scsiio_tracker *leapioraid_get_st_from_smid(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid);
-void leapioraid_base_clear_st(struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_scsiio_tracker *st);
-struct leapioraid_scsiio_tracker *leapioraid_base_scsi_cmd_priv(
- struct scsi_cmnd *scmd);
-int
-leapioraid_base_check_for_fault_and_issue_reset(struct LEAPIORAID_ADAPTER *ioc);
-int leapioraid_port_enable(struct LEAPIORAID_ADAPTER *ioc);
-u8 leapioraid_base_pci_device_is_unplugged(struct LEAPIORAID_ADAPTER *ioc);
-u8 leapioraid_base_pci_device_is_available(struct LEAPIORAID_ADAPTER *ioc);
-void leapioraid_base_free_irq(struct LEAPIORAID_ADAPTER *ioc);
-void leapioraid_base_disable_msix(struct LEAPIORAID_ADAPTER *ioc);
-void leapioraid_wait_for_commands_to_complete(struct LEAPIORAID_ADAPTER *ioc);
-u8 leapioraid_base_check_cmd_timeout(struct LEAPIORAID_ADAPTER *ioc,
- u8 status, void *mpi_request, int sz);
-#define leapioraid_check_cmd_timeout(ioc, status, mpi_request, sz, issue_reset) \
- do { \
- pr_err("%s In func: %s\n", ioc->name, __func__); \
- issue_reset = leapioraid_base_check_cmd_timeout(ioc, status, mpi_request, sz); \
- } while (0)
-int leapioraid_wait_for_ioc_to_operational(struct LEAPIORAID_ADAPTER *ioc,
- int wait_count);
-void leapioraid_base_start_hba_unplug_watchdog(struct LEAPIORAID_ADAPTER *ioc);
-void leapioraid_base_stop_hba_unplug_watchdog(struct LEAPIORAID_ADAPTER *ioc);
-int leapioraid_base_make_ioc_ready(struct LEAPIORAID_ADAPTER *ioc,
- enum reset_type type);
-void leapioraid_base_mask_interrupts(struct LEAPIORAID_ADAPTER *ioc);
-void leapioraid_base_unmask_interrupts(struct LEAPIORAID_ADAPTER *ioc);
-int leapioraid_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
-void leapioraid_base_pause_mq_polling(struct LEAPIORAID_ADAPTER *ioc);
-void leapioraid_base_resume_mq_polling(struct LEAPIORAID_ADAPTER *ioc);
-int leapioraid_base_unlock_and_get_host_diagnostic(struct LEAPIORAID_ADAPTER
- *ioc, u32 *host_diagnostic);
-void leapioraid_base_lock_host_diagnostic(struct LEAPIORAID_ADAPTER *ioc);
-extern char driver_name[LEAPIORAID_NAME_LENGTH];
-struct scsi_cmnd *leapioraid_scsihost_scsi_lookup_get(struct LEAPIORAID_ADAPTER
- *ioc, u16 smid);
-u8 leapioraid_scsihost_event_callback(struct LEAPIORAID_ADAPTER *ioc,
- u8 msix_index, u32 reply);
-void leapioraid_scsihost_reset_handler(struct LEAPIORAID_ADAPTER *ioc,
- int reset_phase);
-int leapioraid_scsihost_issue_tm(struct LEAPIORAID_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type,
- u16 smid_task, u8 timeout, u8 tr_method);
-int leapioraid_scsihost_issue_locked_tm(struct LEAPIORAID_ADAPTER *ioc,
- u16 handle, uint channel, uint id,
- uint lun, u8 type, u16 smid_task,
- u8 timeout, u8 tr_method);
-void leapioraid_scsihost_set_tm_flag(struct LEAPIORAID_ADAPTER *ioc,
- u16 handle);
-void leapioraid_scsihost_clear_tm_flag(struct LEAPIORAID_ADAPTER *ioc,
- u16 handle);
-void leapioraid_expander_remove(
- struct LEAPIORAID_ADAPTER *ioc, u64 sas_address,
- struct leapioraid_hba_port *port);
-void leapioraid_device_remove_by_sas_address(struct LEAPIORAID_ADAPTER *ioc,
- u64 sas_address,
- struct leapioraid_hba_port *port);
-u8 leapioraid_check_for_pending_internal_cmds(struct LEAPIORAID_ADAPTER *ioc,
- u16 smid);
-struct leapioraid_hba_port *leapioraid_get_port_by_id(
- struct LEAPIORAID_ADAPTER *ioc, u8 port, u8 skip_dirty_flag);
-struct leapioraid_virtual_phy *leapioraid_get_vphy_by_phy(
- struct LEAPIORAID_ADAPTER *ioc, struct leapioraid_hba_port *port, u32 phy);
-struct leapioraid_raid_sas_node *leapioraid_scsihost_expander_find_by_handle(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle);
-struct leapioraid_raid_sas_node *leapioraid_scsihost_expander_find_by_sas_address(
- struct LEAPIORAID_ADAPTER *ioc,
- u64 sas_address,
- struct leapioraid_hba_port *port);
-struct leapioraid_sas_device *__leapioraid_get_sdev_by_addr_and_rphy(
- struct LEAPIORAID_ADAPTER *ioc,
- u64 sas_address,
- struct sas_rphy *rphy);
-struct leapioraid_sas_device *leapioraid_get_sdev_by_addr(
- struct LEAPIORAID_ADAPTER *ioc,
- u64 sas_address,
- struct leapioraid_hba_port *port);
-struct leapioraid_sas_device *leapioraid_get_sdev_by_handle(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle);
-void leapioraid_scsihost_flush_running_cmds(struct LEAPIORAID_ADAPTER *ioc);
-void leapioraid_port_enable_complete(struct LEAPIORAID_ADAPTER *ioc);
-struct leapioraid_raid_device *leapioraid_raid_device_find_by_handle(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle);
-void leapioraid_scsihost_sas_device_remove(struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_sas_device *sas_device);
-void leapioraid_scsihost_clear_outstanding_scsi_tm_commands(
- struct LEAPIORAID_ADAPTER *ioc);
-u32 leapioraid_base_mod64(u64 dividend, u32 divisor);
-void
-leapioraid__scsihost_change_queue_depth(struct scsi_device *sdev, int qdepth);
-u8 leapioraid_scsihost_ncq_prio_supp(struct scsi_device *sdev);
-u8 leapioraid_config_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid,
- u8 msix_index, u32 reply);
-int leapioraid_config_get_number_hba_phys(struct LEAPIORAID_ADAPTER *ioc,
- u8 *num_phys);
-int leapioraid_config_get_manufacturing_pg0(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidManP0_t *
- config_page);
-int leapioraid_config_get_manufacturing_pg10(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidManuP10_t
- *config_page);
-int leapioraid_config_get_manufacturing_pg11(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidManuP11_t
- *config_page);
-int leapioraid_config_set_manufacturing_pg11(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidManuP11_t
- *config_page);
-int leapioraid_config_get_bios_pg2(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidBiosP2_t *config_page);
-int leapioraid_config_get_bios_pg3(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidBiosP3_t *config_page);
-int leapioraid_config_get_iounit_pg0(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidIOUnitP0_t *config_page);
-int leapioraid_config_get_sas_device_pg0(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidSasDevP0_t *config_page,
- u32 form, u32 handle);
-int leapioraid_config_get_sas_iounit_pg0(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidSasIOUnitP0_t *config_page,
- u16 sz);
-int leapioraid_config_get_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidIOUnitP1_t *config_page);
-int leapioraid_config_set_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidIOUnitP1_t *config_page);
-int leapioraid_config_get_iounit_pg8(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidIOUnitP8_t *config_page);
-int leapioraid_config_get_sas_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidSasIOUnitP1_t *config_page,
- u16 sz);
-int leapioraid_config_set_sas_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidSasIOUnitP1_t *config_page,
- u16 sz);
-int leapioraid_config_get_ioc_pg1(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidIOCP1_t *config_page);
-int leapioraid_config_set_ioc_pg1(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidIOCP1_t *config_page);
-int leapioraid_config_get_ioc_pg8(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidIOCP8_t *config_page);
-int leapioraid_config_get_expander_pg0(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidExpanderP0_t *config_page,
- u32 form, u32 handle);
-int leapioraid_config_get_expander_pg1(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidExpanderP1_t *config_page,
- u32 phy_number, u16 handle);
-int leapioraid_config_get_enclosure_pg0(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidSasEncP0_t *
- config_page, u32 form, u32 handle);
-int leapioraid_config_get_phy_pg0(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidSasPhyP0_t *config_page,
- u32 phy_number);
-int leapioraid_config_get_phy_pg1(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidSasPhyP1_t *config_page,
- u32 phy_number);
-int leapioraid_config_get_raid_volume_pg1(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidRaidVolP1_t *config_page,
- u32 form, u32 handle);
-int leapioraid_config_get_number_pds(struct LEAPIORAID_ADAPTER *ioc, u16 handle,
- u8 *num_pds);
-int leapioraid_config_get_raid_volume_pg0(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidRaidVolP0_t *config_page,
- u32 form, u32 handle, u16 sz);
-int leapioraid_config_get_phys_disk_pg0(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidCfgRep_t *mpi_reply,
- struct LeapioraidRaidPDP0_t *
- config_page, u32 form,
- u32 form_specific);
-int leapioraid_config_get_volume_handle(struct LEAPIORAID_ADAPTER *ioc,
- u16 pd_handle, u16 *volume_handle);
-int leapioraid_config_get_volume_wwid(struct LEAPIORAID_ADAPTER *ioc,
- u16 volume_handle, u64 *wwid);
-extern const struct attribute_group *leapioraid_host_groups[];
-extern const struct attribute_group *leapioraid_dev_groups[];
-void leapioraid_ctl_init(void);
-void leapioraid_ctl_exit(void);
-u8 leapioraid_ctl_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index,
- u32 reply);
-u8 leapioraid_ctl_tm_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid,
- u8 msix_index, u32 reply);
-void leapioraid_ctl_reset_handler(struct LEAPIORAID_ADAPTER *ioc,
- int reset_phase);
-u8 leapioraid_ctl_event_callback(struct LEAPIORAID_ADAPTER *ioc, u8 msix_index,
- u32 reply);
-void leapioraid_ctl_add_to_event_log(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidEventNotificationRep_t *
- mpi_reply);
-void leapioraid_ctl_clear_outstanding_ioctls(struct LEAPIORAID_ADAPTER *ioc);
-int leapioraid_ctl_release(struct inode *inode, struct file *filep);
-void ctl_init(void);
-void ctl_exit(void);
-u8 leapioraid_transport_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid,
- u8 msix_index, u32 reply);
-struct leapioraid_sas_port *leapioraid_transport_port_add(
- struct LEAPIORAID_ADAPTER *ioc,
- u16 handle, u64 sas_address,
- struct leapioraid_hba_port *port);
-void leapioraid_transport_port_remove(struct LEAPIORAID_ADAPTER *ioc,
- u64 sas_address, u64 sas_address_parent,
- struct leapioraid_hba_port *port);
-int leapioraid_transport_add_host_phy(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_sas_phy *leapioraid_phy,
- struct LeapioraidSasPhyP0_t phy_pg0,
- struct device *parent_dev);
-int leapioraid_transport_add_expander_phy(struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_sas_phy *leapioraid_phy,
- struct LeapioraidExpanderP1_t expander_pg1,
- struct device *parent_dev);
-void leapioraid_transport_update_links(struct LEAPIORAID_ADAPTER *ioc,
- u64 sas_address, u16 handle,
- u8 phy_number, u8 link_rate,
- struct leapioraid_hba_port *port);
-extern struct sas_function_template leapioraid_transport_functions;
-extern struct scsi_transport_template *leapioraid_transport_template;
-void
-leapioraid_transport_del_phy_from_an_existing_port(struct LEAPIORAID_ADAPTER
- *ioc,
- struct leapioraid_raid_sas_node *sas_node,
- struct leapioraid_sas_phy
- *leapioraid_phy);
-#if defined(LEAPIORAID_WIDE_PORT_API)
-void
-leapioraid_transport_add_phy_to_an_existing_port(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_raid_sas_node *sas_node,
- struct leapioraid_sas_phy
- *leapioraid_phy,
- u64 sas_address,
- struct leapioraid_hba_port *port);
-#endif
-#endif
diff --git a/drivers/scsi/leapioraid/leapioraid_os.c b/drivers/scsi/leapioraid/leapioraid_os.c
deleted file mode 100644
index c7bb14f2eff9..000000000000
--- a/drivers/scsi/leapioraid/leapioraid_os.c
+++ /dev/null
@@ -1,9825 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Scsi Host Layer for MPT (Message Passing Technology) based controllers
- *
- * Copyright (C) 2013-2021 LSI Corporation
- * Copyright (C) 2013-2021 Avago Technologies
- * Copyright (C) 2013-2021 Broadcom Inc.
- * (mailto:MPT-FusionLinux.pdl@broadcom.com)
- *
- * Copyright (C) 2024 LeapIO Tech Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
- *
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/blkdev.h>
-#include <linux/sched.h>
-#include <linux/workqueue.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <asm/unaligned.h>
-#include <linux/aer.h>
-#include <linux/raid_class.h>
-#include "leapioraid_func.h"
-#include <linux/blk-mq-pci.h>
-
-#define RAID_CHANNEL 1
-
-static void leapioraid_scsihost_expander_node_remove(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_raid_sas_node *sas_expander);
-static void leapioraid_firmware_event_work(struct work_struct *work);
-static void leapioraid_firmware_event_work_delayed(struct work_struct *work);
-static enum device_responsive_state
-leapioraid_scsihost_inquiry_vpd_sn(struct LEAPIORAID_ADAPTER *ioc, u16 handle,
- u8 **serial_number);
-static enum device_responsive_state
-leapioraid_scsihost_inquiry_vpd_supported_pages(struct LEAPIORAID_ADAPTER *ioc,
- u16 handle, u32 lun, void *data,
- u32 data_length);
-static enum device_responsive_state leapioraid_scsihost_ata_pass_thru_idd(
- struct LEAPIORAID_ADAPTER *ioc,
- u16 handle,
- u8 *is_ssd_device,
- u8 tr_timeout,
- u8 tr_method);
-static enum device_responsive_state
-leapioraid_scsihost_wait_for_target_to_become_ready(
- struct LEAPIORAID_ADAPTER *ioc,
- u16 handle, u8 retry_count, u8 is_pd,
- u8 tr_timeout, u8 tr_method);
-static enum device_responsive_state
-leapioraid_scsihost_wait_for_device_to_become_ready(
- struct LEAPIORAID_ADAPTER *ioc,
- u16 handle, u8 retry_count, u8 is_pd,
- int lun, u8 tr_timeout, u8 tr_method);
-static void leapioraid_scsihost_remove_device(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_sas_device *sas_device);
-static int leapioraid_scsihost_add_device(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle,
- u8 retry_count, u8 is_pd);
-static u8 leapioraid_scsihost_check_for_pending_tm(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid);
-static void leapioraid_scsihost_send_event_to_turn_on_pfa_led(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle);
-static void leapioraid_scsihost_complete_devices_scanning(
- struct LEAPIORAID_ADAPTER *ioc);
-
-LIST_HEAD(leapioraid_ioc_list);
-DEFINE_SPINLOCK(leapioraid_gioc_lock);
-
-MODULE_AUTHOR(LEAPIORAID_AUTHOR);
-MODULE_DESCRIPTION(LEAPIORAID_DESCRIPTION);
-MODULE_LICENSE("GPL");
-MODULE_VERSION(LEAPIORAID_DRIVER_VERSION);
-
-static u8 scsi_io_cb_idx = -1;
-static u8 tm_cb_idx = -1;
-static u8 ctl_cb_idx = -1;
-static u8 ctl_tm_cb_idx = -1;
-static u8 base_cb_idx = -1;
-static u8 port_enable_cb_idx = -1;
-static u8 transport_cb_idx = -1;
-static u8 scsih_cb_idx = -1;
-static u8 config_cb_idx = -1;
-static int leapioraid_ids;
-static u8 tm_tr_cb_idx = -1;
-static u8 tm_tr_volume_cb_idx = -1;
-static u8 tm_tr_internal_cb_idx = -1;
-static u8 tm_sas_control_cb_idx = -1;
-static u32 logging_level;
-
-MODULE_PARM_DESC(logging_level,
- " bits for enabling additional logging info (default=0)");
-
-static int open_pcie_trace;
-module_param(open_pcie_trace, int, 0444);
-MODULE_PARM_DESC(open_pcie_trace, "open_pcie_trace: 1=open, 0=close (default=0)");
-
-static int disable_discovery = -1;
-module_param(disable_discovery, int, 0444);
-MODULE_PARM_DESC(disable_discovery, "disable discovery");
-
-static struct raid_template *leapioraid_raid_template;
-
-enum device_responsive_state {
- DEVICE_READY,
- DEVICE_RETRY,
- DEVICE_RETRY_UA,
- DEVICE_START_UNIT,
- DEVICE_STOP_UNIT,
- DEVICE_ERROR,
-};
-
-struct sense_info {
- u8 skey;
- u8 asc;
- u8 ascq;
-};
-
-#define LEAPIORAID_TURN_ON_PFA_LED (0xFFFC)
-#define LEAPIORAID_PORT_ENABLE_COMPLETE (0xFFFD)
-#define LEAPIORAID_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
-
-struct leapioraid_fw_event_work {
- struct list_head list;
- struct work_struct work;
- u8 cancel_pending_work;
- struct delayed_work delayed_work;
- u8 delayed_work_active;
- struct LEAPIORAID_ADAPTER *ioc;
- u16 device_handle;
- u8 VF_ID;
- u8 VP_ID;
- u8 ignore;
- u16 event;
- struct kref refcount;
- void *event_data;
- u8 *retries;
-};
-
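-/*
- * Firmware event work items are reference counted; the final put frees
- * the work item together with its event payload and retry table.
- */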
-static void
-leapioraid_fw_event_work_free(struct kref *r)
-{
- struct leapioraid_fw_event_work *fw_work;
-
- fw_work = container_of(
- r, struct leapioraid_fw_event_work, refcount);
- kfree(fw_work->event_data);
- kfree(fw_work->retries);
- kfree(fw_work);
-}
-
-static void
-leapioraid_fw_event_work_get(
- struct leapioraid_fw_event_work *fw_work)
-{
- kref_get(&fw_work->refcount);
-}
-
-static void
-leapioraid_fw_event_work_put(struct leapioraid_fw_event_work *fw_work)
-{
- kref_put(&fw_work->refcount, leapioraid_fw_event_work_free);
-}
-
-static
-struct leapioraid_fw_event_work *leapioraid_alloc_fw_event_work(int len)
-{
- struct leapioraid_fw_event_work *fw_event;
-
- fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
- if (!fw_event)
- return NULL;
- kref_init(&fw_event->refcount);
- return fw_event;
-}
-
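-/*
- * Writing the logging_level module parameter at runtime propagates the
- * new debug mask to every registered adapter.
- */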
-static int
-leapioraid_scsihost_set_debug_level(
- const char *val, const struct kernel_param *kp)
-{
- int ret = param_set_int(val, kp);
- struct LEAPIORAID_ADAPTER *ioc;
-
- if (ret)
- return ret;
- pr_info("setting logging_level(0x%08x)\n", logging_level);
- spin_lock(&leapioraid_gioc_lock);
- list_for_each_entry(ioc, &leapioraid_ioc_list, list)
- ioc->logging_level = logging_level;
- spin_unlock(&leapioraid_gioc_lock);
- return 0;
-}
-
-module_param_call(logging_level,
- leapioraid_scsihost_set_debug_level, param_get_int,
- &logging_level, 0644);
-
-static inline int
-leapioraid_scsihost_srch_boot_sas_address(u64 sas_address,
- struct LEAPIORAID_BOOT_DEVICE_SAS_WWID *boot_device)
-{
- return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
-}
-
-static inline int
-leapioraid_scsihost_srch_boot_device_name(u64 device_name,
- struct LEAPIORAID_BOOT_DEVICE_DEVICE_NAME *boot_device)
-{
- return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
-}
-
-static inline int
-leapioraid_scsihost_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
- struct LEAPIORAID_BOOT_DEVICE_ENCLOSURE_SLOT *boot_device)
-{
- return (enclosure_logical_id ==
- le64_to_cpu(boot_device->EnclosureLogicalID)
- && slot_number == le16_to_cpu(boot_device->SlotNumber)) ? 1 : 0;
-}
-
-static void
-leapioraid_scsihost_display_enclosure_chassis_info(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_sas_device *sas_device,
- struct scsi_device *sdev,
- struct scsi_target *starget)
-{
- if (sdev) {
- if (sas_device->enclosure_handle != 0)
- sdev_printk(KERN_INFO, sdev,
- "enclosure logical id(0x%016llx), slot(%d)\n",
- (unsigned long long)sas_device->enclosure_logical_id,
- sas_device->slot);
- if (sas_device->connector_name[0] != '\0')
- sdev_printk(KERN_INFO, sdev,
- "enclosure level(0x%04x), connector name( %s)\n",
- sas_device->enclosure_level,
- sas_device->connector_name);
- if (sas_device->is_chassis_slot_valid)
- sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
- sas_device->chassis_slot);
- } else if (starget) {
- if (sas_device->enclosure_handle != 0)
- starget_printk(KERN_INFO, starget,
- "enclosure logical id(0x%016llx), slot(%d)\n",
- (unsigned long long)sas_device->enclosure_logical_id,
- sas_device->slot);
- if (sas_device->connector_name[0] != '\0')
- starget_printk(KERN_INFO, starget,
- "enclosure level(0x%04x), connector name( %s)\n",
- sas_device->enclosure_level,
- sas_device->connector_name);
- if (sas_device->is_chassis_slot_valid)
- starget_printk(KERN_INFO, starget,
- "chassis slot(0x%04x)\n", sas_device->chassis_slot);
- } else {
- if (sas_device->enclosure_handle != 0)
- pr_info("%s enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name,
- (unsigned long long)sas_device->enclosure_logical_id,
- sas_device->slot);
- if (sas_device->connector_name[0] != '\0')
-			pr_info("%s enclosure level(0x%04x), connector name( %s)\n",
- ioc->name,
- sas_device->enclosure_level,
- sas_device->connector_name);
- if (sas_device->is_chassis_slot_valid)
- pr_info("%s chassis slot(0x%04x)\n",
- ioc->name, sas_device->chassis_slot);
- }
-}
-
-struct leapioraid_hba_port *leapioraid_get_port_by_id(
- struct LEAPIORAID_ADAPTER *ioc,
- u8 port_id, u8 skip_dirty_flag)
-{
- struct leapioraid_hba_port *port, *port_next;
-
- if (!ioc->multipath_on_hba)
- port_id = LEAPIORAID_MULTIPATH_DISABLED_PORT_ID;
- list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) {
- if (port->port_id != port_id)
- continue;
- if (port->flags & LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT)
- continue;
- return port;
- }
- if (skip_dirty_flag) {
- port = port_next = NULL;
- list_for_each_entry_safe(port, port_next,
- &ioc->port_table_list, list) {
- if (port->port_id != port_id)
- continue;
- return port;
- }
- }
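-	/*
-	 * With multipath disabled, every device maps to a single implicit
-	 * port that is allocated on first lookup and kept on the port list.
-	 */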
- if (unlikely(!ioc->multipath_on_hba)) {
- port = kzalloc(sizeof(struct leapioraid_hba_port), GFP_ATOMIC);
- if (!port)
- return NULL;
-
- port->port_id = LEAPIORAID_MULTIPATH_DISABLED_PORT_ID;
- pr_err(
- "%s hba_port entry: %p, port: %d is added to hba_port list\n",
- ioc->name, port, port->port_id);
- list_add_tail(&port->list, &ioc->port_table_list);
- return port;
- }
- return NULL;
-}
-
-struct leapioraid_virtual_phy *leapioraid_get_vphy_by_phy(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_hba_port *port, u32 phy)
-{
- struct leapioraid_virtual_phy *vphy, *vphy_next;
-
- if (!port->vphys_mask)
- return NULL;
- list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
- if (vphy->phy_mask & (1 << phy))
- return vphy;
- }
- return NULL;
-}
-
-static int
-leapioraid_scsihost_is_boot_device(u64 sas_address, u64 device_name,
- u64 enclosure_logical_id, u16 slot, u8 form,
- union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE *boot_device)
-{
- int rc = 0;
-
- switch (form) {
- case LEAPIORAID_BIOSPAGE2_FORM_SAS_WWID:
- if (!sas_address)
- break;
- rc = leapioraid_scsihost_srch_boot_sas_address(sas_address,
- &boot_device->SasWwid);
- break;
- case LEAPIORAID_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
- if (!enclosure_logical_id)
- break;
- rc = leapioraid_scsihost_srch_boot_encl_slot(
- enclosure_logical_id,
- slot,
- &boot_device->EnclosureSlot);
- break;
- case LEAPIORAID_BIOSPAGE2_FORM_DEVICE_NAME:
- if (!device_name)
- break;
- rc = leapioraid_scsihost_srch_boot_device_name(device_name,
- &boot_device->DeviceName);
- break;
- case LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
- break;
- }
- return rc;
-}
-
-static int
-leapioraid_scsihost_get_sas_address(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle,
- u64 *sas_address)
-{
- struct LeapioraidSasDevP0_t sas_device_pg0;
- struct LeapioraidCfgRep_t mpi_reply;
- u32 ioc_status;
-
- *sas_address = 0;
- if ((leapioraid_config_get_sas_device_pg0
- (ioc, &mpi_reply, &sas_device_pg0,
- LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return -ENXIO;
- }
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS) {
- if ((handle <= ioc->sas_hba.num_phys) &&
- (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
- LEAPIORAID_SAS_DEVICE_INFO_SEP)))
- *sas_address = ioc->sas_hba.sas_address;
- else
- *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
- return 0;
- }
- if (ioc_status == LEAPIORAID_IOCSTATUS_CONFIG_INVALID_PAGE)
- return -ENXIO;
- pr_err("%s handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
- ioc->name, handle, ioc_status,
- __FILE__, __LINE__, __func__);
- return -EIO;
-}
-
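-/*
- * During the initial scan, compare each discovered device against the
- * BIOS page 2 boot entries (requested, alternate, current) and record
- * the first match found for each.
- */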
-static void
-leapioraid_scsihost_determine_boot_device(
- struct LEAPIORAID_ADAPTER *ioc, void *device,
- u32 channel)
-{
- struct leapioraid_sas_device *sas_device;
- struct leapioraid_raid_device *raid_device;
- u64 sas_address;
- u64 device_name;
- u64 enclosure_logical_id;
- u16 slot;
-
- if (!ioc->is_driver_loading)
- return;
- if (!ioc->bios_pg3.BiosVersion)
- return;
- if (channel == RAID_CHANNEL) {
- raid_device = device;
- sas_address = raid_device->wwid;
- device_name = 0;
- enclosure_logical_id = 0;
- slot = 0;
- } else {
- sas_device = device;
- sas_address = sas_device->sas_address;
- device_name = sas_device->device_name;
- enclosure_logical_id = sas_device->enclosure_logical_id;
- slot = sas_device->slot;
- }
- if (!ioc->req_boot_device.device) {
- if (leapioraid_scsihost_is_boot_device(sas_address, device_name,
- enclosure_logical_id, slot,
- (ioc->bios_pg2.ReqBootDeviceForm &
- LEAPIORAID_BIOSPAGE2_FORM_MASK),
- &ioc->bios_pg2.RequestedBootDevice)) {
- dinitprintk(ioc,
- pr_err(
- "%s %s: req_boot_device(0x%016llx)\n",
- ioc->name, __func__,
- (unsigned long long)sas_address));
- ioc->req_boot_device.device = device;
- ioc->req_boot_device.channel = channel;
- }
- }
- if (!ioc->req_alt_boot_device.device) {
- if (leapioraid_scsihost_is_boot_device(sas_address, device_name,
- enclosure_logical_id, slot,
- (ioc->bios_pg2.ReqAltBootDeviceForm &
- LEAPIORAID_BIOSPAGE2_FORM_MASK),
- &ioc->bios_pg2.RequestedAltBootDevice)) {
- dinitprintk(ioc,
- pr_err(
- "%s %s: req_alt_boot_device(0x%016llx)\n",
- ioc->name, __func__,
- (unsigned long long)sas_address));
- ioc->req_alt_boot_device.device = device;
- ioc->req_alt_boot_device.channel = channel;
- }
- }
- if (!ioc->current_boot_device.device) {
- if (leapioraid_scsihost_is_boot_device(sas_address, device_name,
- enclosure_logical_id, slot,
- (ioc->bios_pg2.CurrentBootDeviceForm &
- LEAPIORAID_BIOSPAGE2_FORM_MASK),
- &ioc->bios_pg2.CurrentBootDevice)) {
- dinitprintk(ioc,
- pr_err(
- "%s %s: current_boot_device(0x%016llx)\n",
- ioc->name, __func__,
- (unsigned long long)sas_address));
- ioc->current_boot_device.device = device;
- ioc->current_boot_device.channel = channel;
- }
- }
-}
-
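-/*
- * The double-underscore lookup helpers below expect ioc->sas_device_lock
- * to be held by the caller and return the device with a reference taken.
- */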
-static
-struct leapioraid_sas_device *__leapioraid_get_sdev_from_target(
- struct LEAPIORAID_ADAPTER *ioc,
- struct LEAPIORAID_TARGET *tgt_priv)
-{
- struct leapioraid_sas_device *ret;
-
- assert_spin_locked(&ioc->sas_device_lock);
- ret = tgt_priv->sas_dev;
- if (ret)
- leapioraid_sas_device_get(ret);
- return ret;
-}
-
-static
-struct leapioraid_sas_device *leapioraid_get_sdev_from_target(
- struct LEAPIORAID_ADAPTER *ioc,
- struct LEAPIORAID_TARGET *tgt_priv)
-{
- struct leapioraid_sas_device *ret;
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- ret = __leapioraid_get_sdev_from_target(ioc, tgt_priv);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return ret;
-}
-
-static
-struct leapioraid_sas_device *__leapioraid_get_sdev_by_addr(
- struct LEAPIORAID_ADAPTER *ioc,
- u64 sas_address, struct leapioraid_hba_port *port)
-{
- struct leapioraid_sas_device *sas_device;
-
- if (!port)
- return NULL;
- assert_spin_locked(&ioc->sas_device_lock);
- list_for_each_entry(sas_device, &ioc->sas_device_list, list)
- if (sas_device->sas_address == sas_address &&
- sas_device->port == port)
- goto found_device;
- list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
- if (sas_device->sas_address == sas_address &&
- sas_device->port == port)
- goto found_device;
- return NULL;
-found_device:
- leapioraid_sas_device_get(sas_device);
- return sas_device;
-}
-
-struct leapioraid_sas_device *__leapioraid_get_sdev_by_addr_and_rphy(
- struct LEAPIORAID_ADAPTER *ioc,
- u64 sas_address,
- struct sas_rphy *rphy)
-{
- struct leapioraid_sas_device *sas_device;
-
- assert_spin_locked(&ioc->sas_device_lock);
- list_for_each_entry(sas_device, &ioc->sas_device_list, list)
- if (sas_device->sas_address == sas_address &&
- (sas_device->rphy == rphy))
- goto found_device;
- list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
- if (sas_device->sas_address == sas_address &&
- (sas_device->rphy == rphy))
- goto found_device;
- return NULL;
-found_device:
- leapioraid_sas_device_get(sas_device);
- return sas_device;
-}
-
-struct leapioraid_sas_device *leapioraid_get_sdev_by_addr(
- struct LEAPIORAID_ADAPTER *ioc,
- u64 sas_address,
- struct leapioraid_hba_port *port)
-{
- struct leapioraid_sas_device *sas_device = NULL;
- unsigned long flags;
-
- if (!port)
- return sas_device;
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __leapioraid_get_sdev_by_addr(ioc, sas_address, port);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return sas_device;
-}
-
-static struct leapioraid_sas_device *__leapioraid_get_sdev_by_handle(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle)
-{
- struct leapioraid_sas_device *sas_device;
-
- assert_spin_locked(&ioc->sas_device_lock);
- list_for_each_entry(sas_device, &ioc->sas_device_list, list)
- if (sas_device->handle == handle)
- goto found_device;
- list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
- if (sas_device->handle == handle)
- goto found_device;
- return NULL;
-found_device:
- leapioraid_sas_device_get(sas_device);
- return sas_device;
-}
-
-struct leapioraid_sas_device *leapioraid_get_sdev_by_handle(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle)
-{
- struct leapioraid_sas_device *sas_device;
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __leapioraid_get_sdev_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return sas_device;
-}
-
-void
-leapioraid_scsihost_sas_device_remove(struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_sas_device *sas_device)
-{
- unsigned long flags;
- int was_on_sas_device_list = 0;
-
- if (!sas_device)
- return;
- pr_info("%s %s: removing handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, __func__, sas_device->handle,
- (unsigned long long)sas_device->sas_address);
- leapioraid_scsihost_display_enclosure_chassis_info(
- ioc, sas_device, NULL, NULL);
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- if (!list_empty(&sas_device->list)) {
- list_del_init(&sas_device->list);
- was_on_sas_device_list = 1;
- }
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (was_on_sas_device_list) {
- kfree(sas_device->serial_number);
- leapioraid_sas_device_put(sas_device);
- }
-}
-
-static void
-leapioraid_scsihost_device_remove_by_handle(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle)
-{
- struct leapioraid_sas_device *sas_device;
- unsigned long flags;
- int was_on_sas_device_list = 0;
-
- if (ioc->shost_recovery)
- return;
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __leapioraid_get_sdev_by_handle(ioc, handle);
- if (sas_device) {
- if (!list_empty(&sas_device->list)) {
- list_del_init(&sas_device->list);
- was_on_sas_device_list = 1;
- leapioraid_sas_device_put(sas_device);
- }
- }
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (was_on_sas_device_list) {
- leapioraid_scsihost_remove_device(ioc, sas_device);
- leapioraid_sas_device_put(sas_device);
- }
-}
-
-void
-leapioraid_device_remove_by_sas_address(
- struct LEAPIORAID_ADAPTER *ioc,
- u64 sas_address, struct leapioraid_hba_port *port)
-{
- struct leapioraid_sas_device *sas_device;
- unsigned long flags;
- int was_on_sas_device_list = 0;
-
- if (ioc->shost_recovery)
- return;
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __leapioraid_get_sdev_by_addr(ioc, sas_address, port);
- if (sas_device) {
- if (!list_empty(&sas_device->list)) {
- list_del_init(&sas_device->list);
- was_on_sas_device_list = 1;
- leapioraid_sas_device_put(sas_device);
- }
- }
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (was_on_sas_device_list) {
- leapioraid_scsihost_remove_device(ioc, sas_device);
- leapioraid_sas_device_put(sas_device);
- }
-}
-
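-/*
- * Add a discovered device to the adapter's list and register its SAS
- * transport port; the entry is torn back down if port registration
- * fails or no scsi target appeared once driver load has completed.
- */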
-static void
-leapioraid_scsihost_sas_device_add(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_sas_device *sas_device)
-{
- unsigned long flags;
-
- dewtprintk(ioc, pr_info("%s %s: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name,
- __func__, sas_device->handle,
- (unsigned long long)sas_device->sas_address));
- dewtprintk(ioc,
- leapioraid_scsihost_display_enclosure_chassis_info(ioc, sas_device,
- NULL, NULL));
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- leapioraid_sas_device_get(sas_device);
- list_add_tail(&sas_device->list, &ioc->sas_device_list);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (ioc->hide_drives) {
- clear_bit(sas_device->handle, ioc->pend_os_device_add);
- return;
- }
- if (!leapioraid_transport_port_add(ioc, sas_device->handle,
- sas_device->sas_address_parent,
- sas_device->port)) {
- leapioraid_scsihost_sas_device_remove(ioc, sas_device);
- } else if (!sas_device->starget) {
- if (!ioc->is_driver_loading) {
- leapioraid_transport_port_remove(ioc,
- sas_device->sas_address,
- sas_device->sas_address_parent,
- sas_device->port);
- leapioraid_scsihost_sas_device_remove(ioc, sas_device);
- }
- } else
- clear_bit(sas_device->handle, ioc->pend_os_device_add);
-}
-
-static void
-leapioraid_scsihost_sas_device_init_add(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_sas_device *sas_device)
-{
- unsigned long flags;
-
- dewtprintk(ioc, pr_info("%s %s: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name,
- __func__, sas_device->handle,
- (unsigned long long)sas_device->sas_address));
- dewtprintk(ioc,
- leapioraid_scsihost_display_enclosure_chassis_info(ioc, sas_device,
- NULL, NULL));
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- leapioraid_sas_device_get(sas_device);
- list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
- leapioraid_scsihost_determine_boot_device(ioc, sas_device, 0);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-}
-
-static
-struct leapioraid_raid_device *leapioraid_scsihost_raid_device_find_by_id(
- struct LEAPIORAID_ADAPTER *ioc, int id, int channel)
-{
- struct leapioraid_raid_device *raid_device, *r;
-
- r = NULL;
- list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
- if (raid_device->id == id && raid_device->channel == channel) {
- r = raid_device;
- goto out;
- }
- }
-out:
- return r;
-}
-
-struct leapioraid_raid_device *leapioraid_raid_device_find_by_handle(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle)
-{
- struct leapioraid_raid_device *raid_device, *r;
-
- r = NULL;
- list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
- if (raid_device->handle != handle)
- continue;
- r = raid_device;
- goto out;
- }
-out:
- return r;
-}
-
-static
-struct leapioraid_raid_device *leapioraid_scsihost_raid_device_find_by_wwid(
- struct LEAPIORAID_ADAPTER *ioc, u64 wwid)
-{
- struct leapioraid_raid_device *raid_device, *r;
-
- r = NULL;
- list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
- if (raid_device->wwid != wwid)
- continue;
- r = raid_device;
- goto out;
- }
-out:
- return r;
-}
-
-static void
-leapioraid_scsihost_raid_device_add(struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_raid_device *raid_device)
-{
- unsigned long flags;
- u8 protection_mask;
-
- dewtprintk(ioc, pr_info("%s %s: handle(0x%04x), wwid(0x%016llx)\n",
- ioc->name,
- __func__, raid_device->handle,
- (unsigned long long)raid_device->wwid));
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- list_add_tail(&raid_device->list, &ioc->raid_device_list);
- if (!ioc->disable_eedp_support) {
- protection_mask = scsi_host_get_prot(ioc->shost);
- if (protection_mask & SHOST_DIX_TYPE0_PROTECTION) {
- scsi_host_set_prot(ioc->shost, protection_mask & 0x77);
- pr_err(
-				"%s: Disabling DIX0 because it is unsupported!\n",
- ioc->name);
- }
- }
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
-}
-
-static void
-leapioraid_scsihost_raid_device_remove(struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_raid_device *raid_device)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- list_del(&raid_device->list);
- kfree(raid_device);
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
-}
-
-struct leapioraid_raid_sas_node *leapioraid_scsihost_expander_find_by_handle(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle)
-{
- struct leapioraid_raid_sas_node *sas_expander, *r;
-
- r = NULL;
- list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
- if (sas_expander->handle != handle)
- continue;
- r = sas_expander;
- goto out;
- }
-out:
- return r;
-}
-
-static
-struct leapioraid_enclosure_node *leapioraid_scsihost_enclosure_find_by_handle(
- struct LEAPIORAID_ADAPTER *ioc,
- u16 handle)
-{
- struct leapioraid_enclosure_node *enclosure_dev, *r;
-
- r = NULL;
- list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
- if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
- continue;
- r = enclosure_dev;
- goto out;
- }
-out:
- return r;
-}
-
-struct leapioraid_raid_sas_node *leapioraid_scsihost_expander_find_by_sas_address(
- struct LEAPIORAID_ADAPTER *ioc,
- u64 sas_address,
- struct leapioraid_hba_port *port)
-{
- struct leapioraid_raid_sas_node *sas_expander, *r;
-
- r = NULL;
- if (!port)
- return r;
- list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
- if (sas_expander->sas_address != sas_address ||
- sas_expander->port != port)
- continue;
- r = sas_expander;
- goto out;
- }
-out:
- return r;
-}
-
-static void
-leapioraid_scsihost_expander_node_add(struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_raid_sas_node *sas_expander)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->sas_node_lock, flags);
- list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
-}
-
-static int
-leapioraid_scsihost_is_sas_end_device(u32 device_info)
-{
- if (device_info & LEAPIORAID_SAS_DEVICE_INFO_END_DEVICE &&
- ((device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) |
- (device_info & LEAPIORAID_SAS_DEVICE_INFO_STP_TARGET) |
- (device_info & LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE)))
- return 1;
- else
- return 0;
-}
-
-static u8
-leapioraid_scsihost_scsi_lookup_find_by_target(
- struct LEAPIORAID_ADAPTER *ioc, int id,
- int channel)
-{
- int smid;
- struct scsi_cmnd *scmd;
-
- for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
- scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid);
- if (!scmd)
- continue;
- if (scmd->device->id == id && scmd->device->channel == channel)
- return 1;
- }
- return 0;
-}
-
-static u8
-leapioraid_scsihost_scsi_lookup_find_by_lun(
- struct LEAPIORAID_ADAPTER *ioc, int id,
- unsigned int lun, int channel)
-{
- int smid;
- struct scsi_cmnd *scmd;
-
- for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
- scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid);
- if (!scmd)
- continue;
- if (scmd->device->id == id &&
- scmd->device->channel == channel &&
- scmd->device->lun == lun)
- return 1;
- }
- return 0;
-}
-
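-/*
- * Map an smid back to its scsi_cmnd: rebuild the blk-mq unique tag from
- * the saved hw queue number, then discard commands whose tracker shows
- * they are no longer outstanding.
- */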
-struct scsi_cmnd *leapioraid_scsihost_scsi_lookup_get(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid)
-{
- struct scsi_cmnd *scmd = NULL;
- struct leapioraid_scsiio_tracker *st;
- struct LeapioraidSCSIIOReq_t *mpi_request;
- u32 unique_tag = smid - 1;
-
- if (smid > 0 && smid <= ioc->shost->can_queue) {
- unique_tag =
- ioc->io_queue_num[smid -
- 1] << BLK_MQ_UNIQUE_TAG_BITS | (smid - 1);
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- if (!mpi_request->DevHandle)
- return scmd;
- scmd = scsi_host_find_tag(ioc->shost, unique_tag);
- if (scmd) {
- st = leapioraid_base_scsi_cmd_priv(scmd);
- if ((!st) || (st->cb_idx == 0xFF) || (st->smid == 0))
- scmd = NULL;
- }
- }
- return scmd;
-}
-
-static void
-leapioraid_scsihost_display_sdev_qd(struct scsi_device *sdev)
-{
- if (sdev->inquiry_len <= 7)
- return;
- sdev_printk(KERN_INFO, sdev,
- "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
- sdev->queue_depth, sdev->tagged_supported,
- sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
-}
-
-static int
-leapioraid_scsihost_change_queue_depth(
- struct scsi_device *sdev, int qdepth)
-{
- struct Scsi_Host *shost = sdev->host;
- int max_depth;
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- struct LEAPIORAID_TARGET *sas_target_priv_data;
- struct leapioraid_sas_device *sas_device;
- unsigned long flags;
-
- max_depth = shost->can_queue;
-
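-	/*
-	 * The SATA-specific queue-depth cap below is currently unreachable:
-	 * the unconditional goto leaves max_depth at shost->can_queue.
-	 */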
- goto not_sata;
-
- sas_device_priv_data = sdev->hostdata;
- if (!sas_device_priv_data)
- goto not_sata;
- sas_target_priv_data = sas_device_priv_data->sas_target;
- if (!sas_target_priv_data)
- goto not_sata;
- if ((sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME))
- goto not_sata;
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device =
- __leapioraid_get_sdev_from_target(ioc, sas_target_priv_data);
- if (sas_device) {
- if (sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE)
- max_depth = LEAPIORAID_SATA_QUEUE_DEPTH;
- leapioraid_sas_device_put(sas_device);
- }
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-not_sata:
- if (!sdev->tagged_supported)
- max_depth = 1;
- if (qdepth > max_depth)
- qdepth = max_depth;
- scsi_change_queue_depth(sdev, qdepth);
- leapioraid_scsihost_display_sdev_qd(sdev);
- return sdev->queue_depth;
-}
-
-void
-leapioraid__scsihost_change_queue_depth(
- struct scsi_device *sdev, int qdepth)
-{
- struct Scsi_Host *shost = sdev->host;
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
-
- if (ioc->enable_sdev_max_qd)
- qdepth = shost->can_queue;
- leapioraid_scsihost_change_queue_depth(sdev, qdepth);
-}
-
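-/*
- * Allocate per-target private data and bind it to either a RAID volume
- * (RAID_CHANNEL) or a SAS device looked up by the rphy's address.
- */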
-static int
-leapioraid_scsihost_target_alloc(struct scsi_target *starget)
-{
- struct Scsi_Host *shost = dev_to_shost(&starget->dev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
- struct LEAPIORAID_TARGET *sas_target_priv_data;
- struct leapioraid_sas_device *sas_device;
- struct leapioraid_raid_device *raid_device;
- unsigned long flags;
- struct sas_rphy *rphy;
-
- sas_target_priv_data =
- kzalloc(sizeof(struct LEAPIORAID_TARGET), GFP_KERNEL);
- if (!sas_target_priv_data)
- return -ENOMEM;
- starget->hostdata = sas_target_priv_data;
- sas_target_priv_data->starget = starget;
- sas_target_priv_data->handle = LEAPIORAID_INVALID_DEVICE_HANDLE;
- if (starget->channel == RAID_CHANNEL) {
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- raid_device = leapioraid_scsihost_raid_device_find_by_id(
- ioc, starget->id, starget->channel);
- if (raid_device) {
- sas_target_priv_data->handle = raid_device->handle;
- sas_target_priv_data->sas_address = raid_device->wwid;
- sas_target_priv_data->flags |=
- LEAPIORAID_TARGET_FLAGS_VOLUME;
- raid_device->starget = starget;
- }
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- return 0;
- }
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- rphy = dev_to_rphy(starget->dev.parent);
- sas_device = __leapioraid_get_sdev_by_addr_and_rphy(ioc,
- rphy->identify.sas_address, rphy);
- if (sas_device) {
- sas_target_priv_data->handle = sas_device->handle;
- sas_target_priv_data->sas_address = sas_device->sas_address;
- sas_target_priv_data->port = sas_device->port;
- sas_target_priv_data->sas_dev = sas_device;
- sas_device->starget = starget;
- sas_device->id = starget->id;
- sas_device->channel = starget->channel;
- if (test_bit(sas_device->handle, ioc->pd_handles))
- sas_target_priv_data->flags |=
- LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT;
- if (sas_device->fast_path)
- sas_target_priv_data->flags |=
- LEAPIORAID_TARGET_FASTPATH_IO;
- }
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return 0;
-}
-
-static void
-leapioraid_scsihost_target_destroy(struct scsi_target *starget)
-{
- struct Scsi_Host *shost = dev_to_shost(&starget->dev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
- struct LEAPIORAID_TARGET *sas_target_priv_data;
- struct leapioraid_sas_device *sas_device;
- struct leapioraid_raid_device *raid_device;
- unsigned long flags;
-
- sas_target_priv_data = starget->hostdata;
- if (!sas_target_priv_data)
- return;
- if (starget->channel == RAID_CHANNEL) {
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- raid_device = leapioraid_scsihost_raid_device_find_by_id(
- ioc, starget->id, starget->channel);
- if (raid_device) {
- raid_device->starget = NULL;
- raid_device->sdev = NULL;
- }
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- goto out;
- }
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device =
- __leapioraid_get_sdev_from_target(ioc, sas_target_priv_data);
- if (sas_device && (sas_device->starget == starget)
- && (sas_device->id == starget->id)
- && (sas_device->channel == starget->channel))
- sas_device->starget = NULL;
- if (sas_device) {
- sas_target_priv_data->sas_dev = NULL;
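-		/* Two puts: one for the lookup reference taken above, one
-		 * for the reference held in sas_dev since target_alloc().
-		 */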
- leapioraid_sas_device_put(sas_device);
- leapioraid_sas_device_put(sas_device);
- }
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-out:
- kfree(sas_target_priv_data);
- starget->hostdata = NULL;
-}
-
-static int
-leapioraid_scsihost_slave_alloc(struct scsi_device *sdev)
-{
- struct Scsi_Host *shost;
- struct LEAPIORAID_ADAPTER *ioc;
- struct LEAPIORAID_TARGET *sas_target_priv_data;
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- struct scsi_target *starget;
- struct leapioraid_raid_device *raid_device;
- struct leapioraid_sas_device *sas_device;
- unsigned long flags;
-
- sas_device_priv_data =
- kzalloc(sizeof(*sas_device_priv_data), GFP_KERNEL);
- if (!sas_device_priv_data)
- return -ENOMEM;
- sas_device_priv_data->lun = sdev->lun;
- sas_device_priv_data->flags = LEAPIORAID_DEVICE_FLAGS_INIT;
- starget = scsi_target(sdev);
- sas_target_priv_data = starget->hostdata;
- sas_target_priv_data->num_luns++;
- sas_device_priv_data->sas_target = sas_target_priv_data;
- sdev->hostdata = sas_device_priv_data;
- if ((sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT))
- sdev->no_uld_attach = 1;
- shost = dev_to_shost(&starget->dev);
- ioc = leapioraid_shost_private(shost);
- if (starget->channel == RAID_CHANNEL) {
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- raid_device = leapioraid_scsihost_raid_device_find_by_id(ioc,
- starget->id,
- starget->channel);
- if (raid_device)
- raid_device->sdev = sdev;
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- }
- if (!(sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME)) {
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __leapioraid_get_sdev_by_addr(ioc,
- sas_target_priv_data->sas_address,
- sas_target_priv_data->port);
- if (sas_device && (sas_device->starget == NULL)) {
- sdev_printk(KERN_INFO, sdev,
- "%s : sas_device->starget set to starget @ %d\n",
- __func__, __LINE__);
- sas_device->starget = starget;
- }
- if (sas_device)
- leapioraid_sas_device_put(sas_device);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- }
- return 0;
-}
-
-static void
-leapioraid_scsihost_slave_destroy(struct scsi_device *sdev)
-{
- struct LEAPIORAID_TARGET *sas_target_priv_data;
- struct scsi_target *starget;
- struct Scsi_Host *shost;
- struct LEAPIORAID_ADAPTER *ioc;
- struct leapioraid_sas_device *sas_device;
- unsigned long flags;
-
- if (!sdev->hostdata)
- return;
- starget = scsi_target(sdev);
- sas_target_priv_data = starget->hostdata;
- sas_target_priv_data->num_luns--;
- shost = dev_to_shost(&starget->dev);
- ioc = leapioraid_shost_private(shost);
- if (!(sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME)) {
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __leapioraid_get_sdev_from_target(ioc,
- sas_target_priv_data);
- if (sas_device && !sas_target_priv_data->num_luns)
- sas_device->starget = NULL;
- if (sas_device)
- leapioraid_sas_device_put(sas_device);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- }
- kfree(sdev->hostdata);
- sdev->hostdata = NULL;
-}
-
-static void
-leapioraid_scsihost_display_sata_capabilities(
- struct LEAPIORAID_ADAPTER *ioc,
- u16 handle, struct scsi_device *sdev)
-{
- struct LeapioraidCfgRep_t mpi_reply;
- struct LeapioraidSasDevP0_t sas_device_pg0;
- u32 ioc_status;
- u16 flags;
- u32 device_info;
-
- if ((leapioraid_config_get_sas_device_pg0
- (ioc, &mpi_reply, &sas_device_pg0,
- LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
- flags = le16_to_cpu(sas_device_pg0.Flags);
- device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
-	sdev_printk(KERN_INFO, sdev,
-		    "atapi(%s), ncq(%s), asyn_notify(%s),\n\t\t"
-		    "smart(%s), fua(%s), sw_preserve(%s)\n",
-		    (device_info & LEAPIORAID_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
-		    (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
-		    (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" : "n",
-		    (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
-		    (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
-		    (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
-}
-
-static int
-leapioraid_scsihost_is_raid(struct device *dev)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
-
- return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
-}
-
-static void
-leapioraid_scsihost_get_resync(struct device *dev)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(sdev->host);
-	struct leapioraid_raid_device *raid_device;
- unsigned long flags;
- struct LeapioraidRaidVolP0_t vol_pg0;
- struct LeapioraidCfgRep_t mpi_reply;
- u32 volume_status_flags;
- u8 percent_complete;
- u16 handle;
-
- percent_complete = 0;
- handle = 0;
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- raid_device = leapioraid_scsihost_raid_device_find_by_id(
- ioc, sdev->id, sdev->channel);
- if (raid_device) {
- handle = raid_device->handle;
- percent_complete = raid_device->percent_complete;
- }
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- if (!handle)
- goto out;
-	if (leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
-			LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
-			sizeof(struct LeapioraidRaidVolP0_t))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- percent_complete = 0;
- goto out;
- }
- volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
- if (!(volume_status_flags &
- LEAPIORAID_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
- percent_complete = 0;
-out:
- raid_set_resync(leapioraid_raid_template, dev, percent_complete);
-}
-
-static void
-leapioraid_scsihost_get_state(struct device *dev)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(sdev->host);
-	struct leapioraid_raid_device *raid_device;
- unsigned long flags;
- struct LeapioraidRaidVolP0_t vol_pg0;
- struct LeapioraidCfgRep_t mpi_reply;
- u32 volstate;
- enum raid_state state = RAID_STATE_UNKNOWN;
- u16 handle = 0;
-
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- raid_device = leapioraid_scsihost_raid_device_find_by_id(
- ioc, sdev->id, sdev->channel);
- if (raid_device)
- handle = raid_device->handle;
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- if (!raid_device)
- goto out;
-	if (leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
-			LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
-			sizeof(struct LeapioraidRaidVolP0_t))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out;
- }
- volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
- if (volstate & LEAPIORAID_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
- state = RAID_STATE_RESYNCING;
- goto out;
- }
- switch (vol_pg0.VolumeState) {
- case LEAPIORAID_RAID_VOL_STATE_OPTIMAL:
- case LEAPIORAID_RAID_VOL_STATE_ONLINE:
- state = RAID_STATE_ACTIVE;
- break;
- case LEAPIORAID_RAID_VOL_STATE_DEGRADED:
- state = RAID_STATE_DEGRADED;
- break;
- case LEAPIORAID_RAID_VOL_STATE_FAILED:
- case LEAPIORAID_RAID_VOL_STATE_MISSING:
- state = RAID_STATE_OFFLINE;
- break;
- }
-out:
- raid_set_state(leapioraid_raid_template, dev, state);
-}
-
-static void
-leapioraid_scsihost_set_level(struct LEAPIORAID_ADAPTER *ioc,
- struct scsi_device *sdev, u8 volume_type)
-{
- enum raid_level level = RAID_LEVEL_UNKNOWN;
-
- switch (volume_type) {
- case LEAPIORAID_RAID_VOL_TYPE_RAID0:
- level = RAID_LEVEL_0;
- break;
- case LEAPIORAID_RAID_VOL_TYPE_RAID10:
- case LEAPIORAID_RAID_VOL_TYPE_RAID1E:
- level = RAID_LEVEL_10;
- break;
- case LEAPIORAID_RAID_VOL_TYPE_RAID1:
- level = RAID_LEVEL_1;
- break;
- }
- raid_set_level(leapioraid_raid_template, &sdev->sdev_gendev, level);
-}
-
-static int
-leapioraid_scsihost_get_volume_capabilities(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_raid_device *raid_device)
-{
- struct LeapioraidRaidVolP0_t *vol_pg0;
- struct LeapioraidRaidPDP0_t pd_pg0;
- struct LeapioraidSasDevP0_t sas_device_pg0;
- struct LeapioraidCfgRep_t mpi_reply;
- u16 sz;
- u8 num_pds;
-
- if ((leapioraid_config_get_number_pds(ioc, raid_device->handle,
- &num_pds)) || !num_pds) {
- dfailprintk(ioc, pr_warn(
- "%s failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__));
- return 1;
- }
- raid_device->num_pds = num_pds;
-	sz = offsetof(struct LeapioraidRaidVolP0_t, PhysDisk) +
-	    (num_pds * sizeof(struct LEAPIORAID_RAIDVOL0_PHYS_DISK));
- vol_pg0 = kzalloc(sz, GFP_KERNEL);
- if (!vol_pg0) {
- dfailprintk(ioc, pr_warn(
- "%s failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__));
- return 1;
- }
- if ((leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
- LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE,
- raid_device->handle, sz))) {
- dfailprintk(ioc,
- pr_warn(
- "%s failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__));
- kfree(vol_pg0);
- return 1;
- }
- raid_device->volume_type = vol_pg0->VolumeType;
- if (!(leapioraid_config_get_phys_disk_pg0(ioc, &mpi_reply,
- &pd_pg0,
- LEAPIORAID_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
- vol_pg0->PhysDisk[0].PhysDiskNum))) {
-		if (!(leapioraid_config_get_sas_device_pg0(ioc, &mpi_reply,
-				&sas_device_pg0,
-				LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE,
-				le16_to_cpu(pd_pg0.DevHandle)))) {
- raid_device->device_info =
- le32_to_cpu(sas_device_pg0.DeviceInfo);
- }
- }
- kfree(vol_pg0);
- return 0;
-}
-
-static void
-leapioraid_scsihost_enable_tlr(
- struct LEAPIORAID_ADAPTER *ioc, struct scsi_device *sdev)
-{
- u8 data[30];
- u8 page_len, ii;
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- struct LEAPIORAID_TARGET *sas_target_priv_data;
- struct leapioraid_sas_device *sas_device;
-
- if (sdev->type != TYPE_TAPE)
- return;
- if (!(ioc->facts.IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_TLR))
- return;
- sas_device_priv_data = sdev->hostdata;
- if (!sas_device_priv_data)
- return;
- sas_target_priv_data = sas_device_priv_data->sas_target;
- if (!sas_target_priv_data)
- return;
- if (leapioraid_scsihost_inquiry_vpd_supported_pages(ioc,
- sas_target_priv_data->handle,
- sdev->lun, data,
- sizeof(data)) !=
- DEVICE_READY) {
- sas_device =
- leapioraid_get_sdev_by_addr(ioc,
- sas_target_priv_data->sas_address,
- sas_target_priv_data->port);
- if (sas_device) {
- sdev_printk(KERN_INFO, sdev,
- "%s: DEVICE NOT READY: handle(0x%04x),\n\t\t"
- "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
- __func__,
- sas_device->handle,
- (unsigned long long)sas_device->sas_address,
- sas_device->phy,
- (unsigned long long)sas_device->device_name);
- leapioraid_scsihost_display_enclosure_chassis_info(NULL,
- sas_device,
- sdev, NULL);
- leapioraid_sas_device_put(sas_device);
- }
- return;
- }
- page_len = data[3];
- for (ii = 4; ii < page_len + 4; ii++) {
- if (data[ii] == 0x90) {
- sas_device_priv_data->flags |= LEAPIORAID_DEVICE_TLR_ON;
- return;
- }
- }
-}
-
-static void
-leapioraid_scsihost_enable_ssu_on_sata(
- struct leapioraid_sas_device *sas_device,
- struct scsi_device *sdev)
-{
- if (!(sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE))
- return;
- if (sas_device->ssd_device) {
- sdev->manage_system_start_stop = 1;
- sdev->manage_runtime_start_stop = 1;
- }
-}
-
-static int
-leapioraid_scsihost_slave_configure(struct scsi_device *sdev)
-{
- struct Scsi_Host *shost = sdev->host;
- struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost);
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- struct LEAPIORAID_TARGET *sas_target_priv_data;
- struct leapioraid_sas_device *sas_device;
- struct leapioraid_raid_device *raid_device;
- unsigned long flags;
- int qdepth;
- u8 ssp_target = 0;
- char *ds = "";
- char *r_level = "";
- u16 handle, volume_handle = 0;
- u64 volume_wwid = 0;
- u8 *serial_number = NULL;
- enum device_responsive_state retval;
- u8 count = 0;
-
- qdepth = 1;
- sas_device_priv_data = sdev->hostdata;
- sas_device_priv_data->configured_lun = 1;
- sas_device_priv_data->flags &= ~LEAPIORAID_DEVICE_FLAGS_INIT;
- sas_target_priv_data = sas_device_priv_data->sas_target;
- handle = sas_target_priv_data->handle;
- if (sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) {
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- raid_device =
- leapioraid_raid_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- if (!raid_device) {
- dfailprintk(ioc, pr_warn(
- "%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__,
- __func__));
- return 1;
- }
- if (leapioraid_scsihost_get_volume_capabilities(ioc, raid_device)) {
- dfailprintk(ioc, pr_warn(
- "%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__,
- __func__));
- return 1;
- }
- if (raid_device->device_info &
- LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) {
- qdepth = LEAPIORAID_SAS_QUEUE_DEPTH;
- ds = "SSP";
- } else {
- qdepth = LEAPIORAID_SATA_QUEUE_DEPTH;
- if (raid_device->device_info &
- LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE)
- ds = "SATA";
- else
- ds = "STP";
- }
- switch (raid_device->volume_type) {
- case LEAPIORAID_RAID_VOL_TYPE_RAID0:
- r_level = "RAID0";
- break;
- case LEAPIORAID_RAID_VOL_TYPE_RAID1E:
- qdepth = LEAPIORAID_RAID_QUEUE_DEPTH;
- if (ioc->manu_pg10.OEMIdentifier &&
- (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
- 0x00000004) &&
- !(raid_device->num_pds % 2))
- r_level = "RAID10";
- else
- r_level = "RAID1E";
- break;
- case LEAPIORAID_RAID_VOL_TYPE_RAID1:
- qdepth = LEAPIORAID_RAID_QUEUE_DEPTH;
- r_level = "RAID1";
- break;
- case LEAPIORAID_RAID_VOL_TYPE_RAID10:
- qdepth = LEAPIORAID_RAID_QUEUE_DEPTH;
- r_level = "RAID10";
- break;
- case LEAPIORAID_RAID_VOL_TYPE_UNKNOWN:
- default:
- qdepth = LEAPIORAID_RAID_QUEUE_DEPTH;
- r_level = "RAIDX";
- break;
- }
- if (!ioc->warpdrive_msg)
- sdev_printk(
- KERN_INFO, sdev,
- "%s: handle(0x%04x), wwid(0x%016llx), pd_count(%d), type(%s)\n",
- r_level, raid_device->handle,
- (unsigned long long)raid_device->wwid,
- raid_device->num_pds, ds);
- if (shost->max_sectors > LEAPIORAID_RAID_MAX_SECTORS) {
- blk_queue_max_hw_sectors(sdev->request_queue,
- LEAPIORAID_RAID_MAX_SECTORS);
- sdev_printk(KERN_INFO, sdev,
- "Set queue's max_sector to: %u\n",
- LEAPIORAID_RAID_MAX_SECTORS);
- }
- leapioraid__scsihost_change_queue_depth(sdev, qdepth);
- leapioraid_scsihost_set_level(ioc, sdev, raid_device->volume_type);
- return 0;
- }
- if (sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) {
- if (leapioraid_config_get_volume_handle(ioc, handle,
- &volume_handle)) {
- dfailprintk(ioc, pr_warn(
- "%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__,
- __func__));
- return 1;
- }
- if (volume_handle && leapioraid_config_get_volume_wwid(ioc,
- volume_handle,
- &volume_wwid)) {
- dfailprintk(ioc,
- pr_warn(
- "%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__,
- __func__));
- return 1;
- }
- }
- leapioraid_scsihost_inquiry_vpd_sn(ioc, handle, &serial_number);
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __leapioraid_get_sdev_by_addr(ioc,
- sas_device_priv_data->sas_target->sas_address,
- sas_device_priv_data->sas_target->port);
- if (!sas_device) {
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- dfailprintk(ioc, pr_warn(
- "%s failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__));
- kfree(serial_number);
- return 1;
- }
- sas_device->volume_handle = volume_handle;
- sas_device->volume_wwid = volume_wwid;
- sas_device->serial_number = serial_number;
- if (sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) {
- qdepth = (sas_device->port_type > 1) ?
- ioc->max_wideport_qd : ioc->max_narrowport_qd;
- ssp_target = 1;
- if (sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SEP) {
- sdev_printk(KERN_WARNING, sdev,
- "set ignore_delay_remove for handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle);
- sas_device_priv_data->ignore_delay_remove = 1;
- ds = "SES";
- } else
- ds = "SSP";
- } else {
- qdepth = ioc->max_sata_qd;
- if (sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_STP_TARGET)
- ds = "STP";
- else if (sas_device->device_info &
- LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE)
- ds = "SATA";
- }
- sdev_printk(
- KERN_INFO, sdev,
- "%s: handle(0x%04x), sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
- ds, handle, (unsigned long long)sas_device->sas_address,
- sas_device->phy,
- (unsigned long long)sas_device->device_name);
- leapioraid_scsihost_display_enclosure_chassis_info(
- NULL, sas_device, sdev, NULL);
-	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-	if (!ssp_target) {
-		leapioraid_scsihost_display_sata_capabilities(ioc, handle, sdev);
-		do {
-			retval = leapioraid_scsihost_ata_pass_thru_idd(ioc, handle,
-					&sas_device->ssd_device, 30, 0);
-		} while ((retval == DEVICE_RETRY || retval == DEVICE_RETRY_UA)
-			 && count++ < 3);
-	}
-	leapioraid_scsihost_enable_ssu_on_sata(sas_device, sdev);
-	/* drop the lookup reference only after the last sas_device access */
-	leapioraid_sas_device_put(sas_device);
- if (serial_number)
- sdev_printk(KERN_INFO, sdev, "serial_number(%s)\n",
- serial_number);
- leapioraid__scsihost_change_queue_depth(sdev, qdepth);
- if (ssp_target) {
- sas_read_port_mode_page(sdev);
- leapioraid_scsihost_enable_tlr(ioc, sdev);
- }
-
- return 0;
-}
-
-static int
-leapioraid_scsihost_bios_param(
- struct scsi_device *sdev, struct block_device *bdev,
- sector_t capacity, int params[])
-{
- int heads;
- int sectors;
- sector_t cylinders;
- ulong dummy;
-
- heads = 64;
- sectors = 32;
- dummy = heads * sectors;
- cylinders = capacity;
- sector_div(cylinders, dummy);
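-	/* For capacities of 1 GiB and up (0x200000 512-byte sectors), use
-	 * the conventional 255 heads / 63 sectors-per-track translation.
-	 */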
- if ((ulong) capacity >= 0x200000) {
- heads = 255;
- sectors = 63;
- dummy = heads * sectors;
- cylinders = capacity;
- sector_div(cylinders, dummy);
- }
- params[0] = heads;
- params[1] = sectors;
- params[2] = cylinders;
- return 0;
-}
-
-static void
-leapioraid_scsihost_response_code(
- struct LEAPIORAID_ADAPTER *ioc, u8 response_code)
-{
- char *desc;
-
- switch (response_code) {
- case LEAPIORAID_SCSITASKMGMT_RSP_TM_COMPLETE:
- desc = "task management request completed";
- break;
- case LEAPIORAID_SCSITASKMGMT_RSP_INVALID_FRAME:
- desc = "invalid frame";
- break;
- case LEAPIORAID_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
- desc = "task management request not supported";
- break;
- case LEAPIORAID_SCSITASKMGMT_RSP_TM_FAILED:
- desc = "task management request failed";
- break;
- case LEAPIORAID_SCSITASKMGMT_RSP_TM_SUCCEEDED:
- desc = "task management request succeeded";
- break;
- case LEAPIORAID_SCSITASKMGMT_RSP_TM_INVALID_LUN:
- desc = "invalid lun";
- break;
- case 0xA:
- desc = "overlapped tag attempted";
- break;
- case LEAPIORAID_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
- desc = "task queued, however not sent to target";
- break;
- default:
- desc = "unknown";
- break;
- }
- pr_warn("%s response_code(0x%01x): %s\n",
- ioc->name, response_code, desc);
-}
-
-static u8
-leapioraid_scsihost_tm_done(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index,
- u32 reply)
-{
- struct LeapioraidDefaultRep_t *mpi_reply;
-
- if (ioc->tm_cmds.status == LEAPIORAID_CMD_NOT_USED)
- return 1;
- if (ioc->tm_cmds.smid != smid)
- return 1;
- ioc->tm_cmds.status |= LEAPIORAID_CMD_COMPLETE;
- mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply);
- if (mpi_reply) {
- memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength * 4);
- ioc->tm_cmds.status |= LEAPIORAID_CMD_REPLY_VALID;
- }
- ioc->tm_cmds.status &= ~LEAPIORAID_CMD_PENDING;
- complete(&ioc->tm_cmds.done);
- return 1;
-}
-
-void
-leapioraid_scsihost_set_tm_flag(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle)
-{
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- struct scsi_device *sdev;
- u8 skip = 0;
-
- shost_for_each_device(sdev, ioc->shost) {
- if (skip)
- continue;
- sas_device_priv_data = sdev->hostdata;
- if (!sas_device_priv_data)
- continue;
- if (sas_device_priv_data->sas_target->handle == handle) {
- sas_device_priv_data->sas_target->tm_busy = 1;
- skip = 1;
- ioc->ignore_loginfos = 1;
- }
- }
-}
-
-void
-leapioraid_scsihost_clear_tm_flag(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle)
-{
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- struct scsi_device *sdev;
- u8 skip = 0;
-
- shost_for_each_device(sdev, ioc->shost) {
- if (skip)
- continue;
- sas_device_priv_data = sdev->hostdata;
- if (!sas_device_priv_data)
- continue;
- if (sas_device_priv_data->sas_target->handle == handle) {
- sas_device_priv_data->sas_target->tm_busy = 0;
- skip = 1;
- ioc->ignore_loginfos = 0;
- }
- }
-}
-
-static int
-leapioraid_scsihost_tm_cmd_map_status(
- struct LEAPIORAID_ADAPTER *ioc, uint channel,
- uint id, uint lun, u8 type, u16 smid_task)
-{
- if (smid_task <= ioc->shost->can_queue) {
- switch (type) {
- case LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
-			if (!leapioraid_scsihost_scsi_lookup_find_by_target(
-					ioc, id, channel))
- return SUCCESS;
- break;
- case LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
- case LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
-			if (!leapioraid_scsihost_scsi_lookup_find_by_lun(
-					ioc, id, lun, channel))
- return SUCCESS;
- break;
- default:
- return SUCCESS;
- }
- } else if (smid_task == ioc->scsih_cmds.smid) {
- if ((ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE) ||
- (ioc->scsih_cmds.status & LEAPIORAID_CMD_NOT_USED))
- return SUCCESS;
- } else if (smid_task == ioc->ctl_cmds.smid) {
- if ((ioc->ctl_cmds.status & LEAPIORAID_CMD_COMPLETE) ||
- (ioc->ctl_cmds.status & LEAPIORAID_CMD_NOT_USED))
- return SUCCESS;
- }
- return FAILED;
-}
-
-static int
-leapioraid_scsihost_tm_post_processing(struct LEAPIORAID_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type,
- u16 smid_task)
-{
- int rc;
-
- rc = leapioraid_scsihost_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
- if (rc == SUCCESS)
- return rc;
- pr_err(
- "%s Poll finish of smid(%d),task_type(0x%02x),handle(0x%04x)\n",
- ioc->name,
- smid_task,
- type,
- handle);
- leapioraid_base_mask_interrupts(ioc);
- leapioraid_base_sync_reply_irqs(ioc, 1);
- leapioraid_base_unmask_interrupts(ioc);
- return leapioraid_scsihost_tm_cmd_map_status(
- ioc, channel, id, lun, type, smid_task);
-}
-
-int
-leapioraid_scsihost_issue_tm(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type,
- u16 smid_task, u8 timeout, u8 tr_method)
-{
- struct LeapioraidSCSITmgReq_t *mpi_request;
- struct LeapioraidSCSITmgRep_t *mpi_reply;
- struct LeapioraidSCSIIOReq_t *request;
- u16 smid = 0;
- u32 ioc_state;
- struct leapioraid_scsiio_tracker *scsi_lookup = NULL;
- int rc;
- u16 msix_task = 0;
- u8 issue_reset = 0;
-
- lockdep_assert_held(&ioc->tm_cmds.mutex);
- if (ioc->tm_cmds.status != LEAPIORAID_CMD_NOT_USED) {
- pr_info("%s %s: tm_cmd busy!!!\n",
- __func__, ioc->name);
- return FAILED;
- }
- if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery) {
- pr_info("%s %s: host reset in progress!\n",
- __func__, ioc->name);
- return FAILED;
- }
- ioc_state = leapioraid_base_get_iocstate(ioc, 0);
- if (ioc_state & LEAPIORAID_DOORBELL_USED) {
- pr_info("%s unexpected doorbell active!\n",
- ioc->name);
- rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- return (!rc) ? SUCCESS : FAILED;
- }
- if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT) {
- leapioraid_print_fault_code(ioc, ioc_state &
- LEAPIORAID_DOORBELL_DATA_MASK);
- rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- return (!rc) ? SUCCESS : FAILED;
- } else if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) ==
- LEAPIORAID_IOC_STATE_COREDUMP) {
- leapioraid_base_coredump_info(ioc,
- ioc_state &
- LEAPIORAID_DOORBELL_DATA_MASK);
- rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- return (!rc) ? SUCCESS : FAILED;
- }
- smid = leapioraid_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
- if (!smid) {
- pr_err("%s %s: failed obtaining a smid\n",
- ioc->name, __func__);
- return FAILED;
- }
- if (type == LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
- scsi_lookup = leapioraid_get_st_from_smid(ioc, smid_task);
- dtmprintk(ioc, pr_info(
- "%s sending tm: handle(0x%04x),\n\t\t"
- "task_type(0x%02x), timeout(%d) tr_method(0x%x) smid(%d)\n",
- ioc->name,
- handle,
- type,
- timeout,
- tr_method,
- smid_task));
- ioc->tm_cmds.status = LEAPIORAID_CMD_PENDING;
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- ioc->tm_cmds.smid = smid;
- memset(mpi_request, 0, sizeof(struct LeapioraidSCSITmgReq_t));
- memset(ioc->tm_cmds.reply, 0, sizeof(struct LeapioraidSCSITmgRep_t));
- mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT;
- mpi_request->DevHandle = cpu_to_le16(handle);
- mpi_request->TaskType = type;
- mpi_request->MsgFlags = tr_method;
- if (type == LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
- type == LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
- mpi_request->TaskMID = cpu_to_le16(smid_task);
- int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
- leapioraid_scsihost_set_tm_flag(ioc, handle);
- init_completion(&ioc->tm_cmds.done);
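-	/* Post an ABORT_TASK on the same reply queue (msix index) that
-	 * carried the original I/O; everything else goes to queue 0.
-	 */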
- if ((type == LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK) &&
- (scsi_lookup && (scsi_lookup->msix_io < ioc->reply_queue_count)))
- msix_task = scsi_lookup->msix_io;
- else
- msix_task = 0;
- ioc->put_smid_hi_priority(ioc, smid, msix_task);
- wait_for_completion_timeout(&ioc->tm_cmds.done, timeout * HZ);
- if (!(ioc->tm_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
-		leapioraid_check_cmd_timeout(ioc, ioc->tm_cmds.status,
-					     mpi_request,
-					     sizeof(struct LeapioraidSCSITmgReq_t) / 4,
-					     issue_reset);
- if (issue_reset) {
- rc = leapioraid_base_hard_reset_handler(ioc,
- FORCE_BIG_HAMMER);
- rc = (!rc) ? SUCCESS : FAILED;
- goto out;
- }
- }
- leapioraid_base_sync_reply_irqs(ioc, 0);
- if (ioc->tm_cmds.status & LEAPIORAID_CMD_REPLY_VALID) {
- mpi_reply = ioc->tm_cmds.reply;
- dtmprintk(ioc, pr_info(
- "%s complete tm: ioc_status(0x%04x),\n\t\t"
- "loginfo(0x%08x), term_count(0x%08x)\n",
- ioc->name,
- le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
- if (ioc->logging_level & LEAPIORAID_DEBUG_TM) {
- leapioraid_scsihost_response_code(
- ioc, mpi_reply->ResponseCode);
- if (mpi_reply->IOCStatus)
- leapioraid_debug_dump_mf(
- mpi_request,
- sizeof(struct LeapioraidSCSITmgReq_t) / 4);
- }
- }
- switch (type) {
- case LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
- rc = SUCCESS;
- request = leapioraid_base_get_msg_frame(ioc, smid_task);
- if (le16_to_cpu(request->DevHandle) != handle)
- break;
- pr_err(
- "%s Task abort tm failed:\n\t\t"
- "handle(0x%04x), timeout(%d),\n\t\t"
- "tr_method(0x%x), smid(%d), msix_index(%d)\n",
- ioc->name,
- handle,
- timeout,
- tr_method,
- smid_task,
- msix_task);
- rc = FAILED;
- break;
- case LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
- case LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
- case LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
- rc = leapioraid_scsihost_tm_post_processing(
- ioc, handle, channel, id, lun, type, smid_task);
- break;
- case LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
- rc = SUCCESS;
- break;
- default:
- rc = FAILED;
- break;
- }
-out:
- leapioraid_scsihost_clear_tm_flag(ioc, handle);
- ioc->tm_cmds.status = LEAPIORAID_CMD_NOT_USED;
- return rc;
-}
-
-int
-leapioraid_scsihost_issue_locked_tm(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type,
- u16 smid_task, u8 timeout, u8 tr_method)
-{
- int ret;
-
- mutex_lock(&ioc->tm_cmds.mutex);
- ret = leapioraid_scsihost_issue_tm(
- ioc, handle, channel, id, lun, type,
- smid_task, timeout, tr_method);
- mutex_unlock(&ioc->tm_cmds.mutex);
- return ret;
-}
-
-static void
-leapioraid_scsihost_tm_display_info(
- struct LEAPIORAID_ADAPTER *ioc,
- struct scsi_cmnd *scmd)
-{
- struct scsi_target *starget = scmd->device->sdev_target;
- struct LEAPIORAID_TARGET *priv_target = starget->hostdata;
- struct leapioraid_sas_device *sas_device = NULL;
- unsigned long flags;
- char *device_str = NULL;
-
- if (!priv_target)
- return;
- if (ioc->warpdrive_msg)
- device_str = "WarpDrive";
- else
- device_str = "volume";
- scsi_print_command(scmd);
- if (priv_target->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) {
- starget_printk(
- KERN_INFO, starget, "%s handle(0x%04x), %s wwid(0x%016llx)\n",
- device_str,
- priv_target->handle, device_str,
- (unsigned long long)priv_target->sas_address);
- } else {
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device =
- __leapioraid_get_sdev_from_target(ioc, priv_target);
- if (sas_device) {
- if (priv_target->flags &
- LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) {
- starget_printk(KERN_INFO, starget,
- "volume handle(0x%04x), volume wwid(0x%016llx)\n",
- sas_device->volume_handle,
- (unsigned long long)sas_device->volume_wwid);
- }
- starget_printk(KERN_INFO, starget,
- "%s: handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
- __func__, sas_device->handle,
- (unsigned long long)sas_device->sas_address, sas_device->phy);
- leapioraid_scsihost_display_enclosure_chassis_info(NULL,
- sas_device,
- NULL, starget);
- leapioraid_sas_device_put(sas_device);
- }
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- }
-}
-
-static int
-leapioraid_scsihost_abort(struct scsi_cmnd *scmd)
-{
- struct LEAPIORAID_ADAPTER *ioc
- = leapioraid_shost_private(scmd->device->host);
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- u16 handle;
- int r;
- struct leapioraid_scsiio_tracker *st
- = leapioraid_base_scsi_cmd_priv(scmd);
- u8 timeout = 30;
-
- sdev_printk(
- KERN_INFO, scmd->device,
- "attempting task abort! scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
- scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
- (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000);
- leapioraid_scsihost_tm_display_info(ioc, scmd);
- if (leapioraid_base_pci_device_is_unplugged(ioc) || ioc->remove_host) {
- sdev_printk(KERN_INFO, scmd->device, "%s scmd(0x%p)\n",
- ((ioc->remove_host) ? ("shost is getting removed!")
-			    : ("pci device has been removed!")), scmd);
- if (st && st->smid)
- leapioraid_base_free_smid(ioc, st->smid);
- scmd->result = DID_NO_CONNECT << 16;
- r = FAILED;
- goto out;
- }
- sas_device_priv_data = scmd->device->hostdata;
- if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
- sdev_printk(KERN_INFO, scmd->device,
-			    "device has been deleted! scmd(0x%p)\n", scmd);
- scmd->result = DID_NO_CONNECT << 16;
- scsi_done(scmd);
- r = SUCCESS;
- goto out;
- }
- if (st == NULL || st->cb_idx == 0xFF) {
- sdev_printk(KERN_INFO, scmd->device,
-			    "no driver reference, scmd(0x%p) may have already completed\n",
- scmd);
- scmd->result = DID_RESET << 16;
- r = SUCCESS;
- goto out;
- }
- if (sas_device_priv_data->sas_target->flags &
- LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT ||
- sas_device_priv_data->sas_target->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) {
- scmd->result = DID_RESET << 16;
- r = FAILED;
- goto out;
- }
- leapioraid_halt_firmware(ioc, 0);
- handle = sas_device_priv_data->sas_target->handle;
- r = leapioraid_scsihost_issue_locked_tm(
- ioc, handle,
- scmd->device->channel,
- scmd->device->id,
- scmd->device->lun,
- LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
- st->smid, timeout, 0);
-out:
- sdev_printk(
- KERN_INFO, scmd->device,
- "task abort: %s scmd(0x%p)\n",
- ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
- return r;
-}
-
-static int
-leapioraid_scsihost_dev_reset(struct scsi_cmnd *scmd)
-{
- struct LEAPIORAID_ADAPTER *ioc
- = leapioraid_shost_private(scmd->device->host);
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- struct leapioraid_sas_device *sas_device = NULL;
- u16 handle;
- u8 tr_method = 0;
- u8 tr_timeout = 30;
- int r;
- struct scsi_target *starget = scmd->device->sdev_target;
- struct LEAPIORAID_TARGET *target_priv_data = starget->hostdata;
-
- sdev_printk(KERN_INFO, scmd->device,
- "attempting device reset! scmd(0x%p)\n", scmd);
- leapioraid_scsihost_tm_display_info(ioc, scmd);
- if (leapioraid_base_pci_device_is_unplugged(ioc) || ioc->remove_host) {
- sdev_printk(KERN_INFO, scmd->device, "%s scmd(0x%p)\n",
- ((ioc->remove_host) ? ("shost is getting removed!")
-			    : ("pci device has been removed!")), scmd);
- scmd->result = DID_NO_CONNECT << 16;
- r = FAILED;
- goto out;
- }
- sas_device_priv_data = scmd->device->hostdata;
- if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
- sdev_printk(KERN_INFO, scmd->device,
-			    "device has been deleted! scmd(0x%p)\n", scmd);
- scmd->result = DID_NO_CONNECT << 16;
- scsi_done(scmd);
- r = SUCCESS;
- goto out;
- }
- handle = 0;
- if (sas_device_priv_data->sas_target->flags &
- LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) {
- sas_device = leapioraid_get_sdev_from_target(ioc,
- target_priv_data);
- if (sas_device)
- handle = sas_device->volume_handle;
- } else
- handle = sas_device_priv_data->sas_target->handle;
- if (!handle) {
- scmd->result = DID_RESET << 16;
- r = FAILED;
- goto out;
- }
- tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
- r = leapioraid_scsihost_issue_locked_tm(ioc, handle,
- scmd->device->channel,
- scmd->device->id,
- scmd->device->lun,
- LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET,
- 0, tr_timeout, tr_method);
-out:
- sdev_printk(KERN_INFO, scmd->device,
- "device reset: %s scmd(0x%p)\n",
- ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
- if (sas_device)
- leapioraid_sas_device_put(sas_device);
- return r;
-}
-
-static int
-leapioraid_scsihost_target_reset(struct scsi_cmnd *scmd)
-{
- struct LEAPIORAID_ADAPTER *ioc
- = leapioraid_shost_private(scmd->device->host);
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- struct leapioraid_sas_device *sas_device = NULL;
- u16 handle;
- u8 tr_method = 0;
- u8 tr_timeout = 30;
- int r;
- struct scsi_target *starget = scmd->device->sdev_target;
- struct LEAPIORAID_TARGET *target_priv_data = starget->hostdata;
-
- starget_printk(KERN_INFO, starget,
- "attempting target reset! scmd(0x%p)\n", scmd);
- leapioraid_scsihost_tm_display_info(ioc, scmd);
- if (leapioraid_base_pci_device_is_unplugged(ioc) || ioc->remove_host) {
- sdev_printk(KERN_INFO, scmd->device, "%s scmd(0x%p)\n",
- ((ioc->remove_host) ? ("shost is getting removed!")
-			    : ("pci device has been removed!")), scmd);
- scmd->result = DID_NO_CONNECT << 16;
- r = FAILED;
- goto out;
- }
- sas_device_priv_data = scmd->device->hostdata;
- if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
- starget_printk(KERN_INFO, starget,
-			       "target has been deleted! scmd(0x%p)\n", scmd);
- scmd->result = DID_NO_CONNECT << 16;
- scsi_done(scmd);
- r = SUCCESS;
- goto out;
- }
- handle = 0;
- if (sas_device_priv_data->sas_target->flags &
- LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) {
- sas_device = leapioraid_get_sdev_from_target(ioc,
- target_priv_data);
- if (sas_device)
- handle = sas_device->volume_handle;
- } else
- handle = sas_device_priv_data->sas_target->handle;
- if (!handle) {
- scmd->result = DID_RESET << 16;
- r = FAILED;
- goto out;
- }
- tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
- r = leapioraid_scsihost_issue_locked_tm(ioc, handle,
- scmd->device->channel,
- scmd->device->id, 0,
- LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
- 0, tr_timeout, tr_method);
-out:
- starget_printk(KERN_INFO, starget,
- "target reset: %s scmd(0x%p)\n",
- ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
- if (sas_device)
- leapioraid_sas_device_put(sas_device);
- return r;
-}
-
-static int
-leapioraid_scsihost_host_reset(struct scsi_cmnd *scmd)
-{
- struct LEAPIORAID_ADAPTER *ioc
- = leapioraid_shost_private(scmd->device->host);
- int r, retval;
-
- pr_info("%s attempting host reset! scmd(0x%p)\n",
- ioc->name, scmd);
- scsi_print_command(scmd);
- if (ioc->is_driver_loading || ioc->remove_host) {
- pr_info("%s Blocking the host reset\n",
- ioc->name);
- r = FAILED;
- goto out;
- }
- retval = leapioraid_base_hard_reset_handler(
- ioc, FORCE_BIG_HAMMER);
- r = (retval < 0) ? FAILED : SUCCESS;
-out:
- pr_info("%s host reset: %s scmd(0x%p)\n",
- ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"),
- scmd);
- return r;
-}
-
-static void
-leapioraid_scsihost_fw_event_add(struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_fw_event_work *fw_event)
-{
- unsigned long flags;
-
- if (ioc->firmware_event_thread == NULL)
- return;
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
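-	/* First reference: held by fw_event_list until the event is
-	 * dequeued.
-	 */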
- leapioraid_fw_event_work_get(fw_event);
- INIT_LIST_HEAD(&fw_event->list);
- list_add_tail(&fw_event->list, &ioc->fw_event_list);
- INIT_WORK(&fw_event->work, leapioraid_firmware_event_work);
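-	/* Second reference: held by the queued work and dropped once the
-	 * work runs (or is cancelled during cleanup).
-	 */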
- leapioraid_fw_event_work_get(fw_event);
- queue_work(ioc->firmware_event_thread, &fw_event->work);
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
-}
-
-static void
-leapioraid_scsihost_fw_event_del_from_list(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_fw_event_work *fw_event)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- if (!list_empty(&fw_event->list)) {
- list_del_init(&fw_event->list);
- leapioraid_fw_event_work_put(fw_event);
- }
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
-}
-
-static void
-leapioraid_scsihost_fw_event_requeue(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_fw_event_work *fw_event, unsigned long delay)
-{
- unsigned long flags;
-
- if (ioc->firmware_event_thread == NULL)
- return;
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- leapioraid_fw_event_work_get(fw_event);
- list_add_tail(&fw_event->list, &ioc->fw_event_list);
- if (!fw_event->delayed_work_active) {
- fw_event->delayed_work_active = 1;
- INIT_DELAYED_WORK(&fw_event->delayed_work,
- leapioraid_firmware_event_work_delayed);
- }
- queue_delayed_work(ioc->firmware_event_thread, &fw_event->delayed_work,
- msecs_to_jiffies(delay));
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
-}
-
-static void
-leapioraid_scsihost_error_recovery_delete_devices(
- struct LEAPIORAID_ADAPTER *ioc)
-{
- struct leapioraid_fw_event_work *fw_event;
-
- fw_event = leapioraid_alloc_fw_event_work(0);
- if (!fw_event)
- return;
- fw_event->event = LEAPIORAID_REMOVE_UNRESPONDING_DEVICES;
- fw_event->ioc = ioc;
- leapioraid_scsihost_fw_event_add(ioc, fw_event);
- leapioraid_fw_event_work_put(fw_event);
-}
-
-void
-leapioraid_port_enable_complete(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct leapioraid_fw_event_work *fw_event;
-
- fw_event = leapioraid_alloc_fw_event_work(0);
- if (!fw_event)
- return;
- fw_event->event = LEAPIORAID_PORT_ENABLE_COMPLETE;
- fw_event->ioc = ioc;
- leapioraid_scsihost_fw_event_add(ioc, fw_event);
- leapioraid_fw_event_work_put(fw_event);
-}
-
-static struct leapioraid_fw_event_work *dequeue_next_fw_event(
- struct LEAPIORAID_ADAPTER *ioc)
-{
- unsigned long flags;
- struct leapioraid_fw_event_work *fw_event = NULL;
-
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- if (!list_empty(&ioc->fw_event_list)) {
- fw_event = list_first_entry(&ioc->fw_event_list,
- struct leapioraid_fw_event_work, list);
- list_del_init(&fw_event->list);
- leapioraid_fw_event_work_put(fw_event);
- }
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
- return fw_event;
-}
-
-static void
-leapioraid_scsihost_fw_event_cleanup_queue(
- struct LEAPIORAID_ADAPTER *ioc)
-{
- struct leapioraid_fw_event_work *fw_event;
- bool rc = false;
-
- if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
- !ioc->firmware_event_thread || in_interrupt())
- return;
-
- ioc->fw_events_cleanup = 1;
- if (ioc->shost_recovery && ioc->current_event)
- ioc->current_event->ignore = 1;
- while ((fw_event = dequeue_next_fw_event(ioc)) ||
- (fw_event = ioc->current_event)) {
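-		/* Only REMOVE_UNRESPONDING_DEVICES is waited for via
-		 * cancel_work_sync(); any other in-flight current event is
-		 * simply detached to avoid blocking here.
-		 */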
- if (fw_event == ioc->current_event &&
- ioc->current_event->event !=
- LEAPIORAID_REMOVE_UNRESPONDING_DEVICES) {
- ioc->current_event = NULL;
- continue;
- }
- if (fw_event->event == LEAPIORAID_PORT_ENABLE_COMPLETE) {
- ioc->port_enable_cmds.status |= LEAPIORAID_CMD_RESET;
- ioc->start_scan = 0;
- }
- if (fw_event->delayed_work_active)
- rc = cancel_delayed_work_sync(&fw_event->delayed_work);
- else
- rc = cancel_work_sync(&fw_event->work);
- if (rc)
- leapioraid_fw_event_work_put(fw_event);
- }
- ioc->fw_events_cleanup = 0;
-}
-
-static void
-leapioraid_scsihost_internal_device_block(
- struct scsi_device *sdev,
- struct LEAPIORAID_DEVICE
- *sas_device_priv_data)
-{
- int r = 0;
-
- sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle);
- sas_device_priv_data->block = 1;
-
- r = scsi_internal_device_block_nowait(sdev);
- if (r == -EINVAL)
- sdev_printk(KERN_WARNING, sdev,
- "device_block failed with return(%d) for handle(0x%04x)\n",
- r, sas_device_priv_data->sas_target->handle);
-}
-
-static void
-leapioraid_scsihost_internal_device_unblock(struct scsi_device *sdev,
- struct LEAPIORAID_DEVICE
- *sas_device_priv_data)
-{
- int r = 0;
-
- sdev_printk(KERN_WARNING, sdev,
- "device_unblock and setting to running, handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle);
- sas_device_priv_data->block = 0;
-
- r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
- if (r == -EINVAL) {
- sdev_printk(KERN_WARNING, sdev,
- "device_unblock failed with return(%d)\n\t\t"
- "for handle(0x%04x) performing a block followed by an unblock\n",
- r,
- sas_device_priv_data->sas_target->handle);
- sas_device_priv_data->block = 1;
- r = scsi_internal_device_block_nowait(sdev);
- if (r)
- sdev_printk(KERN_WARNING, sdev,
- "retried device_block failed with return(%d)\n\t\t"
- "for handle(0x%04x)\n",
- r,
- sas_device_priv_data->sas_target->handle);
- sas_device_priv_data->block = 0;
-
- r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
- if (r)
- sdev_printk(KERN_WARNING, sdev,
- "retried device_unblock failed\n\t\t"
- "with return(%d) for handle(0x%04x)\n",
- r,
- sas_device_priv_data->sas_target->handle);
- }
-}
-
-static void
-leapioraid_scsihost_ublock_io_all_device(
- struct LEAPIORAID_ADAPTER *ioc, u8 no_turs)
-{
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- struct LEAPIORAID_TARGET *sas_target;
- enum device_responsive_state rc;
- struct scsi_device *sdev;
- struct leapioraid_sas_device *sas_device = NULL;
- int count;
- u8 tr_timeout = 30;
- u8 tr_method = 0;
-
- shost_for_each_device(sdev, ioc->shost) {
- sas_device_priv_data = sdev->hostdata;
- if (!sas_device_priv_data)
- continue;
- sas_target = sas_device_priv_data->sas_target;
- if (!sas_target || sas_target->deleted)
- continue;
- if (!sas_device_priv_data->block)
- continue;
- count = 0;
- if (no_turs) {
- sdev_printk(KERN_WARNING, sdev,
- "device_unblocked, handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle);
- leapioraid_scsihost_internal_device_unblock(sdev,
- sas_device_priv_data);
- continue;
- }
- do {
- rc = leapioraid_scsihost_wait_for_device_to_become_ready(
- ioc,
- sas_target->handle,
- 0,
- (sas_target->flags
- & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT),
- sdev->lun,
- tr_timeout,
- tr_method);
- if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT
- || rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
- ssleep(1);
- } while ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
- rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
- && count++ < 144);
- sas_device_priv_data->block = 0;
- if (rc != DEVICE_READY)
- sas_device_priv_data->deleted = 1;
- leapioraid_scsihost_internal_device_unblock(
- sdev, sas_device_priv_data);
- if (rc != DEVICE_READY) {
- sdev_printk(KERN_WARNING, sdev,
- "%s: device_offlined, handle(0x%04x)\n",
- __func__,
- sas_device_priv_data->sas_target->handle);
- scsi_device_set_state(sdev, SDEV_OFFLINE);
- sas_device = leapioraid_get_sdev_by_addr(ioc,
- sas_device_priv_data->sas_target->sas_address,
- sas_device_priv_data->sas_target->port);
- if (sas_device) {
- leapioraid_scsihost_display_enclosure_chassis_info(
- NULL,
- sas_device,
- sdev,
- NULL);
- leapioraid_sas_device_put(sas_device);
- }
- } else
- sdev_printk(KERN_WARNING, sdev,
- "device_unblocked, handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle);
- }
-}
-
-static void
-leapioraid_scsihost_ublock_io_device_wait(
- struct LEAPIORAID_ADAPTER *ioc, u64 sas_address,
- struct leapioraid_hba_port *port)
-{
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- struct LEAPIORAID_TARGET *sas_target;
- enum device_responsive_state rc;
- struct scsi_device *sdev;
- int count, host_reset_completion_count;
- struct leapioraid_sas_device *sas_device;
- u8 tr_timeout = 30;
- u8 tr_method = 0;
-
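-	/* Pass 1: move OFFLINE devices back to a blocked RUNNING state so
-	 * they can be probed; pass 2 below waits for them to become ready.
-	 */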
- shost_for_each_device(sdev, ioc->shost) {
- sas_device_priv_data = sdev->hostdata;
- if (!sas_device_priv_data)
- continue;
- sas_target = sas_device_priv_data->sas_target;
- if (!sas_target)
- continue;
- if (sas_target->sas_address != sas_address ||
- sas_target->port != port)
- continue;
- if (sdev->sdev_state == SDEV_OFFLINE) {
- sas_device_priv_data->block = 1;
- sas_device_priv_data->deleted = 0;
- scsi_device_set_state(sdev, SDEV_RUNNING);
- scsi_internal_device_block_nowait(sdev);
- }
- }
- shost_for_each_device(sdev, ioc->shost) {
- sas_device_priv_data = sdev->hostdata;
- if (!sas_device_priv_data)
- continue;
- sas_target = sas_device_priv_data->sas_target;
- if (!sas_target)
- continue;
- if (sas_target->sas_address != sas_address ||
- sas_target->port != port)
- continue;
- if (!sas_device_priv_data->block)
- continue;
- count = 0;
- do {
- host_reset_completion_count = 0;
- rc = leapioraid_scsihost_wait_for_device_to_become_ready(
- ioc,
- sas_target->handle,
- 0,
- (sas_target->flags & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT),
- sdev->lun,
- tr_timeout,
- tr_method);
- if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT
- || rc == DEVICE_STOP_UNIT
- || rc == DEVICE_RETRY_UA) {
- do {
- msleep(500);
- host_reset_completion_count++;
- } while (rc == DEVICE_RETRY &&
- ioc->shost_recovery);
- if (host_reset_completion_count > 1) {
- rc = leapioraid_scsihost_wait_for_device_to_become_ready(
- ioc, sas_target->handle, 0,
- (sas_target->flags
- & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT),
- sdev->lun, tr_timeout, tr_method);
- if (rc == DEVICE_RETRY
- || rc == DEVICE_START_UNIT
- || rc == DEVICE_STOP_UNIT
- || rc == DEVICE_RETRY_UA)
- msleep(500);
- }
- continue;
- }
- } while ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
- rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
- && count++ <= 144);
- sas_device_priv_data->block = 0;
- if (rc != DEVICE_READY)
- sas_device_priv_data->deleted = 1;
-
- scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
-
- if (rc != DEVICE_READY) {
- sdev_printk(KERN_WARNING, sdev,
- "%s: device_offlined, handle(0x%04x)\n",
- __func__,
- sas_device_priv_data->sas_target->handle);
- sas_device =
- leapioraid_get_sdev_by_handle(ioc,
- sas_device_priv_data->sas_target->handle);
- if (sas_device) {
- leapioraid_scsihost_display_enclosure_chassis_info(NULL,
- sas_device,
- sdev,
- NULL);
- leapioraid_sas_device_put(sas_device);
- }
- scsi_device_set_state(sdev, SDEV_OFFLINE);
- } else {
- sdev_printk(KERN_WARNING, sdev,
- "device_unblocked, handle(0x%04x)\n",
- sas_device_priv_data->sas_target->handle);
- }
- }
-}
-
-static void
-leapioraid_scsihost_ublock_io_device(
- struct LEAPIORAID_ADAPTER *ioc, u64 sas_address,
- struct leapioraid_hba_port *port)
-{
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- struct scsi_device *sdev;
-
- shost_for_each_device(sdev, ioc->shost) {
- sas_device_priv_data = sdev->hostdata;
- if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
- continue;
- if (sas_device_priv_data->sas_target->sas_address
- != sas_address ||
- sas_device_priv_data->sas_target->port != port)
- continue;
- if (sas_device_priv_data->block) {
- leapioraid_scsihost_internal_device_unblock(sdev,
- sas_device_priv_data);
- }
- scsi_device_set_state(sdev, SDEV_OFFLINE);
- }
-}
-
-static void leapioraid_scsihost_block_io_all_device(
- struct LEAPIORAID_ADAPTER *ioc)
-{
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- struct scsi_device *sdev;
-
- shost_for_each_device(sdev, ioc->shost) {
- sas_device_priv_data = sdev->hostdata;
- if (!sas_device_priv_data)
- continue;
- if (sas_device_priv_data->block)
- continue;
- if (sas_device_priv_data->ignore_delay_remove) {
- sdev_printk(KERN_INFO, sdev,
- "%s skip device_block for SES handle(0x%04x)\n",
- __func__,
- sas_device_priv_data->sas_target->handle);
- continue;
- }
- leapioraid_scsihost_internal_device_block(
- sdev, sas_device_priv_data);
- }
-}
-
-static void
-leapioraid_scsihost_block_io_device(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle)
-{
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- struct scsi_device *sdev;
- struct leapioraid_sas_device *sas_device;
-
- sas_device = leapioraid_get_sdev_by_handle(ioc, handle);
- shost_for_each_device(sdev, ioc->shost) {
- sas_device_priv_data = sdev->hostdata;
- if (!sas_device_priv_data)
- continue;
- if (sas_device_priv_data->sas_target->handle != handle)
- continue;
- if (sas_device_priv_data->block)
- continue;
- if (sas_device && sas_device->pend_sas_rphy_add)
- continue;
- if (sas_device_priv_data->ignore_delay_remove) {
- sdev_printk(KERN_INFO, sdev,
- "%s skip device_block for SES handle(0x%04x)\n",
- __func__,
- sas_device_priv_data->sas_target->handle);
- continue;
- }
- leapioraid_scsihost_internal_device_block(
- sdev, sas_device_priv_data);
- }
- if (sas_device)
- leapioraid_sas_device_put(sas_device);
-}
-
-static void
-leapioraid_scsihost_block_io_to_children_attached_to_ex(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_raid_sas_node *sas_expander)
-{
- struct leapioraid_sas_port *leapioraid_port;
- struct leapioraid_sas_device *sas_device;
- struct leapioraid_raid_sas_node *expander_sibling;
- unsigned long flags;
-
- if (!sas_expander)
- return;
- list_for_each_entry(leapioraid_port,
- &sas_expander->sas_port_list, port_list) {
- if (leapioraid_port->remote_identify.device_type ==
- SAS_END_DEVICE) {
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __leapioraid_get_sdev_by_addr(ioc,
- leapioraid_port->remote_identify.sas_address,
- leapioraid_port->hba_port);
- if (sas_device) {
- set_bit(sas_device->handle,
- ioc->blocking_handles);
- leapioraid_sas_device_put(sas_device);
- }
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- }
- }
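-	/* Then recurse into any expanders attached to this one. */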
- list_for_each_entry(leapioraid_port,
- &sas_expander->sas_port_list, port_list) {
- if (leapioraid_port->remote_identify.device_type ==
- SAS_EDGE_EXPANDER_DEVICE ||
- leapioraid_port->remote_identify.device_type ==
- SAS_FANOUT_EXPANDER_DEVICE) {
- expander_sibling =
- leapioraid_scsihost_expander_find_by_sas_address
- (ioc, leapioraid_port->remote_identify.sas_address,
- leapioraid_port->hba_port);
- leapioraid_scsihost_block_io_to_children_attached_to_ex(
- ioc, expander_sibling);
- }
- }
-}
-
-static void
-leapioraid_scsihost_block_io_to_children_attached_directly(
- struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidEventDataSasTopoChangeList_t *event_data)
-{
- int i;
- u16 handle;
- u16 reason_code;
-
- for (i = 0; i < event_data->NumEntries; i++) {
- handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
- if (!handle)
- continue;
- reason_code = event_data->PHY[i].PhyStatus &
- LEAPIORAID_EVENT_SAS_TOPO_RC_MASK;
- if (reason_code ==
- LEAPIORAID_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
- leapioraid_scsihost_block_io_device(ioc, handle);
- }
-}
-
-static void
-leapioraid_scsihost_tm_tr_send(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle)
-{
- struct LeapioraidSCSITmgReq_t *mpi_request;
- u16 smid;
- struct leapioraid_sas_device *sas_device = NULL;
- struct LEAPIORAID_TARGET *sas_target_priv_data = NULL;
- u64 sas_address = 0;
- unsigned long flags;
- struct leapioraid_tr_list *delayed_tr;
- u32 ioc_state;
- struct leapioraid_hba_port *port = NULL;
- u8 tr_method = 0;
-
- if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(
- "%s %s: host in pci error recovery: handle(0x%04x)\n",
- __func__, ioc->name, handle));
- return;
- }
- ioc_state = leapioraid_base_get_iocstate(ioc, 1);
- if (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) {
- dewtprintk(ioc, pr_info(
- "%s %s: host is not operational: handle(0x%04x)\n",
- __func__, ioc->name, handle));
- return;
- }
- if (test_bit(handle, ioc->pd_handles))
- return;
- clear_bit(handle, ioc->pend_os_device_add);
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __leapioraid_get_sdev_by_handle(ioc, handle);
- if (sas_device && sas_device->starget && sas_device->starget->hostdata) {
- sas_target_priv_data = sas_device->starget->hostdata;
- sas_target_priv_data->deleted = 1;
- sas_address = sas_device->sas_address;
- port = sas_device->port;
- }
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (!sas_device)
- tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
-
- if (sas_target_priv_data) {
- dewtprintk(ioc, pr_err(
- "%s %s: setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, __func__, handle,
- (unsigned long long)sas_address));
- if (sas_device) {
- dewtprintk(ioc,
- leapioraid_scsihost_display_enclosure_chassis_info(
- ioc,
- sas_device,
- NULL,
- NULL));
- }
- leapioraid_scsihost_ublock_io_device(ioc, sas_address, port);
- sas_target_priv_data->handle =
- LEAPIORAID_INVALID_DEVICE_HANDLE;
- }
- smid = leapioraid_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
- if (!smid) {
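-		/* No free high-priority smid: park the handle on
-		 * delayed_tr_list so the reset can be re-issued later from
-		 * the TM completion path.
-		 */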
- delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
- if (!delayed_tr)
- goto out;
- INIT_LIST_HEAD(&delayed_tr->list);
- delayed_tr->handle = handle;
- list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
- dewtprintk(ioc, pr_err(
- "%s DELAYED:tr:handle(0x%04x), (open)\n",
- ioc->name, handle));
- goto out;
- }
- dewtprintk(ioc, pr_info(
- "%s tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle,
- smid, ioc->tm_tr_cb_idx));
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- memset(mpi_request, 0, sizeof(struct LeapioraidSCSITmgReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT;
- mpi_request->DevHandle = cpu_to_le16(handle);
- mpi_request->TaskType = LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpi_request->MsgFlags = tr_method;
- set_bit(handle, ioc->device_remove_in_progress);
- ioc->put_smid_hi_priority(ioc, smid, 0);
-out:
- if (sas_device)
- leapioraid_sas_device_put(sas_device);
-}
-
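/*
 * Completion of the target reset above: after filtering out spurious
 * replies by device handle, it chains an IO_UNIT_CONTROL/REMOVE_DEVICE
 * request so the firmware drops the handle, deferring to
 * ioc->delayed_sc_list when no free smid is available.
 */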
-static u8
-leapioraid_scsihost_tm_tr_complete(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid,
- u8 msix_index, u32 reply)
-{
- u16 handle;
- struct LeapioraidSCSITmgReq_t *mpi_request_tm;
- struct LeapioraidSCSITmgRep_t *mpi_reply =
- leapioraid_base_get_reply_virt_addr(ioc, reply);
- struct LeapioraidSasIoUnitControlReq_t *mpi_request;
- u16 smid_sas_ctrl;
- u32 ioc_state;
- struct leapioraid_sc_list *delayed_sc;
-
- if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(
- "%s %s: host in pci error recovery\n", __func__,
- ioc->name));
- return 1;
- }
- ioc_state = leapioraid_base_get_iocstate(ioc, 1);
- if (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) {
- dewtprintk(ioc, pr_info(
- "%s %s: host is not operational\n", __func__, ioc->name));
- return 1;
- }
- if (unlikely(!mpi_reply)) {
- pr_err(
- "%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
- return 1;
- }
- mpi_request_tm = leapioraid_base_get_msg_frame(ioc, smid);
- handle = le16_to_cpu(mpi_request_tm->DevHandle);
- if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
- dewtprintk(ioc, pr_err(
- "%s spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
- ioc->name, handle,
- le16_to_cpu(mpi_reply->DevHandle), smid));
- return 0;
- }
- dewtprintk(ioc, pr_err(
- "%s tr_complete: handle(0x%04x), (open) smid(%d),\n\t\t"
- "ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
- ioc->name,
- handle,
- smid,
- le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
- smid_sas_ctrl =
- leapioraid_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
- if (!smid_sas_ctrl) {
- delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
- if (!delayed_sc)
- return leapioraid_scsihost_check_for_pending_tm(ioc, smid);
- INIT_LIST_HEAD(&delayed_sc->list);
- delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
- list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
- dewtprintk(ioc, pr_err(
- "%s DELAYED:sc:handle(0x%04x), (open)\n",
- ioc->name, handle));
- return leapioraid_scsihost_check_for_pending_tm(ioc, smid);
- }
- dewtprintk(ioc, pr_info(
- "%s sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle,
- smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid_sas_ctrl);
-	memset(mpi_request, 0, sizeof(struct LeapioraidSasIoUnitControlReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_IO_UNIT_CONTROL;
- mpi_request->Operation = LEAPIORAID_CTRL_OP_REMOVE_DEVICE;
- mpi_request->DevHandle = mpi_request_tm->DevHandle;
- ioc->put_smid_default(ioc, smid_sas_ctrl);
- return leapioraid_scsihost_check_for_pending_tm(ioc, smid);
-}
-
-inline bool
-leapioraid_scsihost_allow_scmd_to_device(
- struct LEAPIORAID_ADAPTER *ioc,
- struct scsi_cmnd *scmd)
-{
- if (ioc->pci_error_recovery)
- return false;
- if (ioc->adapter_over_temp)
- return false;
- if (ioc->remove_host) {
- if (leapioraid_base_pci_device_is_unplugged(ioc))
- return false;
- switch (scmd->cmnd[0]) {
- case SYNCHRONIZE_CACHE:
- case START_STOP:
- return true;
- default:
- return false;
- }
- }
- return true;
-}
-
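/*
 * Completion of the REMOVE_DEVICE request: on success the handle's
 * device_remove_in_progress bit is cleared; otherwise tm_tr_send() is
 * re-issued up to three times (ioc->tm_tr_retry[]) before giving up.
 */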
-static u8
-leapioraid_scsihost_sas_control_complete(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid,
- u8 msix_index, u32 reply)
-{
- struct LeapioraidDefaultRep_t *mpi_reply =
- leapioraid_base_get_reply_virt_addr(ioc, reply);
- u16 dev_handle;
-
- if (likely(mpi_reply)) {
- dev_handle
- = ((struct LeapioraidIoUnitControlRep_t *)mpi_reply)->DevHandle;
- dewtprintk(ioc, pr_err(
- "%s sc_complete:handle(0x%04x), (open) smid(%d),\n\t\t"
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name,
- le16_to_cpu(dev_handle),
- smid,
- le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo)));
- if (le16_to_cpu(mpi_reply->IOCStatus) ==
- LEAPIORAID_IOCSTATUS_SUCCESS) {
- clear_bit(le16_to_cpu(dev_handle),
- ioc->device_remove_in_progress);
- ioc->tm_tr_retry[le16_to_cpu(dev_handle)] = 0;
- } else if (ioc->tm_tr_retry[le16_to_cpu(dev_handle)] < 3) {
- dewtprintk(ioc, pr_err(
- "%s re-initiating tm_tr_send:handle(0x%04x)\n",
- ioc->name,
- le16_to_cpu(dev_handle)));
- ioc->tm_tr_retry[le16_to_cpu(dev_handle)]++;
- leapioraid_scsihost_tm_tr_send(ioc, le16_to_cpu(dev_handle));
- } else {
- dewtprintk(ioc, pr_err(
- "%s Exiting out of tm_tr_send retries:handle(0x%04x)\n",
- ioc->name,
- le16_to_cpu(dev_handle)));
- ioc->tm_tr_retry[le16_to_cpu(dev_handle)] = 0;
- clear_bit(le16_to_cpu(dev_handle),
- ioc->device_remove_in_progress);
- }
- } else {
- pr_err(
- "%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
- }
- return leapioraid_check_for_pending_internal_cmds(ioc, smid);
-}
-
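/*
 * Volume flavour of tm_tr_send(): the same TARGET_RESET, but deferred
 * requests go to ioc->delayed_tr_volume_list, and the completion path
 * below does not chain a REMOVE_DEVICE request.
 */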
-static void
-leapioraid_scsihost_tm_tr_volume_send(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle)
-{
- struct LeapioraidSCSITmgReq_t *mpi_request;
- u16 smid;
- struct leapioraid_tr_list *delayed_tr;
-
- if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(
- "%s %s: host reset in progress!\n", __func__, ioc->name));
- return;
- }
- smid = leapioraid_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
- if (!smid) {
- delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
- if (!delayed_tr)
- return;
- INIT_LIST_HEAD(&delayed_tr->list);
- delayed_tr->handle = handle;
- list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
- dewtprintk(ioc, pr_err(
- "%s DELAYED:tr:handle(0x%04x), (open)\n",
- ioc->name, handle));
- return;
- }
- dewtprintk(ioc, pr_info(
- "%s tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle,
- smid, ioc->tm_tr_volume_cb_idx));
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- memset(mpi_request, 0, sizeof(struct LeapioraidSCSITmgReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT;
- mpi_request->DevHandle = cpu_to_le16(handle);
- mpi_request->TaskType = LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- ioc->put_smid_hi_priority(ioc, smid, 0);
-}
-
-static u8
-leapioraid_scsihost_tm_volume_tr_complete(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid,
- u8 msix_index, u32 reply)
-{
- u16 handle;
- struct LeapioraidSCSITmgReq_t *mpi_request_tm;
- struct LeapioraidSCSITmgRep_t *mpi_reply =
- leapioraid_base_get_reply_virt_addr(ioc, reply);
-
- if (ioc->shost_recovery || ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(
- "%s %s: host reset in progress!\n", __func__, ioc->name));
- return 1;
- }
- if (unlikely(!mpi_reply)) {
- pr_err(
- "%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
- return 1;
- }
- mpi_request_tm = leapioraid_base_get_msg_frame(ioc, smid);
- handle = le16_to_cpu(mpi_request_tm->DevHandle);
- if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
- dewtprintk(ioc, pr_err(
- "%s spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
- ioc->name, handle,
- le16_to_cpu(mpi_reply->DevHandle), smid));
- return 0;
- }
- dewtprintk(ioc, pr_err(
- "%s tr_complete:handle(0x%04x), (open) smid(%d),\n\t\t"
- "ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
- ioc->name,
- handle,
- smid,
- le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
- return leapioraid_scsihost_check_for_pending_tm(ioc, smid);
-}
-
-static void
-leapioraid_scsihost_tm_internal_tr_send(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle)
-{
- struct leapioraid_tr_list *delayed_tr;
- struct LeapioraidSCSITmgReq_t *mpi_request;
- u16 smid;
- u8 tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
-
- smid = leapioraid_base_get_smid_hpr(ioc, ioc->tm_tr_internal_cb_idx);
- if (!smid) {
- delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
- if (!delayed_tr)
- return;
- INIT_LIST_HEAD(&delayed_tr->list);
- delayed_tr->handle = handle;
- list_add_tail(&delayed_tr->list,
- &ioc->delayed_internal_tm_list);
- dewtprintk(ioc,
- pr_err(
- "%s DELAYED:tr:handle(0x%04x), (open)\n",
- ioc->name, handle));
- return;
- }
- dewtprintk(ioc, pr_info(
- "%s tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle,
- smid, ioc->tm_tr_internal_cb_idx));
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- memset(mpi_request, 0, sizeof(struct LeapioraidSCSITmgReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT;
- mpi_request->DevHandle = cpu_to_le16(handle);
- mpi_request->TaskType = LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpi_request->MsgFlags = tr_method;
- ioc->put_smid_hi_priority(ioc, smid, 0);
-}
-
-static u8
-leapioraid_scsihost_tm_internal_tr_complete(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid,
- u8 msix_index, u32 reply)
-{
- struct LeapioraidSCSITmgRep_t *mpi_reply =
- leapioraid_base_get_reply_virt_addr(ioc, reply);
-
- if (likely(mpi_reply)) {
- dewtprintk(ioc, pr_err(
- "%s tr_complete:handle(0x%04x),\n\t\t"
- "(open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name,
- le16_to_cpu(mpi_reply->DevHandle),
- smid,
- le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo)));
- } else {
- pr_err("%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
- return 1;
- }
- return leapioraid_scsihost_check_for_pending_tm(ioc, smid);
-}
-
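/*
 * The two helpers below replay work that was deferred for lack of a
 * free smid: an EVENT_ACK for a firmware event, and an IO_UNIT_CONTROL
 * remove for a device handle. Both re-point the internal_lookup
 * callback index before reusing the frame.
 */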
-static void
-leapioraid_scsihost_issue_delayed_event_ack(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid,
- U16 event, U32 event_context)
-{
- struct LeapioraidEventAckReq_t *ack_request;
- int i = smid - ioc->internal_smid;
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- dewtprintk(ioc, pr_info(
- "%s EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
- ioc->name, le16_to_cpu(event),
- smid, ioc->base_cb_idx));
- ack_request = leapioraid_base_get_msg_frame(ioc, smid);
- memset(ack_request, 0, sizeof(struct LeapioraidEventAckReq_t));
- ack_request->Function = LEAPIORAID_FUNC_EVENT_ACK;
- ack_request->Event = event;
- ack_request->EventContext = event_context;
- ack_request->VF_ID = 0;
- ack_request->VP_ID = 0;
- ioc->put_smid_default(ioc, smid);
-}
-
-static void
-leapioraid_scsihost_issue_delayed_sas_io_unit_ctrl(
- struct LEAPIORAID_ADAPTER *ioc,
- u16 smid, u16 handle)
-{
- struct LeapioraidSasIoUnitControlReq_t *mpi_request;
- u32 ioc_state;
- int i = smid - ioc->internal_smid;
- unsigned long flags;
-
- if (ioc->remove_host) {
- dewtprintk(ioc, pr_info(
- "%s %s: host has been removed\n", __func__, ioc->name));
- return;
- } else if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(
- "%s %s: host in pci error recovery\n", __func__,
- ioc->name));
- return;
- }
- ioc_state = leapioraid_base_get_iocstate(ioc, 1);
- if (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) {
- dewtprintk(ioc, pr_info(
- "%s %s: host is not operational\n", __func__, ioc->name));
- return;
- }
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- dewtprintk(ioc, pr_info(
- "%s sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle,
- smid, ioc->tm_sas_control_cb_idx));
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
-	memset(mpi_request, 0, sizeof(struct LeapioraidSasIoUnitControlReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_IO_UNIT_CONTROL;
-	mpi_request->Operation = LEAPIORAID_CTRL_OP_REMOVE_DEVICE;
- mpi_request->DevHandle = cpu_to_le16(handle);
- ioc->put_smid_default(ioc, smid);
-}
-
-u8
-leapioraid_check_for_pending_internal_cmds(struct LEAPIORAID_ADAPTER *ioc,
- u16 smid)
-{
- struct leapioraid_sc_list *delayed_sc;
- struct leapioraid_event_ack_list *delayed_event_ack;
-
- if (!list_empty(&ioc->delayed_event_ack_list)) {
- delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
- struct leapioraid_event_ack_list, list);
- leapioraid_scsihost_issue_delayed_event_ack(ioc, smid,
- delayed_event_ack->Event,
- delayed_event_ack->EventContext);
- list_del(&delayed_event_ack->list);
- kfree(delayed_event_ack);
- return 0;
- }
- if (!list_empty(&ioc->delayed_sc_list)) {
- delayed_sc = list_entry(ioc->delayed_sc_list.next,
- struct leapioraid_sc_list, list);
- leapioraid_scsihost_issue_delayed_sas_io_unit_ctrl(ioc, smid,
- delayed_sc->handle);
- list_del(&delayed_sc->list);
- kfree(delayed_sc);
- return 0;
- }
- return 1;
-}
-
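/*
 * Drain order for deferred task management: volume resets first, then
 * device resets, then internal (link-reset) resets. The just-freed smid
 * is recycled for the replayed request; the function returns 0 when it
 * queued more work and 1 when all delayed lists are empty.
 */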
-static u8
-leapioraid_scsihost_check_for_pending_tm(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid)
-{
- struct leapioraid_tr_list *delayed_tr;
-
- if (!list_empty(&ioc->delayed_tr_volume_list)) {
- delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
- struct leapioraid_tr_list, list);
- leapioraid_base_free_smid(ioc, smid);
- leapioraid_scsihost_tm_tr_volume_send(ioc, delayed_tr->handle);
- list_del(&delayed_tr->list);
- kfree(delayed_tr);
- return 0;
- }
- if (!list_empty(&ioc->delayed_tr_list)) {
- delayed_tr = list_entry(ioc->delayed_tr_list.next,
- struct leapioraid_tr_list, list);
- leapioraid_base_free_smid(ioc, smid);
- leapioraid_scsihost_tm_tr_send(ioc, delayed_tr->handle);
- list_del(&delayed_tr->list);
- kfree(delayed_tr);
- return 0;
- }
- if (!list_empty(&ioc->delayed_internal_tm_list)) {
- delayed_tr = list_entry(ioc->delayed_internal_tm_list.next,
- struct leapioraid_tr_list, list);
- leapioraid_base_free_smid(ioc, smid);
- leapioraid_scsihost_tm_internal_tr_send(
- ioc, delayed_tr->handle);
- list_del(&delayed_tr->list);
- kfree(delayed_tr);
- return 0;
- }
- return 1;
-}
-
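/*
 * Pre-processing of a topology-change event before it reaches the
 * worker thread: fire target resets for handles that stopped
 * responding, block I/O below an expander that went away, and flag any
 * queued duplicate event for the same expander as ignorable.
 */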
-static void
-leapioraid_scsihost_check_topo_delete_events(
- struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidEventDataSasTopoChangeList_t *event_data)
-{
- struct leapioraid_fw_event_work *fw_event;
- struct LeapioraidEventDataSasTopoChangeList_t *local_event_data;
- u16 expander_handle;
- struct leapioraid_raid_sas_node *sas_expander;
- unsigned long flags;
- int i, reason_code;
- u16 handle;
-
- for (i = 0; i < event_data->NumEntries; i++) {
- handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
- if (!handle)
- continue;
- reason_code = event_data->PHY[i].PhyStatus &
- LEAPIORAID_EVENT_SAS_TOPO_RC_MASK;
- if (reason_code ==
- LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
- leapioraid_scsihost_tm_tr_send(ioc, handle);
- }
- expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
- if (expander_handle < ioc->sas_hba.num_phys) {
- leapioraid_scsihost_block_io_to_children_attached_directly(
- ioc, event_data);
- return;
- }
- if (event_data->ExpStatus ==
- LEAPIORAID_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
- spin_lock_irqsave(&ioc->sas_node_lock, flags);
- sas_expander = leapioraid_scsihost_expander_find_by_handle(
- ioc, expander_handle);
- leapioraid_scsihost_block_io_to_children_attached_to_ex(
- ioc, sas_expander);
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- do {
- handle = find_first_bit(ioc->blocking_handles,
- ioc->facts.MaxDevHandle);
- if (handle < ioc->facts.MaxDevHandle)
- leapioraid_scsihost_block_io_device(ioc, handle);
- } while (test_and_clear_bit(handle, ioc->blocking_handles));
- } else if (event_data->ExpStatus ==
- LEAPIORAID_EVENT_SAS_TOPO_ES_RESPONDING)
- leapioraid_scsihost_block_io_to_children_attached_directly(
- ioc, event_data);
- if (event_data->ExpStatus != LEAPIORAID_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
- return;
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
- if (fw_event->event != LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
- fw_event->ignore)
- continue;
- local_event_data = fw_event->event_data;
- if (local_event_data->ExpStatus ==
- LEAPIORAID_EVENT_SAS_TOPO_ES_ADDED ||
- local_event_data->ExpStatus ==
- LEAPIORAID_EVENT_SAS_TOPO_ES_RESPONDING) {
- if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
- expander_handle) {
- dewtprintk(ioc, pr_err(
- "%s setting ignoring flag\n",
- ioc->name));
- fw_event->ignore = 1;
- }
- }
- }
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
-}
-
-static void
-leapioraid_scsihost_set_volume_delete_flag(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle)
-{
- struct leapioraid_raid_device *raid_device;
- struct LEAPIORAID_TARGET *sas_target_priv_data;
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- raid_device = leapioraid_raid_device_find_by_handle(
- ioc, handle);
- if (raid_device && raid_device->starget &&
- raid_device->starget->hostdata) {
- sas_target_priv_data = raid_device->starget->hostdata;
- sas_target_priv_data->deleted = 1;
- dewtprintk(ioc, pr_err(
- "%s setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
- ioc->name, handle,
- (unsigned long long)raid_device->wwid));
- }
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
-}
-
-static void
-leapioraid_scsihost_set_volume_handle_for_tr(
- u16 handle, u16 *a, u16 *b)
-{
- if (!handle || handle == *a || handle == *b)
- return;
- if (!*a)
- *a = handle;
- else if (!*b)
- *b = handle;
-}
-
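/*
 * IR config change handling: mark deleted/removed volumes, fire volume
 * resets for up to two affected volume handles (a/b), then either reset
 * unhidden phys-disk handles immediately or defer them until the
 * covering volume reset completes.
 */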
-static void
-leapioraid_scsihost_check_ir_config_unhide_events(
- struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidEventDataIrCfgChangeList_t *event_data)
-{
- struct LeapioraidEventIrCfgEle_t *element;
- int i;
- u16 handle, volume_handle, a, b;
- struct leapioraid_tr_list *delayed_tr;
-
- a = 0;
- b = 0;
- element =
- (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0];
- for (i = 0; i < event_data->NumElements; i++, element++) {
- if (le32_to_cpu(event_data->Flags) &
- LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
- continue;
- if (element->ReasonCode ==
- LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
- element->ReasonCode == LEAPIORAID_EVENT_IR_CHANGE_RC_REMOVED) {
- volume_handle = le16_to_cpu(element->VolDevHandle);
- leapioraid_scsihost_set_volume_delete_flag(ioc, volume_handle);
- leapioraid_scsihost_set_volume_handle_for_tr(
- volume_handle, &a, &b);
- }
- }
- element =
- (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0];
- for (i = 0; i < event_data->NumElements; i++, element++) {
- if (le32_to_cpu(event_data->Flags) &
- LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
- continue;
- if (element->ReasonCode == LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE) {
- volume_handle = le16_to_cpu(element->VolDevHandle);
- leapioraid_scsihost_set_volume_handle_for_tr(
- volume_handle, &a, &b);
- }
- }
- if (a)
- leapioraid_scsihost_tm_tr_volume_send(ioc, a);
- if (b)
- leapioraid_scsihost_tm_tr_volume_send(ioc, b);
- element =
- (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0];
- for (i = 0; i < event_data->NumElements; i++, element++) {
- if (element->ReasonCode != LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE)
- continue;
- handle = le16_to_cpu(element->PhysDiskDevHandle);
- volume_handle = le16_to_cpu(element->VolDevHandle);
- clear_bit(handle, ioc->pd_handles);
- if (!volume_handle)
- leapioraid_scsihost_tm_tr_send(ioc, handle);
- else if (volume_handle == a || volume_handle == b) {
- delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
- BUG_ON(!delayed_tr);
- INIT_LIST_HEAD(&delayed_tr->list);
- delayed_tr->handle = handle;
- list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
- dewtprintk(ioc, pr_err(
- "%s DELAYED:tr:handle(0x%04x), (open)\n",
- ioc->name, handle));
- } else
- leapioraid_scsihost_tm_tr_send(ioc, handle);
- }
-}
-
-static void
-leapioraid_scsihost_check_volume_delete_events(
- struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidEventDataIrVol_t *event_data)
-{
- u32 state;
-
- if (event_data->ReasonCode != LEAPIORAID_EVENT_IR_VOLUME_RC_STATE_CHANGED)
- return;
- state = le32_to_cpu(event_data->NewValue);
- if (state == LEAPIORAID_RAID_VOL_STATE_MISSING || state ==
- LEAPIORAID_RAID_VOL_STATE_FAILED)
- leapioraid_scsihost_set_volume_delete_flag(
- ioc, le16_to_cpu(event_data->VolDevHandle));
-}
-
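/*
 * ATA_12/ATA_16 passthrough commands are serialized per device: the
 * helper below test-and-sets LEAPIORAID_CMND_PENDING_BIT so the
 * queuecommand path can return SCSI_MLQUEUE_DEVICE_BUSY while a SATL
 * command is already in flight.
 */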
-static int
-leapioraid_scsihost_set_satl_pending(
- struct scsi_cmnd *scmd, bool pending)
-{
- struct LEAPIORAID_DEVICE *priv = scmd->device->hostdata;
-
- if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
- return 0;
- if (pending)
- return test_and_set_bit(LEAPIORAID_CMND_PENDING_BIT,
- &priv->ata_command_pending);
- clear_bit(LEAPIORAID_CMND_PENDING_BIT, &priv->ata_command_pending);
- return 0;
-}
-
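/*
 * On reset or removal, walk every smid and complete outstanding
 * commands: DID_NO_CONNECT when the adapter is gone or over
 * temperature, DID_RESET (retryable) otherwise.
 */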
-void
-leapioraid_scsihost_flush_running_cmds(
- struct LEAPIORAID_ADAPTER *ioc)
-{
- struct scsi_cmnd *scmd;
- struct leapioraid_scsiio_tracker *st;
- u16 smid;
- u16 count = 0;
-
- for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
- scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid);
- if (!scmd)
- continue;
- count++;
- st = leapioraid_base_scsi_cmd_priv(scmd);
- if (st && st->smid == 0)
- continue;
- leapioraid_scsihost_set_satl_pending(scmd, false);
- leapioraid_base_get_msg_frame(ioc, smid);
- scsi_dma_unmap(scmd);
-
- leapioraid_base_clear_st(ioc, st);
-		if (!leapioraid_base_pci_device_is_available(ioc) ||
-		    ioc->ioc_reset_status != 0 ||
-		    ioc->adapter_over_temp || ioc->remove_host)
- scmd->result = DID_NO_CONNECT << 16;
- else
- scmd->result = DID_RESET << 16;
- scsi_done(scmd);
- }
- dtmprintk(ioc, pr_info("%s completing %d cmds\n",
- ioc->name, count));
-}
-
-static inline u8 scsih_is_io_belongs_to_RT_class(
- struct scsi_cmnd *scmd)
-{
- struct request *rq = scsi_cmd_to_rq(scmd);
-
- return (IOPRIO_PRIO_CLASS(req_get_ioprio(rq)) == IOPRIO_CLASS_RT);
-}
-
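/*
 * queuecommand: validate the target, translate the data direction into
 * LEAPIORAID_SCSIIO_CONTROL_* flags, build the MPI SCSI IO frame (CDB,
 * LUN, sense buffer, SGL) and post it through the fast-path or default
 * doorbell depending on the target flags.
 */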
-static int
-leapioraid_scsihost_qcmd(
- struct Scsi_Host *shost, struct scsi_cmnd *scmd)
-{
- struct LEAPIORAID_ADAPTER *ioc
- = leapioraid_shost_private(scmd->device->host);
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- struct LEAPIORAID_TARGET *sas_target_priv_data;
- struct LeapioraidSCSIIOReq_t *mpi_request;
- u32 mpi_control;
- u16 smid;
- u16 handle;
- int rc = 0;
-
- if (ioc->logging_level & LEAPIORAID_DEBUG_SCSI)
- scsi_print_command(scmd);
- sas_device_priv_data = scmd->device->hostdata;
- if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
- scmd->result = DID_NO_CONNECT << 16;
- scsi_done(scmd);
- goto out;
- }
- if (!(leapioraid_scsihost_allow_scmd_to_device(ioc, scmd))) {
- scmd->result = DID_NO_CONNECT << 16;
- scsi_done(scmd);
- goto out;
- }
- sas_target_priv_data = sas_device_priv_data->sas_target;
- handle = sas_target_priv_data->handle;
- if (handle == LEAPIORAID_INVALID_DEVICE_HANDLE) {
- scmd->result = DID_NO_CONNECT << 16;
- scsi_done(scmd);
- goto out;
- }
- if (sas_device_priv_data->block &&
- scmd->device->host->shost_state == SHOST_RECOVERY &&
- scmd->cmnd[0] == TEST_UNIT_READY) {
- scsi_build_sense(scmd, 0, UNIT_ATTENTION,
- 0x29, 0x07);
- scsi_done(scmd);
- goto out;
- }
- if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- } else if (sas_target_priv_data->deleted ||
- sas_device_priv_data->deleted) {
- scmd->result = DID_NO_CONNECT << 16;
- scsi_done(scmd);
- goto out;
- } else if (sas_target_priv_data->tm_busy || sas_device_priv_data->block) {
- rc = SCSI_MLQUEUE_DEVICE_BUSY;
- goto out;
- }
- do {
- if (test_bit(LEAPIORAID_CMND_PENDING_BIT,
- &sas_device_priv_data->ata_command_pending)) {
- rc = SCSI_MLQUEUE_DEVICE_BUSY;
- goto out;
- }
- } while (leapioraid_scsihost_set_satl_pending(scmd, true));
- if (scmd->sc_data_direction == DMA_FROM_DEVICE)
- mpi_control = LEAPIORAID_SCSIIO_CONTROL_READ;
- else if (scmd->sc_data_direction == DMA_TO_DEVICE)
- mpi_control = LEAPIORAID_SCSIIO_CONTROL_WRITE;
- else
- mpi_control = LEAPIORAID_SCSIIO_CONTROL_NODATATRANSFER;
- mpi_control |= LEAPIORAID_SCSIIO_CONTROL_SIMPLEQ;
- if (sas_device_priv_data->ncq_prio_enable) {
- if (scsih_is_io_belongs_to_RT_class(scmd))
- mpi_control |= 1 << LEAPIORAID_SCSIIO_CONTROL_CMDPRI_SHIFT;
- }
- if ((sas_device_priv_data->flags & LEAPIORAID_DEVICE_TLR_ON) &&
- scmd->cmd_len != 32)
- mpi_control |= LEAPIORAID_SCSIIO_CONTROL_TLR_ON;
- smid = leapioraid_base_get_smid_scsiio(
- ioc, ioc->scsi_io_cb_idx, scmd);
- if (!smid) {
- pr_err("%s %s: failed obtaining a smid\n",
- ioc->name, __func__);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- leapioraid_scsihost_set_satl_pending(scmd, false);
- goto out;
- }
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- if (scmd->cmd_len == 32)
- mpi_control |= 4 << LEAPIORAID_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
- mpi_request->Function = LEAPIORAID_FUNC_SCSI_IO_REQUEST;
- if (sas_device_priv_data->sas_target->flags &
- LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT)
- mpi_request->Function =
- LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH;
- else
- mpi_request->Function = LEAPIORAID_FUNC_SCSI_IO_REQUEST;
- mpi_request->DevHandle = cpu_to_le16(handle);
- mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
- mpi_request->Control = cpu_to_le32(mpi_control);
- mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
- mpi_request->MsgFlags = LEAPIORAID_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
- mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
- mpi_request->SenseBufferLowAddress =
- leapioraid_base_get_sense_buffer_dma(ioc, smid);
- mpi_request->SGLOffset0 = offsetof(struct LeapioraidSCSIIOReq_t, SGL) / 4;
- int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
- mpi_request->LUN);
- memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
- if (mpi_request->DataLength) {
- if (ioc->build_sg_scmd(ioc, scmd, smid)) {
- leapioraid_base_free_smid(ioc, smid);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- leapioraid_scsihost_set_satl_pending(scmd, false);
- goto out;
- }
- } else
- ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
- if (likely(mpi_request->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST)) {
- if (sas_target_priv_data->flags & LEAPIORAID_TARGET_FASTPATH_IO) {
- mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len | 0x4000);
- ioc->put_smid_fast_path(ioc, smid, handle);
- } else
- ioc->put_smid_scsi_io(ioc, smid,
- le16_to_cpu(mpi_request->DevHandle));
- } else
- ioc->put_smid_default(ioc, smid);
-out:
- return rc;
-}
-
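/*
 * Sense data arrives in two layouts: descriptor format (response codes
 * 0x72/0x73; key/ASC/ASCQ in bytes 1-3) and fixed format (key in byte
 * 2, ASC/ASCQ in bytes 12-13). The helper below folds both into
 * struct sense_info.
 */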
-static void
-leapioraid_scsihost_normalize_sense(
- char *sense_buffer, struct sense_info *data)
-{
- if ((sense_buffer[0] & 0x7F) >= 0x72) {
- data->skey = sense_buffer[1] & 0x0F;
- data->asc = sense_buffer[2];
- data->ascq = sense_buffer[3];
- } else {
- data->skey = sense_buffer[2] & 0x0F;
- data->asc = sense_buffer[12];
- data->ascq = sense_buffer[13];
- }
-}
-
-static void
-leapioraid_scsihost_scsi_ioc_info(
- struct LEAPIORAID_ADAPTER *ioc, struct scsi_cmnd *scmd,
- struct LeapioraidSCSIIORep_t *mpi_reply, u16 smid,
- u8 scsi_status, u16 error_response_count)
-{
- u32 response_info;
- u8 *response_bytes;
- u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
- LEAPIORAID_IOCSTATUS_MASK;
- u8 scsi_state = mpi_reply->SCSIState;
- char *desc_ioc_state = NULL;
- char *desc_scsi_status = NULL;
- char *desc_scsi_state = ioc->tmp_string;
- u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
- struct leapioraid_sas_device *sas_device = NULL;
- struct scsi_target *starget = scmd->device->sdev_target;
- struct LEAPIORAID_TARGET *priv_target = starget->hostdata;
- char *device_str = NULL;
-
- if (!priv_target)
- return;
- if (ioc->warpdrive_msg)
- device_str = "WarpDrive";
- else
- device_str = "volume";
- if (log_info == 0x31170000)
- return;
- switch (ioc_status) {
- case LEAPIORAID_IOCSTATUS_SUCCESS:
- desc_ioc_state = "success";
- break;
- case LEAPIORAID_IOCSTATUS_INVALID_FUNCTION:
- desc_ioc_state = "invalid function";
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR:
- desc_ioc_state = "scsi recovered error";
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
- desc_ioc_state = "scsi invalid dev handle";
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
- desc_ioc_state = "scsi device not there";
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_DATA_OVERRUN:
- desc_ioc_state = "scsi data overrun";
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN:
- desc_ioc_state = "scsi data underrun";
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR:
- desc_ioc_state = "scsi io data error";
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR:
- desc_ioc_state = "scsi protocol error";
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED:
- desc_ioc_state = "scsi task terminated";
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
- desc_ioc_state = "scsi residual mismatch";
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
- desc_ioc_state = "scsi task mgmt failed";
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED:
- desc_ioc_state = "scsi ioc terminated";
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED:
- desc_ioc_state = "scsi ext terminated";
- break;
- case LEAPIORAID_IOCSTATUS_EEDP_GUARD_ERROR:
- if (!ioc->disable_eedp_support) {
- desc_ioc_state = "eedp guard error";
- break;
- }
- fallthrough;
- case LEAPIORAID_IOCSTATUS_EEDP_REF_TAG_ERROR:
- if (!ioc->disable_eedp_support) {
- desc_ioc_state = "eedp ref tag error";
- break;
- }
- fallthrough;
- case LEAPIORAID_IOCSTATUS_EEDP_APP_TAG_ERROR:
- if (!ioc->disable_eedp_support) {
- desc_ioc_state = "eedp app tag error";
- break;
- }
- fallthrough;
- case LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER:
- desc_ioc_state = "insufficient power";
- break;
- default:
- desc_ioc_state = "unknown";
- break;
- }
- switch (scsi_status) {
- case LEAPIORAID_SCSI_STATUS_GOOD:
- desc_scsi_status = "good";
- break;
- case LEAPIORAID_SCSI_STATUS_CHECK_CONDITION:
- desc_scsi_status = "check condition";
- break;
- case LEAPIORAID_SCSI_STATUS_CONDITION_MET:
- desc_scsi_status = "condition met";
- break;
- case LEAPIORAID_SCSI_STATUS_BUSY:
- desc_scsi_status = "busy";
- break;
- case LEAPIORAID_SCSI_STATUS_INTERMEDIATE:
- desc_scsi_status = "intermediate";
- break;
- case LEAPIORAID_SCSI_STATUS_INTERMEDIATE_CONDMET:
- desc_scsi_status = "intermediate condmet";
- break;
- case LEAPIORAID_SCSI_STATUS_RESERVATION_CONFLICT:
- desc_scsi_status = "reservation conflict";
- break;
- case LEAPIORAID_SCSI_STATUS_COMMAND_TERMINATED:
- desc_scsi_status = "command terminated";
- break;
- case LEAPIORAID_SCSI_STATUS_TASK_SET_FULL:
- desc_scsi_status = "task set full";
- break;
- case LEAPIORAID_SCSI_STATUS_ACA_ACTIVE:
- desc_scsi_status = "aca active";
- break;
- case LEAPIORAID_SCSI_STATUS_TASK_ABORTED:
- desc_scsi_status = "task aborted";
- break;
- default:
- desc_scsi_status = "unknown";
- break;
- }
- desc_scsi_state[0] = '\0';
- if (!scsi_state)
- desc_scsi_state = " ";
- if (scsi_state & LEAPIORAID_SCSI_STATE_RESPONSE_INFO_VALID)
- strcat(desc_scsi_state, "response info ");
- if (scsi_state & LEAPIORAID_SCSI_STATE_TERMINATED)
- strcat(desc_scsi_state, "state terminated ");
- if (scsi_state & LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS)
- strcat(desc_scsi_state, "no status ");
- if (scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED)
- strcat(desc_scsi_state, "autosense failed ");
- if (scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID)
- strcat(desc_scsi_state, "autosense valid ");
- scsi_print_command(scmd);
- if (priv_target->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) {
- pr_warn("%s \t%s wwid(0x%016llx)\n",
- ioc->name, device_str,
- (unsigned long long)priv_target->sas_address);
- } else {
- sas_device = leapioraid_get_sdev_from_target(ioc, priv_target);
- if (sas_device) {
- pr_warn(
- "%s \t%s: sas_address(0x%016llx), phy(%d)\n",
- ioc->name, __func__, (unsigned long long)
- sas_device->sas_address, sas_device->phy);
- leapioraid_scsihost_display_enclosure_chassis_info(ioc,
- sas_device,
- NULL, NULL);
- leapioraid_sas_device_put(sas_device);
- }
- }
- pr_warn(
- "%s \thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
- ioc->name, le16_to_cpu(mpi_reply->DevHandle), desc_ioc_state,
- ioc_status, smid);
- pr_warn("%s \trequest_len(%d), underflow(%d), resid(%d)\n",
- ioc->name, scsi_bufflen(scmd), scmd->underflow,
- scsi_get_resid(scmd));
- pr_warn("%s \ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
- ioc->name,
- le16_to_cpu(mpi_reply->TaskTag),
- le32_to_cpu(mpi_reply->TransferCount), scmd->result);
- pr_warn("%s \tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
- ioc->name, desc_scsi_status,
- scsi_status, desc_scsi_state, scsi_state);
- if (scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID) {
- struct sense_info data;
-
- leapioraid_scsihost_normalize_sense(scmd->sense_buffer, &data);
- pr_warn(
- "%s \t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
- ioc->name,
- data.skey, data.asc, data.ascq,
- le32_to_cpu(mpi_reply->SenseCount));
- }
- if (scsi_state & LEAPIORAID_SCSI_STATE_RESPONSE_INFO_VALID) {
- response_info = le32_to_cpu(mpi_reply->ResponseInfo);
- response_bytes = (u8 *) &response_info;
- leapioraid_scsihost_response_code(ioc, response_bytes[0]);
- }
-}
-
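/*
 * Predicted-fault (PFA) LED control through the SCSI enclosure
 * processor: turn-on addresses the device by handle, turn-off by
 * enclosure handle and slot.
 */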
-static void
-leapioraid_scsihost_turn_on_pfa_led(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle)
-{
- struct LeapioraidSepRep_t mpi_reply;
- struct LeapioraidSepReq_t mpi_request;
- struct leapioraid_sas_device *sas_device;
-
- sas_device = leapioraid_get_sdev_by_handle(ioc, handle);
- if (!sas_device)
- return;
- memset(&mpi_request, 0, sizeof(struct LeapioraidSepReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_SCSI_ENCLOSURE_PROCESSOR;
- mpi_request.Action = LEAPIORAID_SEP_REQ_ACTION_WRITE_STATUS;
- mpi_request.SlotStatus =
- cpu_to_le32(LEAPIORAID_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
- mpi_request.DevHandle = cpu_to_le16(handle);
- mpi_request.Flags = LEAPIORAID_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
- if ((leapioraid_base_scsi_enclosure_processor(ioc, &mpi_reply,
- &mpi_request)) != 0) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out;
- }
- sas_device->pfa_led_on = 1;
- if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
- dewtprintk(ioc, pr_info(
- "%s enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo)));
- goto out;
- }
-out:
- leapioraid_sas_device_put(sas_device);
-}
-
-static void
-leapioraid_scsihost_turn_off_pfa_led(struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_sas_device *sas_device)
-{
- struct LeapioraidSepRep_t mpi_reply;
- struct LeapioraidSepReq_t mpi_request;
-
- memset(&mpi_request, 0, sizeof(struct LeapioraidSepReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_SCSI_ENCLOSURE_PROCESSOR;
- mpi_request.Action = LEAPIORAID_SEP_REQ_ACTION_WRITE_STATUS;
- mpi_request.SlotStatus = 0;
- mpi_request.Slot = cpu_to_le16(sas_device->slot);
- mpi_request.DevHandle = 0;
- mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
- mpi_request.Flags = LEAPIORAID_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
- if ((leapioraid_base_scsi_enclosure_processor(ioc, &mpi_reply,
- &mpi_request)) != 0) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
- if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
- dewtprintk(ioc, pr_info(
- "%s enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo)));
- return;
- }
-}
-
-static void
-leapioraid_scsihost_send_event_to_turn_on_pfa_led(
- struct LEAPIORAID_ADAPTER *ioc,
- u16 handle)
-{
- struct leapioraid_fw_event_work *fw_event;
-
- fw_event = leapioraid_alloc_fw_event_work(0);
- if (!fw_event)
- return;
- fw_event->event = LEAPIORAID_TURN_ON_PFA_LED;
- fw_event->device_handle = handle;
- fw_event->ioc = ioc;
- leapioraid_scsihost_fw_event_add(ioc, fw_event);
- leapioraid_fw_event_work_put(fw_event);
-}
-
-static void
-leapioraid_scsihost_smart_predicted_fault(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle,
- u8 from_sata_smart_polling)
-{
- struct scsi_target *starget;
- struct LEAPIORAID_TARGET *sas_target_priv_data;
- struct LeapioraidEventNotificationRep_t *event_reply;
- struct LeapioraidEventDataSasDeviceStatusChange_t *event_data;
- struct leapioraid_sas_device *sas_device;
- ssize_t sz;
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __leapioraid_get_sdev_by_handle(ioc, handle);
- if (!sas_device)
- goto out_unlock;
-
- starget = sas_device->starget;
- sas_target_priv_data = starget->hostdata;
- if ((sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT)
- || ((sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME)))
- goto out_unlock;
- leapioraid_scsihost_display_enclosure_chassis_info(NULL, sas_device, NULL,
- starget);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (from_sata_smart_polling)
- leapioraid_scsihost_send_event_to_turn_on_pfa_led(ioc, handle);
- sz = offsetof(struct LeapioraidEventNotificationRep_t, EventData) +
- sizeof(struct LeapioraidEventDataSasDeviceStatusChange_t);
- event_reply = kzalloc(sz, GFP_ATOMIC);
- if (!event_reply) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out;
- }
- event_reply->Function = LEAPIORAID_FUNC_EVENT_NOTIFICATION;
- event_reply->Event =
- cpu_to_le16(LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE);
- event_reply->MsgLength = sz / 4;
- event_reply->EventDataLength =
- cpu_to_le16(sizeof(struct LeapioraidEventDataSasDeviceStatusChange_t) / 4);
- event_data = (struct LeapioraidEventDataSasDeviceStatusChange_t *)
- event_reply->EventData;
- event_data->ReasonCode = LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
- event_data->ASC = 0x5D;
- event_data->DevHandle = cpu_to_le16(handle);
- event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
- leapioraid_ctl_add_to_event_log(ioc, event_reply);
- kfree(event_reply);
-out:
- if (sas_device)
- leapioraid_sas_device_put(sas_device);
- return;
-out_unlock:
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- goto out;
-}
-
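/*
 * I/O completion: map the IOCStatus/SCSIState/SCSIStatus triple onto a
 * Linux scmd->result. Direct-I/O requests are retried once with the
 * original CDB, autosense with ASC 0x5D triggers SMART predicted-fault
 * handling, and an underrun with zero transfer and a BUSY/RESERVATION
 * CONFLICT/TASK SET FULL status is treated as success.
 */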
-static u8
-leapioraid_scsihost_io_done(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index,
- u32 reply)
-{
- struct LeapioraidSCSIIOReq_t *mpi_request;
- struct LeapioraidSCSIIORep_t *mpi_reply;
- struct scsi_cmnd *scmd;
- u16 ioc_status, error_response_count = 0;
- u32 xfer_cnt;
- u8 scsi_state;
- u8 scsi_status;
- u32 log_info;
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- u32 response_code = 0;
- struct leapioraid_scsiio_tracker *st;
-
- scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid);
- if (scmd == NULL)
- return 1;
- leapioraid_scsihost_set_satl_pending(scmd, false);
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply);
- if (mpi_reply == NULL) {
- scmd->result = DID_OK << 16;
- goto out;
- }
- sas_device_priv_data = scmd->device->hostdata;
- if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
- sas_device_priv_data->sas_target->deleted) {
- scmd->result = DID_NO_CONNECT << 16;
- goto out;
- }
- ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
- st = leapioraid_base_scsi_cmd_priv(scmd);
- if (st->direct_io && ((ioc_status & LEAPIORAID_IOCSTATUS_MASK)
- != LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED)) {
- st->scmd = scmd;
- st->direct_io = 0;
- memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
- mpi_request->DevHandle =
- cpu_to_le16(sas_device_priv_data->sas_target->handle);
- ioc->put_smid_scsi_io(ioc, smid,
- sas_device_priv_data->sas_target->handle);
- return 0;
- }
- scsi_state = mpi_reply->SCSIState;
- if (scsi_state & LEAPIORAID_SCSI_STATE_RESPONSE_INFO_VALID)
- response_code = le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
- if (!sas_device_priv_data->tlr_snoop_check) {
- sas_device_priv_data->tlr_snoop_check++;
- if ((sas_device_priv_data->flags & LEAPIORAID_DEVICE_TLR_ON) &&
- response_code == LEAPIORAID_SCSITASKMGMT_RSP_INVALID_FRAME)
- sas_device_priv_data->flags &= ~LEAPIORAID_DEVICE_TLR_ON;
- }
- if (ioc_status & LEAPIORAID_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
- log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
- else
- log_info = 0;
- ioc_status &= LEAPIORAID_IOCSTATUS_MASK;
- scsi_status = mpi_reply->SCSIStatus;
- xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
- scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
- if (ioc_status == LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN
- && xfer_cnt == 0
- && (scsi_status == LEAPIORAID_SCSI_STATUS_BUSY
- || scsi_status == LEAPIORAID_SCSI_STATUS_RESERVATION_CONFLICT
- || scsi_status == LEAPIORAID_SCSI_STATUS_TASK_SET_FULL)) {
- ioc_status = LEAPIORAID_IOCSTATUS_SUCCESS;
- }
- if (scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID) {
- struct sense_info data;
- const void *sense_data = leapioraid_base_get_sense_buffer(ioc,
- smid);
- u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
- le32_to_cpu(mpi_reply->SenseCount));
- memcpy(scmd->sense_buffer, sense_data, sz);
- leapioraid_scsihost_normalize_sense(scmd->sense_buffer, &data);
- if (data.asc == 0x5D)
- leapioraid_scsihost_smart_predicted_fault(ioc,
- le16_to_cpu(mpi_reply->DevHandle),
- 0);
- }
- switch (ioc_status) {
- case LEAPIORAID_IOCSTATUS_BUSY:
- case LEAPIORAID_IOCSTATUS_INSUFFICIENT_RESOURCES:
- scmd->result = SAM_STAT_BUSY;
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
- scmd->result = DID_NO_CONNECT << 16;
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED:
- if (sas_device_priv_data->block) {
- scmd->result = DID_TRANSPORT_DISRUPTED << 16;
- goto out;
- }
- if (log_info == 0x31110630) {
- if (scmd->retries > 2) {
- scmd->result = DID_NO_CONNECT << 16;
- scsi_device_set_state(scmd->device,
- SDEV_OFFLINE);
- } else {
- scmd->result = DID_SOFT_ERROR << 16;
- scmd->device->expecting_cc_ua = 1;
- }
- break;
- } else if (log_info == 0x32010081) {
- scmd->result = DID_RESET << 16;
- break;
- } else if ((scmd->device->channel == RAID_CHANNEL) &&
- (scsi_state == (LEAPIORAID_SCSI_STATE_TERMINATED |
- LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS))) {
- scmd->result = DID_RESET << 16;
- break;
- }
- scmd->result = DID_SOFT_ERROR << 16;
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED:
- case LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED:
- scmd->result = DID_RESET << 16;
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
- if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
- scmd->result = DID_SOFT_ERROR << 16;
- else
- scmd->result = (DID_OK << 16) | scsi_status;
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN:
- scmd->result = (DID_OK << 16) | scsi_status;
- if ((scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID))
- break;
- if (xfer_cnt < scmd->underflow) {
- if (scsi_status == SAM_STAT_BUSY)
- scmd->result = SAM_STAT_BUSY;
- else
- scmd->result = DID_SOFT_ERROR << 16;
- } else if (scsi_state & (LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED |
- LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS))
- scmd->result = DID_SOFT_ERROR << 16;
- else if (scsi_state & LEAPIORAID_SCSI_STATE_TERMINATED)
- scmd->result = DID_RESET << 16;
- else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
- mpi_reply->SCSIState =
- LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID;
- mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
- scsi_build_sense(scmd, 0,
- ILLEGAL_REQUEST, 0x20,
- 0);
- }
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_DATA_OVERRUN:
- scsi_set_resid(scmd, 0);
- fallthrough;
- case LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR:
- case LEAPIORAID_IOCSTATUS_SUCCESS:
- scmd->result = (DID_OK << 16) | scsi_status;
- if (response_code ==
- LEAPIORAID_SCSITASKMGMT_RSP_INVALID_FRAME ||
- (scsi_state & (LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED |
- LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS)))
- scmd->result = DID_SOFT_ERROR << 16;
- else if (scsi_state & LEAPIORAID_SCSI_STATE_TERMINATED)
- scmd->result = DID_RESET << 16;
- break;
- case LEAPIORAID_IOCSTATUS_EEDP_GUARD_ERROR:
- case LEAPIORAID_IOCSTATUS_EEDP_REF_TAG_ERROR:
- fallthrough;
- case LEAPIORAID_IOCSTATUS_EEDP_APP_TAG_ERROR:
- fallthrough;
- case LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR:
- case LEAPIORAID_IOCSTATUS_INVALID_FUNCTION:
- case LEAPIORAID_IOCSTATUS_INVALID_SGL:
- case LEAPIORAID_IOCSTATUS_INTERNAL_ERROR:
- case LEAPIORAID_IOCSTATUS_INVALID_FIELD:
- case LEAPIORAID_IOCSTATUS_INVALID_STATE:
- case LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR:
- case LEAPIORAID_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
- case LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER:
- default:
- scmd->result = DID_SOFT_ERROR << 16;
- break;
- }
- if (scmd->result && (ioc->logging_level & LEAPIORAID_DEBUG_REPLY))
- leapioraid_scsihost_scsi_ioc_info(
- ioc, scmd, mpi_reply, smid, scsi_status,
- error_response_count);
-out:
- scsi_dma_unmap(scmd);
- leapioraid_base_free_smid(ioc, smid);
- scsi_done(scmd);
- return 0;
-}
-
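/*
 * After a controller reset the virtual-phy bookkeeping is rebuilt: all
 * vphys are first marked DIRTY, SAS IO unit page 0 is re-read, and each
 * still-present virtual phy is matched by SAS address and moved to the
 * hba_port that now owns its phy, clearing the DIRTY flag on the way.
 */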
-static void
-leapioraid_scsihost_update_vphys_after_reset(
- struct LEAPIORAID_ADAPTER *ioc)
-{
- u16 sz, ioc_status;
- int i;
- struct LeapioraidCfgRep_t mpi_reply;
- struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL;
- u16 attached_handle;
- u64 attached_sas_addr;
- u8 found = 0, port_id;
- struct LeapioraidSasPhyP0_t phy_pg0;
- struct leapioraid_hba_port *port, *port_next, *mport;
- struct leapioraid_virtual_phy *vphy, *vphy_next;
- struct leapioraid_sas_device *sas_device;
-
- list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) {
- if (!port->vphys_mask)
- continue;
- list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list,
- list) {
- vphy->flags |= LEAPIORAID_VPHY_FLAG_DIRTY_PHY;
- }
- }
- sz = offsetof(struct LeapioraidSasIOUnitP0_t, PhyData)
- + (ioc->sas_hba.num_phys
- * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA));
- sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
- if (!sas_iounit_pg0) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
- if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply,
- sas_iounit_pg0, sz)) != 0)
- goto out;
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS)
- goto out;
- for (i = 0; i < ioc->sas_hba.num_phys; i++) {
- if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
- LEAPIORAID_SAS_NEG_LINK_RATE_1_5)
- continue;
- if (!(le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo)
- & LEAPIORAID_SAS_DEVICE_INFO_SEP))
- continue;
- if ((leapioraid_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
- i))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- continue;
- }
-		if (!(le32_to_cpu(phy_pg0.PhyInfo) &
-		      LEAPIORAID_SAS_PHYINFO_VIRTUAL_PHY))
-			continue;
- attached_handle =
- le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
-		if (leapioraid_scsihost_get_sas_address(ioc, attached_handle,
-							&attached_sas_addr) != 0) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- continue;
- }
- found = 0;
- port = port_next = NULL;
- list_for_each_entry_safe(port, port_next, &ioc->port_table_list,
- list) {
- if (!port->vphys_mask)
- continue;
- list_for_each_entry_safe(vphy, vphy_next,
- &port->vphys_list, list) {
-				if (!(vphy->flags &
-				      LEAPIORAID_VPHY_FLAG_DIRTY_PHY))
-					continue;
- if (vphy->sas_address != attached_sas_addr)
- continue;
- if (!(vphy->phy_mask & (1 << i)))
- vphy->phy_mask = (1 << i);
- port_id = sas_iounit_pg0->PhyData[i].Port;
- mport =
- leapioraid_get_port_by_id(ioc, port_id, 1);
- if (!mport) {
- mport =
- kzalloc(sizeof(struct leapioraid_hba_port),
- GFP_KERNEL);
- if (!mport) {
- pr_err(
- "%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__,
- __LINE__, __func__);
- break;
- }
- mport->port_id = port_id;
- pr_err(
- "%s %s: hba_port entry: %p, port: %d is added to hba_port list\n",
- ioc->name, __func__, mport,
- mport->port_id);
- list_add_tail(&mport->list,
- &ioc->port_table_list);
- }
- if (port != mport) {
- if (!mport->vphys_mask)
- INIT_LIST_HEAD(&mport->vphys_list);
- mport->vphys_mask |= (1 << i);
- port->vphys_mask &= ~(1 << i);
- list_move(&vphy->list,
- &mport->vphys_list);
- sas_device =
- leapioraid_get_sdev_by_addr(ioc,
- attached_sas_addr,
- port);
- if (sas_device)
- sas_device->port = mport;
- }
- if (mport->flags & LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT) {
- mport->sas_address = 0;
- mport->phy_mask = 0;
- mport->flags &=
- ~LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT;
- }
- vphy->flags &= ~LEAPIORAID_VPHY_FLAG_DIRTY_PHY;
- found = 1;
- break;
- }
- if (found)
- break;
- }
- }
-out:
- kfree(sas_iounit_pg0);
-}
-
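/*
 * Build a fresh port table from SAS IO unit page 0: phys with a valid
 * negotiated link rate are grouped by (port id, attached SAS address)
 * into phy masks; returns the number of ports found.
 */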
-static u8
-leapioraid_scsihost_get_port_table_after_reset(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_hba_port *port_table)
-{
- u16 sz, ioc_status;
- int i, j;
- struct LeapioraidCfgRep_t mpi_reply;
- struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL;
- u16 attached_handle;
- u64 attached_sas_addr;
- u8 found = 0, port_count = 0, port_id;
-
- sz = offsetof(struct LeapioraidSasIOUnitP0_t, PhyData)
- + (ioc->sas_hba.num_phys
- * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA));
- sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
- if (!sas_iounit_pg0) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return port_count;
- }
- if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply,
- sas_iounit_pg0, sz)) != 0)
- goto out;
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS)
- goto out;
- for (i = 0; i < ioc->sas_hba.num_phys; i++) {
- found = 0;
- if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
- LEAPIORAID_SAS_NEG_LINK_RATE_1_5)
- continue;
- attached_handle =
- le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
-		if (leapioraid_scsihost_get_sas_address(ioc, attached_handle,
-							&attached_sas_addr) != 0) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- continue;
- }
- for (j = 0; j < port_count; j++) {
- port_id = sas_iounit_pg0->PhyData[i].Port;
- if ((port_table[j].port_id == port_id) &&
- (port_table[j].sas_address == attached_sas_addr)) {
- port_table[j].phy_mask |= (1 << i);
- found = 1;
- break;
- }
- }
- if (found)
- continue;
- port_id = sas_iounit_pg0->PhyData[i].Port;
- port_table[port_count].port_id = port_id;
- port_table[port_count].phy_mask = (1 << i);
- port_table[port_count].sas_address = attached_sas_addr;
- port_count++;
- }
-out:
- kfree(sas_iounit_pg0);
- return port_count;
-}
-
-enum hba_port_matched_codes {
- NOT_MATCHED = 0,
- MATCHED_WITH_ADDR_AND_PHYMASK,
- MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,
- MATCHED_WITH_ADDR_AND_SUBPHYMASK,
- MATCHED_WITH_ADDR,
-};
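/*
 * Matching precedence used below, strongest first: exact address and
 * phy mask; address, phy-mask subset and same port id; address and
 * phy-mask subset; address only (with a count of how many dirty
 * entries share that address).
 */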
-static int
-leapioraid_scsihost_look_and_get_matched_port_entry(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_hba_port *port_entry,
- struct leapioraid_hba_port **matched_port_entry,
- int *count)
-{
- struct leapioraid_hba_port *port_table_entry, *matched_port = NULL;
- enum hba_port_matched_codes matched_code = NOT_MATCHED;
- int lcount = 0;
-
- *matched_port_entry = NULL;
- list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
- if (!(port_table_entry->flags & LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT))
- continue;
- if ((port_table_entry->sas_address == port_entry->sas_address)
- && (port_table_entry->phy_mask == port_entry->phy_mask)) {
- matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
- matched_port = port_table_entry;
- break;
- }
- if ((port_table_entry->sas_address == port_entry->sas_address)
- && (port_table_entry->phy_mask & port_entry->phy_mask)
- && (port_table_entry->port_id == port_entry->port_id)) {
- matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
- matched_port = port_table_entry;
- continue;
- }
- if ((port_table_entry->sas_address == port_entry->sas_address)
- && (port_table_entry->phy_mask & port_entry->phy_mask)) {
- if (matched_code ==
- MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
- continue;
- matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
- matched_port = port_table_entry;
- continue;
- }
- if (port_table_entry->sas_address == port_entry->sas_address) {
- if (matched_code ==
- MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
- continue;
- if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
- continue;
- matched_code = MATCHED_WITH_ADDR;
- matched_port = port_table_entry;
- lcount++;
- }
- }
- *matched_port_entry = matched_port;
- if (matched_code == MATCHED_WITH_ADDR)
- *count = lcount;
- return matched_code;
-}
-
-static void
-leapioraid_scsihost_del_phy_part_of_anther_port(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_hba_port *port_table,
- int index, u8 port_count, int offset)
-{
- struct leapioraid_raid_sas_node *sas_node = &ioc->sas_hba;
- u32 i, found = 0;
-
- for (i = 0; i < port_count; i++) {
- if (i == index)
- continue;
- if (port_table[i].phy_mask & (1 << offset)) {
- leapioraid_transport_del_phy_from_an_existing_port(
- ioc,
- sas_node,
- &sas_node->phy
- [offset]);
- found = 1;
- break;
- }
- }
- if (!found)
- port_table[index].phy_mask |= (1 << offset);
-}
-
-static void
-leapioraid_scsihost_add_or_del_phys_from_existing_port(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_hba_port *hba_port_entry,
- struct leapioraid_hba_port *port_table,
- int index, u8 port_count)
-{
- u32 phy_mask, offset = 0;
- struct leapioraid_raid_sas_node *sas_node = &ioc->sas_hba;
-
- phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
- for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
- if (phy_mask & (1 << offset)) {
- if (!(port_table[index].phy_mask & (1 << offset))) {
- leapioraid_scsihost_del_phy_part_of_anther_port(
- ioc,
- port_table,
- index,
- port_count,
- offset);
- } else {
-#if defined(LEAPIORAID_WIDE_PORT_API)
- if (sas_node->phy[offset].phy_belongs_to_port)
- leapioraid_transport_del_phy_from_an_existing_port
- (ioc, sas_node,
- &sas_node->phy[offset]);
- leapioraid_transport_add_phy_to_an_existing_port
- (ioc, sas_node, &sas_node->phy[offset],
- hba_port_entry->sas_address,
- hba_port_entry);
-#endif
- }
- }
- }
-}
-
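/*
 * Post-refresh cleanup: drop vphys still flagged DIRTY, then delete
 * port entries that stayed DIRTY and were not re-created as NEW_PORT
 * during this recovery.
 */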
-static void
-leapioraid_scsihost_del_dirty_vphy(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct leapioraid_hba_port *port, *port_next;
- struct leapioraid_virtual_phy *vphy, *vphy_next;
-
- list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) {
- if (!port->vphys_mask)
- continue;
- list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list,
- list) {
- if (vphy->flags & LEAPIORAID_VPHY_FLAG_DIRTY_PHY) {
- drsprintk(ioc, pr_err(
- "%s Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
- ioc->name, vphy,
- port->port_id,
- vphy->phy_mask));
- port->vphys_mask &= ~vphy->phy_mask;
- list_del(&vphy->list);
- kfree(vphy);
- }
- }
- if (!port->vphys_mask && !port->sas_address)
- port->flags |= LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT;
- }
-}
-
-static void
-leapioraid_scsihost_del_dirty_port_entries(
- struct LEAPIORAID_ADAPTER *ioc)
-{
- struct leapioraid_hba_port *port, *port_next;
-
- list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) {
- if (!(port->flags & LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT) ||
- port->flags & LEAPIORAID_HBA_PORT_FLAG_NEW_PORT)
- continue;
- drsprintk(ioc, pr_err(
- "%s Deleting port table entry %p having Port id: %d\t, Phy_mask 0x%08x\n",
- ioc->name, port, port->port_id,
- port->phy_mask));
- list_del(&port->list);
- kfree(port);
- }
-}
-
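/*
 * sas_port_refresh: build the post-reset port table, mark every old
 * entry DIRTY, then reconcile new against old through the matcher
 * above, moving phys between ports and adopting the new port ids for
 * surviving entries.
 */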
-static void
-leapioraid_scsihost_sas_port_refresh(struct LEAPIORAID_ADAPTER *ioc)
-{
- u8 port_count = 0;
- struct leapioraid_hba_port *port_table;
- struct leapioraid_hba_port *port_table_entry;
- struct leapioraid_hba_port *port_entry = NULL;
- int i, j, ret, count = 0, lcount = 0;
- u64 sas_addr;
- u8 num_phys;
-
- drsprintk(ioc, pr_err(
- "%s updating ports for sas_host(0x%016llx)\n",
- ioc->name,
- (unsigned long long)ioc->sas_hba.sas_address));
- leapioraid_config_get_number_hba_phys(ioc, &num_phys);
- if (!num_phys) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
- if (num_phys > ioc->sas_hba.nr_phys_allocated) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
- ioc->sas_hba.num_phys = num_phys;
- port_table = kcalloc(ioc->sas_hba.num_phys,
- sizeof(struct leapioraid_hba_port), GFP_KERNEL);
- if (!port_table)
- return;
- port_count = leapioraid_scsihost_get_port_table_after_reset(
- ioc, port_table);
-	if (!port_count) {
-		kfree(port_table);
-		return;
-	}
- drsprintk(ioc,
- pr_info("%s New Port table\n", ioc->name));
- for (j = 0; j < port_count; j++)
- drsprintk(ioc, pr_err(
- "%s Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
- ioc->name, port_table[j].port_id,
- port_table[j].phy_mask,
- port_table[j].sas_address));
- list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
- port_table_entry->flags |= LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT;
- }
- drsprintk(ioc,
- pr_info("%s Old Port table\n", ioc->name));
- port_table_entry = NULL;
- list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
- drsprintk(ioc, pr_err(
- "%s Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
- ioc->name, port_table_entry->port_id,
- port_table_entry->phy_mask,
- port_table_entry->sas_address));
- }
- for (j = 0; j < port_count; j++) {
- ret = leapioraid_scsihost_look_and_get_matched_port_entry(ioc,
- &port_table[j],
- &port_entry,
- &count);
- if (!port_entry) {
- drsprintk(ioc, pr_err(
- "%s No Matched entry for sas_addr(0x%16llx), Port:%d\n",
- ioc->name,
- port_table[j].sas_address,
- port_table[j].port_id));
- continue;
- }
- switch (ret) {
- case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
- case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
- leapioraid_scsihost_add_or_del_phys_from_existing_port(ioc,
- port_entry,
- port_table,
- j,
- port_count);
- break;
- case MATCHED_WITH_ADDR:
- sas_addr = port_table[j].sas_address;
- for (i = 0; i < port_count; i++) {
- if (port_table[i].sas_address == sas_addr)
- lcount++;
- }
- if ((count > 1) || (lcount > 1))
- port_entry = NULL;
- else
- leapioraid_scsihost_add_or_del_phys_from_existing_port
- (ioc, port_entry, port_table, j,
- port_count);
- }
- if (!port_entry)
- continue;
- if (port_entry->port_id != port_table[j].port_id)
- port_entry->port_id = port_table[j].port_id;
- port_entry->flags &= ~LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT;
- port_entry->phy_mask = port_table[j].phy_mask;
- }
- kfree(port_table);
-}
-
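-/**
- * leapioraid_scsihost_alloc_vphy - return the vphy object for a given phy
- * @ioc: per adapter object
- * @port_id: firmware port id
- * @phy_num: phy number within the port
- *
- * Look up the virtual_phy object for @phy_num on the port identified by
- * @port_id, allocating and linking a new one into the port's vphys_list
- * when none exists yet.  Returns NULL on lookup or allocation failure.
- */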
-static
-struct leapioraid_virtual_phy *leapioraid_scsihost_alloc_vphy(
- struct LEAPIORAID_ADAPTER *ioc,
- u8 port_id, u8 phy_num)
-{
- struct leapioraid_virtual_phy *vphy;
- struct leapioraid_hba_port *port;
-
- port = leapioraid_get_port_by_id(ioc, port_id, 0);
- if (!port)
- return NULL;
- vphy = leapioraid_get_vphy_by_phy(ioc, port, phy_num);
- if (!vphy) {
- vphy = kzalloc(sizeof(struct leapioraid_virtual_phy), GFP_KERNEL);
- if (!vphy)
- return NULL;
- if (!port->vphys_mask)
- INIT_LIST_HEAD(&port->vphys_list);
- port->vphys_mask |= (1 << phy_num);
- vphy->phy_mask |= (1 << phy_num);
- list_add_tail(&vphy->list, &port->vphys_list);
- pr_info(
- "%s vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
- ioc->name, vphy, port->port_id, phy_num);
- }
- return vphy;
-}
-
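-/**
- * leapioraid_scsihost_sas_host_refresh - refresh direct attached phys
- * @ioc: per adapter object
- *
- * Re-read SAS IO unit page 0 to pick up the current controller device
- * handle, discover any new ports and virtual phys, register host phys
- * that are not yet known to the transport layer, and update the link
- * state of the remaining phys.
- */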
-static void
-leapioraid_scsihost_sas_host_refresh(struct LEAPIORAID_ADAPTER *ioc)
-{
- u16 sz;
- u16 ioc_status;
- int i;
- struct LeapioraidCfgRep_t mpi_reply;
- struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL;
- u16 attached_handle;
- u8 link_rate, port_id;
- struct leapioraid_hba_port *port;
- struct LeapioraidSasPhyP0_t phy_pg0;
-
- dtmprintk(ioc, pr_err(
- "%s updating handles for sas_host(0x%016llx)\n",
- ioc->name,
- (unsigned long long)ioc->sas_hba.sas_address));
- sz = offsetof(struct LeapioraidSasIOUnitP0_t,
- PhyData) +
- (ioc->sas_hba.num_phys * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA));
- sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
- if (!sas_iounit_pg0) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
- if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply,
- sas_iounit_pg0, sz)) != 0)
- goto out;
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS)
- goto out;
- for (i = 0; i < ioc->sas_hba.num_phys; i++) {
- link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
- if (i == 0)
- ioc->sas_hba.handle =
- le16_to_cpu(sas_iounit_pg0->PhyData[0].ControllerDevHandle);
- port_id = sas_iounit_pg0->PhyData[i].Port;
- if (!(leapioraid_get_port_by_id(ioc, port_id, 0))) {
- port = kzalloc(sizeof(struct leapioraid_hba_port), GFP_KERNEL);
- if (!port)
- goto out;
-
- port->port_id = port_id;
- pr_info(
- "%s hba_port entry: %p, port: %d is added to hba_port list\n",
- ioc->name, port, port->port_id);
- if (ioc->shost_recovery)
- port->flags = LEAPIORAID_HBA_PORT_FLAG_NEW_PORT;
- list_add_tail(&port->list, &ioc->port_table_list);
- }
- if ((le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
- LEAPIORAID_SAS_DEVICE_INFO_SEP)
- && (link_rate >= LEAPIORAID_SAS_NEG_LINK_RATE_1_5)) {
- if ((leapioraid_config_get_phy_pg0
- (ioc, &mpi_reply, &phy_pg0, i))) {
- pr_err(
- "%s failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
- continue;
- }
- if (!(le32_to_cpu(phy_pg0.PhyInfo) &
- LEAPIORAID_SAS_PHYINFO_VIRTUAL_PHY))
- continue;
- if (!leapioraid_scsihost_alloc_vphy(ioc, port_id, i))
- goto out;
- ioc->sas_hba.phy[i].hba_vphy = 1;
- }
- ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
- attached_handle =
- le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
- if (attached_handle
- && link_rate < LEAPIORAID_SAS_NEG_LINK_RATE_1_5)
- link_rate = LEAPIORAID_SAS_NEG_LINK_RATE_1_5;
- ioc->sas_hba.phy[i].port =
- leapioraid_get_port_by_id(ioc, port_id, 0);
- if (!ioc->sas_hba.phy[i].phy) {
- if ((leapioraid_config_get_phy_pg0
- (ioc, &mpi_reply, &phy_pg0, i))) {
- pr_err(
- "%s failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
- continue;
- }
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
- LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err(
- "%s failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
- continue;
- }
- ioc->sas_hba.phy[i].phy_id = i;
- leapioraid_transport_add_host_phy(ioc,
- &ioc->sas_hba.phy[i],
- phy_pg0,
- ioc->sas_hba.parent_dev);
- continue;
- }
- leapioraid_transport_update_links(ioc, ioc->sas_hba.sas_address,
- attached_handle, i, link_rate,
- ioc->sas_hba.phy[i].port);
- }
-out:
- kfree(sas_iounit_pg0);
-}
-
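-/**
- * leapioraid_scsihost_sas_host_add - initial enumeration of the SAS host
- * @ioc: per adapter object
- *
- * Allocate the phy table and populate the sas_hba object from SAS IO
- * unit pages 0/1 and device page 0: device missing delays, per-phy
- * handles, port and vphy objects, and the host SAS address.  Each phy
- * is registered with the SAS transport layer.
- */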
-static void
-leapioraid_scsihost_sas_host_add(struct LEAPIORAID_ADAPTER *ioc)
-{
- int i;
- struct LeapioraidCfgRep_t mpi_reply;
- struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL;
- struct LeapioraidSasIOUnitP1_t *sas_iounit_pg1 = NULL;
- struct LeapioraidSasPhyP0_t phy_pg0;
- struct LeapioraidSasDevP0_t sas_device_pg0;
- struct LeapioraidSasEncP0_t enclosure_pg0;
- u16 ioc_status;
- u16 sz;
- u8 device_missing_delay;
- u8 num_phys, port_id;
- struct leapioraid_hba_port *port;
-
- leapioraid_config_get_number_hba_phys(ioc, &num_phys);
- if (!num_phys) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
- ioc->sas_hba.nr_phys_allocated =
- max_t(u8, LEAPIORAID_MAX_HBA_NUM_PHYS, num_phys);
- ioc->sas_hba.phy =
- kcalloc(ioc->sas_hba.nr_phys_allocated,
- sizeof(struct leapioraid_sas_phy),
- GFP_KERNEL);
- if (!ioc->sas_hba.phy) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
- ioc->sas_hba.num_phys = num_phys;
- sz = offsetof(struct LeapioraidSasIOUnitP0_t,
- PhyData) +
- (ioc->sas_hba.num_phys
- * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA));
- sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
- if (!sas_iounit_pg0) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
- if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply,
- sas_iounit_pg0, sz))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out;
- }
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
- & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out;
- }
- sz = offsetof(struct LeapioraidSasIOUnitP1_t,
- PhyData) +
- (ioc->sas_hba.num_phys
- * sizeof(struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA));
- sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
- if (!sas_iounit_pg1) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out;
- }
- if ((leapioraid_config_get_sas_iounit_pg1(ioc, &mpi_reply,
- sas_iounit_pg1, sz))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out;
- }
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out;
- }
- ioc->io_missing_delay = sas_iounit_pg1->IODeviceMissingDelay;
- device_missing_delay = sas_iounit_pg1->ReportDeviceMissingDelay;
- if (device_missing_delay & LEAPIORAID_SASIOUNIT1_REPORT_MISSING_UNIT_16)
- ioc->device_missing_delay = (device_missing_delay &
- LEAPIORAID_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK)
- * 16;
- else
- ioc->device_missing_delay = device_missing_delay &
- LEAPIORAID_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
- ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
- for (i = 0; i < ioc->sas_hba.num_phys; i++) {
- if ((leapioraid_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
- i))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out;
- }
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
- LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out;
- }
- if (i == 0)
- ioc->sas_hba.handle =
- le16_to_cpu(sas_iounit_pg0->PhyData[0].ControllerDevHandle);
- port_id = sas_iounit_pg0->PhyData[i].Port;
- if (!(leapioraid_get_port_by_id(ioc, port_id, 0))) {
- port = kzalloc(sizeof(struct leapioraid_hba_port), GFP_KERNEL);
- if (!port)
- goto out;
-
- port->port_id = port_id;
- pr_info(
- "%s hba_port entry: %p, port: %d is added to hba_port list\n",
- ioc->name, port, port->port_id);
- list_add_tail(&port->list, &ioc->port_table_list);
- }
- if ((le32_to_cpu(phy_pg0.PhyInfo) &
- LEAPIORAID_SAS_PHYINFO_VIRTUAL_PHY)
- && (phy_pg0.NegotiatedLinkRate >> 4) >=
- LEAPIORAID_SAS_NEG_LINK_RATE_1_5) {
- if (!leapioraid_scsihost_alloc_vphy(ioc, port_id, i))
- goto out;
- ioc->sas_hba.phy[i].hba_vphy = 1;
- }
- ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
- ioc->sas_hba.phy[i].phy_id = i;
- ioc->sas_hba.phy[i].port =
- leapioraid_get_port_by_id(ioc, port_id, 0);
- leapioraid_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
- phy_pg0,
- ioc->sas_hba.parent_dev);
- }
- if ((leapioraid_config_get_sas_device_pg0
- (ioc, &mpi_reply, &sas_device_pg0,
- LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out;
- }
- ioc->sas_hba.enclosure_handle =
- le16_to_cpu(sas_device_pg0.EnclosureHandle);
- ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
- pr_info(
- "%s host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
- ioc->name,
- ioc->sas_hba.handle,
- (unsigned long long)ioc->sas_hba.sas_address,
- ioc->sas_hba.num_phys);
- if (ioc->sas_hba.enclosure_handle) {
- if (!(leapioraid_config_get_enclosure_pg0(ioc, &mpi_reply,
- &enclosure_pg0,
- LEAPIORAID_SAS_ENCLOS_PGAD_FORM_HANDLE,
- ioc->sas_hba.enclosure_handle)))
- ioc->sas_hba.enclosure_logical_id =
- le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
- }
-out:
- kfree(sas_iounit_pg1);
- kfree(sas_iounit_pg0);
-}
-
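-/**
- * leapioraid_scsihost_expander_add - create and register an expander
- * @ioc: per adapter object
- * @handle: expander device handle
- *
- * Read the expander config pages, recursively add the parent expander
- * when it is not directly attached to the host, then allocate a
- * leapioraid_raid_sas_node and register the expander and all of its
- * phys with the SAS transport layer.  Returns 0 for success, non-zero
- * for failure.
- */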
-static int
-leapioraid_scsihost_expander_add(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle)
-{
- struct leapioraid_raid_sas_node *sas_expander;
- struct leapioraid_enclosure_node *enclosure_dev;
- struct LeapioraidCfgRep_t mpi_reply;
- struct LeapioraidExpanderP0_t expander_pg0;
- struct LeapioraidExpanderP1_t expander_pg1;
- u32 ioc_status;
- u16 parent_handle;
- u64 sas_address, sas_address_parent = 0;
- int i;
- unsigned long flags;
- u8 port_id;
- struct leapioraid_sas_port *leapioraid_port = NULL;
- int rc = 0;
-
- if (!handle)
- return -1;
- if (ioc->shost_recovery || ioc->pci_error_recovery)
- return -1;
- if ((leapioraid_config_get_expander_pg0(
- ioc, &mpi_reply, &expander_pg0,
- LEAPIORAID_SAS_EXPAND_PGAD_FORM_HNDL,
- handle))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return -1;
- }
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
- & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return -1;
- }
- parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
- if (leapioraid_scsihost_get_sas_address(
- ioc, parent_handle, &sas_address_parent)
- != 0) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return -1;
- }
- port_id = expander_pg0.PhysicalPort;
- if (sas_address_parent != ioc->sas_hba.sas_address) {
- spin_lock_irqsave(&ioc->sas_node_lock, flags);
- sas_expander =
- leapioraid_scsihost_expander_find_by_sas_address(
- ioc,
- sas_address_parent,
- leapioraid_get_port_by_id(ioc, port_id, 0));
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- if (!sas_expander) {
- rc = leapioraid_scsihost_expander_add(ioc, parent_handle);
- if (rc != 0)
- return rc;
- }
- }
- spin_lock_irqsave(&ioc->sas_node_lock, flags);
- sas_address = le64_to_cpu(expander_pg0.SASAddress);
- sas_expander = leapioraid_scsihost_expander_find_by_sas_address(
- ioc,
- sas_address,
- leapioraid_get_port_by_id(ioc, port_id, 0));
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- if (sas_expander)
- return 0;
- sas_expander = kzalloc(sizeof(struct leapioraid_raid_sas_node),
- GFP_KERNEL);
- if (!sas_expander)
- return -1;
-
- sas_expander->handle = handle;
- sas_expander->num_phys = expander_pg0.NumPhys;
- sas_expander->sas_address_parent = sas_address_parent;
- sas_expander->sas_address = sas_address;
- sas_expander->port = leapioraid_get_port_by_id(ioc, port_id, 0);
- if (!sas_expander->port) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = -1;
- goto out_fail;
- }
- pr_info(
- "%s expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
- ioc->name,
- handle, parent_handle,
- (unsigned long long)sas_expander->sas_address,
- sas_expander->num_phys);
- if (!sas_expander->num_phys) {
- rc = -1;
- goto out_fail;
- }
- sas_expander->phy = kcalloc(sas_expander->num_phys,
- sizeof(struct leapioraid_sas_phy), GFP_KERNEL);
- if (!sas_expander->phy) {
- rc = -1;
- goto out_fail;
- }
- INIT_LIST_HEAD(&sas_expander->sas_port_list);
- leapioraid_port = leapioraid_transport_port_add(
- ioc, handle,
- sas_address_parent,
- sas_expander->port);
- if (!leapioraid_port) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = -1;
- goto out_fail;
- }
- sas_expander->parent_dev = &leapioraid_port->rphy->dev;
- sas_expander->rphy = leapioraid_port->rphy;
- for (i = 0; i < sas_expander->num_phys; i++) {
- if ((leapioraid_config_get_expander_pg1(
- ioc, &mpi_reply,
- &expander_pg1, i,
- handle))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = -1;
- goto out_fail;
- }
- sas_expander->phy[i].handle = handle;
- sas_expander->phy[i].phy_id = i;
- sas_expander->phy[i].port =
- leapioraid_get_port_by_id(ioc, port_id, 0);
- if ((leapioraid_transport_add_expander_phy
- (ioc, &sas_expander->phy[i], expander_pg1,
- sas_expander->parent_dev))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = -1;
- goto out_fail;
- }
- }
- if (sas_expander->enclosure_handle) {
- enclosure_dev =
- leapioraid_scsihost_enclosure_find_by_handle(
- ioc,
- sas_expander->enclosure_handle);
- if (enclosure_dev)
- sas_expander->enclosure_logical_id =
- le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
- }
- leapioraid_scsihost_expander_node_add(ioc, sas_expander);
- return 0;
-out_fail:
- if (leapioraid_port)
- leapioraid_transport_port_remove(ioc,
- sas_expander->sas_address,
- sas_address_parent,
- sas_expander->port);
- kfree(sas_expander);
- return rc;
-}
-
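-/**
- * leapioraid_expander_remove - remove an expander by SAS address
- * @ioc: per adapter object
- * @sas_address: SAS address of the expander
- * @port: HBA port the expander is attached through
- *
- * Look up the expander node and tear it down together with its
- * children.  Skipped while host recovery is in progress.
- */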
-void
-leapioraid_expander_remove(
- struct LEAPIORAID_ADAPTER *ioc,
- u64 sas_address, struct leapioraid_hba_port *port)
-{
- struct leapioraid_raid_sas_node *sas_expander;
- unsigned long flags;
-
- if (ioc->shost_recovery)
- return;
- if (!port)
- return;
- spin_lock_irqsave(&ioc->sas_node_lock, flags);
- sas_expander = leapioraid_scsihost_expander_find_by_sas_address(
- ioc, sas_address, port);
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- if (sas_expander)
- leapioraid_scsihost_expander_node_remove(
- ioc, sas_expander);
-}
-
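-/**
- * leapioraid_scsihost_done - internal SCSI IO completion callback
- * @ioc: per adapter object
- * @smid: system request message index
- * @msix_index: MSIX table index supplied by the ISR
- * @reply: reply frame address (lower 32 bits)
- *
- * Copy the reply into ioc->scsih_cmds and wake up the thread waiting
- * in leapioraid_scsi_send_scsi_io().  Always returns 1.
- */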
-static u8
-leapioraid_scsihost_done(
- struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index,
- u32 reply)
-{
- struct LeapioraidDefaultRep_t *mpi_reply;
-
- mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply);
- if (ioc->scsih_cmds.status == LEAPIORAID_CMD_NOT_USED)
- return 1;
- if (ioc->scsih_cmds.smid != smid)
- return 1;
- ioc->scsih_cmds.status |= LEAPIORAID_CMD_COMPLETE;
- if (mpi_reply) {
- memcpy(ioc->scsih_cmds.reply, mpi_reply,
- mpi_reply->MsgLength * 4);
- ioc->scsih_cmds.status |= LEAPIORAID_CMD_REPLY_VALID;
- }
- ioc->scsih_cmds.status &= ~LEAPIORAID_CMD_PENDING;
- complete(&ioc->scsih_cmds.done);
- return 1;
-}
-
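-/**
- * leapioraid_scsi_send_scsi_io - send an internal SCSI IO to a device
- * @ioc: per adapter object
- * @transfer_packet: packet describing the transfer
- * @tr_timeout: target reset timeout
- * @tr_method: target reset method
- *
- * Build and fire a SCSI IO (or RAID passthrough) request using the
- * reserved scsih_cmds context and wait for it to complete.  On timeout
- * a target reset is issued and the request may be retried.  Returns 0
- * on success or a negative errno.
- */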
-static int
-leapioraid_scsi_send_scsi_io(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_scsi_io_transfer *transfer_packet,
- u8 tr_timeout, u8 tr_method)
-{
- struct LeapioraidSCSIIORep_t *mpi_reply;
- struct LeapioSCSIIOReq_t *mpi_request;
- u16 smid;
- u8 issue_reset = 0;
- int rc;
- void *priv_sense;
- u32 mpi_control;
- void *psge;
- dma_addr_t data_out_dma = 0;
- dma_addr_t data_in_dma = 0;
- size_t data_in_sz = 0;
- size_t data_out_sz = 0;
- u16 handle;
- u8 retry_count = 0, host_reset_count = 0;
- int tm_return_code;
-
- if (ioc->pci_error_recovery) {
- pr_err("%s %s: pci error recovery in progress!\n",
- ioc->name, __func__);
- return -EFAULT;
- }
- if (ioc->shost_recovery) {
- pr_info("%s %s: host recovery in progress!\n",
- ioc->name, __func__);
- return -EAGAIN;
- }
- handle = transfer_packet->handle;
- if (handle == LEAPIORAID_INVALID_DEVICE_HANDLE) {
- pr_info("%s %s: no device!\n",
- __func__, ioc->name);
- return -EFAULT;
- }
- mutex_lock(&ioc->scsih_cmds.mutex);
- if (ioc->scsih_cmds.status != LEAPIORAID_CMD_NOT_USED) {
- pr_err("%s %s: scsih_cmd in use\n",
- ioc->name, __func__);
- rc = -EAGAIN;
- goto out;
- }
-retry_loop:
- if (test_bit(handle, ioc->device_remove_in_progress)) {
- pr_info("%s %s: device removal in progress\n",
- ioc->name, __func__);
- rc = -EFAULT;
- goto out;
- }
- ioc->scsih_cmds.status = LEAPIORAID_CMD_PENDING;
- rc = leapioraid_wait_for_ioc_to_operational(ioc, 10);
- if (rc)
- goto out;
- smid = ioc->shost->can_queue
- + LEAPIORAID_INTERNAL_SCSIIO_FOR_DISCOVERY;
- rc = 0;
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- ioc->scsih_cmds.smid = smid;
- memset(mpi_request, 0, sizeof(struct LeapioSCSIIOReq_t));
- if (transfer_packet->is_raid)
- mpi_request->Function =
- LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH;
- else
- mpi_request->Function = LEAPIORAID_FUNC_SCSI_IO_REQUEST;
- mpi_request->DevHandle = cpu_to_le16(handle);
- switch (transfer_packet->dir) {
- case DMA_TO_DEVICE:
- mpi_control = LEAPIORAID_SCSIIO_CONTROL_WRITE;
- data_out_dma = transfer_packet->data_dma;
- data_out_sz = transfer_packet->data_length;
- break;
- case DMA_FROM_DEVICE:
- mpi_control = LEAPIORAID_SCSIIO_CONTROL_READ;
- data_in_dma = transfer_packet->data_dma;
- data_in_sz = transfer_packet->data_length;
- break;
- case DMA_BIDIRECTIONAL:
- mpi_control = LEAPIORAID_SCSIIO_CONTROL_BIDIRECTIONAL;
- BUG();
- break;
- default:
- case DMA_NONE:
- mpi_control = LEAPIORAID_SCSIIO_CONTROL_NODATATRANSFER;
- break;
- }
- psge = &mpi_request->SGL;
- ioc->build_sg(
- ioc, psge, data_out_dma,
- data_out_sz, data_in_dma,
- data_in_sz);
- mpi_request->Control = cpu_to_le32(mpi_control |
- LEAPIORAID_SCSIIO_CONTROL_SIMPLEQ);
- mpi_request->DataLength = cpu_to_le32(transfer_packet->data_length);
- mpi_request->MsgFlags = LEAPIORAID_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
- mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
- mpi_request->SenseBufferLowAddress =
- leapioraid_base_get_sense_buffer_dma(ioc, smid);
- priv_sense = leapioraid_base_get_sense_buffer(ioc, smid);
- mpi_request->SGLOffset0 = offsetof(struct LeapioSCSIIOReq_t, SGL) / 4;
- mpi_request->IoFlags = cpu_to_le16(transfer_packet->cdb_length);
- int_to_scsilun(transfer_packet->lun, (struct scsi_lun *)
- mpi_request->LUN);
- memcpy(mpi_request->CDB.CDB32, transfer_packet->cdb,
- transfer_packet->cdb_length);
- init_completion(&ioc->scsih_cmds.done);
- if (likely(mpi_request->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST))
- ioc->put_smid_scsi_io(ioc, smid, handle);
- else
- ioc->put_smid_default(ioc, smid);
- wait_for_completion_timeout(&ioc->scsih_cmds.done,
- transfer_packet->timeout * HZ);
- if (!(ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
- leapioraid_check_cmd_timeout(ioc,
- ioc->scsih_cmds.status,
- mpi_request,
- sizeof(struct LeapioSCSIIOReq_t) / 4,
- issue_reset);
- goto issue_target_reset;
- }
- if (ioc->scsih_cmds.status & LEAPIORAID_CMD_REPLY_VALID) {
- transfer_packet->valid_reply = 1;
- mpi_reply = ioc->scsih_cmds.reply;
- transfer_packet->sense_length =
- le32_to_cpu(mpi_reply->SenseCount);
- if (transfer_packet->sense_length)
- memcpy(transfer_packet->sense, priv_sense,
- transfer_packet->sense_length);
- transfer_packet->transfer_length =
- le32_to_cpu(mpi_reply->TransferCount);
- transfer_packet->ioc_status =
- le16_to_cpu(mpi_reply->IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- transfer_packet->scsi_state = mpi_reply->SCSIState;
- transfer_packet->scsi_status = mpi_reply->SCSIStatus;
- transfer_packet->log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
- }
- goto out;
-issue_target_reset:
- if (issue_reset) {
- pr_info("%s issue target reset: handle(0x%04x)\n", ioc->name, handle);
- tm_return_code =
- leapioraid_scsihost_issue_locked_tm(ioc, handle,
- 0xFFFFFFFF, 0xFFFFFFFF,
- 0,
- LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
- smid, tr_timeout,
- tr_method);
- if (tm_return_code == SUCCESS) {
- pr_err(
- "%s target reset completed: handle (0x%04x)\n",
- ioc->name, handle);
- if (((ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE)
- && retry_count++ < 3)
- || ((ioc->scsih_cmds.status & LEAPIORAID_CMD_RESET)
- && host_reset_count++ == 0)) {
- pr_info("%s issue retry: handle (0x%04x)\n",
- ioc->name, handle);
- goto retry_loop;
- }
- } else
- pr_err("%s target reset didn't complete: handle(0x%04x)\n",
- ioc->name, handle);
- rc = -EFAULT;
- } else
- rc = -EAGAIN;
-out:
- ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED;
- mutex_unlock(&ioc->scsih_cmds.mutex);
- return rc;
-}
-
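-/**
- * leapioraid_scsihost_determine_disposition - analyze an internal IO reply
- * @ioc: per adapter object
- * @transfer_packet: packet returned by leapioraid_scsi_send_scsi_io()
- *
- * Map the ioc_status, scsi_state, scsi_status and sense data of a
- * completed internal command onto a device_responsive_state, which the
- * callers use to decide between retrying, starting the unit, or giving
- * up on the device.
- */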
-static enum device_responsive_state
-leapioraid_scsihost_determine_disposition(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_scsi_io_transfer *transfer_packet)
-{
- enum device_responsive_state rc;
- struct sense_info sense_info = { 0, 0, 0 };
- u8 check_sense = 0;
- char *desc = NULL;
-
- if (!transfer_packet->valid_reply)
- return DEVICE_READY;
- switch (transfer_packet->ioc_status) {
- case LEAPIORAID_IOCSTATUS_BUSY:
- case LEAPIORAID_IOCSTATUS_INSUFFICIENT_RESOURCES:
- case LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED:
- case LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR:
- case LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED:
- rc = DEVICE_RETRY;
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED:
- if (transfer_packet->log_info == 0x31170000) {
- rc = DEVICE_RETRY;
- break;
- }
- if (transfer_packet->cdb[0] == REPORT_LUNS)
- rc = DEVICE_READY;
- else
- rc = DEVICE_RETRY;
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN:
- case LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR:
- case LEAPIORAID_IOCSTATUS_SUCCESS:
- if (!transfer_packet->scsi_state &&
- !transfer_packet->scsi_status) {
- rc = DEVICE_READY;
- break;
- }
- if (transfer_packet->scsi_state &
- LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID) {
- rc = DEVICE_ERROR;
- check_sense = 1;
- break;
- }
- if (transfer_packet->scsi_state &
- (LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED |
- LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS |
- LEAPIORAID_SCSI_STATE_TERMINATED)) {
- rc = DEVICE_RETRY;
- break;
- }
- if (transfer_packet->scsi_status >= LEAPIORAID_SCSI_STATUS_BUSY) {
- rc = DEVICE_RETRY;
- break;
- }
- rc = DEVICE_READY;
- break;
- case LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR:
- if (transfer_packet->scsi_state & LEAPIORAID_SCSI_STATE_TERMINATED)
- rc = DEVICE_RETRY;
- else
- rc = DEVICE_ERROR;
- break;
- case LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER:
- default:
- rc = DEVICE_ERROR;
- break;
- }
- if (check_sense) {
- leapioraid_scsihost_normalize_sense(
- transfer_packet->sense, &sense_info);
- if (sense_info.skey == UNIT_ATTENTION)
- rc = DEVICE_RETRY_UA;
- else if (sense_info.skey == NOT_READY) {
- if (sense_info.asc == 0x3a)
- rc = DEVICE_READY;
- else if (sense_info.asc == 0x04) {
- if (sense_info.ascq == 0x03 ||
- sense_info.ascq == 0x0b ||
- sense_info.ascq == 0x0c) {
- rc = DEVICE_ERROR;
- } else
- rc = DEVICE_START_UNIT;
- } else if (sense_info.asc == 0x3e && !sense_info.ascq)
- rc = DEVICE_START_UNIT;
- } else if (sense_info.skey == ILLEGAL_REQUEST &&
- transfer_packet->cdb[0] == REPORT_LUNS) {
- rc = DEVICE_READY;
- } else if (sense_info.skey == MEDIUM_ERROR) {
- if (sense_info.asc == 0x31)
- rc = DEVICE_READY;
- } else if (sense_info.skey == HARDWARE_ERROR) {
- if (sense_info.asc == 0x19)
- rc = DEVICE_READY;
- }
- }
- if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) {
- switch (rc) {
- case DEVICE_READY:
- desc = "ready";
- break;
- case DEVICE_RETRY:
- desc = "retry";
- break;
- case DEVICE_RETRY_UA:
- desc = "retry_ua";
- break;
- case DEVICE_START_UNIT:
- desc = "start_unit";
- break;
- case DEVICE_STOP_UNIT:
- desc = "stop_unit";
- break;
- case DEVICE_ERROR:
- desc = "error";
- break;
- }
- pr_info(
- "%s \tioc_status(0x%04x), loginfo(0x%08x),\n\t\t"
- "scsi_status(0x%02x), scsi_state(0x%02x), rc(%s)\n",
- ioc->name,
- transfer_packet->ioc_status,
- transfer_packet->log_info,
- transfer_packet->scsi_status,
- transfer_packet->scsi_state,
- desc);
- if (check_sense)
- pr_info("%s \t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x]\n",
- ioc->name,
- sense_info.skey, sense_info.asc,
- sense_info.ascq);
- }
- return rc;
-}
-
-static enum device_responsive_state
-leapioraid_scsihost_inquiry_vpd_sn(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle,
- u8 **serial_number)
-{
- struct leapioraid_scsi_io_transfer *transfer_packet;
- enum device_responsive_state rc;
- u8 *inq_data;
- int return_code;
- u32 data_length;
- u8 len;
- u8 tr_timeout = 30;
- u8 tr_method = 0;
-
- inq_data = NULL;
- transfer_packet
- = kzalloc(sizeof(struct leapioraid_scsi_io_transfer), GFP_KERNEL);
- if (!transfer_packet) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = DEVICE_RETRY;
- goto out;
- }
- data_length = 252;
- inq_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
- &transfer_packet->data_dma, GFP_ATOMIC);
- if (!inq_data) {
- rc = DEVICE_RETRY;
- goto out;
- }
-
- rc = DEVICE_READY;
- memset(inq_data, 0, data_length);
- transfer_packet->handle = handle;
- transfer_packet->dir = DMA_FROM_DEVICE;
- transfer_packet->data_length = data_length;
- transfer_packet->cdb_length = 6;
- transfer_packet->cdb[0] = INQUIRY;
- transfer_packet->cdb[1] = 1;
- transfer_packet->cdb[2] = 0x80;
- transfer_packet->cdb[4] = data_length;
- transfer_packet->timeout = 30;
- tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
- return_code =
- leapioraid_scsi_send_scsi_io(
- ioc, transfer_packet, tr_timeout, tr_method);
- switch (return_code) {
- case 0:
- rc = leapioraid_scsihost_determine_disposition(
- ioc, transfer_packet);
- if (rc == DEVICE_READY) {
- len = strlen(&inq_data[4]) + 1;
- *serial_number = kmalloc(len, GFP_KERNEL);
- if (*serial_number)
- strscpy(*serial_number, &inq_data[4], len);
- }
- break;
- case -EAGAIN:
- rc = DEVICE_RETRY;
- break;
- case -EFAULT:
- default:
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = DEVICE_ERROR;
- break;
- }
-out:
- if (inq_data)
- dma_free_coherent(&ioc->pdev->dev, data_length, inq_data,
- transfer_packet->data_dma);
- kfree(transfer_packet);
- return rc;
-}
-
-static enum device_responsive_state
-leapioraid_scsihost_inquiry_vpd_supported_pages(
- struct LEAPIORAID_ADAPTER *ioc,
- u16 handle, u32 lun, void *data,
- u32 data_length)
-{
- struct leapioraid_scsi_io_transfer *transfer_packet;
- enum device_responsive_state rc;
- void *inq_data;
- int return_code;
-
- inq_data = NULL;
- transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer),
- GFP_KERNEL);
- if (!transfer_packet) {
- rc = DEVICE_RETRY;
- goto out;
- }
- inq_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
- &transfer_packet->data_dma, GFP_ATOMIC);
- if (!inq_data) {
- rc = DEVICE_RETRY;
- goto out;
- }
- rc = DEVICE_READY;
- memset(inq_data, 0, data_length);
- transfer_packet->handle = handle;
- transfer_packet->dir = DMA_FROM_DEVICE;
- transfer_packet->data_length = data_length;
- transfer_packet->cdb_length = 6;
- transfer_packet->lun = lun;
- transfer_packet->cdb[0] = INQUIRY;
- transfer_packet->cdb[1] = 1;
- transfer_packet->cdb[4] = data_length;
- transfer_packet->timeout = 30;
- return_code = leapioraid_scsi_send_scsi_io(
- ioc, transfer_packet, 30, 0);
- switch (return_code) {
- case 0:
- rc = leapioraid_scsihost_determine_disposition(
- ioc, transfer_packet);
- if (rc == DEVICE_READY)
- memcpy(data, inq_data, data_length);
- break;
- case -EAGAIN:
- rc = DEVICE_RETRY;
- break;
- case -EFAULT:
- default:
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = DEVICE_ERROR;
- break;
- }
-out:
- if (inq_data)
- dma_free_coherent(&ioc->pdev->dev, data_length, inq_data,
- transfer_packet->data_dma);
- kfree(transfer_packet);
- return rc;
-}
-
-static enum device_responsive_state
-leapioraid_scsihost_report_luns(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle, void *data,
- u32 data_length, u8 retry_count, u8 is_pd, u8 tr_timeout,
- u8 tr_method)
-{
- struct leapioraid_scsi_io_transfer *transfer_packet;
- enum device_responsive_state rc;
- void *lun_data;
- int return_code;
- int retries;
-
- lun_data = NULL;
- transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer),
- GFP_KERNEL);
- if (!transfer_packet) {
- rc = DEVICE_RETRY;
- goto out;
- }
- lun_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
- &transfer_packet->data_dma, GFP_ATOMIC);
- if (!lun_data) {
- rc = DEVICE_RETRY;
- goto out;
- }
- for (retries = 0; retries < 4; retries++) {
- rc = DEVICE_ERROR;
- pr_info("%s REPORT_LUNS: handle(0x%04x), retries(%d)\n",
- ioc->name, handle, retries);
- memset(lun_data, 0, data_length);
- transfer_packet->handle = handle;
- transfer_packet->dir = DMA_FROM_DEVICE;
- transfer_packet->data_length = data_length;
- transfer_packet->cdb_length = 12;
- transfer_packet->cdb[0] = REPORT_LUNS;
- transfer_packet->cdb[6] = (data_length >> 24) & 0xFF;
- transfer_packet->cdb[7] = (data_length >> 16) & 0xFF;
- transfer_packet->cdb[8] = (data_length >> 8) & 0xFF;
- transfer_packet->cdb[9] = data_length & 0xFF;
- transfer_packet->timeout = 30;
- transfer_packet->is_raid = is_pd;
- return_code =
- leapioraid_scsi_send_scsi_io(ioc, transfer_packet, tr_timeout,
- tr_method);
- switch (return_code) {
- case 0:
- rc = leapioraid_scsihost_determine_disposition(ioc,
- transfer_packet);
- if (rc == DEVICE_READY) {
- memcpy(data, lun_data, data_length);
- goto out;
- } else if (rc == DEVICE_ERROR)
- goto out;
- break;
- case -EAGAIN:
- rc = DEVICE_RETRY;
- break;
- case -EFAULT:
- default:
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out;
- }
- }
-out:
- if (lun_data)
- dma_free_coherent(&ioc->pdev->dev, data_length, lun_data,
- transfer_packet->data_dma);
- kfree(transfer_packet);
- if ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
- rc == DEVICE_RETRY_UA) && retry_count >= 144)
- rc = DEVICE_ERROR;
- return rc;
-}
-
-static enum device_responsive_state
-leapioraid_scsihost_start_unit(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle, u32 lun,
- u8 is_pd, u8 tr_timeout, u8 tr_method)
-{
- struct leapioraid_scsi_io_transfer *transfer_packet;
- enum device_responsive_state rc;
- int return_code;
-
- transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer),
- GFP_KERNEL);
- if (!transfer_packet) {
- rc = DEVICE_RETRY;
- goto out;
- }
-
- rc = DEVICE_READY;
- transfer_packet->handle = handle;
- transfer_packet->dir = DMA_NONE;
- transfer_packet->lun = lun;
- transfer_packet->cdb_length = 6;
- transfer_packet->cdb[0] = START_STOP;
- transfer_packet->cdb[1] = 1;
- transfer_packet->cdb[4] = 1;
- transfer_packet->timeout = 30;
- transfer_packet->is_raid = is_pd;
- pr_info("%s START_UNIT: handle(0x%04x), lun(%d)\n",
- ioc->name, handle, lun);
- return_code =
- leapioraid_scsi_send_scsi_io(
- ioc, transfer_packet, tr_timeout, tr_method);
- switch (return_code) {
- case 0:
- rc = leapioraid_scsihost_determine_disposition(
- ioc, transfer_packet);
- break;
- case -EAGAIN:
- rc = DEVICE_RETRY;
- break;
- case -EFAULT:
- default:
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = DEVICE_ERROR;
- break;
- }
-out:
- kfree(transfer_packet);
- return rc;
-}
-
-static enum device_responsive_state
-leapioraid_scsihost_test_unit_ready(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle, u32 lun,
- u8 is_pd, u8 tr_timeout, u8 tr_method)
-{
- struct leapioraid_scsi_io_transfer *transfer_packet;
- enum device_responsive_state rc;
- int return_code;
- int sata_init_failure = 0;
-
- transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer),
- GFP_KERNEL);
- if (!transfer_packet) {
- rc = DEVICE_RETRY;
- goto out;
- }
- rc = DEVICE_READY;
- transfer_packet->handle = handle;
- transfer_packet->dir = DMA_NONE;
- transfer_packet->lun = lun;
- transfer_packet->cdb_length = 6;
- transfer_packet->cdb[0] = TEST_UNIT_READY;
- transfer_packet->timeout = 30;
- transfer_packet->is_raid = is_pd;
-sata_init_retry:
- pr_info("%s TEST_UNIT_READY: handle(0x%04x), lun(%d)\n",
- ioc->name, handle, lun);
- return_code =
- leapioraid_scsi_send_scsi_io(
- ioc, transfer_packet, tr_timeout, tr_method);
- switch (return_code) {
- case 0:
- rc = leapioraid_scsihost_determine_disposition(
- ioc, transfer_packet);
- if (rc == DEVICE_RETRY &&
- transfer_packet->log_info == 0x31111000) {
- if (!sata_init_failure++) {
- pr_err(
- "%s SATA Initialization Timeout,sending a retry\n",
- ioc->name);
- rc = DEVICE_READY;
- goto sata_init_retry;
- } else {
- pr_err(
- "%s SATA Initialization Failed\n",
- ioc->name);
- rc = DEVICE_ERROR;
- }
- }
- break;
- case -EAGAIN:
- rc = DEVICE_RETRY;
- break;
- case -EFAULT:
- default:
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = DEVICE_ERROR;
- break;
- }
-out:
- kfree(transfer_packet);
- return rc;
-}
-
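-/**
- * leapioraid_scsihost_ata_pass_thru_idd - identify a SATA device
- * @ioc: per adapter object
- * @handle: device handle
- * @is_ssd_device: set to 1 when the media is solid state
- * @tr_timeout: target reset timeout
- * @tr_method: target reset method
- *
- * Issue an ATA PASS-THROUGH(12) IDENTIFY DEVICE command and inspect
- * identify word 217 (nominal media rotation rate) to detect an SSD.
- */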
-static enum device_responsive_state
-leapioraid_scsihost_ata_pass_thru_idd(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle,
- u8 *is_ssd_device, u8 tr_timeout, u8 tr_method)
-{
- struct leapioraid_scsi_io_transfer *transfer_packet;
- enum device_responsive_state rc;
- u16 *idd_data;
- int return_code;
- u32 data_length;
-
- idd_data = NULL;
- transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer),
- GFP_KERNEL);
- if (!transfer_packet) {
- rc = DEVICE_RETRY;
- goto out;
- }
- data_length = 512;
- idd_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
- &transfer_packet->data_dma, GFP_ATOMIC);
- if (!idd_data) {
- rc = DEVICE_RETRY;
- goto out;
- }
- rc = DEVICE_READY;
- memset(idd_data, 0, data_length);
- transfer_packet->handle = handle;
- transfer_packet->dir = DMA_FROM_DEVICE;
- transfer_packet->data_length = data_length;
- transfer_packet->cdb_length = 12;
- transfer_packet->cdb[0] = ATA_12;
- transfer_packet->cdb[1] = 0x8;
- transfer_packet->cdb[2] = 0xd;
- transfer_packet->cdb[3] = 0x1;
- transfer_packet->cdb[9] = 0xec;
- transfer_packet->timeout = 30;
- return_code = leapioraid_scsi_send_scsi_io(
- ioc, transfer_packet, 30, 0);
- switch (return_code) {
- case 0:
- rc = leapioraid_scsihost_determine_disposition(
- ioc, transfer_packet);
- if (rc == DEVICE_READY) {
- if (le16_to_cpu(idd_data[217]) == 1)
- *is_ssd_device = 1;
- }
- break;
- case -EAGAIN:
- rc = DEVICE_RETRY;
- break;
- case -EFAULT:
- default:
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = DEVICE_ERROR;
- break;
- }
-out:
- if (idd_data) {
- dma_free_coherent(&ioc->pdev->dev, data_length, idd_data,
- transfer_packet->data_dma);
- }
- kfree(transfer_packet);
- return rc;
-}
-
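-/**
- * leapioraid_scsihost_wait_for_device_to_become_ready - poll a LUN
- * @ioc: per adapter object
- * @handle: device handle
- * @retry_count: number of times this event has been retried
- * @is_pd: flag for RAID hidden physical devices
- * @lun: logical unit
- * @tr_timeout: target reset timeout
- * @tr_method: target reset method
- *
- * TEST UNIT READY the LUN, sending START UNIT first when the device
- * reports it is stopped.  After 144 retries the device is declared
- * DEVICE_ERROR.
- */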
-static enum device_responsive_state
-leapioraid_scsihost_wait_for_device_to_become_ready(
- struct LEAPIORAID_ADAPTER *ioc,
- u16 handle, u8 retry_count, u8 is_pd,
- int lun, u8 tr_timeout, u8 tr_method)
-{
- enum device_responsive_state rc;
-
- if (ioc->pci_error_recovery)
- return DEVICE_ERROR;
- if (ioc->shost_recovery)
- return DEVICE_RETRY;
- rc = leapioraid_scsihost_test_unit_ready(
- ioc, handle, lun, is_pd, tr_timeout,
- tr_method);
- if (rc == DEVICE_READY || rc == DEVICE_ERROR)
- return rc;
- else if (rc == DEVICE_START_UNIT) {
- rc = leapioraid_scsihost_start_unit(
- ioc, handle, lun, is_pd, tr_timeout,
- tr_method);
- if (rc == DEVICE_ERROR)
- return rc;
- rc = leapioraid_scsihost_test_unit_ready(
- ioc, handle, lun, is_pd,
- tr_timeout, tr_method);
- }
- if ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
- rc == DEVICE_RETRY_UA) && retry_count >= 144)
- rc = DEVICE_ERROR;
- return rc;
-}
-
-static enum device_responsive_state
-leapioraid_scsihost_wait_for_target_to_become_ready(
- struct LEAPIORAID_ADAPTER *ioc,
- u16 handle, u8 retry_count, u8 is_pd,
- u8 tr_timeout, u8 tr_method)
-{
- enum device_responsive_state rc;
- struct scsi_lun *lun_data;
- u32 length, num_luns;
- u8 *data;
- int lun;
- struct scsi_lun *lunp;
-
- lun_data =
- kcalloc(255, sizeof(struct scsi_lun), GFP_KERNEL);
- if (!lun_data) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return DEVICE_RETRY;
- }
- rc = leapioraid_scsihost_report_luns(ioc, handle, lun_data,
- 255 * sizeof(struct scsi_lun),
- retry_count, is_pd, tr_timeout, tr_method);
- if (rc != DEVICE_READY)
- goto out;
- data = (u8 *) lun_data;
- length = ((data[0] << 24) | (data[1] << 16) |
- (data[2] << 8) | (data[3] << 0));
- num_luns = (length / sizeof(struct scsi_lun));
- lunp = &lun_data[1];
- lun = (num_luns) ? scsilun_to_int(&lun_data[1]) : 0;
- rc = leapioraid_scsihost_wait_for_device_to_become_ready(
- ioc, handle, retry_count,
- is_pd, lun, tr_timeout,
- tr_method);
- if (rc == DEVICE_ERROR) {
- struct scsi_lun *lunq;
-
- for (lunq = lunp++; lunq <= &lun_data[num_luns]; lunq++) {
- rc = leapioraid_scsihost_wait_for_device_to_become_ready(ioc,
- handle,
- retry_count,
- is_pd,
- scsilun_to_int
- (lunq),
- tr_timeout,
- tr_method);
- if (rc != DEVICE_ERROR)
- goto out;
- }
- }
-out:
- kfree(lun_data);
- return rc;
-}
-
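-/**
- * leapioraid_scsihost_check_access_status - decode device page 0 AccessStatus
- * @ioc: per adapter object
- * @sas_address: SAS address of the device
- * @handle: device handle
- * @access_status: AccessStatus field from SAS device page 0
- *
- * Returns 0 when the device is usable; otherwise logs the failure
- * reason and returns 1.
- */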
-static u8
-leapioraid_scsihost_check_access_status(
- struct LEAPIORAID_ADAPTER *ioc, u64 sas_address,
- u16 handle, u8 access_status)
-{
- u8 rc = 1;
- char *desc = NULL;
-
- switch (access_status) {
- case LEAPIORAID_SAS_DEVICE0_ASTATUS_NO_ERRORS:
- case LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
- rc = 0;
- break;
- case LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
- desc = "sata capability failed";
- break;
- case LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
- desc = "sata affiliation conflict";
- break;
- case LEAPIORAID_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
- desc = "route not addressable";
- break;
- case LEAPIORAID_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
- desc = "smp error not addressable";
- break;
- case LEAPIORAID_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
- desc = "device blocked";
- break;
- case LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
- case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
- case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
- case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_DIAG:
- case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
- case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
- case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
- case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
- case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
- case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
- case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
- case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_MAX:
- desc = "sata initialization failed";
- break;
- default:
- desc = "unknown";
- break;
- }
- if (!rc)
- return 0;
- pr_err(
- "%s discovery errors(%s): sas_address(0x%016llx),\n\t\t"
- "handle(0x%04x)\n",
- ioc->name,
- desc,
- (unsigned long long)sas_address,
- handle);
- return rc;
-}
-
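-/**
- * leapioraid_scsihost_check_device - revalidate an existing end device
- * @ioc: per adapter object
- * @parent_sas_address: SAS address of the parent expander or host
- * @handle: device handle
- * @phy_number: phy the device is attached to
- * @link_rate: negotiated link rate
- *
- * Refresh the cached sas_device from device page 0: pick up firmware
- * handle changes, re-read enclosure data, unblock IO, and when the VPD
- * serial number no longer matches (the drive was swapped) remove the
- * old device and add the new one in its place.
- */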
-static void
-leapioraid_scsihost_check_device(struct LEAPIORAID_ADAPTER *ioc,
- u64 parent_sas_address, u16 handle, u8 phy_number,
- u8 link_rate)
-{
- struct LeapioraidCfgRep_t mpi_reply;
- struct LeapioraidSasDevP0_t sas_device_pg0;
- struct leapioraid_sas_device *sas_device = NULL;
- struct leapioraid_enclosure_node *enclosure_dev = NULL;
- u32 ioc_status;
- unsigned long flags;
- u64 sas_address;
- struct scsi_target *starget;
- struct LEAPIORAID_TARGET *sas_target_priv_data;
- u32 device_info;
- u8 *serial_number = NULL;
- u8 *original_serial_number = NULL;
- int rc;
- struct leapioraid_hba_port *port;
-
- if ((leapioraid_config_get_sas_device_pg0
- (ioc, &mpi_reply, &sas_device_pg0,
- LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
- return;
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
- & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS)
- return;
- if (phy_number != sas_device_pg0.PhyNum)
- return;
- device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
- if (!(leapioraid_scsihost_is_sas_end_device(device_info)))
- return;
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
- port = leapioraid_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
- if (!port)
- goto out_unlock;
- sas_device = __leapioraid_get_sdev_by_addr(ioc, sas_address, port);
- if (!sas_device)
- goto out_unlock;
- if (unlikely(sas_device->handle != handle)) {
- starget = sas_device->starget;
- sas_target_priv_data = starget->hostdata;
- starget_printk(KERN_INFO, starget,
- "handle changed from(0x%04x) to (0x%04x)!!!\n",
- sas_device->handle, handle);
- sas_target_priv_data->handle = handle;
- sas_device->handle = handle;
- if (le16_to_cpu(sas_device_pg0.Flags) &
- LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
- sas_device->enclosure_level =
- sas_device_pg0.EnclosureLevel;
- memcpy(sas_device->connector_name,
- sas_device_pg0.ConnectorName, 4);
- sas_device->connector_name[4] = '\0';
- } else {
- sas_device->enclosure_level = 0;
- sas_device->connector_name[0] = '\0';
- }
- sas_device->enclosure_handle =
- le16_to_cpu(sas_device_pg0.EnclosureHandle);
- sas_device->is_chassis_slot_valid = 0;
- enclosure_dev =
- leapioraid_scsihost_enclosure_find_by_handle(ioc,
- sas_device->enclosure_handle);
- if (enclosure_dev) {
- sas_device->enclosure_logical_id =
- le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
- if (le16_to_cpu(enclosure_dev->pg0.Flags) &
- LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
- sas_device->is_chassis_slot_valid = 1;
- sas_device->chassis_slot =
- enclosure_dev->pg0.ChassisSlot;
- }
- }
- }
- if (!(le16_to_cpu(sas_device_pg0.Flags) &
- LEAPIORAID_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
- pr_err("%s device is not present handle(0x%04x), flags!!!\n",
- ioc->name, handle);
- goto out_unlock;
- }
- if (leapioraid_scsihost_check_access_status(ioc, sas_address, handle,
- sas_device_pg0.AccessStatus))
- goto out_unlock;
- original_serial_number = sas_device->serial_number;
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- leapioraid_scsihost_ublock_io_device_wait(ioc, sas_address, port);
- if (!original_serial_number)
- goto out;
- if (leapioraid_scsihost_inquiry_vpd_sn(ioc, handle, &serial_number) ==
- DEVICE_READY && serial_number) {
- rc = strcmp(original_serial_number, serial_number);
- kfree(serial_number);
- if (!rc)
- goto out;
- leapioraid_device_remove_by_sas_address(ioc, sas_address, port);
- leapioraid_transport_update_links(ioc, parent_sas_address,
- handle, phy_number, link_rate,
- port);
- leapioraid_scsihost_add_device(ioc, handle, 0, 0);
- }
- goto out;
-out_unlock:
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-out:
- if (sas_device)
- leapioraid_sas_device_put(sas_device);
-}
-
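-/**
- * leapioraid_scsihost_add_device - create and register a SAS end device
- * @ioc: per adapter object
- * @handle: device handle
- * @retry_count: number of times this event has been retried
- * @is_pd: flag for RAID hidden physical devices
- *
- * Validate device page 0, optionally wait for the target to answer
- * TEST UNIT READY, then allocate a sas_device and hand it to the init
- * or normal add path.  Returns 1 when the caller should requeue the
- * event, otherwise 0.
- */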
-static int
-leapioraid_scsihost_add_device(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle, u8 retry_count,
- u8 is_pd)
-{
- struct LeapioraidCfgRep_t mpi_reply;
- struct LeapioraidSasDevP0_t sas_device_pg0;
- struct leapioraid_sas_device *sas_device;
- struct leapioraid_enclosure_node *enclosure_dev = NULL;
- u32 ioc_status;
- u64 sas_address;
- u32 device_info;
- enum device_responsive_state rc;
- u8 connector_name[5], port_id;
-
- if ((leapioraid_config_get_sas_device_pg0
- (ioc, &mpi_reply, &sas_device_pg0,
- LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return 0;
- }
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
- & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return 0;
- }
- device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
- if (!(leapioraid_scsihost_is_sas_end_device(device_info)))
- return 0;
- set_bit(handle, ioc->pend_os_device_add);
- sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
- if (!(le16_to_cpu(sas_device_pg0.Flags) &
- LEAPIORAID_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
- pr_err("%s device is not present handle(0x04%x)!!!\n",
- ioc->name, handle);
- return 0;
- }
- if (leapioraid_scsihost_check_access_status(
- ioc, sas_address, handle,
- sas_device_pg0.AccessStatus))
- return 0;
- port_id = sas_device_pg0.PhysicalPort;
- sas_device = leapioraid_get_sdev_by_addr(ioc,
- sas_address,
- leapioraid_get_port_by_id(ioc, port_id, 0));
- if (sas_device) {
- clear_bit(handle, ioc->pend_os_device_add);
- leapioraid_sas_device_put(sas_device);
- return 0;
- }
- if (le16_to_cpu(sas_device_pg0.EnclosureHandle)) {
- enclosure_dev =
- leapioraid_scsihost_enclosure_find_by_handle(ioc,
- le16_to_cpu
- (sas_device_pg0.EnclosureHandle));
- if (enclosure_dev == NULL)
- pr_info(
- "%s Enclosure handle(0x%04x)doesn't\n\t\t"
- "match with enclosure device!\n",
- ioc->name,
- le16_to_cpu(sas_device_pg0.EnclosureHandle));
- }
- if (!ioc->wait_for_discovery_to_complete) {
- pr_info(
- "%s detecting: handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
- ioc->name, handle,
- (unsigned long long)sas_address,
- sas_device_pg0.PhyNum);
- rc = leapioraid_scsihost_wait_for_target_to_become_ready(
- ioc, handle,
- retry_count,
- is_pd, 30, 0);
- if (rc != DEVICE_READY) {
- if (le16_to_cpu(sas_device_pg0.EnclosureHandle) != 0)
- dewtprintk(ioc,
- pr_info("%s %s: device not ready: slot(%d)\n",
- ioc->name, __func__,
- le16_to_cpu(sas_device_pg0.Slot)));
- if (le16_to_cpu(sas_device_pg0.Flags) &
- LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
- memcpy(connector_name,
- sas_device_pg0.ConnectorName, 4);
- connector_name[4] = '\0';
- dewtprintk(ioc,
- pr_info(
- "%s %s: device not ready: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, __func__,
- sas_device_pg0.EnclosureLevel,
- connector_name));
- }
- if ((enclosure_dev)
- && (le16_to_cpu(enclosure_dev->pg0.Flags) &
- LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID))
- pr_err(
- "%s chassis slot(0x%04x)\n", ioc->name,
- enclosure_dev->pg0.ChassisSlot);
- if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT
- || rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
- return 1;
- else if (rc == DEVICE_ERROR)
- return 0;
- }
- }
- sas_device = kzalloc(sizeof(struct leapioraid_sas_device),
- GFP_KERNEL);
- if (!sas_device)
- return 0;
-
- kref_init(&sas_device->refcount);
- sas_device->handle = handle;
- if (leapioraid_scsihost_get_sas_address(ioc,
- le16_to_cpu(sas_device_pg0.ParentDevHandle),
- &sas_device->sas_address_parent) != 0)
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- sas_device->enclosure_handle =
- le16_to_cpu(sas_device_pg0.EnclosureHandle);
- if (sas_device->enclosure_handle != 0)
- sas_device->slot = le16_to_cpu(sas_device_pg0.Slot);
- sas_device->device_info = device_info;
- sas_device->sas_address = sas_address;
- sas_device->port = leapioraid_get_port_by_id(ioc, port_id, 0);
- if (!sas_device->port) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out;
- }
- sas_device->phy = sas_device_pg0.PhyNum;
- sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
- LEAPIORAID_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ?
- 1 : 0;
- sas_device->supports_sata_smart =
- (le16_to_cpu(sas_device_pg0.Flags) &
- LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED);
- if (le16_to_cpu(sas_device_pg0.Flags) &
- LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
- sas_device->enclosure_level = sas_device_pg0.EnclosureLevel;
- memcpy(sas_device->connector_name,
- sas_device_pg0.ConnectorName, 4);
- sas_device->connector_name[4] = '\0';
- } else {
- sas_device->enclosure_level = 0;
- sas_device->connector_name[0] = '\0';
- }
- sas_device->is_chassis_slot_valid = 0;
- if (enclosure_dev) {
- sas_device->enclosure_logical_id =
- le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
- if (le16_to_cpu(enclosure_dev->pg0.Flags) &
- LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
- sas_device->is_chassis_slot_valid = 1;
- sas_device->chassis_slot =
- enclosure_dev->pg0.ChassisSlot;
- }
- }
- sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
- sas_device->port_type = sas_device_pg0.MaxPortConnections;
- pr_err(
- "%s handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n",
- ioc->name, handle, sas_device->sas_address,
- sas_device->port_type);
- if (ioc->wait_for_discovery_to_complete)
- leapioraid_scsihost_sas_device_init_add(ioc, sas_device);
- else
- leapioraid_scsihost_sas_device_add(ioc, sas_device);
-out:
- leapioraid_sas_device_put(sas_device);
- return 0;
-}
-
-static void
-leapioraid_scsihost_remove_device(struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_sas_device *sas_device)
-{
- struct LEAPIORAID_TARGET *sas_target_priv_data;
-
- if (sas_device->pfa_led_on) {
- leapioraid_scsihost_turn_off_pfa_led(ioc, sas_device);
- sas_device->pfa_led_on = 0;
- }
- dewtprintk(ioc, pr_info(
- "%s %s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, __func__, sas_device->handle,
- (unsigned long long)sas_device->sas_address));
- dewtprintk(ioc,
- leapioraid_scsihost_display_enclosure_chassis_info(
- ioc, sas_device, NULL, NULL));
- if (sas_device->starget && sas_device->starget->hostdata) {
- sas_target_priv_data = sas_device->starget->hostdata;
- sas_target_priv_data->deleted = 1;
- leapioraid_scsihost_ublock_io_device(
- ioc, sas_device->sas_address,
- sas_device->port);
- sas_target_priv_data->handle =
- LEAPIORAID_INVALID_DEVICE_HANDLE;
- }
- if (!ioc->hide_drives)
- leapioraid_transport_port_remove(ioc,
- sas_device->sas_address,
- sas_device->sas_address_parent,
- sas_device->port);
- pr_info("%s removing handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, sas_device->handle,
- (unsigned long long)sas_device->sas_address);
- leapioraid_scsihost_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
- dewtprintk(ioc, pr_info(
- "%s %s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, __func__, sas_device->handle,
- (unsigned long long)
- sas_device->sas_address));
- dewtprintk(ioc,
- leapioraid_scsihost_display_enclosure_chassis_info(
- ioc, sas_device, NULL, NULL));
- kfree(sas_device->serial_number);
-}
-
-static void
-leapioraid_scsihost_sas_topology_change_event_debug(
- struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidEventDataSasTopoChangeList_t *event_data)
-{
- int i;
- u16 handle;
- u16 reason_code;
- u8 phy_number;
- char *status_str = NULL;
- u8 link_rate, prev_link_rate;
-
- switch (event_data->ExpStatus) {
- case LEAPIORAID_EVENT_SAS_TOPO_ES_ADDED:
- status_str = "add";
- break;
- case LEAPIORAID_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
- status_str = "remove";
- break;
- case LEAPIORAID_EVENT_SAS_TOPO_ES_RESPONDING:
- case 0:
- status_str = "responding";
- break;
- case LEAPIORAID_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
- status_str = "remove delay";
- break;
- default:
- status_str = "unknown status";
- break;
- }
- pr_info("%s sas topology change: (%s)\n",
- ioc->name, status_str);
- pr_info(
- "\thandle(0x%04x), enclosure_handle(0x%04x)\n\t\t"
- "start_phy(%02d), count(%d)\n",
- le16_to_cpu(event_data->ExpanderDevHandle),
- le16_to_cpu(event_data->EnclosureHandle),
- event_data->StartPhyNum,
- event_data->NumEntries);
- for (i = 0; i < event_data->NumEntries; i++) {
- handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
- if (!handle)
- continue;
- phy_number = event_data->StartPhyNum + i;
- reason_code = event_data->PHY[i].PhyStatus &
- LEAPIORAID_EVENT_SAS_TOPO_RC_MASK;
- switch (reason_code) {
- case LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_ADDED:
- status_str = "target add";
- break;
- case LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
- status_str = "target remove";
- break;
- case LEAPIORAID_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
- status_str = "delay target remove";
- break;
- case LEAPIORAID_EVENT_SAS_TOPO_RC_PHY_CHANGED:
- status_str = "link rate change";
- break;
- case LEAPIORAID_EVENT_SAS_TOPO_RC_NO_CHANGE:
- status_str = "target responding";
- break;
- default:
- status_str = "unknown";
- break;
- }
- link_rate = event_data->PHY[i].LinkRate >> 4;
- prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
- pr_info(
- "\tphy(%02d), attached_handle(0x%04x): %s:\n\t\t"
- "link rate: new(0x%02x), old(0x%02x)\n",
- phy_number,
- handle,
- status_str,
- link_rate,
- prev_link_rate);
- }
-}
-
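-/**
- * leapioraid_scsihost_sas_topology_change_event - handle a topology event
- * @ioc: per adapter object
- * @fw_event: firmware event work object
- *
- * Walk the phy entries of a SAS topology change list: add expanders and
- * end devices, update transport link rates, and remove devices that
- * stopped responding.  Returns non-zero when the event must be
- * requeued because a device was not yet ready.
- */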
-static int
-leapioraid_scsihost_sas_topology_change_event(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_fw_event_work *fw_event)
-{
- int i;
- u16 parent_handle, handle;
- u16 reason_code;
- u8 phy_number, max_phys;
- struct leapioraid_raid_sas_node *sas_expander;
- struct leapioraid_sas_device *sas_device;
- u64 sas_address;
- unsigned long flags;
- u8 link_rate, prev_link_rate;
- int rc;
- int requeue_event;
- struct leapioraid_hba_port *port;
- struct LeapioraidEventDataSasTopoChangeList_t *event_data =
- fw_event->event_data;
-
- if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK)
- leapioraid_scsihost_sas_topology_change_event_debug(
- ioc, event_data);
- if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
- return 0;
- if (!ioc->sas_hba.num_phys)
- leapioraid_scsihost_sas_host_add(ioc);
- else
- leapioraid_scsihost_sas_host_refresh(ioc);
- if (fw_event->ignore) {
- dewtprintk(ioc,
- pr_info("%s ignoring expander event\n",
- ioc->name));
- return 0;
- }
- parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
- port = leapioraid_get_port_by_id(ioc, event_data->PhysicalPort, 0);
- if (event_data->ExpStatus == LEAPIORAID_EVENT_SAS_TOPO_ES_ADDED)
- if (leapioraid_scsihost_expander_add(ioc, parent_handle) != 0)
- return 0;
- spin_lock_irqsave(&ioc->sas_node_lock, flags);
- sas_expander = leapioraid_scsihost_expander_find_by_handle(
- ioc, parent_handle);
- if (sas_expander) {
- sas_address = sas_expander->sas_address;
- max_phys = sas_expander->num_phys;
- port = sas_expander->port;
- } else if (parent_handle < ioc->sas_hba.num_phys) {
- sas_address = ioc->sas_hba.sas_address;
- max_phys = ioc->sas_hba.num_phys;
- } else {
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- return 0;
- }
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- for (i = 0, requeue_event = 0; i < event_data->NumEntries; i++) {
- if (fw_event->ignore) {
- dewtprintk(ioc, pr_info(
- "%s ignoring expander event\n",
- ioc->name));
- return 0;
- }
- if (ioc->remove_host || ioc->pci_error_recovery)
- return 0;
- phy_number = event_data->StartPhyNum + i;
- if (phy_number >= max_phys)
- continue;
- reason_code = event_data->PHY[i].PhyStatus &
- LEAPIORAID_EVENT_SAS_TOPO_RC_MASK;
- if ((event_data->PHY[i].PhyStatus &
- LEAPIORAID_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
- LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
- continue;
- if (fw_event->delayed_work_active && (reason_code ==
- LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
- dewtprintk(ioc,
- pr_info(
- "%s ignoring Targ not responding\n\t\t"
- "event phy in re-queued event processing\n",
- ioc->name));
- continue;
- }
- handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
- if (!handle)
- continue;
- link_rate = event_data->PHY[i].LinkRate >> 4;
- prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
- switch (reason_code) {
- case LEAPIORAID_EVENT_SAS_TOPO_RC_PHY_CHANGED:
- if (ioc->shost_recovery)
- break;
- if (link_rate == prev_link_rate)
- break;
- leapioraid_transport_update_links(ioc, sas_address,
- handle, phy_number,
- link_rate, port);
- if (link_rate < LEAPIORAID_SAS_NEG_LINK_RATE_1_5)
- break;
- leapioraid_scsihost_check_device(ioc, sas_address, handle,
- phy_number, link_rate);
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __leapioraid_get_sdev_by_handle(ioc,
- handle);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (sas_device) {
- leapioraid_sas_device_put(sas_device);
- break;
- }
- if (!test_bit(handle, ioc->pend_os_device_add))
- break;
- dewtprintk(ioc, pr_err(
- "%s handle(0x%04x) device not found:\n\t\t"
- "convert event to a device add\n",
- ioc->name, handle));
- event_data->PHY[i].PhyStatus &= 0xF0;
- event_data->PHY[i].PhyStatus |=
- LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_ADDED;
- fallthrough;
- case LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_ADDED:
- if (ioc->shost_recovery)
- break;
- leapioraid_transport_update_links(ioc, sas_address,
- handle, phy_number,
- link_rate, port);
- if (link_rate < LEAPIORAID_SAS_NEG_LINK_RATE_1_5)
- break;
- rc = leapioraid_scsihost_add_device(ioc, handle,
- fw_event->retries[i], 0);
- if (rc) {
- fw_event->retries[i]++;
- requeue_event = 1;
- } else {
- event_data->PHY[i].PhyStatus |=
- LEAPIORAID_EVENT_SAS_TOPO_PHYSTATUS_VACANT;
- }
- break;
- case LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
- leapioraid_scsihost_device_remove_by_handle(ioc, handle);
- break;
- }
- }
- if (event_data->ExpStatus == LEAPIORAID_EVENT_SAS_TOPO_ES_NOT_RESPONDING
- && sas_expander)
- leapioraid_expander_remove(ioc, sas_address, port);
- return requeue_event;
-}
-
-static void
-leapioraid_scsihost_sas_device_status_change_event_debug(
- struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidEventDataSasDeviceStatusChange_t *event_data)
-{
- char *reason_str = NULL;
-
- switch (event_data->ReasonCode) {
- case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
- reason_str = "smart data";
- break;
- case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
- reason_str = "unsupported device discovered";
- break;
- case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
- reason_str = "internal device reset";
- break;
- case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
- reason_str = "internal task abort";
- break;
- case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
- reason_str = "internal task abort set";
- break;
- case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
- reason_str = "internal clear task set";
- break;
- case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
- reason_str = "internal query task";
- break;
- case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
- reason_str = "sata init failure";
- break;
- case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
- reason_str = "internal device reset complete";
- break;
- case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
- reason_str = "internal task abort complete";
- break;
- case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
- reason_str = "internal async notification";
- break;
- case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
- reason_str = "expander reduced functionality";
- break;
- case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
- reason_str = "expander reduced functionality complete";
- break;
- default:
- reason_str = "unknown reason";
- break;
- }
- pr_info("%s device status change: (%s)\n"
- "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
- ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
- (unsigned long long)le64_to_cpu(event_data->SASAddress),
- le16_to_cpu(event_data->TaskTag));
- if (event_data->ReasonCode == LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
- pr_info("%s , ASC(0x%x), ASCQ(0x%x)\n",
- ioc->name, event_data->ASC, event_data->ASCQ);
- pr_info("\n");
-}
-
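- /*
-  * leapioraid_scsihost_sas_device_status_change_event - track internal
-  * device resets so task management is not issued to a busy target.
-  *
-  * Ignored on firmware reporting a header version below 0x0C.  Only the
-  * internal-device-reset reason code and its completion are of
-  * interest: they toggle target_priv_data->tm_busy under the
-  * sas_device_lock.
-  */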
-static void
-leapioraid_scsihost_sas_device_status_change_event(
- struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidEventDataSasDeviceStatusChange_t *event_data)
-{
- struct LEAPIORAID_TARGET *target_priv_data;
- struct leapioraid_sas_device *sas_device;
- u64 sas_address;
- unsigned long flags;
-
- if ((ioc->facts.HeaderVersion >> 8) < 0xC)
- return;
- if (event_data->ReasonCode !=
- LEAPIORAID_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
- event_data->ReasonCode !=
- LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
- return;
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_address = le64_to_cpu(event_data->SASAddress);
- sas_device = __leapioraid_get_sdev_by_addr(
- ioc, sas_address,
- leapioraid_get_port_by_id(ioc, event_data->PhysicalPort, 0));
- if (!sas_device || !sas_device->starget)
- goto out;
- target_priv_data = sas_device->starget->hostdata;
- if (!target_priv_data)
- goto out;
- if (event_data->ReasonCode ==
- LEAPIORAID_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
- target_priv_data->tm_busy = 1;
- else
- target_priv_data->tm_busy = 0;
- if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK)
- pr_err(
- "%s %s tm_busy flag for handle(0x%04x)\n", ioc->name,
- (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
- target_priv_data->handle);
-out:
- if (sas_device)
- leapioraid_sas_device_put(sas_device);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-}
-
-static void
-leapioraid_scsihost_sas_enclosure_dev_status_change_event_debug(
- struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidEventDataSasEnclDevStatusChange_t *event_data)
-{
- char *reason_str = NULL;
-
- switch (event_data->ReasonCode) {
- case LEAPIORAID_EVENT_SAS_ENCL_RC_ADDED:
- reason_str = "enclosure add";
- break;
- case LEAPIORAID_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
- reason_str = "enclosure remove";
- break;
- default:
- reason_str = "unknown reason";
- break;
- }
- pr_info(
- "%s enclosure status change: (%s)\n\thandle(0x%04x),\n\t\t"
- "enclosure logical id(0x%016llx) number slots(%d)\n",
- ioc->name,
- reason_str,
- le16_to_cpu(event_data->EnclosureHandle),
- (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID),
- le16_to_cpu(event_data->StartSlot));
-}
-
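- /*
-  * leapioraid_scsihost_sas_enclosure_dev_status_change_event - keep
-  * ioc->enclosure_list in sync with enclosure add/remove events.
-  *
-  * On LEAPIORAID_EVENT_SAS_ENCL_RC_ADDED a leapioraid_enclosure_node is
-  * allocated and populated from enclosure page 0; on
-  * LEAPIORAID_EVENT_SAS_ENCL_RC_NOT_RESPONDING the cached node is freed.
-  */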
-static void
-leapioraid_scsihost_sas_enclosure_dev_status_change_event(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_fw_event_work *fw_event)
-{
- struct LeapioraidCfgRep_t mpi_reply;
- struct leapioraid_enclosure_node *enclosure_dev = NULL;
- struct LeapioraidEventDataSasEnclDevStatusChange_t *event_data =
- fw_event->event_data;
- int rc;
-
- if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK)
- leapioraid_scsihost_sas_enclosure_dev_status_change_event_debug(
- ioc, fw_event->event_data);
- if (ioc->shost_recovery)
- return;
- event_data->EnclosureHandle = le16_to_cpu(event_data->EnclosureHandle);
- if (event_data->EnclosureHandle)
- enclosure_dev =
- leapioraid_scsihost_enclosure_find_by_handle(ioc,
- event_data->EnclosureHandle);
- switch (event_data->ReasonCode) {
- case LEAPIORAID_EVENT_SAS_ENCL_RC_ADDED:
- if (!enclosure_dev) {
- enclosure_dev =
- kzalloc(sizeof(struct leapioraid_enclosure_node), GFP_KERNEL);
- if (!enclosure_dev) {
- pr_err("%s failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
- return;
- }
- rc = leapioraid_config_get_enclosure_pg0(ioc,
- &mpi_reply,
- &enclosure_dev->pg0,
- LEAPIORAID_SAS_ENCLOS_PGAD_FORM_HANDLE,
- event_data->EnclosureHandle);
- if (rc
- || (le16_to_cpu(mpi_reply.IOCStatus) &
- LEAPIORAID_IOCSTATUS_MASK)) {
- kfree(enclosure_dev);
- return;
- }
- list_add_tail(&enclosure_dev->list,
- &ioc->enclosure_list);
- }
- break;
- case LEAPIORAID_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
- if (enclosure_dev) {
- list_del(&enclosure_dev->list);
- kfree(enclosure_dev);
- }
- break;
- default:
- break;
- }
-}
-
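- /*
-  * leapioraid_scsihost_sas_broadcast_primitive_event - audit every
-  * outstanding SCSI command after a SAS broadcast primitive.
-  *
-  * Each active smid is checked with a QUERY_TASK task management
-  * request; commands the firmware no longer owns are cleaned up with
-  * ABORT_TASK (up to 60 retries per command, 5 passes overall).  I/O is
-  * blocked for the duration and unblocked on exit unless host recovery
-  * is in progress.
-  */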
-static void
-leapioraid_scsihost_sas_broadcast_primitive_event(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_fw_event_work *fw_event)
-{
- struct scsi_cmnd *scmd;
- struct scsi_device *sdev;
- u16 smid, handle;
- u32 lun;
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- u32 termination_count;
- u32 query_count;
- struct LeapioraidSCSITmgRep_t *mpi_reply;
- struct LeapioraidEventDataSasBroadcastPrimitive_t *event_data =
- fw_event->event_data;
- u16 ioc_status;
- unsigned long flags;
- int r;
- u8 max_retries = 0;
- u8 task_abort_retries;
- struct leapioraid_scsiio_tracker *st;
-
- mutex_lock(&ioc->tm_cmds.mutex);
- dewtprintk(ioc,
- pr_info(
- "%s %s: enter: phy number(%d), width(%d)\n",
- ioc->name, __func__,
- event_data->PhyNum, event_data->PortWidth));
- leapioraid_scsihost_block_io_all_device(ioc);
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- mpi_reply = ioc->tm_cmds.reply;
-broadcast_aen_retry:
- if (max_retries++ == 5) {
- dewtprintk(ioc, pr_info("%s %s: giving up\n",
- ioc->name, __func__));
- goto out;
- } else if (max_retries > 1)
- dewtprintk(ioc, pr_info("%s %s: %d retry\n",
- ioc->name, __func__, max_retries - 1));
- termination_count = 0;
- query_count = 0;
- for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
- if (ioc->shost_recovery)
- goto out;
- scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid);
- if (!scmd)
- continue;
- st = leapioraid_base_scsi_cmd_priv(scmd);
- if (!st || st->smid == 0)
- continue;
- sdev = scmd->device;
- sas_device_priv_data = sdev->hostdata;
- if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
- continue;
- if (sas_device_priv_data->sas_target->flags &
- LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT)
- continue;
- if (sas_device_priv_data->sas_target->flags &
- LEAPIORAID_TARGET_FLAGS_VOLUME)
- continue;
- handle = sas_device_priv_data->sas_target->handle;
- lun = sas_device_priv_data->lun;
- query_count++;
- if (ioc->shost_recovery)
- goto out;
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- r = leapioraid_scsihost_issue_tm(ioc, handle, 0, 0, lun,
- LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK,
- st->smid, 30, 0);
- if (r == FAILED) {
- sdev_printk(KERN_WARNING, sdev,
- "leapioraid_scsihost_issue_tm:\n\t\t"
- "FAILED when sending QUERY_TASK: scmd(%p)\n",
- scmd);
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- goto broadcast_aen_retry;
- }
- ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
- & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- sdev_printk(KERN_WARNING, sdev,
- "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
- ioc_status, scmd);
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- goto broadcast_aen_retry;
- }
- if (mpi_reply->ResponseCode ==
- LEAPIORAID_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
- mpi_reply->ResponseCode ==
- LEAPIORAID_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- continue;
- }
- task_abort_retries = 0;
-tm_retry:
- if (task_abort_retries++ == 60) {
- dewtprintk(ioc, pr_err(
- "%s %s: ABORT_TASK: giving up\n",
- ioc->name, __func__));
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- goto broadcast_aen_retry;
- }
- if (ioc->shost_recovery)
- goto out_no_lock;
- r = leapioraid_scsihost_issue_tm(ioc, handle, sdev->channel,
- sdev->id, sdev->lun,
- LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
- st->smid, 30, 0);
- if (r == FAILED) {
- sdev_printk(KERN_WARNING, sdev,
- "ABORT_TASK: FAILED : scmd(%p)\n", scmd);
- goto tm_retry;
- }
- if (task_abort_retries > 1)
- sdev_printk(KERN_WARNING, sdev,
- "leapioraid_scsihost_issue_tm:\n\t\t"
- "ABORT_TASK: RETRIES (%d): scmd(%p)\n",
- task_abort_retries - 1,
- scmd);
- termination_count += le32_to_cpu(mpi_reply->TerminationCount);
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- }
- if (ioc->broadcast_aen_pending) {
- dewtprintk(ioc,
- pr_info("%s %s: loop back due to pending AEN\n",
- ioc->name, __func__));
- ioc->broadcast_aen_pending = 0;
- goto broadcast_aen_retry;
- }
-out:
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-out_no_lock:
- dewtprintk(ioc, pr_err(
- "%s %s - exit, query_count = %d termination_count = %d\n",
- ioc->name, __func__, query_count,
- termination_count));
- ioc->broadcast_aen_busy = 0;
- if (!ioc->shost_recovery)
- leapioraid_scsihost_ublock_io_all_device(ioc, 1);
- mutex_unlock(&ioc->tm_cmds.mutex);
-}
-
-static void
-leapioraid_scsihost_sas_discovery_event(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_fw_event_work *fw_event)
-{
- struct LeapioraidEventDataSasDiscovery_t *event_data
- = fw_event->event_data;
-
- if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) {
- pr_info("%s sas discovery event: (%s)",
- ioc->name,
- (event_data->ReasonCode ==
- LEAPIORAID_EVENT_SAS_DISC_RC_STARTED) ? "start" : "stop");
- if (event_data->DiscoveryStatus)
- pr_info("discovery_status(0x%08x)",
- le32_to_cpu(event_data->DiscoveryStatus));
- pr_info("\n");
- }
- if (event_data->ReasonCode == LEAPIORAID_EVENT_SAS_DISC_RC_STARTED &&
- !ioc->sas_hba.num_phys) {
- if (disable_discovery > 0 && ioc->shost_recovery) {
- while (ioc->shost_recovery)
- ssleep(1);
- }
- leapioraid_scsihost_sas_host_add(ioc);
- }
-}
-
-static void
-leapioraid_scsihost_sas_device_discovery_error_event(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_fw_event_work *fw_event)
-{
- struct LeapioraidEventDataSasDeviceDiscoveryError_t *event_data =
- fw_event->event_data;
-
- switch (event_data->ReasonCode) {
- case LEAPIORAID_EVENT_SAS_DISC_ERR_SMP_FAILED:
- pr_warn(
- "%s SMP command sent to the expander(handle:0x%04x,\n\t\t"
- "sas_address:0x%016llx,physical_port:0x%02x) has failed\n",
- ioc->name,
- le16_to_cpu(event_data->DevHandle),
- (unsigned long long)le64_to_cpu(event_data->SASAddress),
- event_data->PhysicalPort);
- break;
- case LEAPIORAID_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
- pr_warn(
- "%s SMP command sent to the expander(handle:0x%04x,\n\t\t"
- "sas_address:0x%016llx,physical_port:0x%02x) has timed out\n",
- ioc->name,
- le16_to_cpu(event_data->DevHandle),
- (unsigned long long)le64_to_cpu(event_data->SASAddress),
- event_data->PhysicalPort);
- break;
- default:
- break;
- }
-}
-
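- /*
-  * leapioraid_scsihost_ir_fastpath - issue RAID action 0x24 to turn the
-  * fast path on for a hidden physical disk.
-  *
-  * Runs over the ioc->scsih_cmds internal command context and waits up
-  * to 10 seconds for completion; on timeout,
-  * leapioraid_check_cmd_timeout() decides whether a hard adapter reset
-  * is required.
-  */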
-static int
-leapioraid_scsihost_ir_fastpath(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle,
- u8 phys_disk_num)
-{
- struct LeapioraidRaidActionReq_t *mpi_request;
- struct LeapioraidRaidActionRep_t *mpi_reply;
- u16 smid;
- u8 issue_reset = 0;
- int rc = 0;
- u16 ioc_status;
- u32 log_info;
-
- mutex_lock(&ioc->scsih_cmds.mutex);
- if (ioc->scsih_cmds.status != LEAPIORAID_CMD_NOT_USED) {
- pr_err("%s %s: scsih_cmd in use\n",
- ioc->name, __func__);
- rc = -EAGAIN;
- goto out;
- }
- ioc->scsih_cmds.status = LEAPIORAID_CMD_PENDING;
- smid = leapioraid_base_get_smid(ioc, ioc->scsih_cb_idx);
- if (!smid) {
- pr_err("%s %s: failed obtaining a smid\n",
- ioc->name, __func__);
- ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED;
- rc = -EAGAIN;
- goto out;
- }
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- ioc->scsih_cmds.smid = smid;
- memset(mpi_request, 0, sizeof(struct LeapioraidRaidActionReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_RAID_ACTION;
- mpi_request->Action = 0x24;
- mpi_request->PhysDiskNum = phys_disk_num;
- dewtprintk(ioc, pr_info(
- "%s IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
- ioc->name, handle, phys_disk_num));
- init_completion(&ioc->scsih_cmds.done);
- ioc->put_smid_default(ioc, smid);
- wait_for_completion_timeout(&ioc->scsih_cmds.done, 10 * HZ);
- if (!(ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
- leapioraid_check_cmd_timeout(ioc,
- ioc->scsih_cmds.status,
- mpi_request,
- sizeof(struct LeapioraidRaidActionReq_t)
- / 4, issue_reset);
- rc = -EFAULT;
- goto out;
- }
- if (ioc->scsih_cmds.status & LEAPIORAID_CMD_REPLY_VALID) {
- mpi_reply = ioc->scsih_cmds.reply;
- ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
- if (ioc_status & LEAPIORAID_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
- log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
- else
- log_info = 0;
- ioc_status &= LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- dewtprintk(ioc, pr_err(
- "%s IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
- ioc->name, ioc_status,
- log_info));
- rc = -EFAULT;
- } else
- dewtprintk(ioc, pr_err(
- "%s IR RAID_ACTION: completed successfully\n",
- ioc->name));
- }
-out:
- ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED;
- mutex_unlock(&ioc->scsih_cmds.mutex);
- if (issue_reset)
- leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- return rc;
-}
-
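- /*
-  * leapioraid_scsihost_reprobe_lun - flip sdev->no_uld_attach and
-  * reprobe, hiding a device from (or exposing it to) the SCSI upper
-  * layer drivers as it joins or leaves a RAID volume.
-  */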
-static void
-leapioraid_scsihost_reprobe_lun(
- struct scsi_device *sdev, void *no_uld_attach)
-{
- int rc;
-
- sdev->no_uld_attach = no_uld_attach ? 1 : 0;
- sdev_printk(KERN_INFO, sdev, "%s raid component\n",
- sdev->no_uld_attach ? "hiding" : "exposing");
- rc = scsi_device_reprobe(sdev);
- pr_info("%s rc=%d\n", __func__, rc);
-}
-
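- /*
-  * leapioraid_scsihost_sas_volume_add - register a new RAID volume.
-  *
-  * De-duplicates by WWID, allocates a leapioraid_raid_device on
-  * RAID_CHANNEL and either adds it to the SCSI midlayer immediately or,
-  * while discovery is still running, defers to boot-device selection.
-  */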
-static void
-leapioraid_scsihost_sas_volume_add(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidEventIrCfgEle_t *element)
-{
- struct leapioraid_raid_device *raid_device;
- unsigned long flags;
- u64 wwid;
- u16 handle = le16_to_cpu(element->VolDevHandle);
- int rc;
-
- leapioraid_config_get_volume_wwid(ioc, handle, &wwid);
- if (!wwid) {
- pr_err("%s failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
- return;
- }
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- raid_device = leapioraid_scsihost_raid_device_find_by_wwid(
- ioc, wwid);
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- if (raid_device)
- return;
- raid_device = kzalloc(sizeof(struct leapioraid_raid_device),
- GFP_KERNEL);
- if (!raid_device)
- return;
-
- raid_device->id = ioc->sas_id++;
- raid_device->channel = RAID_CHANNEL;
- raid_device->handle = handle;
- raid_device->wwid = wwid;
- leapioraid_scsihost_raid_device_add(ioc, raid_device);
- if (!ioc->wait_for_discovery_to_complete) {
- rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
- raid_device->id, 0);
- if (rc)
- leapioraid_scsihost_raid_device_remove(ioc, raid_device);
- } else {
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- leapioraid_scsihost_determine_boot_device(
- ioc, raid_device, RAID_CHANNEL);
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- }
-}
-
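- /*
-  * leapioraid_scsihost_sas_volume_delete - remove a RAID volume by
-  * handle: mark the stale target deleted under the raid_device_lock,
-  * drop the raid_device, then remove the scsi_target outside the lock.
-  */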
-static void
-leapioraid_scsihost_sas_volume_delete(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle)
-{
- struct leapioraid_raid_device *raid_device;
- unsigned long flags;
- struct LEAPIORAID_TARGET *sas_target_priv_data;
- struct scsi_target *starget = NULL;
-
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- raid_device = leapioraid_raid_device_find_by_handle(ioc, handle);
- if (raid_device) {
- if (raid_device->starget) {
- starget = raid_device->starget;
- sas_target_priv_data = starget->hostdata;
- sas_target_priv_data->deleted = 1;
- }
- pr_info("%s removing handle(0x%04x), wwid(0x%016llx)\n",
- ioc->name, raid_device->handle,
- (unsigned long long)raid_device->wwid);
- list_del(&raid_device->list);
- kfree(raid_device);
- }
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- if (starget)
- scsi_remove_target(&starget->dev);
-}
-
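- /*
-  * leapioraid_scsihost_sas_pd_expose - a physical disk has left a
-  * volume: clear it from ioc->pd_handles, drop its RAID-component flag
-  * and cached volume handle/WWID, and reprobe its LUNs so the upper
-  * layer drivers attach again.
-  */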
-static void
-leapioraid_scsihost_sas_pd_expose(
- struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidEventIrCfgEle_t *element)
-{
- struct leapioraid_sas_device *sas_device;
- struct scsi_target *starget = NULL;
- struct LEAPIORAID_TARGET *sas_target_priv_data;
- unsigned long flags;
- u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
-
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __leapioraid_get_sdev_by_handle(ioc, handle);
- if (sas_device) {
- sas_device->volume_handle = 0;
- sas_device->volume_wwid = 0;
- clear_bit(handle, ioc->pd_handles);
- if (sas_device->starget && sas_device->starget->hostdata) {
- starget = sas_device->starget;
- sas_target_priv_data = starget->hostdata;
- sas_target_priv_data->flags &=
- ~LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT;
- sas_device->pfa_led_on = 0;
- leapioraid_sas_device_put(sas_device);
- }
- }
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (!sas_device)
- return;
- if (starget)
- starget_for_each_device(starget, NULL, leapioraid_scsihost_reprobe_lun);
-}
-
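- /*
-  * leapioraid_scsihost_sas_pd_hide - a physical disk has joined a
-  * volume: record the owning volume handle/WWID, set the
-  * RAID-component flag, enable the fast path and reprobe its LUNs so
-  * the upper layer drivers detach.
-  */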
-static void
-leapioraid_scsihost_sas_pd_hide(
- struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidEventIrCfgEle_t *element)
-{
- struct leapioraid_sas_device *sas_device;
- struct scsi_target *starget = NULL;
- struct LEAPIORAID_TARGET *sas_target_priv_data;
- unsigned long flags;
- u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
- u16 volume_handle = 0;
- u64 volume_wwid = 0;
-
- leapioraid_config_get_volume_handle(ioc, handle, &volume_handle);
- if (volume_handle)
- leapioraid_config_get_volume_wwid(ioc, volume_handle,
- &volume_wwid);
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __leapioraid_get_sdev_by_handle(ioc, handle);
- if (sas_device) {
- set_bit(handle, ioc->pd_handles);
- if (sas_device->starget && sas_device->starget->hostdata) {
- starget = sas_device->starget;
- sas_target_priv_data = starget->hostdata;
- sas_target_priv_data->flags |=
- LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT;
- sas_device->volume_handle = volume_handle;
- sas_device->volume_wwid = volume_wwid;
- leapioraid_sas_device_put(sas_device);
- }
- }
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (!sas_device)
- return;
- leapioraid_scsihost_ir_fastpath(ioc, handle, element->PhysDiskNum);
- if (starget)
- starget_for_each_device(starget, (void *)1,
- leapioraid_scsihost_reprobe_lun);
-}
-
-static void
-leapioraid_scsihost_sas_pd_delete(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidEventIrCfgEle_t *element)
-{
- u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
-
- leapioraid_scsihost_device_remove_by_handle(ioc, handle);
-}
-
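- /*
-  * leapioraid_scsihost_sas_pd_add - ensure a hidden physical disk is
-  * present: if the handle is unknown, read device page 0, refresh the
-  * parent links, enable the fast path and add the device as a RAID
-  * component.
-  */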
-static void
-leapioraid_scsihost_sas_pd_add(struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidEventIrCfgEle_t *element)
-{
- struct leapioraid_sas_device *sas_device;
- u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
- struct LeapioraidCfgRep_t mpi_reply;
- struct LeapioraidSasDevP0_t sas_device_pg0;
- u32 ioc_status;
- u64 sas_address;
- u16 parent_handle;
-
- set_bit(handle, ioc->pd_handles);
- sas_device = leapioraid_get_sdev_by_handle(ioc, handle);
- if (sas_device) {
- leapioraid_scsihost_ir_fastpath(ioc, handle, element->PhysDiskNum);
- leapioraid_sas_device_put(sas_device);
- return;
- }
- 	if (leapioraid_config_get_sas_device_pg0(ioc, &mpi_reply,
- 			&sas_device_pg0,
- 			LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
- parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
- if (!leapioraid_scsihost_get_sas_address(ioc, parent_handle, &sas_address))
- leapioraid_transport_update_links(ioc, sas_address, handle,
- sas_device_pg0.PhyNum,
- LEAPIORAID_SAS_NEG_LINK_RATE_1_5,
- leapioraid_get_port_by_id(ioc,
- sas_device_pg0.PhysicalPort,
- 0));
- leapioraid_scsihost_ir_fastpath(ioc, handle, element->PhysDiskNum);
- leapioraid_scsihost_add_device(ioc, handle, 0, 1);
-}
-
-static void
-leapioraid_scsihost_sas_ir_config_change_event_debug(
- struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidEventDataIrCfgChangeList_t *event_data)
-{
- struct LeapioraidEventIrCfgEle_t *element;
- u8 element_type;
- int i;
- char *reason_str = NULL, *element_str = NULL;
-
- element =
- (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0];
- pr_info("%s raid config change: (%s), elements(%d)\n",
- ioc->name,
- (le32_to_cpu(event_data->Flags) &
- LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? "foreign" :
- "native", event_data->NumElements);
- for (i = 0; i < event_data->NumElements; i++, element++) {
- switch (element->ReasonCode) {
- case LEAPIORAID_EVENT_IR_CHANGE_RC_ADDED:
- reason_str = "add";
- break;
- case LEAPIORAID_EVENT_IR_CHANGE_RC_REMOVED:
- reason_str = "remove";
- break;
- case LEAPIORAID_EVENT_IR_CHANGE_RC_NO_CHANGE:
- reason_str = "no change";
- break;
- case LEAPIORAID_EVENT_IR_CHANGE_RC_HIDE:
- reason_str = "hide";
- break;
- case LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE:
- reason_str = "unhide";
- break;
- case LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
- reason_str = "volume_created";
- break;
- case LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
- reason_str = "volume_deleted";
- break;
- case LEAPIORAID_EVENT_IR_CHANGE_RC_PD_CREATED:
- reason_str = "pd_created";
- break;
- case LEAPIORAID_EVENT_IR_CHANGE_RC_PD_DELETED:
- reason_str = "pd_deleted";
- break;
- default:
- reason_str = "unknown reason";
- break;
- }
- element_type = le16_to_cpu(element->ElementFlags) &
- LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
- switch (element_type) {
- case LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
- element_str = "volume";
- break;
- case LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
- element_str = "phys disk";
- break;
- case LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
- element_str = "hot spare";
- break;
- default:
- element_str = "unknown element";
- break;
- }
- pr_info(
- "\t(%s:%s), vol handle(0x%04x), pd handle(0x%04x), pd num(0x%02x)\n",
- element_str,
- reason_str, le16_to_cpu(element->VolDevHandle),
- le16_to_cpu(element->PhysDiskDevHandle),
- element->PhysDiskNum);
- }
-}
-
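- /*
-  * leapioraid_scsihost_sas_ir_config_change_event - dispatch each
-  * element of an IR configuration change list to the volume and
-  * phys-disk helpers.  Note the mapping: RC_PD_CREATED hides a disk,
-  * RC_PD_DELETED exposes it, RC_HIDE adds it and RC_UNHIDE deletes it.
-  * Foreign configurations only have their physical disks handled, and
-  * during host recovery only the fast-path enable is performed.
-  */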
-static void
-leapioraid_scsihost_sas_ir_config_change_event(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_fw_event_work *fw_event)
-{
- struct LeapioraidEventIrCfgEle_t *element;
- int i;
- u8 foreign_config;
- struct LeapioraidEventDataIrCfgChangeList_t *event_data
- = fw_event->event_data;
-
- if ((ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK)
- && !ioc->warpdrive_msg)
- leapioraid_scsihost_sas_ir_config_change_event_debug(ioc, event_data);
- foreign_config = (le32_to_cpu(event_data->Flags) &
- LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
- element =
- (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0];
- if (ioc->shost_recovery) {
- for (i = 0; i < event_data->NumElements; i++, element++) {
- if (element->ReasonCode ==
- LEAPIORAID_EVENT_IR_CHANGE_RC_HIDE)
- leapioraid_scsihost_ir_fastpath(ioc,
- le16_to_cpu(element->PhysDiskDevHandle),
- element->PhysDiskNum);
- }
- return;
- }
- for (i = 0; i < event_data->NumElements; i++, element++) {
- switch (element->ReasonCode) {
- case LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
- case LEAPIORAID_EVENT_IR_CHANGE_RC_ADDED:
- if (!foreign_config)
- leapioraid_scsihost_sas_volume_add(ioc, element);
- break;
- case LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
- case LEAPIORAID_EVENT_IR_CHANGE_RC_REMOVED:
- if (!foreign_config)
- leapioraid_scsihost_sas_volume_delete(ioc,
- le16_to_cpu
- (element->VolDevHandle));
- break;
- case LEAPIORAID_EVENT_IR_CHANGE_RC_PD_CREATED:
- leapioraid_scsihost_sas_pd_hide(ioc, element);
- break;
- case LEAPIORAID_EVENT_IR_CHANGE_RC_PD_DELETED:
- leapioraid_scsihost_sas_pd_expose(ioc, element);
- break;
- case LEAPIORAID_EVENT_IR_CHANGE_RC_HIDE:
- leapioraid_scsihost_sas_pd_add(ioc, element);
- break;
- case LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE:
- leapioraid_scsihost_sas_pd_delete(ioc, element);
- break;
- }
- }
-}
-
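- /*
-  * leapioraid_scsihost_sas_ir_volume_event - react to a volume state
-  * change: delete the volume when it goes missing or failed, create
-  * and register it when it first reports online, degraded or optimal.
-  */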
-static void
-leapioraid_scsihost_sas_ir_volume_event(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_fw_event_work *fw_event)
-{
- u64 wwid;
- unsigned long flags;
- struct leapioraid_raid_device *raid_device;
- u16 handle;
- u32 state;
- int rc;
- struct LeapioraidEventDataIrVol_t *event_data
- = fw_event->event_data;
-
- if (ioc->shost_recovery)
- return;
- if (event_data->ReasonCode != LEAPIORAID_EVENT_IR_VOLUME_RC_STATE_CHANGED)
- return;
- handle = le16_to_cpu(event_data->VolDevHandle);
- state = le32_to_cpu(event_data->NewValue);
- if (!ioc->warpdrive_msg)
- dewtprintk(ioc,
- pr_info("%s %s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
- ioc->name,
- __func__, handle,
- le32_to_cpu(event_data->PreviousValue),
- state));
- switch (state) {
- case LEAPIORAID_RAID_VOL_STATE_MISSING:
- case LEAPIORAID_RAID_VOL_STATE_FAILED:
- leapioraid_scsihost_sas_volume_delete(ioc, handle);
- break;
- case LEAPIORAID_RAID_VOL_STATE_ONLINE:
- case LEAPIORAID_RAID_VOL_STATE_DEGRADED:
- case LEAPIORAID_RAID_VOL_STATE_OPTIMAL:
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- raid_device =
- leapioraid_raid_device_find_by_handle(ioc, handle);
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- if (raid_device)
- break;
- leapioraid_config_get_volume_wwid(ioc, handle, &wwid);
- if (!wwid) {
- pr_err(
- "%s failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
- break;
- }
- raid_device = kzalloc(sizeof(struct leapioraid_raid_device),
- GFP_KERNEL);
- if (!raid_device)
- break;
-
- raid_device->id = ioc->sas_id++;
- raid_device->channel = RAID_CHANNEL;
- raid_device->handle = handle;
- raid_device->wwid = wwid;
- leapioraid_scsihost_raid_device_add(ioc, raid_device);
- rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
- raid_device->id, 0);
- if (rc)
- leapioraid_scsihost_raid_device_remove(ioc, raid_device);
- break;
- case LEAPIORAID_RAID_VOL_STATE_INITIALIZING:
- default:
- break;
- }
-}
-
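- /*
-  * leapioraid_scsihost_sas_ir_physical_disk_event - react to a phys
-  * disk state change: when a disk becomes part of a volume (online,
-  * degraded, rebuilding, optimal or hot spare), mark it in
-  * ioc->pd_handles and add it as a hidden device if not yet known.
-  */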
-static void
-leapioraid_scsihost_sas_ir_physical_disk_event(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_fw_event_work *fw_event)
-{
- u16 handle, parent_handle;
- u32 state;
- struct leapioraid_sas_device *sas_device;
- struct LeapioraidCfgRep_t mpi_reply;
- struct LeapioraidSasDevP0_t sas_device_pg0;
- u32 ioc_status;
- struct LeapioraidEventDataIrPhyDisk_t *event_data
- = fw_event->event_data;
- u64 sas_address;
-
- if (ioc->shost_recovery)
- return;
- if (event_data->ReasonCode !=
- LEAPIORAID_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
- return;
- handle = le16_to_cpu(event_data->PhysDiskDevHandle);
- state = le32_to_cpu(event_data->NewValue);
- if (!ioc->warpdrive_msg)
- dewtprintk(ioc,
- pr_info("%s %s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
- ioc->name,
- __func__, handle,
- le32_to_cpu(event_data->PreviousValue),
- state));
- switch (state) {
- case LEAPIORAID_RAID_PD_STATE_ONLINE:
- case LEAPIORAID_RAID_PD_STATE_DEGRADED:
- case LEAPIORAID_RAID_PD_STATE_REBUILDING:
- case LEAPIORAID_RAID_PD_STATE_OPTIMAL:
- case LEAPIORAID_RAID_PD_STATE_HOT_SPARE:
- set_bit(handle, ioc->pd_handles);
- sas_device = leapioraid_get_sdev_by_handle(ioc, handle);
- if (sas_device) {
- leapioraid_sas_device_put(sas_device);
- return;
- }
- if ((leapioraid_config_get_sas_device_pg0(
- ioc, &mpi_reply,
- &sas_device_pg0,
- LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE,
- handle))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
- LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
- parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
- 		if (!leapioraid_scsihost_get_sas_address(ioc, parent_handle,
- 				&sas_address))
- 			leapioraid_transport_update_links(ioc, sas_address,
- 				handle, sas_device_pg0.PhyNum,
- 				LEAPIORAID_SAS_NEG_LINK_RATE_1_5,
- 				leapioraid_get_port_by_id(ioc,
- 					sas_device_pg0.PhysicalPort, 0));
- leapioraid_scsihost_add_device(ioc, handle, 0, 1);
- break;
- case LEAPIORAID_RAID_PD_STATE_OFFLINE:
- case LEAPIORAID_RAID_PD_STATE_NOT_CONFIGURED:
- case LEAPIORAID_RAID_PD_STATE_NOT_COMPATIBLE:
- default:
- break;
- }
-}
-
-static void
-leapioraid_scsihost_sas_ir_operation_status_event_debug(
- struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidEventDataIrOpStatus_t *event_data)
-{
- char *reason_str = NULL;
-
- switch (event_data->RAIDOperation) {
- case LEAPIORAID_EVENT_IR_RAIDOP_RESYNC:
- reason_str = "resync";
- break;
- case LEAPIORAID_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
- reason_str = "online capacity expansion";
- break;
- case LEAPIORAID_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
- reason_str = "consistency check";
- break;
- case LEAPIORAID_EVENT_IR_RAIDOP_BACKGROUND_INIT:
- reason_str = "background init";
- break;
- case LEAPIORAID_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
- reason_str = "make data consistent";
- break;
- }
- if (!reason_str)
- return;
- pr_info(
- "%s raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
- ioc->name, reason_str,
- le16_to_cpu(event_data->VolDevHandle),
- event_data->PercentComplete);
-}
-
-static void
-leapioraid_scsihost_sas_ir_operation_status_event(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_fw_event_work *fw_event)
-{
- struct LeapioraidEventDataIrOpStatus_t *event_data
- = fw_event->event_data;
- 	struct leapioraid_raid_device *raid_device;
- unsigned long flags;
- u16 handle;
-
- if ((ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK)
- && !ioc->warpdrive_msg)
- leapioraid_scsihost_sas_ir_operation_status_event_debug(
- ioc, event_data);
- if (event_data->RAIDOperation == LEAPIORAID_EVENT_IR_RAIDOP_RESYNC) {
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- handle = le16_to_cpu(event_data->VolDevHandle);
- raid_device =
- leapioraid_raid_device_find_by_handle(ioc, handle);
- if (raid_device)
- raid_device->percent_complete =
- event_data->PercentComplete;
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- }
-}
-
-static void
-leapioraid_scsihost_prep_device_scan(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- struct scsi_device *sdev;
-
- shost_for_each_device(sdev, ioc->shost) {
- sas_device_priv_data = sdev->hostdata;
- if (sas_device_priv_data && sas_device_priv_data->sas_target)
- sas_device_priv_data->sas_target->deleted = 1;
- }
-}
-
-static void
-leapioraid_scsihost_update_device_qdepth(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct LEAPIORAID_DEVICE *sas_device_priv_data;
- struct leapioraid_sas_device *sas_device;
- struct scsi_device *sdev;
- u16 qdepth;
-
- pr_info("%s Update Devices with FW Reported QD\n",
- ioc->name);
- shost_for_each_device(sdev, ioc->shost) {
- sas_device_priv_data = sdev->hostdata;
- if (sas_device_priv_data && sas_device_priv_data->sas_target) {
- sas_device = sas_device_priv_data->sas_target->sas_dev;
- if (sas_device &&
- sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET)
- qdepth =
- (sas_device->port_type >
- 1) ? ioc->max_wideport_qd : ioc->max_narrowport_qd;
- else if (sas_device
- && sas_device->device_info &
- LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE)
- qdepth = ioc->max_sata_qd;
- else
- continue;
- leapioraid__scsihost_change_queue_depth(sdev, qdepth);
- }
- }
-}
-
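- /*
-  * leapioraid_scsihost_mark_responding_sas_device - after a host reset,
-  * match a device page 0 entry (SAS address, slot and port) against the
-  * cached sas_device list, mark it responding, refresh its enclosure
-  * data and pick up a changed firmware handle.
-  */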
-static void
-leapioraid_scsihost_mark_responding_sas_device(
- struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidSasDevP0_t *sas_device_pg0)
-{
- struct LEAPIORAID_TARGET *sas_target_priv_data = NULL;
- struct scsi_target *starget;
- struct leapioraid_sas_device *sas_device;
- struct leapioraid_enclosure_node *enclosure_dev = NULL;
- unsigned long flags;
- struct leapioraid_hba_port *port;
-
- port = leapioraid_get_port_by_id(ioc, sas_device_pg0->PhysicalPort, 0);
- if (sas_device_pg0->EnclosureHandle) {
- enclosure_dev =
- leapioraid_scsihost_enclosure_find_by_handle(ioc,
- le16_to_cpu
- (sas_device_pg0->EnclosureHandle));
- 		if (!enclosure_dev)
- 			pr_info(
- 				"%s Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
- 				ioc->name, le16_to_cpu(sas_device_pg0->EnclosureHandle));
- }
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
- if ((sas_device->sas_address ==
- le64_to_cpu(sas_device_pg0->SASAddress))
- && (sas_device->slot == le16_to_cpu(sas_device_pg0->Slot))
- && (sas_device->port == port)) {
- sas_device->responding = 1;
- starget = sas_device->starget;
- if (starget && starget->hostdata) {
- sas_target_priv_data = starget->hostdata;
- sas_target_priv_data->tm_busy = 0;
- sas_target_priv_data->deleted = 0;
- } else
- sas_target_priv_data = NULL;
- if (starget) {
- starget_printk(KERN_INFO, starget,
- "handle(0x%04x), sas_address(0x%016llx), port: %d\n",
- sas_device->handle,
- (unsigned long long)sas_device->sas_address,
- sas_device->port->port_id);
- if (sas_device->enclosure_handle != 0)
- starget_printk(KERN_INFO, starget,
- "enclosure logical id(0x%016llx), slot(%d)\n",
- (unsigned long long)
- sas_device->enclosure_logical_id,
- sas_device->slot);
- }
- if (le16_to_cpu(sas_device_pg0->Flags) &
- LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
- sas_device->enclosure_level =
- sas_device_pg0->EnclosureLevel;
- memcpy(sas_device->connector_name,
- sas_device_pg0->ConnectorName, 4);
- sas_device->connector_name[4] = '\0';
- } else {
- sas_device->enclosure_level = 0;
- sas_device->connector_name[0] = '\0';
- }
- sas_device->enclosure_handle =
- le16_to_cpu(sas_device_pg0->EnclosureHandle);
- sas_device->is_chassis_slot_valid = 0;
- if (enclosure_dev) {
- sas_device->enclosure_logical_id =
- le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
- if (le16_to_cpu(enclosure_dev->pg0.Flags) &
- LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
- sas_device->is_chassis_slot_valid = 1;
- sas_device->chassis_slot =
- enclosure_dev->pg0.ChassisSlot;
- }
- }
- if (sas_device->handle ==
- le16_to_cpu(sas_device_pg0->DevHandle))
- goto out;
- pr_info("\thandle changed from(0x%04x)!!!\n",
- sas_device->handle);
- sas_device->handle =
- le16_to_cpu(sas_device_pg0->DevHandle);
- if (sas_target_priv_data)
- sas_target_priv_data->handle =
- le16_to_cpu(sas_device_pg0->DevHandle);
- goto out;
- }
- }
-out:
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-}
-
-static void
-leapioraid_scsihost_create_enclosure_list_after_reset(
- struct LEAPIORAID_ADAPTER *ioc)
-{
- struct leapioraid_enclosure_node *enclosure_dev;
- struct LeapioraidCfgRep_t mpi_reply;
- u16 enclosure_handle;
- int rc;
-
- leapioraid_free_enclosure_list(ioc);
- enclosure_handle = 0xFFFF;
- do {
- enclosure_dev =
- kzalloc(sizeof(struct leapioraid_enclosure_node), GFP_KERNEL);
- if (!enclosure_dev) {
- pr_err("%s failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
- return;
- }
- rc = leapioraid_config_get_enclosure_pg0(ioc, &mpi_reply,
- &enclosure_dev->pg0,
- LEAPIORAID_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
- enclosure_handle);
- if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
- LEAPIORAID_IOCSTATUS_MASK)) {
- kfree(enclosure_dev);
- return;
- }
- list_add_tail(&enclosure_dev->list, &ioc->enclosure_list);
- enclosure_handle =
- le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
- } while (1);
-}
-
-static void
-leapioraid_scsihost_search_responding_sas_devices(
- struct LEAPIORAID_ADAPTER *ioc)
-{
- struct LeapioraidSasDevP0_t sas_device_pg0;
- struct LeapioraidCfgRep_t mpi_reply;
- u16 ioc_status;
- u16 handle;
- u32 device_info;
-
- pr_info("%s search for end-devices: start\n",
- ioc->name);
- if (list_empty(&ioc->sas_device_list))
- goto out;
- handle = 0xFFFF;
- while (!(leapioraid_config_get_sas_device_pg0(ioc, &mpi_reply,
- &sas_device_pg0,
- LEAPIORAID_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
- handle))) {
- ioc_status =
- le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_info(
- "%s \tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, __func__, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
- break;
- }
- handle = le16_to_cpu(sas_device_pg0.DevHandle);
- device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
- if (!(leapioraid_scsihost_is_sas_end_device(device_info)))
- continue;
- leapioraid_scsihost_mark_responding_sas_device(
- ioc, &sas_device_pg0);
- }
-out:
- pr_info("%s search for end-devices: complete\n",
- ioc->name);
-}
-
-static void
-leapioraid_scsihost_mark_responding_raid_device(
- struct LEAPIORAID_ADAPTER *ioc, u64 wwid, u16 handle)
-{
- struct LEAPIORAID_TARGET *sas_target_priv_data;
- struct scsi_target *starget;
- struct leapioraid_raid_device *raid_device;
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
- if (raid_device->wwid == wwid && raid_device->starget) {
- starget = raid_device->starget;
- if (starget && starget->hostdata) {
- sas_target_priv_data = starget->hostdata;
- sas_target_priv_data->deleted = 0;
- } else
- sas_target_priv_data = NULL;
- raid_device->responding = 1;
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- starget_printk(KERN_INFO, raid_device->starget,
- "handle(0x%04x), wwid(0x%016llx)\n",
- handle,
- (unsigned long long)raid_device->wwid);
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- if (raid_device->handle == handle) {
- spin_unlock_irqrestore(&ioc->raid_device_lock,
- flags);
- return;
- }
- pr_info("\thandle changed from(0x%04x)!!!\n",
- raid_device->handle);
- raid_device->handle = handle;
- if (sas_target_priv_data)
- sas_target_priv_data->handle = handle;
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- return;
- }
- }
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
-}
-
-static void
-leapioraid_scsihost_search_responding_raid_devices(
- struct LEAPIORAID_ADAPTER *ioc)
-{
- struct LeapioraidRaidVolP1_t volume_pg1;
- struct LeapioraidRaidVolP0_t volume_pg0;
- struct LeapioraidRaidPDP0_t pd_pg0;
- struct LeapioraidCfgRep_t mpi_reply;
- u16 ioc_status;
- u16 handle;
- u8 phys_disk_num;
-
- if (!ioc->ir_firmware)
- return;
- pr_info("%s search for raid volumes: start\n",
- ioc->name);
- if (list_empty(&ioc->raid_device_list))
- goto out;
- handle = 0xFFFF;
- while (!(leapioraid_config_get_raid_volume_pg1(ioc, &mpi_reply,
- &volume_pg1,
- LEAPIORAID_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE,
- handle))) {
- ioc_status =
- le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_info("%s \tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, __func__, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
- break;
- }
- handle = le16_to_cpu(volume_pg1.DevHandle);
- if (leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply,
- &volume_pg0,
- LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE,
- handle,
- sizeof
- (struct LeapioraidRaidVolP0_t)))
- continue;
- if (volume_pg0.VolumeState == LEAPIORAID_RAID_VOL_STATE_OPTIMAL ||
- volume_pg0.VolumeState == LEAPIORAID_RAID_VOL_STATE_ONLINE ||
- volume_pg0.VolumeState == LEAPIORAID_RAID_VOL_STATE_DEGRADED)
- leapioraid_scsihost_mark_responding_raid_device(ioc,
- le64_to_cpu
- (volume_pg1.WWID),
- handle);
- }
- phys_disk_num = 0xFF;
- memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
- while (!(leapioraid_config_get_phys_disk_pg0(ioc, &mpi_reply,
- &pd_pg0,
- LEAPIORAID_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
- phys_disk_num))) {
- ioc_status =
- le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_info("%s \tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, __func__, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
- break;
- }
- phys_disk_num = pd_pg0.PhysDiskNum;
- handle = le16_to_cpu(pd_pg0.DevHandle);
- set_bit(handle, ioc->pd_handles);
- }
-out:
- pr_info("%s search for responding raid volumes: complete\n",
- ioc->name);
-}
-
-static void
-leapioraid_scsihost_mark_responding_expander(
- struct LEAPIORAID_ADAPTER *ioc,
- struct LeapioraidExpanderP0_t *expander_pg0)
-{
- struct leapioraid_raid_sas_node *sas_expander;
- unsigned long flags;
- int i;
- u8 port_id = expander_pg0->PhysicalPort;
- struct leapioraid_hba_port *port = leapioraid_get_port_by_id(
- ioc, port_id, 0);
- struct leapioraid_enclosure_node *enclosure_dev = NULL;
- u16 handle = le16_to_cpu(expander_pg0->DevHandle);
- u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
- u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
-
- if (enclosure_handle)
- enclosure_dev =
- leapioraid_scsihost_enclosure_find_by_handle(ioc,
- enclosure_handle);
- spin_lock_irqsave(&ioc->sas_node_lock, flags);
- list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
- if (sas_expander->sas_address != sas_address ||
- (sas_expander->port != port))
- continue;
- sas_expander->responding = 1;
- if (enclosure_dev) {
- sas_expander->enclosure_logical_id =
- le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
- sas_expander->enclosure_handle =
- le16_to_cpu(expander_pg0->EnclosureHandle);
- }
- if (sas_expander->handle == handle)
- goto out;
- pr_info(
- "\texpander(0x%016llx): handle changed from(0x%04x) to (0x%04x)!!!\n",
- (unsigned long long)sas_expander->sas_address,
- sas_expander->handle, handle);
- sas_expander->handle = handle;
- for (i = 0; i < sas_expander->num_phys; i++)
- sas_expander->phy[i].handle = handle;
- goto out;
- }
-out:
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
-}
-
-static void
-leapioraid_scsihost_search_responding_expanders(
- struct LEAPIORAID_ADAPTER *ioc)
-{
- struct LeapioraidExpanderP0_t expander_pg0;
- struct LeapioraidCfgRep_t mpi_reply;
- u16 ioc_status;
- u64 sas_address;
- u16 handle;
- u8 port;
-
- pr_info("%s search for expanders: start\n",
- ioc->name);
- if (list_empty(&ioc->sas_expander_list))
- goto out;
- handle = 0xFFFF;
- 	while (!(leapioraid_config_get_expander_pg0(ioc, &mpi_reply,
- 			&expander_pg0,
- 			LEAPIORAID_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL,
- 			handle))) {
- ioc_status =
- le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_info(
- "%s \tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, __func__, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
- break;
- }
- handle = le16_to_cpu(expander_pg0.DevHandle);
- sas_address = le64_to_cpu(expander_pg0.SASAddress);
- port = expander_pg0.PhysicalPort;
- pr_info(
- "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
- handle,
- (unsigned long long)sas_address,
- ((ioc->multipath_on_hba) ?
- (port) : (LEAPIORAID_MULTIPATH_DISABLED_PORT_ID)));
- leapioraid_scsihost_mark_responding_expander(
- ioc, &expander_pg0);
- }
-out:
- pr_info("%s search for expanders: complete\n",
- ioc->name);
-}
-
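- /*
-  * leapioraid_scsihost_remove_unresponding_devices - after the
-  * post-reset rescan, tear down every end device, volume and expander
-  * that did not report in, and clear the responding flag on those that
-  * did so the next reset starts clean.
-  */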
-static void
-leapioraid_scsihost_remove_unresponding_devices(
- struct LEAPIORAID_ADAPTER *ioc)
-{
- struct leapioraid_sas_device *sas_device, *sas_device_next;
- struct leapioraid_raid_sas_node *sas_expander, *sas_expander_next;
- struct leapioraid_raid_device *raid_device, *raid_device_next;
- struct list_head tmp_list;
- unsigned long flags;
- LIST_HEAD(head);
-
- pr_info("%s removing unresponding devices: start\n",
- ioc->name);
- pr_err("%s removing unresponding devices: sas end-devices\n",
- ioc->name);
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- list_for_each_entry_safe(sas_device, sas_device_next,
- &ioc->sas_device_init_list, list) {
- list_del_init(&sas_device->list);
- leapioraid_sas_device_put(sas_device);
- }
- list_for_each_entry_safe(sas_device, sas_device_next,
- &ioc->sas_device_list, list) {
- if (!sas_device->responding)
- list_move_tail(&sas_device->list, &head);
- else
- sas_device->responding = 0;
- }
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
- leapioraid_scsihost_remove_device(ioc, sas_device);
- list_del_init(&sas_device->list);
- leapioraid_sas_device_put(sas_device);
- }
- if (ioc->ir_firmware) {
- pr_info("%s removing unresponding devices: volumes\n",
- ioc->name);
- list_for_each_entry_safe(raid_device, raid_device_next,
- &ioc->raid_device_list, list) {
- if (!raid_device->responding)
- leapioraid_scsihost_sas_volume_delete(ioc,
- raid_device->handle);
- else
- raid_device->responding = 0;
- }
- }
- pr_err("%s removing unresponding devices: expanders\n",
- ioc->name);
- spin_lock_irqsave(&ioc->sas_node_lock, flags);
- INIT_LIST_HEAD(&tmp_list);
- list_for_each_entry_safe(sas_expander, sas_expander_next,
- &ioc->sas_expander_list, list) {
- if (!sas_expander->responding)
- list_move_tail(&sas_expander->list, &tmp_list);
- else
- sas_expander->responding = 0;
- }
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- list_for_each_entry_safe(
- sas_expander, sas_expander_next, &tmp_list, list) {
- leapioraid_scsihost_expander_node_remove(ioc, sas_expander);
- }
- pr_err("%s removing unresponding devices: complete\n", ioc->name);
- leapioraid_scsihost_ublock_io_all_device(ioc, 0);
-}
-
-static void
-leapioraid_scsihost_refresh_expander_links(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_raid_sas_node *sas_expander, u16 handle)
-{
- struct LeapioraidExpanderP1_t expander_pg1;
- struct LeapioraidCfgRep_t mpi_reply;
- int i;
-
- for (i = 0; i < sas_expander->num_phys; i++) {
- if ((leapioraid_config_get_expander_pg1(ioc, &mpi_reply,
- &expander_pg1, i,
- handle))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
- leapioraid_transport_update_links(ioc,
- sas_expander->sas_address,
- le16_to_cpu(expander_pg1.AttachedDevHandle),
- i,
- expander_pg1.NegotiatedLinkRate >> 4,
- sas_expander->port);
- }
-}
-
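- /*
-  * leapioraid_scsihost_scan_for_devices_after_reset - walk the firmware
-  * config pages (expanders, then IR phys disks and volumes, then SAS
-  * end devices) and add anything the driver does not already track.
-  */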
-static void
-leapioraid_scsihost_scan_for_devices_after_reset(
- struct LEAPIORAID_ADAPTER *ioc)
-{
- struct LeapioraidExpanderP0_t expander_pg0;
- struct LeapioraidSasDevP0_t sas_device_pg0;
- struct LeapioraidRaidVolP1_t *volume_pg1;
- struct LeapioraidRaidVolP0_t *volume_pg0;
- struct LeapioraidRaidPDP0_t pd_pg0;
- struct LeapioraidEventIrCfgEle_t element;
- struct LeapioraidCfgRep_t mpi_reply;
- u8 phys_disk_num, port_id;
- u16 ioc_status;
- u16 handle, parent_handle;
- u64 sas_address;
- struct leapioraid_sas_device *sas_device;
- struct leapioraid_raid_sas_node *expander_device;
- 	struct leapioraid_raid_device *raid_device;
- u8 retry_count;
- unsigned long flags;
-
- volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL);
- if (!volume_pg0)
- return;
-
- volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL);
- if (!volume_pg1) {
- kfree(volume_pg0);
- return;
- }
- pr_info("%s scan devices: start\n", ioc->name);
- leapioraid_scsihost_sas_host_refresh(ioc);
- pr_info("%s \tscan devices: expanders start\n",
- ioc->name);
- handle = 0xFFFF;
- 	while (!(leapioraid_config_get_expander_pg0(ioc, &mpi_reply,
- 			&expander_pg0,
- 			LEAPIORAID_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL,
- 			handle))) {
- ioc_status =
- le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err(
- "%s \tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
- break;
- }
- handle = le16_to_cpu(expander_pg0.DevHandle);
- spin_lock_irqsave(&ioc->sas_node_lock, flags);
- port_id = expander_pg0.PhysicalPort;
- expander_device =
- leapioraid_scsihost_expander_find_by_sas_address(
- ioc,
- le64_to_cpu
- (expander_pg0.SASAddress),
- leapioraid_get_port_by_id
- (ioc,
- port_id,
- 0));
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- if (expander_device)
- leapioraid_scsihost_refresh_expander_links(
- ioc, expander_device, handle);
- else {
- pr_err(
- "%s \tBEFORE adding expander:\n\t\t"
- "handle (0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, handle, (unsigned long long)
- le64_to_cpu(expander_pg0.SASAddress));
- leapioraid_scsihost_expander_add(ioc, handle);
- pr_info(
- "%s \tAFTER adding expander:\n\t\t"
- "handle (0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, handle, (unsigned long long)
- le64_to_cpu(expander_pg0.SASAddress));
- }
- }
- pr_info("%s \tscan devices: expanders complete\n",
- ioc->name);
- if (!ioc->ir_firmware)
- goto skip_to_sas;
- pr_info("%s \tscan devices: phys disk start\n",
- ioc->name);
- phys_disk_num = 0xFF;
- while (!(leapioraid_config_get_phys_disk_pg0(ioc, &mpi_reply,
- &pd_pg0,
- LEAPIORAID_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
- phys_disk_num))) {
- ioc_status =
- le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err(
- "%s \tbreak from phys disk scan:\n\t\t"
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name,
- ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
- break;
- }
- phys_disk_num = pd_pg0.PhysDiskNum;
- handle = le16_to_cpu(pd_pg0.DevHandle);
- sas_device = leapioraid_get_sdev_by_handle(ioc, handle);
- if (sas_device) {
- leapioraid_sas_device_put(sas_device);
- continue;
- }
- if (leapioraid_config_get_sas_device_pg0(ioc, &mpi_reply,
- &sas_device_pg0,
- LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE,
- handle) != 0)
- continue;
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
- LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err(
- "%s \tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
- break;
- }
- parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
- if (!leapioraid_scsihost_get_sas_address(ioc, parent_handle,
- &sas_address)) {
- pr_err(
- "%s \tBEFORE adding phys disk:\n\t\t"
- "handle (0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
- port_id = sas_device_pg0.PhysicalPort;
- leapioraid_transport_update_links(ioc, sas_address,
- handle,
- sas_device_pg0.PhyNum,
- LEAPIORAID_SAS_NEG_LINK_RATE_1_5,
- leapioraid_get_port_by_id
- (ioc, port_id, 0));
- set_bit(handle, ioc->pd_handles);
- retry_count = 0;
- while (leapioraid_scsihost_add_device
- (ioc, handle, retry_count++, 1)) {
- ssleep(1);
- }
- pr_err(
- "%s \tAFTER adding phys disk:\n\t\t"
- "handle (0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
- }
- }
- pr_info("%s \tscan devices: phys disk complete\n",
- ioc->name);
- pr_info("%s \tscan devices: volumes start\n",
- ioc->name);
- handle = 0xFFFF;
- while (!(leapioraid_config_get_raid_volume_pg1(ioc, &mpi_reply,
- volume_pg1,
- LEAPIORAID_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE,
- handle))) {
- ioc_status =
- le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err(
- "%s \tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
- break;
- }
- handle = le16_to_cpu(volume_pg1->DevHandle);
- spin_lock_irqsave(&ioc->raid_device_lock, flags);
- raid_device = leapioraid_scsihost_raid_device_find_by_wwid(
- ioc, le64_to_cpu(volume_pg1->WWID));
- spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- if (raid_device)
- continue;
- if (leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply,
- volume_pg0,
- LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE,
- handle,
- sizeof
- (struct LeapioraidRaidVolP0_t)))
- continue;
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
- LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err(
- "%s \tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
- break;
- }
- if (volume_pg0->VolumeState == LEAPIORAID_RAID_VOL_STATE_OPTIMAL ||
- volume_pg0->VolumeState == LEAPIORAID_RAID_VOL_STATE_ONLINE ||
- volume_pg0->VolumeState ==
- LEAPIORAID_RAID_VOL_STATE_DEGRADED) {
- memset(&element, 0,
- sizeof(struct LeapioraidEventIrCfgEle_t));
- element.ReasonCode = LEAPIORAID_EVENT_IR_CHANGE_RC_ADDED;
- element.VolDevHandle = volume_pg1->DevHandle;
- pr_info("%s \tBEFORE adding volume: handle (0x%04x)\n",
- ioc->name, volume_pg1->DevHandle);
- leapioraid_scsihost_sas_volume_add(ioc, &element);
- pr_info("%s \tAFTER adding volume: handle (0x%04x)\n",
- ioc->name, volume_pg1->DevHandle);
- }
- }
- pr_info("%s \tscan devices: volumes complete\n",
- ioc->name);
-skip_to_sas:
- pr_info("%s \tscan devices: sas end devices start\n",
- ioc->name);
- handle = 0xFFFF;
- while (!(leapioraid_config_get_sas_device_pg0(ioc, &mpi_reply,
- &sas_device_pg0,
- LEAPIORAID_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
- handle))) {
- ioc_status =
- le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err(
- "%s \tbreak from sas end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
- break;
- }
- handle = le16_to_cpu(sas_device_pg0.DevHandle);
- 		if (!leapioraid_scsihost_is_sas_end_device(
- 				le32_to_cpu(sas_device_pg0.DeviceInfo)))
- 			continue;
- port_id = sas_device_pg0.PhysicalPort;
- sas_device = leapioraid_get_sdev_by_addr(ioc,
- le64_to_cpu
- (sas_device_pg0.SASAddress),
- leapioraid_get_port_by_id
- (ioc, port_id, 0));
- if (sas_device) {
- leapioraid_sas_device_put(sas_device);
- continue;
- }
- parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
- 		if (!leapioraid_scsihost_get_sas_address(ioc, parent_handle,
- 				&sas_address)) {
- pr_err(
- "%s \tBEFORE adding sas end device:\n\t\t"
- "handle (0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
- leapioraid_transport_update_links(ioc, sas_address,
- handle,
- sas_device_pg0.PhyNum,
- LEAPIORAID_SAS_NEG_LINK_RATE_1_5,
- leapioraid_get_port_by_id
- (ioc, port_id, 0));
- retry_count = 0;
- while (leapioraid_scsihost_add_device
- (ioc, handle, retry_count++, 0)) {
- ssleep(1);
- }
- pr_err(
- "%s \tAFTER adding sas end device:\n\t\t"
- "handle (0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
- }
- }
- pr_err("%s \tscan devices: sas end devices complete\n", ioc->name);
- kfree(volume_pg0);
- kfree(volume_pg1);
- pr_info("%s scan devices: complete\n", ioc->name);
-}
-
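- /*
-  * leapioraid_scsihost_clear_outstanding_scsi_tm_commands - on reset,
-  * complete or free every pending internal, task management and queued
-  * internal command, clear the device add/remove bookkeeping bitmaps
-  * and flush the firmware event queue and running SCSI commands.
-  */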
-void
-leapioraid_scsihost_clear_outstanding_scsi_tm_commands(
- struct LEAPIORAID_ADAPTER *ioc)
-{
- struct leapioraid_internal_qcmd *scsih_qcmd, *scsih_qcmd_next;
- unsigned long flags;
-
- if (ioc->scsih_cmds.status & LEAPIORAID_CMD_PENDING) {
- ioc->scsih_cmds.status |= LEAPIORAID_CMD_RESET;
- leapioraid_base_free_smid(ioc, ioc->scsih_cmds.smid);
- complete(&ioc->scsih_cmds.done);
- }
- if (ioc->tm_cmds.status & LEAPIORAID_CMD_PENDING) {
- ioc->tm_cmds.status |= LEAPIORAID_CMD_RESET;
- leapioraid_base_free_smid(ioc, ioc->tm_cmds.smid);
- complete(&ioc->tm_cmds.done);
- }
- spin_lock_irqsave(&ioc->scsih_q_internal_lock, flags);
- list_for_each_entry_safe(scsih_qcmd, scsih_qcmd_next,
- &ioc->scsih_q_intenal_cmds, list) {
- scsih_qcmd->status |= LEAPIORAID_CMD_RESET;
- leapioraid_base_free_smid(ioc, scsih_qcmd->smid);
- }
- spin_unlock_irqrestore(&ioc->scsih_q_internal_lock, flags);
- memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
- memset(ioc->device_remove_in_progress, 0,
- ioc->device_remove_in_progress_sz);
- memset(ioc->tm_tr_retry, 0, ioc->tm_tr_retry_sz);
- leapioraid_scsihost_fw_event_cleanup_queue(ioc);
- leapioraid_scsihost_flush_running_cmds(ioc);
-}
-
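-/*
- * leapioraid_scsihost_reset_handler - callback for the three IOC reset
- * phases.  After the reset, outstanding scsi/tm commands are cleared;
- * once the reset is done, ports/vphys are refreshed and a search for
- * responding devices is kicked off (unless discovery is disabled and
- * the HBA has no phys).
- */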
-void
-leapioraid_scsihost_reset_handler(struct LEAPIORAID_ADAPTER *ioc,
- int reset_phase)
-{
- switch (reset_phase) {
- case LEAPIORAID_IOC_PRE_RESET_PHASE:
- dtmprintk(ioc, pr_info(
- "%s %s: LEAPIORAID_IOC_PRE_RESET_PHASE\n",
- ioc->name, __func__));
- break;
- case LEAPIORAID_IOC_AFTER_RESET_PHASE:
- dtmprintk(ioc, pr_info(
- "%s %s: LEAPIORAID_IOC_AFTER_RESET_PHASE\n",
- ioc->name, __func__));
- leapioraid_scsihost_clear_outstanding_scsi_tm_commands(ioc);
- break;
- case LEAPIORAID_IOC_DONE_RESET_PHASE:
- dtmprintk(ioc, pr_info(
- "%s %s: LEAPIORAID_IOC_DONE_RESET_PHASE\n",
- ioc->name, __func__));
- if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) {
- if (ioc->multipath_on_hba) {
- leapioraid_scsihost_sas_port_refresh(ioc);
- leapioraid_scsihost_update_vphys_after_reset(ioc);
- }
- leapioraid_scsihost_prep_device_scan(ioc);
- leapioraid_scsihost_create_enclosure_list_after_reset(ioc);
- leapioraid_scsihost_search_responding_sas_devices(ioc);
- leapioraid_scsihost_search_responding_raid_devices(ioc);
- leapioraid_scsihost_search_responding_expanders(ioc);
- leapioraid_scsihost_error_recovery_delete_devices(ioc);
- }
- break;
- }
-}
-
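-/*
- * leapioraid_fw_work - process one dequeued firmware event in the worker
- * thread.  Topology-change events that cannot complete yet are re-queued
- * with a 1000 ms delay; all other events are handled synchronously here.
- */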
-static void
-leapioraid_fw_work(struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_fw_event_work *fw_event)
-{
- ioc->current_event = fw_event;
- leapioraid_scsihost_fw_event_del_from_list(ioc, fw_event);
- if (ioc->remove_host || ioc->pci_error_recovery) {
- leapioraid_fw_event_work_put(fw_event);
- ioc->current_event = NULL;
- return;
- }
- switch (fw_event->event) {
- case LEAPIORAID_REMOVE_UNRESPONDING_DEVICES:
- while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery) {
- if (ioc->remove_host || ioc->fw_events_cleanup)
- goto out;
- ssleep(1);
- }
- leapioraid_scsihost_remove_unresponding_devices(ioc);
- leapioraid_scsihost_del_dirty_vphy(ioc);
- leapioraid_scsihost_del_dirty_port_entries(ioc);
- leapioraid_scsihost_update_device_qdepth(ioc);
- leapioraid_scsihost_scan_for_devices_after_reset(ioc);
- if (ioc->is_driver_loading)
- leapioraid_scsihost_complete_devices_scanning(ioc);
- break;
- case LEAPIORAID_PORT_ENABLE_COMPLETE:
- ioc->start_scan = 0;
- dewtprintk(ioc, pr_info(
- "%s port enable: complete from worker thread\n",
- ioc->name));
- break;
- case LEAPIORAID_TURN_ON_PFA_LED:
- leapioraid_scsihost_turn_on_pfa_led(ioc, fw_event->device_handle);
- break;
- case LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
- if (leapioraid_scsihost_sas_topology_change_event(ioc, fw_event)) {
- leapioraid_scsihost_fw_event_requeue(ioc, fw_event, 1000);
- ioc->current_event = NULL;
- return;
- }
- break;
- case LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE:
- if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK)
- leapioraid_scsihost_sas_device_status_change_event_debug(
- ioc,
- (struct LeapioraidEventDataSasDeviceStatusChange_t *)
- fw_event->event_data);
- break;
- case LEAPIORAID_EVENT_SAS_DISCOVERY:
- leapioraid_scsihost_sas_discovery_event(
- ioc, fw_event);
- break;
- case LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
- leapioraid_scsihost_sas_device_discovery_error_event(
- ioc, fw_event);
- break;
- case LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE:
- leapioraid_scsihost_sas_broadcast_primitive_event(
- ioc, fw_event);
- break;
- case LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
- leapioraid_scsihost_sas_enclosure_dev_status_change_event(
- ioc, fw_event);
- break;
- case LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST:
- leapioraid_scsihost_sas_ir_config_change_event(
- ioc, fw_event);
- break;
- case LEAPIORAID_EVENT_IR_VOLUME:
- leapioraid_scsihost_sas_ir_volume_event(
- ioc, fw_event);
- break;
- case LEAPIORAID_EVENT_IR_PHYSICAL_DISK:
- leapioraid_scsihost_sas_ir_physical_disk_event(
- ioc, fw_event);
- break;
- case LEAPIORAID_EVENT_IR_OPERATION_STATUS:
- leapioraid_scsihost_sas_ir_operation_status_event(
- ioc, fw_event);
- break;
- default:
- break;
- }
-out:
- leapioraid_fw_event_work_put(fw_event);
- ioc->current_event = NULL;
-}
-
-static void
-leapioraid_firmware_event_work(struct work_struct *work)
-{
- struct leapioraid_fw_event_work *fw_event = container_of(work,
- struct leapioraid_fw_event_work,
- work);
-
- leapioraid_fw_work(fw_event->ioc, fw_event);
-}
-
-static void
-leapioraid_firmware_event_work_delayed(struct work_struct *work)
-{
- struct leapioraid_fw_event_work *fw_event = container_of(work,
- struct leapioraid_fw_event_work,
- delayed_work.work);
-
- leapioraid_fw_work(fw_event->ioc, fw_event);
-}
-
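-/*
- * leapioraid_scsihost_event_callback - interrupt-time event notification
- * handler.  Events of interest are copied into a leapioraid_fw_event_work
- * item (GFP_ATOMIC) and queued to the firmware-event workqueue; the rest
- * are dropped.  Always returns 1 so the caller frees the reply frame.
- */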
-u8
-leapioraid_scsihost_event_callback(struct LEAPIORAID_ADAPTER *ioc,
- u8 msix_index, u32 reply)
-{
- struct leapioraid_fw_event_work *fw_event;
- struct LeapioraidEventNotificationRep_t *mpi_reply;
- u16 event;
- u16 sz;
-
- if (ioc->pci_error_recovery)
- return 1;
-
- mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply);
- if (unlikely(!mpi_reply)) {
- pr_err("%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
- return 1;
- }
- event = le16_to_cpu(mpi_reply->Event);
- switch (event) {
- case LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE:
- {
- struct LeapioraidEventDataSasBroadcastPrimitive_t *baen_data =
- (struct LeapioraidEventDataSasBroadcastPrimitive_t *)
- mpi_reply->EventData;
- if (baen_data->Primitive !=
- LEAPIORAID_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
- return 1;
- if (ioc->broadcast_aen_busy) {
- ioc->broadcast_aen_pending++;
- return 1;
- }
- ioc->broadcast_aen_busy = 1;
- break;
- }
- case LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
- leapioraid_scsihost_check_topo_delete_events(
- ioc,
- (struct LeapioraidEventDataSasTopoChangeList_t *)
- mpi_reply->EventData);
- if (ioc->shost_recovery)
- return 1;
- break;
- case LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST:
- leapioraid_scsihost_check_ir_config_unhide_events(
- ioc,
- (struct LeapioraidEventDataIrCfgChangeList_t *)
- mpi_reply->EventData);
- break;
- case LEAPIORAID_EVENT_IR_VOLUME:
- leapioraid_scsihost_check_volume_delete_events(
- ioc,
- (struct LeapioraidEventDataIrVol_t *)
- mpi_reply->EventData);
- break;
- case LEAPIORAID_EVENT_LOG_ENTRY_ADDED:
- fallthrough;
- case LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE:
- leapioraid_scsihost_sas_device_status_change_event(
- ioc,
- (struct LeapioraidEventDataSasDeviceStatusChange_t *)
- mpi_reply->EventData);
- break;
- case LEAPIORAID_EVENT_IR_OPERATION_STATUS:
- case LEAPIORAID_EVENT_SAS_DISCOVERY:
- case LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
- case LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
- case LEAPIORAID_EVENT_IR_PHYSICAL_DISK:
- break;
- default:
- return 1;
- }
- fw_event = leapioraid_alloc_fw_event_work(0);
- if (!fw_event) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return 1;
- }
- sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
- fw_event->event_data = kzalloc(sz, GFP_ATOMIC);
- if (!fw_event->event_data) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- leapioraid_fw_event_work_put(fw_event);
- return 1;
- }
- if (event == LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST) {
- struct LeapioraidEventDataSasTopoChangeList_t *topo_event_data =
- (struct LeapioraidEventDataSasTopoChangeList_t *)
- mpi_reply->EventData;
- fw_event->retries = kzalloc(topo_event_data->NumEntries,
- GFP_ATOMIC);
- if (!fw_event->retries) {
- kfree(fw_event->event_data);
- leapioraid_fw_event_work_put(fw_event);
- return 1;
- }
- }
- memcpy(fw_event->event_data, mpi_reply->EventData, sz);
- fw_event->ioc = ioc;
- fw_event->VF_ID = mpi_reply->VF_ID;
- fw_event->VP_ID = mpi_reply->VP_ID;
- fw_event->event = event;
- leapioraid_scsihost_fw_event_add(ioc, fw_event);
- leapioraid_fw_event_work_put(fw_event);
- return 1;
-}
-
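-/*
- * leapioraid_scsihost_expander_node_remove - remove every end device and
- * child expander reachable through this expander, delete its transport
- * port, then unlink the node and free it.
- */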
-static void
-leapioraid_scsihost_expander_node_remove(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_raid_sas_node *sas_expander)
-{
- struct leapioraid_sas_port *leapioraid_port, *next;
- unsigned long flags;
- int port_id;
-
- list_for_each_entry_safe(leapioraid_port, next,
- &sas_expander->sas_port_list, port_list) {
- if (ioc->shost_recovery)
- return;
- if (leapioraid_port->remote_identify.device_type ==
- SAS_END_DEVICE)
- leapioraid_device_remove_by_sas_address(ioc,
- leapioraid_port->remote_identify.sas_address,
- leapioraid_port->hba_port);
- else if (leapioraid_port->remote_identify.device_type ==
- SAS_EDGE_EXPANDER_DEVICE
- || leapioraid_port->remote_identify.device_type ==
- SAS_FANOUT_EXPANDER_DEVICE)
- leapioraid_expander_remove(ioc,
- leapioraid_port->remote_identify.sas_address,
- leapioraid_port->hba_port);
- }
- port_id = sas_expander->port->port_id;
- leapioraid_transport_port_remove(ioc, sas_expander->sas_address,
- sas_expander->sas_address_parent,
- sas_expander->port);
- pr_info(
- "%s expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
- ioc->name,
- sas_expander->handle,
- (unsigned long long)sas_expander->sas_address,
- port_id);
- spin_lock_irqsave(&ioc->sas_node_lock, flags);
- list_del(&sas_expander->list);
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- kfree(sas_expander->phy);
- kfree(sas_expander);
-}
-
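-/*
- * leapioraid_scsihost_ir_shutdown - issue RAID action 0x20 (IR shutdown)
- * so the firmware flushes cached volume data before driver removal or
- * system shutdown; waits up to 10 seconds for the reply.
- */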
-static void
-leapioraid_scsihost_ir_shutdown(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct LeapioraidRaidActionReq_t *mpi_request;
- struct LeapioraidRaidActionRep_t *mpi_reply;
- u16 smid;
-
- if (!ioc->ir_firmware)
- return;
-
- if (list_empty(&ioc->raid_device_list))
- return;
- if (leapioraid_base_pci_device_is_unplugged(ioc))
- return;
- mutex_lock(&ioc->scsih_cmds.mutex);
- if (ioc->scsih_cmds.status != LEAPIORAID_CMD_NOT_USED) {
- pr_err("%s %s: scsih_cmd in use\n",
- ioc->name, __func__);
- goto out;
- }
- ioc->scsih_cmds.status = LEAPIORAID_CMD_PENDING;
- smid = leapioraid_base_get_smid(ioc, ioc->scsih_cb_idx);
- if (!smid) {
- pr_err("%s %s: failed obtaining a smid\n",
- ioc->name, __func__);
- ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED;
- goto out;
- }
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- ioc->scsih_cmds.smid = smid;
- memset(mpi_request, 0, sizeof(struct LeapioraidRaidActionReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_RAID_ACTION;
- mpi_request->Action = 0x20;
- if (!ioc->warpdrive_msg)
- pr_info("%s IR shutdown (sending)\n",
- ioc->name);
- init_completion(&ioc->scsih_cmds.done);
- ioc->put_smid_default(ioc, smid);
- wait_for_completion_timeout(&ioc->scsih_cmds.done, 10 * HZ);
- if (!(ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
- pr_err("%s %s: timeout\n",
- ioc->name, __func__);
- goto out;
- }
- if (ioc->scsih_cmds.status & LEAPIORAID_CMD_REPLY_VALID) {
- mpi_reply = ioc->scsih_cmds.reply;
- if (!ioc->warpdrive_msg)
- pr_info(
- "%s IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo));
- }
-out:
- ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED;
- mutex_unlock(&ioc->scsih_cmds.mutex);
-}
-
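-/*
- * leapioraid_scsihost_get_shost_and_ioc - resolve the Scsi_Host and the
- * adapter private data from a pci_dev; returns -ENXIO if either is
- * missing.
- */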
-static int
-leapioraid_scsihost_get_shost_and_ioc(struct pci_dev *pdev,
- struct Scsi_Host **shost,
- struct LEAPIORAID_ADAPTER **ioc)
-{
- *shost = pci_get_drvdata(pdev);
- if (*shost == NULL) {
- dev_err(&pdev->dev, "pdev's driver data is null\n");
- return -ENXIO;
- }
- *ioc = leapioraid_shost_private(*shost);
- if (*ioc == NULL) {
- dev_err(&pdev->dev, "shost's private data is null\n");
- return -ENXIO;
- }
- return 0;
-}
-
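-/*
- * leapioraid_scsihost_remove - PCI .remove callback.  Waits for driver
- * loading to finish, drains firmware events and the event workqueue,
- * issues an IR shutdown, tears down RAID devices, SAS/expander ports and
- * vphys, then detaches the base driver and drops the host reference.
- */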
-static void
-leapioraid_scsihost_remove(struct pci_dev *pdev)
-{
- struct Scsi_Host *shost = NULL;
- struct LEAPIORAID_ADAPTER *ioc = NULL;
- struct leapioraid_sas_port *leapioraid_port, *next_port;
- struct leapioraid_raid_device *raid_device, *next;
- struct LEAPIORAID_TARGET *sas_target_priv_data;
- struct workqueue_struct *wq;
- unsigned long flags;
- struct leapioraid_hba_port *port, *port_next;
- struct leapioraid_virtual_phy *vphy, *vphy_next;
- struct LeapioraidCfgRep_t mpi_reply;
-
- if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) {
- dev_err(&pdev->dev, "unable to remove device\n");
- return;
- }
-
- while (ioc->is_driver_loading)
- ssleep(1);
-
- ioc->remove_host = 1;
- leapioraid_wait_for_commands_to_complete(ioc);
- spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags);
- if (leapioraid_base_pci_device_is_unplugged(ioc)) {
- leapioraid_base_pause_mq_polling(ioc);
- leapioraid_scsihost_flush_running_cmds(ioc);
- }
- leapioraid_scsihost_fw_event_cleanup_queue(ioc);
- spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags);
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- wq = ioc->firmware_event_thread;
- ioc->firmware_event_thread = NULL;
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
- if (wq)
- destroy_workqueue(wq);
- leapioraid_config_set_ioc_pg1(ioc, &mpi_reply,
- &ioc->ioc_pg1_copy);
- leapioraid_scsihost_ir_shutdown(ioc);
- sas_remove_host(shost);
- scsi_remove_host(shost);
- list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
- list) {
- if (raid_device->starget) {
- sas_target_priv_data = raid_device->starget->hostdata;
- sas_target_priv_data->deleted = 1;
- scsi_remove_target(&raid_device->starget->dev);
- }
- pr_info("%s removing handle(0x%04x), wwid(0x%016llx)\n",
- ioc->name, raid_device->handle,
- (unsigned long long)raid_device->wwid);
- leapioraid_scsihost_raid_device_remove(ioc, raid_device);
- }
- list_for_each_entry_safe(leapioraid_port, next_port,
- &ioc->sas_hba.sas_port_list, port_list) {
- if (leapioraid_port->remote_identify.device_type ==
- SAS_END_DEVICE)
- leapioraid_device_remove_by_sas_address(ioc,
- leapioraid_port->remote_identify.sas_address,
- leapioraid_port->hba_port);
- else if (leapioraid_port->remote_identify.device_type ==
- SAS_EDGE_EXPANDER_DEVICE
- || leapioraid_port->remote_identify.device_type ==
- SAS_FANOUT_EXPANDER_DEVICE)
- leapioraid_expander_remove(ioc,
- leapioraid_port->remote_identify.sas_address,
- leapioraid_port->hba_port);
- }
- list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) {
- if (port->vphys_mask) {
- list_for_each_entry_safe(vphy, vphy_next,
- &port->vphys_list, list) {
- list_del(&vphy->list);
- kfree(vphy);
- }
- }
- list_del(&port->list);
- kfree(port);
- }
- if (ioc->sas_hba.num_phys) {
- kfree(ioc->sas_hba.phy);
- ioc->sas_hba.phy = NULL;
- ioc->sas_hba.num_phys = 0;
- }
- leapioraid_base_detach(ioc);
- spin_lock(&leapioraid_gioc_lock);
- list_del(&ioc->list);
- spin_unlock(&leapioraid_gioc_lock);
- scsi_host_put(shost);
-}
-
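-/*
- * leapioraid_scsihost_shutdown - PCI .shutdown callback.  Drains events,
- * issues an IR shutdown, masks interrupts and brings the IOC back to the
- * ready state with a soft reset before the system goes down.
- */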
-static void
-leapioraid_scsihost_shutdown(struct pci_dev *pdev)
-{
- struct Scsi_Host *shost = NULL;
- struct LEAPIORAID_ADAPTER *ioc = NULL;
- struct workqueue_struct *wq;
- unsigned long flags;
- struct LeapioraidCfgRep_t mpi_reply;
-
- if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) {
- dev_err(&pdev->dev, "unable to shutdown device\n");
- return;
- }
- ioc->remove_host = 1;
- leapioraid_wait_for_commands_to_complete(ioc);
- leapioraid_scsihost_fw_event_cleanup_queue(ioc);
- spin_lock_irqsave(&ioc->fw_event_lock, flags);
- wq = ioc->firmware_event_thread;
- ioc->firmware_event_thread = NULL;
- spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
- if (wq)
- destroy_workqueue(wq);
- leapioraid_config_set_ioc_pg1(ioc, &mpi_reply,
- &ioc->ioc_pg1_copy);
- leapioraid_scsihost_ir_shutdown(ioc);
- leapioraid_base_mask_interrupts(ioc);
- ioc->shost_recovery = 1;
- leapioraid_base_make_ioc_ready(ioc, SOFT_RESET);
- ioc->shost_recovery = 0;
- leapioraid_base_free_irq(ioc);
- leapioraid_base_disable_msix(ioc);
-}
-
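-/*
- * leapioraid_scsihost_probe_boot_devices - register the requested,
- * alternate or current boot device first so it receives the lowest
- * device id; handles both RAID volumes and SAS end devices.
- */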
-static void
-leapioraid_scsihost_probe_boot_devices(struct LEAPIORAID_ADAPTER *ioc)
-{
- u32 channel;
- void *device;
- struct leapioraid_sas_device *sas_device;
- struct leapioraid_raid_device *raid_device;
- u16 handle;
- u64 sas_address_parent;
- u64 sas_address;
- unsigned long flags;
- int rc;
- struct leapioraid_hba_port *port;
- u8 protection_mask;
-
- if (!ioc->bios_pg3.BiosVersion)
- return;
-
- device = NULL;
- if (ioc->req_boot_device.device) {
- device = ioc->req_boot_device.device;
- channel = ioc->req_boot_device.channel;
- } else if (ioc->req_alt_boot_device.device) {
- device = ioc->req_alt_boot_device.device;
- channel = ioc->req_alt_boot_device.channel;
- } else if (ioc->current_boot_device.device) {
- device = ioc->current_boot_device.device;
- channel = ioc->current_boot_device.channel;
- }
- if (!device)
- return;
- if (channel == RAID_CHANNEL) {
- raid_device = device;
- if (raid_device->starget)
- return;
- if (!ioc->disable_eedp_support) {
- protection_mask = scsi_host_get_prot(ioc->shost);
- if (protection_mask & SHOST_DIX_TYPE0_PROTECTION) {
- scsi_host_set_prot(ioc->shost,
- protection_mask & 0x77);
- pr_err(
- "%s: Disabling DIX0 because it is unsupported!\n",
- ioc->name);
- }
- }
- rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
- raid_device->id, 0);
- if (rc)
- leapioraid_scsihost_raid_device_remove(ioc, raid_device);
- } else {
- sas_device = device;
- if (sas_device->starget)
- return;
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- handle = sas_device->handle;
- sas_address_parent = sas_device->sas_address_parent;
- sas_address = sas_device->sas_address;
- port = sas_device->port;
- list_move_tail(&sas_device->list, &ioc->sas_device_list);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
- if (!port)
- return;
-
- if (ioc->hide_drives)
- return;
-
- if (!leapioraid_transport_port_add(ioc, handle,
- sas_address_parent, port)) {
- leapioraid_scsihost_sas_device_remove(ioc, sas_device);
- } else if (!sas_device->starget) {
- if (!ioc->is_driver_loading) {
- leapioraid_transport_port_remove(ioc,
- sas_address,
- sas_address_parent,
- port);
- leapioraid_scsihost_sas_device_remove(ioc, sas_device);
- }
- }
- }
-}
-
-static void
-leapioraid_scsihost_probe_raid(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct leapioraid_raid_device *raid_device, *raid_next;
- int rc;
-
- list_for_each_entry_safe(raid_device, raid_next,
- &ioc->raid_device_list, list) {
- if (raid_device->starget)
- continue;
- rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
- raid_device->id, 0);
- if (rc)
- leapioraid_scsihost_raid_device_remove(ioc, raid_device);
- }
-}
-
-static
-struct leapioraid_sas_device *leapioraid_get_next_sas_device(
- struct LEAPIORAID_ADAPTER *ioc)
-{
- struct leapioraid_sas_device *sas_device = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- if (!list_empty(&ioc->sas_device_init_list)) {
- sas_device = list_first_entry(&ioc->sas_device_init_list,
- struct leapioraid_sas_device, list);
- leapioraid_sas_device_get(sas_device);
- }
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return sas_device;
-}
-
-static void
-leapioraid_sas_device_make_active(struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_sas_device *sas_device)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- if (!list_empty(&sas_device->list)) {
- list_del_init(&sas_device->list);
- leapioraid_sas_device_put(sas_device);
- }
- leapioraid_sas_device_get(sas_device);
- list_add_tail(&sas_device->list, &ioc->sas_device_list);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-}
-
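-/*
- * leapioraid_scsihost_probe_sas - drain the sas_device_init_list, adding
- * a transport port for each device (skipped when ioc->hide_drives is
- * set) and moving it onto the active sas_device_list.
- */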
-static void
-leapioraid_scsihost_probe_sas(struct LEAPIORAID_ADAPTER *ioc)
-{
- struct leapioraid_sas_device *sas_device;
-
- while ((sas_device = leapioraid_get_next_sas_device(ioc))) {
- if (ioc->hide_drives) {
- leapioraid_sas_device_make_active(ioc, sas_device);
- leapioraid_sas_device_put(sas_device);
- continue;
- }
- if (!leapioraid_transport_port_add(ioc, sas_device->handle,
- sas_device->sas_address_parent,
- sas_device->port)) {
- leapioraid_scsihost_sas_device_remove(ioc, sas_device);
- leapioraid_sas_device_put(sas_device);
- continue;
- } else if (!sas_device->starget) {
- if (!ioc->is_driver_loading) {
- leapioraid_transport_port_remove(ioc,
- sas_device->sas_address,
- sas_device->sas_address_parent,
- sas_device->port);
- leapioraid_scsihost_sas_device_remove(ioc, sas_device);
- leapioraid_sas_device_put(sas_device);
- continue;
- }
- }
- leapioraid_sas_device_make_active(ioc, sas_device);
- leapioraid_sas_device_put(sas_device);
- }
-}
-
-static void
-leapioraid_scsihost_probe_devices(struct LEAPIORAID_ADAPTER *ioc)
-{
- u16 volume_mapping_flags;
-
- if (!(ioc->facts.ProtocolFlags
- & LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
- return;
- leapioraid_scsihost_probe_boot_devices(ioc);
-
- if (ioc->ir_firmware) {
- volume_mapping_flags =
- le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
- LEAPIORAID_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
- if (volume_mapping_flags ==
- LEAPIORAID_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
- leapioraid_scsihost_probe_raid(ioc);
- leapioraid_scsihost_probe_sas(ioc);
- } else {
- leapioraid_scsihost_probe_sas(ioc);
- leapioraid_scsihost_probe_raid(ioc);
- }
- } else {
- leapioraid_scsihost_probe_sas(ioc);
- }
-}
-
-static void
-leapioraid_scsihost_scan_start(struct Scsi_Host *shost)
-{
- struct LEAPIORAID_ADAPTER *ioc = shost_priv(shost);
- int rc;
-
- if (disable_discovery > 0)
- return;
- ioc->start_scan = 1;
- rc = leapioraid_port_enable(ioc);
- if (rc != 0)
- pr_info("%s port enable: FAILED\n",
- ioc->name);
-}
-
-void
-leapioraid_scsihost_complete_devices_scanning(struct LEAPIORAID_ADAPTER *ioc)
-{
- if (ioc->wait_for_discovery_to_complete) {
- ioc->wait_for_discovery_to_complete = 0;
- leapioraid_scsihost_probe_devices(ioc);
- }
- leapioraid_base_start_watchdog(ioc);
- ioc->is_driver_loading = 0;
-}
-
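-/*
- * leapioraid_scsihost_scan_finished - poll routine for async scanning:
- * returns 1 once port enable has succeeded, failed or timed out (300 s),
- * escalating to a hard reset when the IOC is found in fault or coredump
- * state.
- */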
-static int
-leapioraid_scsihost_scan_finished(
- struct Scsi_Host *shost, unsigned long time)
-{
- struct LEAPIORAID_ADAPTER *ioc = shost_priv(shost);
- u32 ioc_state;
- int issue_hard_reset = 0;
-
- if (disable_discovery > 0) {
- ioc->is_driver_loading = 0;
- ioc->wait_for_discovery_to_complete = 0;
- goto out;
- }
- if (time >= (300 * HZ)) {
- ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED;
- pr_info("%s port enable: FAILED with timeout (timeout=300s)\n",
- ioc->name);
- ioc->is_driver_loading = 0;
- goto out;
- }
- if (ioc->start_scan) {
- ioc_state = leapioraid_base_get_iocstate(ioc, 0);
- if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) ==
- LEAPIORAID_IOC_STATE_FAULT) {
- leapioraid_print_fault_code(ioc,
- ioc_state &
- LEAPIORAID_DOORBELL_DATA_MASK);
- issue_hard_reset = 1;
- goto out;
- } else if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) ==
- LEAPIORAID_IOC_STATE_COREDUMP) {
- leapioraid_base_coredump_info(ioc,
- ioc_state &
- LEAPIORAID_DOORBELL_DATA_MASK);
- leapioraid_base_wait_for_coredump_completion(ioc,
- __func__);
- issue_hard_reset = 1;
- goto out;
- }
- return 0;
- }
- if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_RESET) {
- pr_err("%s port enable: aborted due to diag reset\n",
- ioc->name);
- ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED;
- goto out;
- }
- if (ioc->start_scan_failed) {
- pr_info("%s port enable: FAILED with (ioc_status=0x%08x)\n",
- ioc->name, ioc->start_scan_failed);
- ioc->is_driver_loading = 0;
- ioc->wait_for_discovery_to_complete = 0;
- ioc->remove_host = 1;
- goto out;
- }
- pr_info("%s port enable: SUCCESS\n", ioc->name);
- ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED;
- leapioraid_scsihost_complete_devices_scanning(ioc);
-out:
- if (issue_hard_reset) {
- ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED;
- if (leapioraid_base_hard_reset_handler(ioc, SOFT_RESET))
- ioc->is_driver_loading = 0;
- }
- return 1;
-}
-
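-/*
- * SCSIH_MAP_QUEUE is presumably a macro (defined elsewhere) expanding to
- * the signature of leapioraid_scsihost_map_queues referenced by the host
- * template below.  The body splits the default and poll hctx types
- * across the MSI-X and io-poll reply queues.
- */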
-SCSIH_MAP_QUEUE(struct Scsi_Host *shost)
-{
- struct LEAPIORAID_ADAPTER *ioc =
- (struct LEAPIORAID_ADAPTER *)shost->hostdata;
- struct blk_mq_queue_map *map;
- int i, qoff, offset;
- int nr_msix_vectors = ioc->iopoll_q_start_index;
- int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;
-
- if (shost->nr_hw_queues == 1)
- return;
- for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
- map = &shost->tag_set.map[i];
- map->nr_queues = 0;
- offset = 0;
- if (i == HCTX_TYPE_DEFAULT) {
- map->nr_queues =
- nr_msix_vectors - ioc->high_iops_queues;
- offset = ioc->high_iops_queues;
- } else if (i == HCTX_TYPE_POLL)
- map->nr_queues = iopoll_q_count;
- if (!map->nr_queues)
- BUG_ON(i == HCTX_TYPE_DEFAULT);
- map->queue_offset = qoff;
- if (i != HCTX_TYPE_POLL)
- blk_mq_pci_map_queues(map, ioc->pdev, offset);
- else
- blk_mq_map_queues(map);
- qoff += map->nr_queues;
- }
-}
-
-static struct scsi_host_template leapioraid_driver_template = {
- .module = THIS_MODULE,
- .name = "LEAPIO RAID Host",
- .proc_name = LEAPIORAID_DRIVER_NAME,
- .queuecommand = leapioraid_scsihost_qcmd,
- .target_alloc = leapioraid_scsihost_target_alloc,
- .slave_alloc = leapioraid_scsihost_slave_alloc,
- .slave_configure = leapioraid_scsihost_slave_configure,
- .target_destroy = leapioraid_scsihost_target_destroy,
- .slave_destroy = leapioraid_scsihost_slave_destroy,
- .scan_finished = leapioraid_scsihost_scan_finished,
- .scan_start = leapioraid_scsihost_scan_start,
- .change_queue_depth = leapioraid_scsihost_change_queue_depth,
- .eh_abort_handler = leapioraid_scsihost_abort,
- .eh_device_reset_handler = leapioraid_scsihost_dev_reset,
- .eh_target_reset_handler = leapioraid_scsihost_target_reset,
- .eh_host_reset_handler = leapioraid_scsihost_host_reset,
- .bios_param = leapioraid_scsihost_bios_param,
- .can_queue = 1,
- .this_id = -1,
- .sg_tablesize = LEAPIORAID_SG_DEPTH,
- .max_sectors = 128,
- .max_segment_size = 0xffffffff,
- .cmd_per_lun = 128,
- .shost_groups = leapioraid_host_groups,
- .sdev_groups = leapioraid_dev_groups,
- .track_queue_depth = 1,
- .cmd_size = sizeof(struct leapioraid_scsiio_tracker),
- .map_queues = leapioraid_scsihost_map_queues,
- .mq_poll = leapioraid_blk_mq_poll,
-};
-
-static struct raid_function_template leapioraid_raid_functions = {
- .cookie = &leapioraid_driver_template,
- .is_raid = leapioraid_scsihost_is_raid,
- .get_resync = leapioraid_scsihost_get_resync,
- .get_state = leapioraid_scsihost_get_state,
-};
-
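-/*
- * leapioraid_scsihost_probe - PCI .probe callback: allocate the Scsi_Host,
- * initialise adapter locks, lists and callback indexes, create the
- * ordered firmware-event workqueue, attach the base driver and register
- * the host with the SCSI midlayer.
- */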
-static int
-leapioraid_scsihost_probe(
- struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct LEAPIORAID_ADAPTER *ioc;
- struct Scsi_Host *shost = NULL;
- int rv;
-
- shost = scsi_host_alloc(&leapioraid_driver_template,
- sizeof(struct LEAPIORAID_ADAPTER));
- if (!shost)
- return -ENODEV;
- ioc = shost_priv(shost);
- memset(ioc, 0, sizeof(struct LEAPIORAID_ADAPTER));
- ioc->id = leapioraid_ids++;
- sprintf(ioc->driver_name, "%s", LEAPIORAID_DRIVER_NAME);
-
- ioc->combined_reply_queue = 1;
- ioc->nc_reply_index_count = 16;
- ioc->multipath_on_hba = 1;
-
- ioc = leapioraid_shost_private(shost);
- INIT_LIST_HEAD(&ioc->list);
- spin_lock(&leapioraid_gioc_lock);
- list_add_tail(&ioc->list, &leapioraid_ioc_list);
- spin_unlock(&leapioraid_gioc_lock);
- ioc->shost = shost;
- ioc->pdev = pdev;
-
- ioc->scsi_io_cb_idx = scsi_io_cb_idx;
- ioc->tm_cb_idx = tm_cb_idx;
- ioc->ctl_cb_idx = ctl_cb_idx;
- ioc->ctl_tm_cb_idx = ctl_tm_cb_idx;
- ioc->base_cb_idx = base_cb_idx;
- ioc->port_enable_cb_idx = port_enable_cb_idx;
- ioc->transport_cb_idx = transport_cb_idx;
- ioc->scsih_cb_idx = scsih_cb_idx;
- ioc->config_cb_idx = config_cb_idx;
- ioc->tm_tr_cb_idx = tm_tr_cb_idx;
- ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
- ioc->tm_tr_internal_cb_idx = tm_tr_internal_cb_idx;
- ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
-
- ioc->logging_level = logging_level;
- ioc->schedule_dead_ioc_flush_running_cmds =
- &leapioraid_scsihost_flush_running_cmds;
- ioc->open_pcie_trace = open_pcie_trace;
- ioc->enable_sdev_max_qd = 0;
- ioc->max_shutdown_latency = 6;
- ioc->drv_support_bitmap |= 0x00000001;
- ioc->drv_support_bitmap |= 0x00000002;
-
- mutex_init(&ioc->reset_in_progress_mutex);
- mutex_init(&ioc->hostdiag_unlock_mutex);
- mutex_init(&ioc->pci_access_mutex);
- spin_lock_init(&ioc->ioc_reset_in_progress_lock);
- spin_lock_init(&ioc->scsi_lookup_lock);
- spin_lock_init(&ioc->sas_device_lock);
- spin_lock_init(&ioc->sas_node_lock);
- spin_lock_init(&ioc->fw_event_lock);
- spin_lock_init(&ioc->raid_device_lock);
- spin_lock_init(&ioc->scsih_q_internal_lock);
- spin_lock_init(&ioc->hba_hot_unplug_lock);
- INIT_LIST_HEAD(&ioc->sas_device_list);
- INIT_LIST_HEAD(&ioc->port_table_list);
- INIT_LIST_HEAD(&ioc->sas_device_init_list);
- INIT_LIST_HEAD(&ioc->sas_expander_list);
- INIT_LIST_HEAD(&ioc->enclosure_list);
- INIT_LIST_HEAD(&ioc->fw_event_list);
- INIT_LIST_HEAD(&ioc->raid_device_list);
- INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
- INIT_LIST_HEAD(&ioc->delayed_tr_list);
- INIT_LIST_HEAD(&ioc->delayed_sc_list);
- INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
- INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
- INIT_LIST_HEAD(&ioc->delayed_internal_tm_list);
- INIT_LIST_HEAD(&ioc->scsih_q_intenal_cmds);
- INIT_LIST_HEAD(&ioc->reply_queue_list);
- sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
-
- shost->max_cmd_len = 32;
- shost->max_lun = 8;
- shost->transportt = leapioraid_transport_template;
- shost->unique_id = ioc->id;
-
- ioc->drv_internal_flags |= LEAPIORAID_DRV_INTERNAL_BITMAP_BLK_MQ;
-
- ioc->disable_eedp_support = 1;
- snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
- "fw_event_%s%u", ioc->driver_name, ioc->id);
- ioc->firmware_event_thread =
- alloc_ordered_workqueue(ioc->firmware_event_name, 0);
- if (!ioc->firmware_event_thread) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rv = -ENODEV;
- goto out_thread_fail;
- }
-
- shost->host_tagset = 0;
- ioc->is_driver_loading = 1;
- if ((leapioraid_base_attach(ioc))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rv = -ENODEV;
- goto out_attach_fail;
- }
- ioc->hide_drives = 0;
-
- shost->nr_hw_queues = 1;
- rv = scsi_add_host(shost, &pdev->dev);
- if (rv) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- spin_lock(&leapioraid_gioc_lock);
- list_del(&ioc->list);
- spin_unlock(&leapioraid_gioc_lock);
- goto out_add_shost_fail;
- }
-
- scsi_scan_host(shost);
-
- return 0;
-out_add_shost_fail:
- leapioraid_base_detach(ioc);
-out_attach_fail:
- destroy_workqueue(ioc->firmware_event_thread);
-out_thread_fail:
- spin_lock(&leapioraid_gioc_lock);
- list_del(&ioc->list);
- spin_unlock(&leapioraid_gioc_lock);
- scsi_host_put(shost);
- return rv;
-}
-
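-/*
- * Legacy PCI power management: suspend stops the watchdogs, blocks
- * requests and frees resources; resume re-maps them and issues a hard
- * reset before unblocking I/O.
- */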
-#ifdef CONFIG_PM
-static int
-leapioraid_scsihost_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct Scsi_Host *shost = NULL;
- struct LEAPIORAID_ADAPTER *ioc = NULL;
- pci_power_t device_state;
- int rc;
-
- rc = leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc);
- if (rc) {
- dev_err(&pdev->dev, "unable to suspend device\n");
- return rc;
- }
- leapioraid_base_stop_watchdog(ioc);
- leapioraid_base_stop_hba_unplug_watchdog(ioc);
- scsi_block_requests(shost);
- device_state = pci_choose_state(pdev, state);
- leapioraid_scsihost_ir_shutdown(ioc);
- pr_info("%s pdev=0x%p, slot=%s, entering operating state [D%d]\n",
- ioc->name, pdev,
- pci_name(pdev), device_state);
- pci_save_state(pdev);
- leapioraid_base_free_resources(ioc);
- pci_set_power_state(pdev, device_state);
- return 0;
-}
-
-static int
-leapioraid_scsihost_resume(struct pci_dev *pdev)
-{
- struct Scsi_Host *shost = NULL;
- struct LEAPIORAID_ADAPTER *ioc = NULL;
- pci_power_t device_state = pdev->current_state;
- int r;
-
- r = leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc);
- if (r) {
- dev_err(&pdev->dev, "unable to resume device\n");
- return r;
- }
- pr_info("%s pdev=0x%p, slot=%s, previous operating state [D%d]\n",
- ioc->name, pdev,
- pci_name(pdev), device_state);
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
- ioc->pdev = pdev;
- r = leapioraid_base_map_resources(ioc);
- if (r)
- return r;
- pr_info("%s issuing hard reset as part of OS resume\n",
- ioc->name);
- leapioraid_base_hard_reset_handler(ioc, SOFT_RESET);
- scsi_unblock_requests(shost);
- leapioraid_base_start_watchdog(ioc);
- leapioraid_base_start_hba_unplug_watchdog(ioc);
- return 0;
-}
-#endif
-
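-/*
- * leapioraid_scsihost_pci_error_detected - AER state machine entry:
- * frozen channels release resources and ask for a slot reset, permanent
- * failures flush outstanding commands and disconnect.
- */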
-static pci_ers_result_t
-leapioraid_scsihost_pci_error_detected(
- struct pci_dev *pdev, pci_channel_state_t state)
-{
- struct Scsi_Host *shost = NULL;
- struct LEAPIORAID_ADAPTER *ioc = NULL;
-
- if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) {
- dev_err(&pdev->dev, "device unavailable\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
- pr_err("%s PCI error: detected callback, state(%d)!!\n",
- ioc->name, state);
- switch (state) {
- case pci_channel_io_normal:
- return PCI_ERS_RESULT_CAN_RECOVER;
- case pci_channel_io_frozen:
- ioc->pci_error_recovery = 1;
- scsi_block_requests(ioc->shost);
- leapioraid_base_stop_watchdog(ioc);
- leapioraid_base_stop_hba_unplug_watchdog(ioc);
- leapioraid_base_free_resources(ioc);
- return PCI_ERS_RESULT_NEED_RESET;
- case pci_channel_io_perm_failure:
- ioc->pci_error_recovery = 1;
- leapioraid_base_stop_watchdog(ioc);
- leapioraid_base_stop_hba_unplug_watchdog(ioc);
- leapioraid_base_pause_mq_polling(ioc);
- leapioraid_scsihost_flush_running_cmds(ioc);
- return PCI_ERS_RESULT_DISCONNECT;
- }
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-static pci_ers_result_t
-leapioraid_scsihost_pci_slot_reset(struct pci_dev *pdev)
-{
- struct Scsi_Host *shost = NULL;
- struct LEAPIORAID_ADAPTER *ioc = NULL;
- int rc;
-
- if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) {
- dev_err(&pdev->dev, "unable to perform slot reset\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
- pr_err("%s PCI error: slot reset callback!!\n",
- ioc->name);
- ioc->pci_error_recovery = 0;
- ioc->pdev = pdev;
- pci_restore_state(pdev);
- rc = leapioraid_base_map_resources(ioc);
- if (rc)
- return PCI_ERS_RESULT_DISCONNECT;
- pr_info("%s issuing hard reset as part of PCI slot reset\n",
- ioc->name);
- rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- pr_info("%s hard reset: %s\n",
- ioc->name, (rc == 0) ? "success" : "failed");
- if (!rc)
- return PCI_ERS_RESULT_RECOVERED;
- else
- return PCI_ERS_RESULT_DISCONNECT;
-}
-
-static void
-leapioraid_scsihost_pci_resume(struct pci_dev *pdev)
-{
- struct Scsi_Host *shost = NULL;
- struct LEAPIORAID_ADAPTER *ioc = NULL;
-
- if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) {
- dev_err(&pdev->dev, "unable to resume device\n");
- return;
- }
- pr_err("%s PCI error: resume callback!!\n",
- ioc->name);
-
- pci_aer_clear_nonfatal_status(pdev);
-
- leapioraid_base_start_watchdog(ioc);
- leapioraid_base_start_hba_unplug_watchdog(ioc);
- scsi_unblock_requests(ioc->shost);
-}
-
-static pci_ers_result_t
-leapioraid_scsihost_pci_mmio_enabled(struct pci_dev *pdev)
-{
- struct Scsi_Host *shost = NULL;
- struct LEAPIORAID_ADAPTER *ioc = NULL;
-
- if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) {
- dev_err(&pdev->dev, "unable to enable mmio\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- pr_err("%s: PCI error: mmio enabled callback!!!\n",
- ioc->name);
- return PCI_ERS_RESULT_RECOVERED;
-}
-
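-/*
- * leapioraid_scsihost_ncq_prio_supp - report whether the device behind
- * sdev advertises NCQ priority support in VPD page 0x89 (byte 213,
- * bit 4).
- */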
-u8 leapioraid_scsihost_ncq_prio_supp(struct scsi_device *sdev)
-{
- u8 ncq_prio_supp = 0;
- struct scsi_vpd *vpd;
-
- rcu_read_lock();
- vpd = rcu_dereference(sdev->vpd_pg89);
- if (!vpd || vpd->len < 214)
- goto out;
- ncq_prio_supp = (vpd->data[213] >> 4) & 1;
-out:
- rcu_read_unlock();
- return ncq_prio_supp;
-}
-
-static const struct pci_device_id leapioraid_pci_table[] = {
- { 0x1556, 0x1111, PCI_ANY_ID, PCI_ANY_ID },
- { LEAPIORAID_VENDOR_ID, LEAPIORAID_DEVICE_ID_1, PCI_ANY_ID, PCI_ANY_ID },
- { LEAPIORAID_VENDOR_ID, LEAPIORAID_DEVICE_ID_2, PCI_ANY_ID, PCI_ANY_ID },
- { LEAPIORAID_VENDOR_ID, LEAPIORAID_HBA, PCI_ANY_ID, PCI_ANY_ID },
- { LEAPIORAID_VENDOR_ID, LEAPIORAID_RAID, PCI_ANY_ID, PCI_ANY_ID },
- { 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, leapioraid_pci_table);
-static struct pci_error_handlers leapioraid_err_handler = {
- .error_detected = leapioraid_scsihost_pci_error_detected,
- .mmio_enabled = leapioraid_scsihost_pci_mmio_enabled,
- .slot_reset = leapioraid_scsihost_pci_slot_reset,
- .resume = leapioraid_scsihost_pci_resume,
-};
-
-static struct pci_driver leapioraid_driver = {
- .name = LEAPIORAID_DRIVER_NAME,
- .id_table = leapioraid_pci_table,
- .probe = leapioraid_scsihost_probe,
- .remove = leapioraid_scsihost_remove,
- .shutdown = leapioraid_scsihost_shutdown,
- .err_handler = &leapioraid_err_handler,
-#ifdef CONFIG_PM
- .suspend = leapioraid_scsihost_suspend,
- .resume = leapioraid_scsihost_resume,
-#endif
-};
-
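-/*
- * leapioraid_scsihost_init - register the driver's completion callbacks
- * with the base driver and record the returned callback indexes.
- */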
-static int
-leapioraid_scsihost_init(void)
-{
- leapioraid_ids = 0;
- leapioraid_base_initialize_callback_handler();
-
- scsi_io_cb_idx =
- leapioraid_base_register_callback_handler(
- leapioraid_scsihost_io_done);
- tm_cb_idx =
- leapioraid_base_register_callback_handler(
- leapioraid_scsihost_tm_done);
- base_cb_idx =
- leapioraid_base_register_callback_handler(
- leapioraid_base_done);
- port_enable_cb_idx =
- leapioraid_base_register_callback_handler(
- leapioraid_port_enable_done);
- transport_cb_idx =
- leapioraid_base_register_callback_handler(
- leapioraid_transport_done);
- scsih_cb_idx =
- leapioraid_base_register_callback_handler(
- leapioraid_scsihost_done);
- config_cb_idx =
- leapioraid_base_register_callback_handler(
- leapioraid_config_done);
- ctl_cb_idx =
- leapioraid_base_register_callback_handler(
- leapioraid_ctl_done);
- ctl_tm_cb_idx =
- leapioraid_base_register_callback_handler(
- leapioraid_ctl_tm_done);
- tm_tr_cb_idx =
- leapioraid_base_register_callback_handler(
- leapioraid_scsihost_tm_tr_complete);
- tm_tr_volume_cb_idx =
- leapioraid_base_register_callback_handler(
- leapioraid_scsihost_tm_volume_tr_complete);
- tm_tr_internal_cb_idx =
- leapioraid_base_register_callback_handler(
- leapioraid_scsihost_tm_internal_tr_complete);
- tm_sas_control_cb_idx =
- leapioraid_base_register_callback_handler(
- leapioraid_scsihost_sas_control_complete);
-
- return 0;
-}
-
-static void
-leapioraid_scsihost_exit(void)
-{
- leapioraid_base_release_callback_handler(scsi_io_cb_idx);
- leapioraid_base_release_callback_handler(tm_cb_idx);
- leapioraid_base_release_callback_handler(base_cb_idx);
- leapioraid_base_release_callback_handler(port_enable_cb_idx);
- leapioraid_base_release_callback_handler(transport_cb_idx);
- leapioraid_base_release_callback_handler(scsih_cb_idx);
- leapioraid_base_release_callback_handler(config_cb_idx);
- leapioraid_base_release_callback_handler(ctl_cb_idx);
- leapioraid_base_release_callback_handler(ctl_tm_cb_idx);
- leapioraid_base_release_callback_handler(tm_tr_cb_idx);
- leapioraid_base_release_callback_handler(tm_tr_volume_cb_idx);
- leapioraid_base_release_callback_handler(tm_tr_internal_cb_idx);
- leapioraid_base_release_callback_handler(tm_sas_control_cb_idx);
-
- raid_class_release(leapioraid_raid_template);
- sas_release_transport(leapioraid_transport_template);
-}
-
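-/*
- * leapioraid_init - module entry: attach the SAS transport and RAID
- * class templates, register callbacks and the PCI driver.
- */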
-static int __init leapioraid_init(void)
-{
- int error;
-
- pr_info("%s version %s loaded\n", LEAPIORAID_DRIVER_NAME,
- LEAPIORAID_DRIVER_VERSION);
- leapioraid_transport_template =
- sas_attach_transport(&leapioraid_transport_functions);
-
- if (!leapioraid_transport_template)
- return -ENODEV;
-
- leapioraid_raid_template =
- raid_class_attach(&leapioraid_raid_functions);
- if (!leapioraid_raid_template) {
- sas_release_transport(leapioraid_transport_template);
- return -ENODEV;
- }
-
- error = leapioraid_scsihost_init();
- if (error) {
- leapioraid_scsihost_exit();
- return error;
- }
- leapioraid_ctl_init();
- error = pci_register_driver(&leapioraid_driver);
- if (error)
- leapioraid_scsihost_exit();
- return error;
-}
-
-static void __exit leapioraid_exit(void)
-{
- pr_info("%s version %s unloading\n", LEAPIORAID_DRIVER_NAME,
- LEAPIORAID_DRIVER_VERSION);
- leapioraid_ctl_exit();
- pci_unregister_driver(&leapioraid_driver);
- leapioraid_scsihost_exit();
-}
-
-module_init(leapioraid_init);
-module_exit(leapioraid_exit);
diff --git a/drivers/scsi/leapioraid/leapioraid_transport.c b/drivers/scsi/leapioraid/leapioraid_transport.c
deleted file mode 100644
index edddd56128a1..000000000000
--- a/drivers/scsi/leapioraid/leapioraid_transport.c
+++ /dev/null
@@ -1,1926 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * SAS Transport Layer for MPT (Message Passing Technology) based controllers
- *
- * Copyright (C) 2013-2018 LSI Corporation
- * Copyright (C) 2013-2018 Avago Technologies
- * Copyright (C) 2013-2018 Broadcom Inc.
- * (mailto:MPT-FusionLinux.pdl@broadcom.com)
- *
- * Copyright (C) 2024 LeapIO Tech Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
- *
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/workqueue.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_transport_sas.h>
-#include <scsi/scsi_dbg.h>
-#include "leapioraid_func.h"
-
-static
-struct leapioraid_raid_sas_node *leapioraid_transport_sas_node_find_by_sas_address(
- struct LEAPIORAID_ADAPTER *ioc,
- u64 sas_address, struct leapioraid_hba_port *port)
-{
- if (ioc->sas_hba.sas_address == sas_address)
- return &ioc->sas_hba;
- else
- return leapioraid_scsihost_expander_find_by_sas_address(ioc,
- sas_address,
- port);
-}
-
-static inline u8
-leapioraid_transport_get_port_id_by_sas_phy(struct sas_phy *phy)
-{
- u8 port_id = 0xFF;
- struct leapioraid_hba_port *port = phy->hostdata;
-
- if (port)
- port_id = port->port_id;
- else
- BUG();
- return port_id;
-}
-
-static int
-leapioraid_transport_find_parent_node(
- struct LEAPIORAID_ADAPTER *ioc, struct sas_phy *phy)
-{
- unsigned long flags;
- struct leapioraid_hba_port *port = phy->hostdata;
-
- spin_lock_irqsave(&ioc->sas_node_lock, flags);
- if (leapioraid_transport_sas_node_find_by_sas_address(ioc,
- phy->identify.sas_address,
- port) == NULL) {
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- return 0;
-}
-
-static u8
-leapioraid_transport_get_port_id_by_rphy(struct LEAPIORAID_ADAPTER *ioc,
- struct sas_rphy *rphy)
-{
- struct leapioraid_raid_sas_node *sas_expander;
- struct leapioraid_sas_device *sas_device;
- unsigned long flags;
- u8 port_id = 0xFF;
-
- if (!rphy)
- return port_id;
- if (rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE ||
- rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) {
- spin_lock_irqsave(&ioc->sas_node_lock, flags);
- list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
- if (sas_expander->rphy == rphy) {
- port_id = sas_expander->port->port_id;
- break;
- }
- }
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- } else if (rphy->identify.device_type == SAS_END_DEVICE) {
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __leapioraid_get_sdev_by_addr_and_rphy(
- ioc, rphy->identify.sas_address, rphy);
- if (sas_device) {
- port_id = sas_device->port->port_id;
- leapioraid_sas_device_put(sas_device);
- }
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- }
- return port_id;
-}
-
-static enum sas_linkrate
-leapioraid_transport_convert_phy_link_rate(u8 link_rate)
-{
- enum sas_linkrate rc;
-
- switch (link_rate) {
- case LEAPIORAID_SAS_NEG_LINK_RATE_1_5:
- rc = SAS_LINK_RATE_1_5_GBPS;
- break;
- case LEAPIORAID_SAS_NEG_LINK_RATE_3_0:
- rc = SAS_LINK_RATE_3_0_GBPS;
- break;
- case LEAPIORAID_SAS_NEG_LINK_RATE_6_0:
- rc = SAS_LINK_RATE_6_0_GBPS;
- break;
- case LEAPIORAID_SAS_NEG_LINK_RATE_12_0:
- rc = SAS_LINK_RATE_12_0_GBPS;
- break;
- case LEAPIORAID_SAS_NEG_LINK_RATE_PHY_DISABLED:
- rc = SAS_PHY_DISABLED;
- break;
- case LEAPIORAID_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED:
- rc = SAS_LINK_RATE_FAILED;
- break;
- case LEAPIORAID_SAS_NEG_LINK_RATE_PORT_SELECTOR:
- rc = SAS_SATA_PORT_SELECTOR;
- break;
- case LEAPIORAID_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS:
- default:
- case LEAPIORAID_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE:
- case LEAPIORAID_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE:
- rc = SAS_LINK_RATE_UNKNOWN;
- break;
- }
- return rc;
-}
-
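-/*
- * leapioraid_transport_set_identify - populate a sas_identify from SAS
- * device page 0 for @handle, translating the firmware DeviceInfo bits
- * into transport device type and protocol flags.
- */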
-static int
-leapioraid_transport_set_identify(
- struct LEAPIORAID_ADAPTER *ioc, u16 handle,
- struct sas_identify *identify)
-{
- struct LeapioraidSasDevP0_t sas_device_pg0;
- struct LeapioraidCfgRep_t mpi_reply;
- u32 device_info;
- u32 ioc_status;
-
- if ((ioc->shost_recovery && !ioc->is_driver_loading)
- || ioc->pci_error_recovery) {
- pr_info("%s %s: host reset in progress!\n",
- ioc->name, __func__);
- return -EFAULT;
- }
- if ((leapioraid_config_get_sas_device_pg0(
- ioc, &mpi_reply, &sas_device_pg0,
- LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return -ENXIO;
- }
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err("%s handle(0x%04x), ioc_status(0x%04x)\nfailure at %s:%d/%s()!\n",
- ioc->name, handle,
- ioc_status, __FILE__, __LINE__, __func__);
- return -EIO;
- }
- memset(identify, 0, sizeof(struct sas_identify));
- device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
- identify->sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
- identify->phy_identifier = sas_device_pg0.PhyNum;
- switch (device_info & LEAPIORAID_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
- case LEAPIORAID_SAS_DEVICE_INFO_NO_DEVICE:
- identify->device_type = SAS_PHY_UNUSED;
- break;
- case LEAPIORAID_SAS_DEVICE_INFO_END_DEVICE:
- identify->device_type = SAS_END_DEVICE;
- break;
- case LEAPIORAID_SAS_DEVICE_INFO_EDGE_EXPANDER:
- identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
- break;
- case LEAPIORAID_SAS_DEVICE_INFO_FANOUT_EXPANDER:
- identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
- break;
- }
- if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_INITIATOR)
- identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
- if (device_info & LEAPIORAID_SAS_DEVICE_INFO_STP_INITIATOR)
- identify->initiator_port_protocols |= SAS_PROTOCOL_STP;
- if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SMP_INITIATOR)
- identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
- if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SATA_HOST)
- identify->initiator_port_protocols |= SAS_PROTOCOL_SATA;
- if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET)
- identify->target_port_protocols |= SAS_PROTOCOL_SSP;
- if (device_info & LEAPIORAID_SAS_DEVICE_INFO_STP_TARGET)
- identify->target_port_protocols |= SAS_PROTOCOL_STP;
- if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SMP_TARGET)
- identify->target_port_protocols |= SAS_PROTOCOL_SMP;
- if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE)
- identify->target_port_protocols |= SAS_PROTOCOL_SATA;
- return 0;
-}
-
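-/*
- * leapioraid_transport_done - completion callback for transport commands:
- * copies the reply frame, when present, into transport_cmds.reply and
- * wakes up the waiter.
- */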
-u8
-leapioraid_transport_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid,
- u8 msix_index, u32 reply)
-{
- struct LeapioraidDefaultRep_t *mpi_reply;
-
- mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply);
- if (ioc->transport_cmds.status == LEAPIORAID_CMD_NOT_USED)
- return 1;
- if (ioc->transport_cmds.smid != smid)
- return 1;
- ioc->transport_cmds.status |= LEAPIORAID_CMD_COMPLETE;
- if (mpi_reply) {
- memcpy(ioc->transport_cmds.reply, mpi_reply,
- mpi_reply->MsgLength * 4);
- ioc->transport_cmds.status |= LEAPIORAID_CMD_REPLY_VALID;
- }
- ioc->transport_cmds.status &= ~LEAPIORAID_CMD_PENDING;
- complete(&ioc->transport_cmds.done);
- return 1;
-}
-
-#if defined(LEAPIORAID_WIDE_PORT_API)
-struct leapioraid_rep_manu_request {
- u8 smp_frame_type;
- u8 function;
- u8 reserved;
- u8 request_length;
-};
-
-struct leapioraid_rep_manu_reply {
- u8 smp_frame_type;
- u8 function;
- u8 function_result;
- u8 response_length;
- u16 expander_change_count;
- u8 reserved0[2];
- u8 sas_format;
- u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
- u16 component_id;
- u8 component_revision_id;
- u8 reserved3;
- u8 vendor_specific[8];
-};
-
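-/*
- * leapioraid_transport_expander_report_manufacture - send an SMP REPORT
- * MANUFACTURER INFORMATION request through the firmware passthrough and
- * copy the vendor/product/component strings into the sas_expander_device.
- * Request and response share a single coherent DMA buffer.
- */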
-static int
-leapioraid_transport_expander_report_manufacture(
- struct LEAPIORAID_ADAPTER *ioc,
- u64 sas_address,
- struct sas_expander_device *edev,
- u8 port_id)
-{
- struct LeapioraidSmpPassthroughReq_t *mpi_request;
- struct LeapioraidSmpPassthroughRep_t *mpi_reply;
- struct leapioraid_rep_manu_reply *manufacture_reply;
- struct leapioraid_rep_manu_request *manufacture_request;
- int rc;
- u16 smid;
- void *psge;
- u8 issue_reset = 0;
- void *data_out = NULL;
- dma_addr_t data_out_dma;
- dma_addr_t data_in_dma;
- size_t data_in_sz;
- size_t data_out_sz;
-
- if (ioc->shost_recovery || ioc->pci_error_recovery) {
- pr_info("%s %s: host reset in progress!\n",
- ioc->name, __func__);
- return -EFAULT;
- }
- mutex_lock(&ioc->transport_cmds.mutex);
- if (ioc->transport_cmds.status != LEAPIORAID_CMD_NOT_USED) {
- pr_err("%s %s: transport_cmds in use\n",
- ioc->name, __func__);
- mutex_unlock(&ioc->transport_cmds.mutex);
- return -EAGAIN;
- }
- ioc->transport_cmds.status = LEAPIORAID_CMD_PENDING;
- rc = leapioraid_wait_for_ioc_to_operational(ioc, 10);
- if (rc)
- goto out;
- smid = leapioraid_base_get_smid(ioc, ioc->transport_cb_idx);
- if (!smid) {
- pr_err("%s %s: failed obtaining a smid\n",
- ioc->name, __func__);
- rc = -EAGAIN;
- goto out;
- }
- rc = 0;
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- ioc->transport_cmds.smid = smid;
- data_out_sz = sizeof(struct leapioraid_rep_manu_request);
- data_in_sz = sizeof(struct leapioraid_rep_manu_reply);
- data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz,
- &data_out_dma, GFP_ATOMIC);
- if (!data_out) {
- rc = -ENOMEM;
- leapioraid_base_free_smid(ioc, smid);
- goto out;
- }
- data_in_dma = data_out_dma + sizeof(struct leapioraid_rep_manu_request);
- manufacture_request = data_out;
- manufacture_request->smp_frame_type = 0x40;
- manufacture_request->function = 1;
- manufacture_request->reserved = 0;
- manufacture_request->request_length = 0;
- memset(mpi_request, 0, sizeof(struct LeapioraidSmpPassthroughReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_SMP_PASSTHROUGH;
- mpi_request->PhysicalPort = port_id;
- mpi_request->SASAddress = cpu_to_le64(sas_address);
- mpi_request->RequestDataLength = cpu_to_le16(data_out_sz);
- psge = &mpi_request->SGL;
- ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
- data_in_sz);
- dtransportprintk(ioc,
- pr_info("%s report_manufacture - send to sas_addr(0x%016llx)\n",
- ioc->name,
- (unsigned long long)sas_address));
- init_completion(&ioc->transport_cmds.done);
- ioc->put_smid_default(ioc, smid);
- wait_for_completion_timeout(&ioc->transport_cmds.done, 10 * HZ);
- if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
- pr_err("%s %s: timeout\n",
- ioc->name, __func__);
- leapioraid_debug_dump_mf(mpi_request,
- sizeof(struct LeapioraidSmpPassthroughReq_t) / 4);
- if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_RESET))
- issue_reset = 1;
- goto issue_host_reset;
- }
- dtransportprintk(ioc,
- pr_info("%s report_manufacture - complete\n", ioc->name));
- if (ioc->transport_cmds.status & LEAPIORAID_CMD_REPLY_VALID) {
- u8 *tmp;
-
- mpi_reply = ioc->transport_cmds.reply;
- dtransportprintk(ioc, pr_err(
- "%s report_manufacture - reply data transfer size(%d)\n",
- ioc->name,
- le16_to_cpu(mpi_reply->ResponseDataLength)));
- if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
- sizeof(struct leapioraid_rep_manu_reply))
- goto out;
- manufacture_reply = data_out + sizeof(struct leapioraid_rep_manu_request);
- strscpy(edev->vendor_id, manufacture_reply->vendor_id,
- sizeof(edev->vendor_id));
- strscpy(edev->product_id, manufacture_reply->product_id,
- sizeof(edev->product_id));
- strscpy(edev->product_rev, manufacture_reply->product_rev,
- sizeof(edev->product_rev));
- edev->level = manufacture_reply->sas_format & 1;
- if (edev->level) {
- strscpy(edev->component_vendor_id,
- manufacture_reply->component_vendor_id,
- sizeof(edev->component_vendor_id));
- tmp = (u8 *) &manufacture_reply->component_id;
- edev->component_id = tmp[0] << 8 | tmp[1];
- edev->component_revision_id =
- manufacture_reply->component_revision_id;
- }
- } else
- dtransportprintk(ioc, pr_err(
- "%s report_manufacture - no reply\n",
- ioc->name));
-issue_host_reset:
- if (issue_reset)
- leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
-out:
- ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED;
- if (data_out)
- dma_free_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz,
- data_out, data_out_dma);
- mutex_unlock(&ioc->transport_cmds.mutex);
- return rc;
-}
-#endif
-
-static void
-leapioraid_transport_delete_port(struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_sas_port *leapioraid_port)
-{
- u64 sas_address = leapioraid_port->remote_identify.sas_address;
- struct leapioraid_hba_port *port = leapioraid_port->hba_port;
- enum sas_device_type device_type =
- leapioraid_port->remote_identify.device_type;
-
-#if defined(LEAPIORAID_WIDE_PORT_API)
- dev_info(&leapioraid_port->port->dev,
- "remove: sas_addr(0x%016llx)\n",
- (unsigned long long)sas_address);
-#endif
- ioc->logging_level |= LEAPIORAID_DEBUG_TRANSPORT;
- if (device_type == SAS_END_DEVICE)
- leapioraid_device_remove_by_sas_address(ioc, sas_address, port);
- else if (device_type == SAS_EDGE_EXPANDER_DEVICE ||
- device_type == SAS_FANOUT_EXPANDER_DEVICE)
- leapioraid_expander_remove(ioc, sas_address, port);
- ioc->logging_level &= ~LEAPIORAID_DEBUG_TRANSPORT;
-}
-
-#if defined(LEAPIORAID_WIDE_PORT_API)
-static void
-leapioraid_transport_delete_phy(struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_sas_port *leapioraid_port,
- struct leapioraid_sas_phy *leapioraid_phy)
-{
- u64 sas_address = leapioraid_port->remote_identify.sas_address;
-
- dev_info(&leapioraid_phy->phy->dev,
- "remove: sas_addr(0x%016llx), phy(%d)\n",
- (unsigned long long)sas_address, leapioraid_phy->phy_id);
- list_del(&leapioraid_phy->port_siblings);
- leapioraid_port->num_phys--;
- sas_port_delete_phy(leapioraid_port->port, leapioraid_phy->phy);
- leapioraid_phy->phy_belongs_to_port = 0;
-}
-
-static void
-leapioraid_transport_add_phy(struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_sas_port *leapioraid_port,
- struct leapioraid_sas_phy *leapioraid_phy)
-{
- u64 sas_address = leapioraid_port->remote_identify.sas_address;
-
- dev_info(&leapioraid_phy->phy->dev,
- "add: sas_addr(0x%016llx), phy(%d)\n", (unsigned long long)
- sas_address, leapioraid_phy->phy_id);
- list_add_tail(&leapioraid_phy->port_siblings,
- &leapioraid_port->phy_list);
- leapioraid_port->num_phys++;
- sas_port_add_phy(leapioraid_port->port, leapioraid_phy->phy);
- leapioraid_phy->phy_belongs_to_port = 1;
-}
-
-void
-leapioraid_transport_add_phy_to_an_existing_port(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_raid_sas_node *sas_node,
- struct leapioraid_sas_phy *leapioraid_phy,
- u64 sas_address,
- struct leapioraid_hba_port *port)
-{
- struct leapioraid_sas_port *leapioraid_port;
- struct leapioraid_sas_phy *phy_srch;
-
- if (leapioraid_phy->phy_belongs_to_port == 1)
- return;
- if (!port)
- return;
- list_for_each_entry(leapioraid_port, &sas_node->sas_port_list,
- port_list) {
- if (leapioraid_port->remote_identify.sas_address != sas_address)
- continue;
- if (leapioraid_port->hba_port != port)
- continue;
- list_for_each_entry(phy_srch, &leapioraid_port->phy_list,
- port_siblings) {
- if (phy_srch == leapioraid_phy)
- return;
- }
- leapioraid_transport_add_phy(ioc, leapioraid_port, leapioraid_phy);
- return;
- }
-}
-#endif
-
-void
-leapioraid_transport_del_phy_from_an_existing_port(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_raid_sas_node *sas_node,
- struct leapioraid_sas_phy *leapioraid_phy)
-{
- struct leapioraid_sas_port *leapioraid_port, *next;
- struct leapioraid_sas_phy *phy_srch;
-
- if (leapioraid_phy->phy_belongs_to_port == 0)
- return;
- list_for_each_entry_safe(leapioraid_port, next,
- &sas_node->sas_port_list, port_list) {
- list_for_each_entry(phy_srch, &leapioraid_port->phy_list,
- port_siblings) {
- if (phy_srch != leapioraid_phy)
- continue;
-#if defined(LEAPIORAID_WIDE_PORT_API)
- if (leapioraid_port->num_phys == 1
- && !ioc->shost_recovery)
- leapioraid_transport_delete_port(ioc, leapioraid_port);
- else
- leapioraid_transport_delete_phy(ioc, leapioraid_port,
- leapioraid_phy);
-#else
- leapioraid_transport_delete_port(ioc, leapioraid_port);
-#endif
- return;
- }
- }
-}
-
-static void
-leapioraid_transport_sanity_check(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_raid_sas_node *sas_node, u64 sas_address,
- struct leapioraid_hba_port *port)
-{
- int i;
-
- for (i = 0; i < sas_node->num_phys; i++) {
- if (sas_node->phy[i].remote_identify.sas_address != sas_address
- || sas_node->phy[i].port != port)
- continue;
- if (sas_node->phy[i].phy_belongs_to_port == 1)
- leapioraid_transport_del_phy_from_an_existing_port(ioc,
- sas_node, &sas_node->phy[i]);
- }
-}
-
-struct leapioraid_sas_port *leapioraid_transport_port_add(
- struct LEAPIORAID_ADAPTER *ioc,
- u16 handle, u64 sas_address,
- struct leapioraid_hba_port *hba_port)
-{
- struct leapioraid_sas_phy *leapioraid_phy, *next;
- struct leapioraid_sas_port *leapioraid_port;
- unsigned long flags;
- struct leapioraid_raid_sas_node *sas_node;
- struct sas_rphy *rphy;
- struct leapioraid_sas_device *sas_device = NULL;
- int i;
-#if defined(LEAPIORAID_WIDE_PORT_API)
- struct sas_port *port;
-#endif
- struct leapioraid_virtual_phy *vphy = NULL;
-
- if (!hba_port) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return NULL;
- }
- leapioraid_port = kzalloc(sizeof(struct leapioraid_sas_port), GFP_KERNEL);
- if (!leapioraid_port)
- return NULL;
- INIT_LIST_HEAD(&leapioraid_port->port_list);
- INIT_LIST_HEAD(&leapioraid_port->phy_list);
- spin_lock_irqsave(&ioc->sas_node_lock, flags);
- sas_node = leapioraid_transport_sas_node_find_by_sas_address(
- ioc,
- sas_address,
- hba_port);
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- if (!sas_node) {
- pr_err("%s %s: Could not find parent sas_address(0x%016llx)!\n",
- ioc->name,
- __func__, (unsigned long long)sas_address);
- goto out_fail;
- }
- if ((leapioraid_transport_set_identify(ioc, handle,
- &leapioraid_port->remote_identify))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out_fail;
- }
- if (leapioraid_port->remote_identify.device_type == SAS_PHY_UNUSED) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out_fail;
- }
- leapioraid_port->hba_port = hba_port;
- leapioraid_transport_sanity_check(ioc, sas_node,
- leapioraid_port->remote_identify.sas_address,
- hba_port);
- for (i = 0; i < sas_node->num_phys; i++) {
- if (sas_node->phy[i].remote_identify.sas_address !=
- leapioraid_port->remote_identify.sas_address ||
- sas_node->phy[i].port != hba_port)
- continue;
- list_add_tail(&sas_node->phy[i].port_siblings,
- &leapioraid_port->phy_list);
- leapioraid_port->num_phys++;
- if (sas_node->handle <= ioc->sas_hba.num_phys) {
- if (!sas_node->phy[i].hba_vphy) {
- hba_port->phy_mask |= (1 << i);
- continue;
- }
- vphy = leapioraid_get_vphy_by_phy(ioc, hba_port, i);
- if (!vphy) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out_fail;
- }
- }
- }
- if (!leapioraid_port->num_phys) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out_fail;
- }
- if (leapioraid_port->remote_identify.device_type == SAS_END_DEVICE) {
- sas_device = leapioraid_get_sdev_by_addr(ioc,
- leapioraid_port->remote_identify.sas_address,
- leapioraid_port->hba_port);
- if (!sas_device) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out_fail;
- }
- sas_device->pend_sas_rphy_add = 1;
- }
-#if defined(LEAPIORAID_WIDE_PORT_API)
- if (!sas_node->parent_dev) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out_fail;
- }
- port = sas_port_alloc_num(sas_node->parent_dev);
- if ((sas_port_add(port))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out_fail;
- }
- list_for_each_entry(leapioraid_phy, &leapioraid_port->phy_list,
- port_siblings) {
- if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT))
- dev_info(&port->dev,
- "add: handle(0x%04x), sas_addr(0x%016llx), phy(%d)\n",
- handle,
- (unsigned long long)
- leapioraid_port->remote_identify.sas_address,
- leapioraid_phy->phy_id);
- sas_port_add_phy(port, leapioraid_phy->phy);
- leapioraid_phy->phy_belongs_to_port = 1;
- leapioraid_phy->port = hba_port;
- }
- leapioraid_port->port = port;
- if (leapioraid_port->remote_identify.device_type == SAS_END_DEVICE) {
- rphy = sas_end_device_alloc(port);
- sas_device->rphy = rphy;
- if (sas_node->handle <= ioc->sas_hba.num_phys) {
- if (!vphy)
- hba_port->sas_address = sas_device->sas_address;
- else
- vphy->sas_address = sas_device->sas_address;
- }
- } else {
- rphy = sas_expander_alloc(port,
- leapioraid_port->remote_identify.device_type);
- if (sas_node->handle <= ioc->sas_hba.num_phys)
- hba_port->sas_address =
- leapioraid_port->remote_identify.sas_address;
- }
-#else
- leapioraid_phy =
- list_entry(leapioraid_port->phy_list.next, struct leapioraid_sas_phy,
- port_siblings);
- if (leapioraid_port->remote_identify.device_type == SAS_END_DEVICE) {
- rphy = sas_end_device_alloc(leapioraid_phy->phy);
- sas_device->rphy = rphy;
- } else
- rphy = sas_expander_alloc(leapioraid_phy->phy,
- leapioraid_port->remote_identify.device_type);
-#endif
- rphy->identify = leapioraid_port->remote_identify;
- if ((sas_rphy_add(rphy))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- }
- if (leapioraid_port->remote_identify.device_type == SAS_END_DEVICE) {
- sas_device->pend_sas_rphy_add = 0;
- leapioraid_sas_device_put(sas_device);
- }
- dev_info(&rphy->dev,
- "%s: added: handle(0x%04x), sas_addr(0x%016llx)\n",
- __func__, handle, (unsigned long long)
- leapioraid_port->remote_identify.sas_address);
- leapioraid_port->rphy = rphy;
- spin_lock_irqsave(&ioc->sas_node_lock, flags);
- list_add_tail(&leapioraid_port->port_list, &sas_node->sas_port_list);
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
-#if defined(LEAPIORAID_WIDE_PORT_API)
- if (leapioraid_port->remote_identify.device_type ==
- LEAPIORAID_SAS_DEVICE_INFO_EDGE_EXPANDER ||
- leapioraid_port->remote_identify.device_type ==
- LEAPIORAID_SAS_DEVICE_INFO_FANOUT_EXPANDER)
- leapioraid_transport_expander_report_manufacture(ioc,
- leapioraid_port->remote_identify.sas_address,
- rphy_to_expander_device
- (rphy),
- hba_port->port_id);
-#endif
- return leapioraid_port;
-out_fail:
- list_for_each_entry_safe(leapioraid_phy, next,
- &leapioraid_port->phy_list, port_siblings)
- list_del(&leapioraid_phy->port_siblings);
- kfree(leapioraid_port);
- return NULL;
-}
-
-void
-leapioraid_transport_port_remove(struct LEAPIORAID_ADAPTER *ioc,
- u64 sas_address, u64 sas_address_parent,
- struct leapioraid_hba_port *port)
-{
- int i;
- unsigned long flags;
- struct leapioraid_sas_port *leapioraid_port, *next;
- struct leapioraid_raid_sas_node *sas_node;
- u8 found = 0;
-#if defined(LEAPIORAID_WIDE_PORT_API)
- struct leapioraid_sas_phy *leapioraid_phy, *next_phy;
-#endif
- struct leapioraid_hba_port *hba_port, *hba_port_next = NULL;
- struct leapioraid_virtual_phy *vphy, *vphy_next = NULL;
-
- if (!port)
- return;
- spin_lock_irqsave(&ioc->sas_node_lock, flags);
- sas_node = leapioraid_transport_sas_node_find_by_sas_address(
- ioc,
- sas_address_parent,
- port);
- if (!sas_node) {
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- return;
- }
- list_for_each_entry_safe(leapioraid_port, next,
- &sas_node->sas_port_list, port_list) {
- if (leapioraid_port->remote_identify.sas_address != sas_address)
- continue;
- if (leapioraid_port->hba_port != port)
- continue;
- found = 1;
- list_del(&leapioraid_port->port_list);
- goto out;
- }
-out:
- if (!found) {
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- return;
- }
- if ((sas_node->handle <= ioc->sas_hba.num_phys) &&
- (ioc->multipath_on_hba)) {
- if (port->vphys_mask) {
- list_for_each_entry_safe(vphy, vphy_next,
- &port->vphys_list, list) {
- if (vphy->sas_address != sas_address)
- continue;
- pr_err(
- "%s remove vphy entry: %p of port:%p,\n\t\t"
- "from %d port's vphys list\n",
- ioc->name,
- vphy,
- port,
- port->port_id);
- port->vphys_mask &= ~vphy->phy_mask;
- list_del(&vphy->list);
- kfree(vphy);
- }
- if (!port->vphys_mask && !port->sas_address) {
- pr_err(
- "%s remove hba_port entry: %p port: %d\n\t\t"
- "from hba_port list\n",
- ioc->name,
- port,
- port->port_id);
- list_del(&port->list);
- kfree(port);
- }
- }
- list_for_each_entry_safe(hba_port, hba_port_next,
- &ioc->port_table_list, list) {
- if (hba_port != port)
- continue;
- if (hba_port->sas_address != sas_address)
- continue;
- if (!port->vphys_mask) {
- pr_err(
- "%s remove hba_port entry: %p port: %d\n\t\t"
- "from hba_port list\n",
- ioc->name,
- hba_port,
- hba_port->port_id);
- list_del(&hba_port->list);
- kfree(hba_port);
- } else {
- pr_err(
- "%s clearing sas_address from hba_port entry: %p\n\t\t"
- "port: %d from hba_port list\n",
- ioc->name,
- hba_port,
- hba_port->port_id);
- port->sas_address = 0;
- }
- break;
- }
- }
- for (i = 0; i < sas_node->num_phys; i++) {
- if (sas_node->phy[i].remote_identify.sas_address == sas_address) {
- memset(&sas_node->phy[i].remote_identify, 0,
- sizeof(struct sas_identify));
- sas_node->phy[i].hba_vphy = 0;
- }
- }
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
-#if defined(LEAPIORAID_WIDE_PORT_API)
- list_for_each_entry_safe(leapioraid_phy, next_phy,
- &leapioraid_port->phy_list, port_siblings) {
- if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT))
- pr_info("%s %s: remove: sas_addr(0x%016llx), phy(%d)\n",
- ioc->name, __func__,
- (unsigned long long)
- leapioraid_port->remote_identify.sas_address,
- leapioraid_phy->phy_id);
- leapioraid_phy->phy_belongs_to_port = 0;
- if (!ioc->remove_host)
- sas_port_delete_phy(leapioraid_port->port,
- leapioraid_phy->phy);
- list_del(&leapioraid_phy->port_siblings);
- }
- if (!ioc->remove_host)
- sas_port_delete(leapioraid_port->port);
- pr_info("%s %s: removed: sas_addr(0x%016llx)\n",
- ioc->name, __func__, (unsigned long long)sas_address);
-#else
- if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT))
- pr_info("%s %s: remove: sas_addr(0x%016llx)\n",
- ioc->name, __func__,
- (unsigned long long)sas_address);
- if (!ioc->remove_host)
- sas_rphy_delete(leapioraid_port->rphy);
- pr_info("%s %s: removed: sas_addr(0x%016llx)\n",
- ioc->name, __func__, (unsigned long long)sas_address);
-#endif
- kfree(leapioraid_port);
-}
-
-int
-leapioraid_transport_add_host_phy(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_sas_phy *leapioraid_phy,
- struct LeapioraidSasPhyP0_t phy_pg0,
- struct device *parent_dev)
-{
- struct sas_phy *phy;
- int phy_index = leapioraid_phy->phy_id;
-
- INIT_LIST_HEAD(&leapioraid_phy->port_siblings);
- phy = sas_phy_alloc(parent_dev, phy_index);
- if (!phy) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return -1;
- }
- if ((leapioraid_transport_set_identify(ioc, leapioraid_phy->handle,
- &leapioraid_phy->identify))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- sas_phy_free(phy);
- return -1;
- }
- phy->identify = leapioraid_phy->identify;
- leapioraid_phy->attached_handle =
- le16_to_cpu(phy_pg0.AttachedDevHandle);
- if (leapioraid_phy->attached_handle)
- leapioraid_transport_set_identify(
- ioc, leapioraid_phy->attached_handle,
- &leapioraid_phy->remote_identify);
- phy->identify.phy_identifier = leapioraid_phy->phy_id;
- phy->negotiated_linkrate =
- leapioraid_transport_convert_phy_link_rate(
- phy_pg0.NegotiatedLinkRate &
- LEAPIORAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
- phy->minimum_linkrate_hw =
- leapioraid_transport_convert_phy_link_rate(
- phy_pg0.HwLinkRate &
- LEAPIORAID_SAS_HWRATE_MIN_RATE_MASK);
- phy->maximum_linkrate_hw =
- leapioraid_transport_convert_phy_link_rate(
- phy_pg0.HwLinkRate >> 4);
- phy->minimum_linkrate =
- leapioraid_transport_convert_phy_link_rate(
- phy_pg0.ProgrammedLinkRate &
- LEAPIORAID_SAS_PRATE_MIN_RATE_MASK);
- phy->maximum_linkrate =
- leapioraid_transport_convert_phy_link_rate(
- phy_pg0.ProgrammedLinkRate >> 4);
- phy->hostdata = leapioraid_phy->port;
-#if !defined(LEAPIORAID_WIDE_PORT_API_PLUS)
- phy->local_attached = 1;
-#endif
-#if !defined(LEAPIORAID_WIDE_PORT_API)
- phy->port_identifier = phy_index;
-#endif
- if ((sas_phy_add(phy))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- sas_phy_free(phy);
- return -1;
- }
- if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT))
- dev_info(&phy->dev,
- "add: handle(0x%04x), sas_addr(0x%016llx)\n"
- "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
- leapioraid_phy->handle, (unsigned long long)
- leapioraid_phy->identify.sas_address,
- leapioraid_phy->attached_handle, (unsigned long long)
- leapioraid_phy->remote_identify.sas_address);
- leapioraid_phy->phy = phy;
- return 0;
-}
-
-int
-leapioraid_transport_add_expander_phy(
- struct LEAPIORAID_ADAPTER *ioc,
- struct leapioraid_sas_phy *leapioraid_phy,
- struct LeapioraidExpanderP1_t expander_pg1,
- struct device *parent_dev)
-{
- struct sas_phy *phy;
- int phy_index = leapioraid_phy->phy_id;
-
- INIT_LIST_HEAD(&leapioraid_phy->port_siblings);
- phy = sas_phy_alloc(parent_dev, phy_index);
- if (!phy) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return -1;
- }
- if ((leapioraid_transport_set_identify(ioc, leapioraid_phy->handle,
- &leapioraid_phy->identify))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- sas_phy_free(phy);
- return -1;
- }
- phy->identify = leapioraid_phy->identify;
- leapioraid_phy->attached_handle =
- le16_to_cpu(expander_pg1.AttachedDevHandle);
- if (leapioraid_phy->attached_handle)
- leapioraid_transport_set_identify(
- ioc, leapioraid_phy->attached_handle,
- &leapioraid_phy->remote_identify);
- phy->identify.phy_identifier = leapioraid_phy->phy_id;
- phy->negotiated_linkrate =
- leapioraid_transport_convert_phy_link_rate(
- expander_pg1.NegotiatedLinkRate &
- LEAPIORAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
- phy->minimum_linkrate_hw =
- leapioraid_transport_convert_phy_link_rate(
- expander_pg1.HwLinkRate &
- LEAPIORAID_SAS_HWRATE_MIN_RATE_MASK);
- phy->maximum_linkrate_hw =
- leapioraid_transport_convert_phy_link_rate(
- expander_pg1.HwLinkRate >> 4);
- phy->minimum_linkrate =
- leapioraid_transport_convert_phy_link_rate(
- expander_pg1.ProgrammedLinkRate &
- LEAPIORAID_SAS_PRATE_MIN_RATE_MASK);
- phy->maximum_linkrate =
- leapioraid_transport_convert_phy_link_rate(
- expander_pg1.ProgrammedLinkRate >> 4);
- phy->hostdata = leapioraid_phy->port;
-#if !defined(LEAPIORAID_WIDE_PORT_API)
- phy->port_identifier = phy_index;
-#endif
- if ((sas_phy_add(phy))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- sas_phy_free(phy);
- return -1;
- }
- if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT))
- dev_info(&phy->dev,
- "add: handle(0x%04x), sas_addr(0x%016llx)\n"
- "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
- leapioraid_phy->handle, (unsigned long long)
- leapioraid_phy->identify.sas_address,
- leapioraid_phy->attached_handle, (unsigned long long)
- leapioraid_phy->remote_identify.sas_address);
- leapioraid_phy->phy = phy;
- return 0;
-}
-
-void
-leapioraid_transport_update_links(struct LEAPIORAID_ADAPTER *ioc,
- u64 sas_address, u16 handle, u8 phy_number,
- u8 link_rate, struct leapioraid_hba_port *port)
-{
- unsigned long flags;
- struct leapioraid_raid_sas_node *sas_node;
- struct leapioraid_sas_phy *leapioraid_phy;
- struct leapioraid_hba_port *hba_port = NULL;
-
- if (ioc->shost_recovery || ioc->pci_error_recovery)
- return;
- spin_lock_irqsave(&ioc->sas_node_lock, flags);
- sas_node = leapioraid_transport_sas_node_find_by_sas_address(ioc,
- sas_address, port);
- if (!sas_node) {
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- return;
- }
- leapioraid_phy = &sas_node->phy[phy_number];
- leapioraid_phy->attached_handle = handle;
- spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
- if (handle && (link_rate >= LEAPIORAID_SAS_NEG_LINK_RATE_1_5)) {
- leapioraid_transport_set_identify(ioc, handle,
- &leapioraid_phy->remote_identify);
-#if defined(LEAPIORAID_WIDE_PORT_API)
- if ((sas_node->handle <= ioc->sas_hba.num_phys) &&
- (ioc->multipath_on_hba)) {
- list_for_each_entry(hba_port,
- &ioc->port_table_list, list) {
- if (hba_port->sas_address == sas_address &&
- hba_port == port)
- hba_port->phy_mask |=
- (1 << leapioraid_phy->phy_id);
- }
- }
- leapioraid_transport_add_phy_to_an_existing_port(ioc, sas_node,
- leapioraid_phy,
- leapioraid_phy->remote_identify.sas_address,
- port);
-#endif
- } else
- memset(&leapioraid_phy->remote_identify, 0,
- sizeof(struct sas_identify));
- if (leapioraid_phy->phy)
- leapioraid_phy->phy->negotiated_linkrate =
- leapioraid_transport_convert_phy_link_rate(link_rate);
- if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT))
- dev_info(&leapioraid_phy->phy->dev,
- "refresh: parent sas_addr(0x%016llx),\n"
- "\tlink_rate(0x%02x), phy(%d)\n"
- "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
- (unsigned long long)sas_address,
- link_rate, phy_number, handle, (unsigned long long)
- leapioraid_phy->remote_identify.sas_address);
-}
-
-static inline void *phy_to_ioc(struct sas_phy *phy)
-{
- struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
-
- return leapioraid_shost_private(shost);
-}
-
-static inline void *rphy_to_ioc(struct sas_rphy *rphy)
-{
- struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
-
- return leapioraid_shost_private(shost);
-}
-
-struct leapioraid_phy_error_log_request {
- u8 smp_frame_type;
- u8 function;
- u8 allocated_response_length;
- u8 request_length;
- u8 reserved_1[5];
- u8 phy_identifier;
- u8 reserved_2[2];
-};
-
-struct leapioraid_phy_error_log_reply {
- u8 smp_frame_type;
- u8 function;
- u8 function_result;
- u8 response_length;
- __be16 expander_change_count;
- u8 reserved_1[3];
- u8 phy_identifier;
- u8 reserved_2[2];
- __be32 invalid_dword;
- __be32 running_disparity_error;
- __be32 loss_of_dword_sync;
- __be32 phy_reset_problem;
-};
-
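-/*
- * The request/reply structures above mirror the SMP REPORT PHY ERROR LOG
- * frames built below (smp_frame_type 0x40, function 0x11); the __be32
- * counters arrive big-endian and are converted with be32_to_cpu() before
- * being stored in the sas_phy.
- */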
-static int
-leapioraid_transport_get_expander_phy_error_log(
- struct LEAPIORAID_ADAPTER *ioc, struct sas_phy *phy)
-{
- struct LeapioraidSmpPassthroughReq_t *mpi_request;
- struct LeapioraidSmpPassthroughRep_t *mpi_reply;
- struct leapioraid_phy_error_log_request *phy_error_log_request;
- struct leapioraid_phy_error_log_reply *phy_error_log_reply;
- int rc;
- u16 smid;
- void *psge;
- u8 issue_reset = 0;
- void *data_out = NULL;
- dma_addr_t data_out_dma;
- u32 sz;
-
- if (ioc->shost_recovery || ioc->pci_error_recovery) {
- pr_info("%s %s: host reset in progress!\n",
- __func__, ioc->name);
- return -EFAULT;
- }
- mutex_lock(&ioc->transport_cmds.mutex);
- if (ioc->transport_cmds.status != LEAPIORAID_CMD_NOT_USED) {
- pr_err("%s %s: transport_cmds in use\n",
- ioc->name, __func__);
- mutex_unlock(&ioc->transport_cmds.mutex);
- return -EAGAIN;
- }
- ioc->transport_cmds.status = LEAPIORAID_CMD_PENDING;
- rc = leapioraid_wait_for_ioc_to_operational(ioc, 10);
- if (rc)
- goto out;
- smid = leapioraid_base_get_smid(ioc, ioc->transport_cb_idx);
- if (!smid) {
- pr_err("%s %s: failed obtaining a smid\n",
- ioc->name, __func__);
- rc = -EAGAIN;
- goto out;
- }
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- ioc->transport_cmds.smid = smid;
- sz = sizeof(struct leapioraid_phy_error_log_request) +
- sizeof(struct leapioraid_phy_error_log_reply);
- data_out =
- dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma,
- GFP_ATOMIC);
- if (!data_out) {
- pr_err("failure at %s:%d/%s()!\n", __FILE__,
- __LINE__, __func__);
- rc = -ENOMEM;
- leapioraid_base_free_smid(ioc, smid);
- goto out;
- }
- rc = -EINVAL;
- memset(data_out, 0, sz);
- phy_error_log_request = data_out;
- phy_error_log_request->smp_frame_type = 0x40;
- phy_error_log_request->function = 0x11;
- phy_error_log_request->request_length = 2;
- phy_error_log_request->allocated_response_length = 0;
- phy_error_log_request->phy_identifier = phy->number;
- memset(mpi_request, 0, sizeof(struct LeapioraidSmpPassthroughReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_SMP_PASSTHROUGH;
- mpi_request->PhysicalPort = leapioraid_transport_get_port_id_by_sas_phy(phy);
- mpi_request->VF_ID = 0;
- mpi_request->VP_ID = 0;
- mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
- mpi_request->RequestDataLength =
- cpu_to_le16(sizeof(struct leapioraid_phy_error_log_request));
- psge = &mpi_request->SGL;
- ioc->build_sg(ioc, psge, data_out_dma,
- sizeof(struct leapioraid_phy_error_log_request),
- data_out_dma + sizeof(struct leapioraid_phy_error_log_request),
- sizeof(struct leapioraid_phy_error_log_reply));
- dtransportprintk(ioc, pr_info(
- "%s phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n",
- ioc->name,
- (unsigned long long)phy->identify.sas_address,
- phy->number));
- init_completion(&ioc->transport_cmds.done);
- ioc->put_smid_default(ioc, smid);
- wait_for_completion_timeout(&ioc->transport_cmds.done, 10 * HZ);
- if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
- pr_err("%s %s: timeout\n",
- ioc->name, __func__);
- leapioraid_debug_dump_mf(mpi_request,
- sizeof(struct LeapioraidSmpPassthroughReq_t) / 4);
- if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_RESET))
- issue_reset = 1;
- goto issue_host_reset;
- }
- dtransportprintk(ioc, pr_info("%s phy_error_log - complete\n", ioc->name));
- if (ioc->transport_cmds.status & LEAPIORAID_CMD_REPLY_VALID) {
- mpi_reply = ioc->transport_cmds.reply;
- dtransportprintk(ioc, pr_err(
- "%s phy_error_log - reply data transfer size(%d)\n",
- ioc->name,
- le16_to_cpu(mpi_reply->ResponseDataLength)));
- if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
- sizeof(struct leapioraid_phy_error_log_reply))
- goto out;
- phy_error_log_reply = data_out +
- sizeof(struct leapioraid_phy_error_log_request);
- dtransportprintk(ioc, pr_err(
- "%s phy_error_log - function_result(%d)\n",
- ioc->name,
- phy_error_log_reply->function_result));
- phy->invalid_dword_count =
- be32_to_cpu(phy_error_log_reply->invalid_dword);
- phy->running_disparity_error_count =
- be32_to_cpu(phy_error_log_reply->running_disparity_error);
- phy->loss_of_dword_sync_count =
- be32_to_cpu(phy_error_log_reply->loss_of_dword_sync);
- phy->phy_reset_problem_count =
- be32_to_cpu(phy_error_log_reply->phy_reset_problem);
- rc = 0;
- } else
- dtransportprintk(ioc, pr_err(
- "%s phy_error_log - no reply\n",
- ioc->name));
-issue_host_reset:
- if (issue_reset)
- leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
-out:
- ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED;
- if (data_out)
- dma_free_coherent(&ioc->pdev->dev, sz, data_out, data_out_dma);
- mutex_unlock(&ioc->transport_cmds.mutex);
- return rc;
-}
-
-static int
-leapioraid_transport_get_linkerrors(struct sas_phy *phy)
-{
- struct LEAPIORAID_ADAPTER *ioc = phy_to_ioc(phy);
- struct LeapioraidCfgRep_t mpi_reply;
- struct LeapioraidSasPhyP1_t phy_pg1;
- int rc = 0;
-
- rc = leapioraid_transport_find_parent_node(ioc, phy);
- if (rc)
- return rc;
- if (phy->identify.sas_address != ioc->sas_hba.sas_address)
- return leapioraid_transport_get_expander_phy_error_log(ioc, phy);
- if ((leapioraid_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1,
- phy->number))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return -ENXIO;
- }
- if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
- pr_info("%s phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name,
- phy->number,
- le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo));
- phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount);
- phy->running_disparity_error_count =
- le32_to_cpu(phy_pg1.RunningDisparityErrorCount);
- phy->loss_of_dword_sync_count =
- le32_to_cpu(phy_pg1.LossDwordSynchCount);
- phy->phy_reset_problem_count =
- le32_to_cpu(phy_pg1.PhyResetProblemCount);
- return 0;
-}
-
-static int
-leapioraid_transport_get_enclosure_identifier(
- struct sas_rphy *rphy, u64 *identifier)
-{
- struct LEAPIORAID_ADAPTER *ioc = rphy_to_ioc(rphy);
- struct leapioraid_sas_device *sas_device;
- unsigned long flags;
- int rc;
-
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __leapioraid_get_sdev_by_addr_and_rphy(ioc,
- rphy->identify.sas_address, rphy);
- if (sas_device) {
- *identifier = sas_device->enclosure_logical_id;
- rc = 0;
- leapioraid_sas_device_put(sas_device);
- } else {
- *identifier = 0;
- rc = -ENXIO;
- }
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return rc;
-}
-
-static int
-leapioraid_transport_get_bay_identifier(struct sas_rphy *rphy)
-{
- struct LEAPIORAID_ADAPTER *ioc = rphy_to_ioc(rphy);
- struct leapioraid_sas_device *sas_device;
- unsigned long flags;
- int rc;
-
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = __leapioraid_get_sdev_by_addr_and_rphy(ioc,
- rphy->identify.sas_address, rphy);
- if (sas_device) {
- rc = sas_device->slot;
- leapioraid_sas_device_put(sas_device);
- } else {
- rc = -ENXIO;
- }
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- return rc;
-}
-
-struct leapioraid_phy_control_request {
- u8 smp_frame_type;
- u8 function;
- u8 allocated_response_length;
- u8 request_length;
- u16 expander_change_count;
- u8 reserved_1[3];
- u8 phy_identifier;
- u8 phy_operation;
- u8 reserved_2[13];
- u64 attached_device_name;
- u8 programmed_min_physical_link_rate;
- u8 programmed_max_physical_link_rate;
- u8 reserved_3[6];
-};
-
-struct leapioraid_phy_control_reply {
- u8 smp_frame_type;
- u8 function;
- u8 function_result;
- u8 response_length;
-};
-
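-/*
- * As above, these structures mirror the SMP PHY CONTROL frames
- * (smp_frame_type 0x40, function 0x91) built in
- * leapioraid_transport_expander_phy_control() below.
- */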
-#define LEAPIORAID_SMP_PHY_CONTROL_LINK_RESET (0x01)
-#define LEAPIORAID_SMP_PHY_CONTROL_HARD_RESET (0x02)
-#define LEAPIORAID_SMP_PHY_CONTROL_DISABLE (0x03)
-static int
-leapioraid_transport_expander_phy_control(
- struct LEAPIORAID_ADAPTER *ioc,
- struct sas_phy *phy, u8 phy_operation)
-{
- struct LeapioraidSmpPassthroughReq_t *mpi_request;
- struct LeapioraidSmpPassthroughRep_t *mpi_reply;
- struct leapioraid_phy_control_request *phy_control_request;
- struct leapioraid_phy_control_reply *phy_control_reply;
- int rc;
- u16 smid;
- void *psge;
- u8 issue_reset = 0;
- void *data_out = NULL;
- dma_addr_t data_out_dma;
- u32 sz;
-
- if (ioc->shost_recovery || ioc->pci_error_recovery) {
- pr_info("%s %s: host reset in progress!\n",
- __func__, ioc->name);
- return -EFAULT;
- }
- mutex_lock(&ioc->transport_cmds.mutex);
- if (ioc->transport_cmds.status != LEAPIORAID_CMD_NOT_USED) {
- pr_err("%s %s: transport_cmds in use\n",
- ioc->name, __func__);
- mutex_unlock(&ioc->transport_cmds.mutex);
- return -EAGAIN;
- }
- ioc->transport_cmds.status = LEAPIORAID_CMD_PENDING;
- rc = leapioraid_wait_for_ioc_to_operational(ioc, 10);
- if (rc)
- goto out;
- smid = leapioraid_base_get_smid(ioc, ioc->transport_cb_idx);
- if (!smid) {
- pr_err("%s %s: failed obtaining a smid\n",
- ioc->name, __func__);
- rc = -EAGAIN;
- goto out;
- }
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- ioc->transport_cmds.smid = smid;
- sz = sizeof(struct leapioraid_phy_control_request) +
- sizeof(struct leapioraid_phy_control_reply);
- data_out =
- dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma,
- GFP_ATOMIC);
- if (!data_out) {
- pr_err("failure at %s:%d/%s()!\n", __FILE__,
- __LINE__, __func__);
- rc = -ENOMEM;
- leapioraid_base_free_smid(ioc, smid);
- goto out;
- }
- rc = -EINVAL;
- memset(data_out, 0, sz);
- phy_control_request = data_out;
- phy_control_request->smp_frame_type = 0x40;
- phy_control_request->function = 0x91;
- phy_control_request->request_length = 9;
- phy_control_request->allocated_response_length = 0;
- phy_control_request->phy_identifier = phy->number;
- phy_control_request->phy_operation = phy_operation;
- phy_control_request->programmed_min_physical_link_rate =
- phy->minimum_linkrate << 4;
- phy_control_request->programmed_max_physical_link_rate =
- phy->maximum_linkrate << 4;
- memset(mpi_request, 0, sizeof(struct LeapioraidSmpPassthroughReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_SMP_PASSTHROUGH;
- mpi_request->PhysicalPort = leapioraid_transport_get_port_id_by_sas_phy(phy);
- mpi_request->VF_ID = 0;
- mpi_request->VP_ID = 0;
- mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
- mpi_request->RequestDataLength =
- cpu_to_le16(sizeof(struct leapioraid_phy_error_log_request));
- psge = &mpi_request->SGL;
- ioc->build_sg(ioc, psge, data_out_dma,
- sizeof(struct leapioraid_phy_control_request),
- data_out_dma + sizeof(struct leapioraid_phy_control_request),
- sizeof(struct leapioraid_phy_control_reply));
- dtransportprintk(ioc, pr_info(
- "%s phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
- ioc->name,
- (unsigned long long)phy->identify.sas_address,
- phy->number, phy_operation));
- init_completion(&ioc->transport_cmds.done);
- ioc->put_smid_default(ioc, smid);
- wait_for_completion_timeout(&ioc->transport_cmds.done, 10 * HZ);
- if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
- pr_err("%s %s: timeout\n",
- ioc->name, __func__);
- leapioraid_debug_dump_mf(mpi_request,
- sizeof(struct LeapioraidSmpPassthroughReq_t) / 4);
- if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_RESET))
- issue_reset = 1;
- goto issue_host_reset;
- }
- dtransportprintk(ioc, pr_info(
- "%s phy_control - complete\n", ioc->name));
- if (ioc->transport_cmds.status & LEAPIORAID_CMD_REPLY_VALID) {
- mpi_reply = ioc->transport_cmds.reply;
- dtransportprintk(ioc, pr_err(
- "%s phy_control - reply data transfer size(%d)\n",
- ioc->name,
- le16_to_cpu(mpi_reply->ResponseDataLength)));
- if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
- sizeof(struct leapioraid_phy_control_reply))
- goto out;
- phy_control_reply = data_out +
- sizeof(struct leapioraid_phy_control_request);
- dtransportprintk(ioc, pr_err(
- "%s phy_control - function_result(%d)\n",
- ioc->name,
- phy_control_reply->function_result));
- rc = 0;
- } else
- dtransportprintk(ioc, pr_err(
- "%s phy_control - no reply\n",
- ioc->name));
-issue_host_reset:
- if (issue_reset)
- leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
-out:
- ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED;
- if (data_out)
- dma_free_coherent(&ioc->pdev->dev, sz, data_out, data_out_dma);
- mutex_unlock(&ioc->transport_cmds.mutex);
- return rc;
-}
-
-static int
-leapioraid_transport_phy_reset(struct sas_phy *phy, int hard_reset)
-{
- struct LEAPIORAID_ADAPTER *ioc = phy_to_ioc(phy);
- struct LeapioraidSasIoUnitControlRep_t mpi_reply;
- struct LeapioraidSasIoUnitControlReq_t mpi_request;
- int rc = 0;
-
- rc = leapioraid_transport_find_parent_node(ioc, phy);
- if (rc)
- return rc;
- if (phy->identify.sas_address != ioc->sas_hba.sas_address)
- return leapioraid_transport_expander_phy_control(ioc, phy,
- hard_reset ? LEAPIORAID_SMP_PHY_CONTROL_HARD_RESET :
- LEAPIORAID_SMP_PHY_CONTROL_LINK_RESET);
- memset(&mpi_request, 0, sizeof(struct LeapioraidSasIoUnitControlReq_t));
- mpi_request.Function = LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL;
- mpi_request.Operation = hard_reset ?
- LEAPIORAID_SAS_OP_PHY_HARD_RESET : LEAPIORAID_SAS_OP_PHY_LINK_RESET;
- mpi_request.PhyNum = phy->number;
- if ((leapioraid_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return -ENXIO;
- }
- if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
- pr_info("%s phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name,
- phy->number,
- le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo));
- return 0;
-}
-
-static int
-leapioraid_transport_phy_enable(struct sas_phy *phy, int enable)
-{
- struct LEAPIORAID_ADAPTER *ioc = phy_to_ioc(phy);
- struct LeapioraidSasIOUnitP1_t *sas_iounit_pg1 = NULL;
- struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL;
- struct LeapioraidCfgRep_t mpi_reply;
- u16 ioc_status;
- u16 sz;
- int rc = 0;
- int i, discovery_active;
-
- rc = leapioraid_transport_find_parent_node(ioc, phy);
- if (rc)
- return rc;
- if (phy->identify.sas_address != ioc->sas_hba.sas_address)
- return leapioraid_transport_expander_phy_control(ioc, phy,
- enable ? LEAPIORAID_SMP_PHY_CONTROL_LINK_RESET :
- LEAPIORAID_SMP_PHY_CONTROL_DISABLE);
- sz = offsetof(struct LeapioraidSasIOUnitP0_t,
- PhyData) +
- (ioc->sas_hba.num_phys * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA));
- sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
- if (!sas_iounit_pg0) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = -ENOMEM;
- goto out;
- }
- if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply,
- sas_iounit_pg0, sz))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = -ENXIO;
- goto out;
- }
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = -EIO;
- goto out;
- }
- for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys; i++) {
- if (sas_iounit_pg0->PhyData[i].PortFlags &
- LEAPIORAID_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) {
- pr_err(
- "%s discovery is active on port = %d, phy = %d:\n\t\t"
- "unable to enable/disable phys, try again later!\n",
- ioc->name,
- sas_iounit_pg0->PhyData[i].Port,
- i);
- discovery_active = 1;
- }
- }
- if (discovery_active) {
- rc = -EAGAIN;
- goto out;
- }
- sz = offsetof(struct LeapioraidSasIOUnitP1_t,
- PhyData) +
- (ioc->sas_hba.num_phys * sizeof(struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA));
- sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
- if (!sas_iounit_pg1) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = -ENOMEM;
- goto out;
- }
- if ((leapioraid_config_get_sas_iounit_pg1(ioc, &mpi_reply,
- sas_iounit_pg1, sz))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = -ENXIO;
- goto out;
- }
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = -EIO;
- goto out;
- }
- for (i = 0; i < ioc->sas_hba.num_phys; i++) {
- sas_iounit_pg1->PhyData[i].Port =
- sas_iounit_pg0->PhyData[i].Port;
- sas_iounit_pg1->PhyData[i].PortFlags =
- (sas_iounit_pg0->PhyData[i].PortFlags &
- LEAPIORAID_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG);
- sas_iounit_pg1->PhyData[i].PhyFlags =
- (sas_iounit_pg0->PhyData[i].PhyFlags &
- (LEAPIORAID_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED +
- LEAPIORAID_SASIOUNIT0_PHYFLAGS_PHY_DISABLED));
- }
- if (enable)
- sas_iounit_pg1->PhyData[phy->number].PhyFlags
- &= ~LEAPIORAID_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
- else
- sas_iounit_pg1->PhyData[phy->number].PhyFlags
- |= LEAPIORAID_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
- leapioraid_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
- sz);
- if (enable)
- leapioraid_transport_phy_reset(phy, 0);
-out:
- kfree(sas_iounit_pg1);
- kfree(sas_iounit_pg0);
- return rc;
-}
-
-static int
-leapioraid_transport_phy_speed(
- struct sas_phy *phy, struct sas_phy_linkrates *rates)
-{
- struct LEAPIORAID_ADAPTER *ioc = phy_to_ioc(phy);
- struct LeapioraidSasIOUnitP1_t *sas_iounit_pg1 = NULL;
- struct LeapioraidSasPhyP0_t phy_pg0;
- struct LeapioraidCfgRep_t mpi_reply;
- u16 ioc_status;
- u16 sz;
- int i;
- int rc = 0;
-
- rc = leapioraid_transport_find_parent_node(ioc, phy);
- if (rc)
- return rc;
- if (!rates->minimum_linkrate)
- rates->minimum_linkrate = phy->minimum_linkrate;
- else if (rates->minimum_linkrate < phy->minimum_linkrate_hw)
- rates->minimum_linkrate = phy->minimum_linkrate_hw;
- if (!rates->maximum_linkrate)
- rates->maximum_linkrate = phy->maximum_linkrate;
- else if (rates->maximum_linkrate > phy->maximum_linkrate_hw)
- rates->maximum_linkrate = phy->maximum_linkrate_hw;
- if (phy->identify.sas_address != ioc->sas_hba.sas_address) {
- phy->minimum_linkrate = rates->minimum_linkrate;
- phy->maximum_linkrate = rates->maximum_linkrate;
- return leapioraid_transport_expander_phy_control(ioc, phy,
- LEAPIORAID_SMP_PHY_CONTROL_LINK_RESET);
- }
- sz = offsetof(struct LeapioraidSasIOUnitP1_t,
- PhyData) +
- (ioc->sas_hba.num_phys * sizeof(struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA));
- sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
- if (!sas_iounit_pg1) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = -ENOMEM;
- goto out;
- }
- if ((leapioraid_config_get_sas_iounit_pg1(ioc, &mpi_reply,
- sas_iounit_pg1, sz))) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = -ENXIO;
- goto out;
- }
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK;
- if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = -EIO;
- goto out;
- }
- for (i = 0; i < ioc->sas_hba.num_phys; i++) {
- if (phy->number != i) {
- sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
- (ioc->sas_hba.phy[i].phy->minimum_linkrate +
- (ioc->sas_hba.phy[i].phy->maximum_linkrate << 4));
- } else {
- sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
- (rates->minimum_linkrate +
- (rates->maximum_linkrate << 4));
- }
- }
- if (leapioraid_config_set_sas_iounit_pg1
- (ioc, &mpi_reply, sas_iounit_pg1, sz)) {
- pr_err("%s failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- rc = -ENXIO;
- goto out;
- }
- leapioraid_transport_phy_reset(phy, 0);
- if (!leapioraid_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
- phy->number)) {
- phy->minimum_linkrate =
- leapioraid_transport_convert_phy_link_rate(
- phy_pg0.ProgrammedLinkRate &
- LEAPIORAID_SAS_PRATE_MIN_RATE_MASK);
- phy->maximum_linkrate =
- leapioraid_transport_convert_phy_link_rate(
- phy_pg0.ProgrammedLinkRate >> 4);
- phy->negotiated_linkrate =
- leapioraid_transport_convert_phy_link_rate(
- phy_pg0.NegotiatedLinkRate &
- LEAPIORAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
- }
-out:
- kfree(sas_iounit_pg1);
- return rc;
-}
-
-static int
-leapioraid_transport_map_smp_buffer(
- struct device *dev, struct bsg_buffer *buf,
- dma_addr_t *dma_addr, size_t *dma_len, void **p)
-{
- if (buf->sg_cnt > 1) {
- *p = dma_alloc_coherent(dev, buf->payload_len, dma_addr,
- GFP_KERNEL);
- if (!*p)
- return -ENOMEM;
- *dma_len = buf->payload_len;
- } else {
- if (!dma_map_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL))
- return -ENOMEM;
- *dma_addr = sg_dma_address(buf->sg_list);
- *dma_len = sg_dma_len(buf->sg_list);
- *p = NULL;
- }
- return 0;
-}
-
-static void
-leapioraid_transport_unmap_smp_buffer(
- struct device *dev, struct bsg_buffer *buf,
- dma_addr_t dma_addr, void *p)
-{
- if (p)
- dma_free_coherent(dev, buf->payload_len, p, dma_addr);
- else
- dma_unmap_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL);
-}
-
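-/*
- * Note on the two helpers above: a payload spanning more than one
- * scatterlist entry is bounced through a single coherent buffer (*p set),
- * while a single-entry payload is DMA-mapped in place (*p left NULL);
- * leapioraid_transport_unmap_smp_buffer() uses p to tell the cases apart.
- */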
-static void
-leapioraid_transport_smp_handler(
- struct bsg_job *job, struct Scsi_Host *shost,
- struct sas_rphy *rphy)
-{
- struct LEAPIORAID_ADAPTER *ioc = shost_priv(shost);
- struct LeapioraidSmpPassthroughReq_t *mpi_request;
- struct LeapioraidSmpPassthroughRep_t *mpi_reply;
- int rc;
- u16 smid;
- u32 ioc_state;
- void *psge;
- dma_addr_t dma_addr_in;
- dma_addr_t dma_addr_out;
- void *addr_in = NULL;
- void *addr_out = NULL;
- size_t dma_len_in;
- size_t dma_len_out;
- u16 wait_state_count;
- unsigned int reslen = 0;
-
- if (ioc->shost_recovery || ioc->pci_error_recovery) {
- pr_info("%s %s: host reset in progress!\n",
- __func__, ioc->name);
- rc = -EFAULT;
- goto job_done;
- }
- rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
- if (rc)
- goto job_done;
- if (ioc->transport_cmds.status != LEAPIORAID_CMD_NOT_USED) {
- pr_err("%s %s: transport_cmds in use\n",
- ioc->name, __func__);
- mutex_unlock(&ioc->transport_cmds.mutex);
- rc = -EAGAIN;
- goto job_done;
- }
- ioc->transport_cmds.status = LEAPIORAID_CMD_PENDING;
- rc = leapioraid_transport_map_smp_buffer(
- &ioc->pdev->dev, &job->request_payload,
- &dma_addr_out, &dma_len_out, &addr_out);
- if (rc)
- goto out;
- if (addr_out) {
- sg_copy_to_buffer(job->request_payload.sg_list,
- job->request_payload.sg_cnt, addr_out,
- job->request_payload.payload_len);
- }
- rc = leapioraid_transport_map_smp_buffer(
- &ioc->pdev->dev, &job->reply_payload,
- &dma_addr_in, &dma_len_in, &addr_in);
- if (rc)
- goto unmap_out;
- wait_state_count = 0;
- ioc_state = leapioraid_base_get_iocstate(ioc, 1);
- while (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) {
- if (wait_state_count++ == 10) {
- pr_err(
- "%s %s: failed due to ioc not operational\n",
- ioc->name, __func__);
- rc = -EFAULT;
- goto unmap_in;
- }
- ssleep(1);
- ioc_state = leapioraid_base_get_iocstate(ioc, 1);
- pr_info(
- "%s %s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
- }
- if (wait_state_count)
- pr_info("%s %s: ioc is operational\n",
- ioc->name, __func__);
- smid = leapioraid_base_get_smid(ioc, ioc->transport_cb_idx);
- if (!smid) {
- pr_err("%s %s: failed obtaining a smid\n",
- ioc->name, __func__);
- rc = -EAGAIN;
- goto unmap_in;
- }
- rc = 0;
- mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
- ioc->transport_cmds.smid = smid;
- memset(mpi_request, 0, sizeof(struct LeapioraidSmpPassthroughReq_t));
- mpi_request->Function = LEAPIORAID_FUNC_SMP_PASSTHROUGH;
- mpi_request->PhysicalPort = leapioraid_transport_get_port_id_by_rphy(
- ioc, rphy);
- mpi_request->SASAddress = (rphy) ?
- cpu_to_le64(rphy->identify.sas_address) :
- cpu_to_le64(ioc->sas_hba.sas_address);
- mpi_request->RequestDataLength = cpu_to_le16(dma_len_out - 4);
- psge = &mpi_request->SGL;
- ioc->build_sg(ioc, psge, dma_addr_out, dma_len_out - 4, dma_addr_in,
- dma_len_in - 4);
- dtransportprintk(ioc, pr_info(
- "%s %s - sending smp request\n", ioc->name,
- __func__));
- init_completion(&ioc->transport_cmds.done);
- ioc->put_smid_default(ioc, smid);
- wait_for_completion_timeout(&ioc->transport_cmds.done, 10 * HZ);
- if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
- pr_err("%s %s : timeout\n", __func__, ioc->name);
- leapioraid_debug_dump_mf(mpi_request,
- sizeof(struct LeapioraidSmpPassthroughReq_t) / 4);
- if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_RESET)) {
- leapioraid_base_hard_reset_handler(ioc,
- FORCE_BIG_HAMMER);
- rc = -ETIMEDOUT;
- goto unmap_in;
- }
- }
- dtransportprintk(ioc, pr_info(
- "%s %s - complete\n", ioc->name, __func__));
- if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_REPLY_VALID)) {
- dtransportprintk(ioc, pr_info(
- "%s %s - no reply\n", ioc->name,
- __func__));
- rc = -ENXIO;
- goto unmap_in;
- }
- mpi_reply = ioc->transport_cmds.reply;
- dtransportprintk(ioc,
- pr_info(
- "%s %s - reply data transfer size(%d)\n",
- ioc->name, __func__,
- le16_to_cpu(mpi_reply->ResponseDataLength)));
- memcpy(job->reply, mpi_reply, sizeof(*mpi_reply));
- job->reply_len = sizeof(*mpi_reply);
- reslen = le16_to_cpu(mpi_reply->ResponseDataLength);
- if (addr_in) {
- sg_copy_from_buffer(job->reply_payload.sg_list,
- job->reply_payload.sg_cnt, addr_in,
- job->reply_payload.payload_len);
- }
- rc = 0;
-unmap_in:
- leapioraid_transport_unmap_smp_buffer(
- &ioc->pdev->dev, &job->reply_payload,
- dma_addr_in, addr_in);
-unmap_out:
- leapioraid_transport_unmap_smp_buffer(
- &ioc->pdev->dev, &job->request_payload,
- dma_addr_out, addr_out);
-out:
- ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED;
- mutex_unlock(&ioc->transport_cmds.mutex);
-job_done:
- bsg_job_done(job, rc, reslen);
-}
-
-struct sas_function_template leapioraid_transport_functions = {
- .get_linkerrors = leapioraid_transport_get_linkerrors,
- .get_enclosure_identifier = leapioraid_transport_get_enclosure_identifier,
- .get_bay_identifier = leapioraid_transport_get_bay_identifier,
- .phy_reset = leapioraid_transport_phy_reset,
- .phy_enable = leapioraid_transport_phy_enable,
- .set_phy_speed = leapioraid_transport_phy_speed,
- .smp_handler = leapioraid_transport_smp_handler,
-};
-
-struct scsi_transport_template *leapioraid_transport_template;
diff --git a/drivers/scsi/leapraid/Kconfig b/drivers/scsi/leapraid/Kconfig
new file mode 100644
index 000000000000..b539183b24a7
--- /dev/null
+++ b/drivers/scsi/leapraid/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config SCSI_LEAPRAID
+ tristate "LeapIO RAID Adapter"
+ depends on PCI && SCSI
+ select SCSI_SAS_ATTRS
+ help
+ This driver supports LeapIO PCIe-based storage
+ and RAID controllers.
+
+ <http://www.leap-io.com>
+
+ To compile this driver as a module, choose M here: the
+ resulting kernel module will be named leapraid.
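+# After an M build, the driver would presumably be loaded with:
+#   modprobe leapraid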
diff --git a/drivers/scsi/leapraid/Makefile b/drivers/scsi/leapraid/Makefile
new file mode 100644
index 000000000000..bdafc036cd00
--- /dev/null
+++ b/drivers/scsi/leapraid/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the LEAPRAID drivers.
+#
+
+obj-$(CONFIG_SCSI_LEAPRAID) += leapraid.o
+leapraid-objs += leapraid_func.o \
+ leapraid_os.o \
+ leapraid_transport.o \
+ leapraid_app.o
diff --git a/drivers/scsi/leapraid/leapraid.h b/drivers/scsi/leapraid/leapraid.h
new file mode 100644
index 000000000000..842810d41542
--- /dev/null
+++ b/drivers/scsi/leapraid/leapraid.h
@@ -0,0 +1,2070 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 LeapIO Tech Inc.
+ *
+ * LeapRAID Storage and RAID Controller driver.
+ */
+#ifndef LEAPRAID_H
+#define LEAPRAID_H
+
+/* doorbell register definitions */
+#define LEAPRAID_DB_RESET 0x00000000
+#define LEAPRAID_DB_READY 0x10000000
+#define LEAPRAID_DB_OPERATIONAL 0x20000000
+#define LEAPRAID_DB_FAULT 0x40000000
+
+#define LEAPRAID_DB_MASK 0xF0000000
+
+#define LEAPRAID_DB_OVER_TEMPERATURE 0x2810
+
+#define LEAPRAID_DB_USED 0x08000000
+#define LEAPRAID_DB_DATA_MASK 0x0000FFFF
+#define LEAPRAID_DB_FUNC_SHIFT 24
+#define LEAPRAID_DB_ADD_DWORDS_SHIFT 16
+
+/* maximum number of retries waiting for doorbell to become ready */
+#define LEAPRAID_DB_RETRY_COUNT_MAX 10
+/* maximum number of retries waiting for doorbell to become operational */
+#define LEAPRAID_DB_WAIT_OPERATIONAL 10
+/* sleep interval (in seconds) between doorbell polls */
+#define LEAPRAID_DB_POLL_INTERVAL_S 1
+
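+/*
+ * Illustrative sketch, not part of this driver: the constants above are
+ * enough to poll the doorbell until the firmware reports ready, e.g.
+ *
+ *	for (i = 0; i < LEAPRAID_DB_RETRY_COUNT_MAX; i++) {
+ *		if ((readl(&regs->db) & LEAPRAID_DB_MASK) ==
+ *		    LEAPRAID_DB_READY)
+ *			break;
+ *		ssleep(LEAPRAID_DB_POLL_INTERVAL_S);
+ *	}
+ *
+ * where regs is a hypothetical mapped struct leapraid_reg_base pointer.
+ */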
+/* maximum number of retries waiting for host to end recovery */
+#define LEAPRAID_WAIT_SHOST_RECOVERY 30
+
+/* diagnostic register definitions */
+#define LEAPRAID_DIAG_WRITE_ENABLE 0x00000080
+#define LEAPRAID_DIAG_RESET 0x00000004
+#define LEAPRAID_DIAG_HOLD_ADAPTER_RESET 0x00000002
+
+/* interrupt status register definitions */
+#define LEAPRAID_HOST2ADAPTER_DB_STATUS 0x80000000
+#define LEAPRAID_ADAPTER2HOST_DB_STATUS 0x00000001
+
+/* maximum number of debug log registers */
+#define LEAPRAID_DEBUGLOG_SZ_MAX 16
+
+/* reply post host register defines */
+#define REP_POST_HOST_IDX_REG_CNT 16
+#define LEAPRAID_RPHI_MSIX_IDX_SHIFT 24
+
+/* vphy flags */
+#define LEAPRAID_SAS_PHYINFO_VPHY 0x00001000
+
+/* WhoInit value identifying the Linux host driver to firmware */
+#define LEAPRAID_WHOINIT_LINUX_DRIVER 0x04
+
+/* rdpq array mode */
+#define LEAPRAID_ADAPTER_INIT_MSGFLG_RDPQ_ARRAY_MODE 0x01
+
+/* request description flags */
+#define LEAPRAID_REQ_DESC_FLG_SCSI_IO 0x00
+#define LEAPRAID_REQ_DESC_FLG_HPR 0x06
+#define LEAPRAID_REQ_DESC_FLG_DFLT_TYPE 0x08
+
+/* reply description flags */
+#define LEAPRAID_RPY_DESC_FLG_TYPE_MASK 0x0F
+#define LEAPRAID_RPY_DESC_FLG_SCSI_IO_SUCCESS 0x00
+#define LEAPRAID_RPY_DESC_FLG_ADDRESS_REPLY 0x01
+#define LEAPRAID_RPY_DESC_FLG_FP_SCSI_IO_SUCCESS 0x06
+#define LEAPRAID_RPY_DESC_FLG_UNUSED 0x0F
+
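+/*
+ * Sketch under assumptions, not taken from this patch: a reply queue
+ * consumer would typically dispatch on the low type bits of a descriptor,
+ * e.g.
+ *
+ *	switch (desc_flags & LEAPRAID_RPY_DESC_FLG_TYPE_MASK) {
+ *	case LEAPRAID_RPY_DESC_FLG_SCSI_IO_SUCCESS:
+ *		... complete the SCSI command directly ...
+ *	case LEAPRAID_RPY_DESC_FLG_ADDRESS_REPLY:
+ *		... fetch the full reply frame by its address ...
+ *	}
+ *
+ * with LEAPRAID_RPY_DESC_FLG_UNUSED (0x0F) presumably marking an empty
+ * descriptor slot.
+ */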
+/* MPI functions */
+#define LEAPRAID_FUNC_SCSIIO_REQ 0x00
+#define LEAPRAID_FUNC_SCSI_TMF 0x01
+#define LEAPRAID_FUNC_ADAPTER_INIT 0x02
+#define LEAPRAID_FUNC_GET_ADAPTER_FEATURES 0x03
+#define LEAPRAID_FUNC_CONFIG_OP 0x04
+#define LEAPRAID_FUNC_SCAN_DEV 0x06
+#define LEAPRAID_FUNC_EVENT_NOTIFY 0x07
+#define LEAPRAID_FUNC_FW_DOWNLOAD 0x09
+#define LEAPRAID_FUNC_FW_UPLOAD 0x12
+#define LEAPRAID_FUNC_RAID_ACTION 0x15
+#define LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH 0x16
+#define LEAPRAID_FUNC_SCSI_ENC_PROCESSOR 0x18
+#define LEAPRAID_FUNC_SMP_PASSTHROUGH 0x1A
+#define LEAPRAID_FUNC_SAS_IO_UNIT_CTRL 0x1B
+#define LEAPRAID_FUNC_SATA_PASSTHROUGH 0x1C
+#define LEAPRAID_FUNC_ADAPTER_UNIT_RESET 0x40
+#define LEAPRAID_FUNC_HANDSHAKE 0x42
+#define LEAPRAID_FUNC_LOGBUF_INIT 0x57
+
+/* adapter status values */
+#define LEAPRAID_ADAPTER_STATUS_MASK 0x7FFF
+#define LEAPRAID_ADAPTER_STATUS_SUCCESS 0x0000
+#define LEAPRAID_ADAPTER_STATUS_BUSY 0x0002
+#define LEAPRAID_ADAPTER_STATUS_INTERNAL_ERROR 0x0004
+#define LEAPRAID_ADAPTER_STATUS_INSUFFICIENT_RESOURCES 0x0006
+#define LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_ACTION 0x0020
+#define LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_TYPE 0x0021
+#define LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_PAGE 0x0022
+#define LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_DATA 0x0023
+#define LEAPRAID_ADAPTER_STATUS_CONFIG_NO_DEFAULTS 0x0024
+#define LEAPRAID_ADAPTER_STATUS_CONFIG_CANT_COMMIT 0x0025
+#define LEAPRAID_ADAPTER_STATUS_SCSI_RECOVERED_ERROR 0x0040
+#define LEAPRAID_ADAPTER_STATUS_SCSI_DEVICE_NOT_THERE 0x0043
+#define LEAPRAID_ADAPTER_STATUS_SCSI_DATA_OVERRUN 0x0044
+#define LEAPRAID_ADAPTER_STATUS_SCSI_DATA_UNDERRUN 0x0045
+#define LEAPRAID_ADAPTER_STATUS_SCSI_IO_DATA_ERROR 0x0046
+#define LEAPRAID_ADAPTER_STATUS_SCSI_PROTOCOL_ERROR 0x0047
+#define LEAPRAID_ADAPTER_STATUS_SCSI_TASK_TERMINATED 0x0048
+#define LEAPRAID_ADAPTER_STATUS_SCSI_RESIDUAL_MISMATCH 0x0049
+#define LEAPRAID_ADAPTER_STATUS_SCSI_TASK_MGMT_FAILED 0x004A
+#define LEAPRAID_ADAPTER_STATUS_SCSI_ADAPTER_TERMINATED 0x004B
+#define LEAPRAID_ADAPTER_STATUS_SCSI_EXT_TERMINATED 0x004C
+
+/* sge flags */
+#define LEAPRAID_SGE_FLG_LAST_ONE 0x80
+#define LEAPRAID_SGE_FLG_EOB 0x40
+#define LEAPRAID_SGE_FLG_EOL 0x01
+#define LEAPRAID_SGE_FLG_SHIFT 24
+#define LEAPRAID_SGE_FLG_SIMPLE_ONE 0x10
+#define LEAPRAID_SGE_FLG_SYSTEM_ADDR 0x00
+#define LEAPRAID_SGE_FLG_H2C 0x04
+#define LEAPRAID_SGE_FLG_32 0x00
+#define LEAPRAID_SGE_FLG_64 0x02
+
+#define LEAPRAID_IEEE_SGE_FLG_EOL 0x40
+#define LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE 0x00
+#define LEAPRAID_IEEE_SGE_FLG_CHAIN_ONE 0x80
+#define LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR 0x00
+
+#define LEAPRAID_SGE_OFFSET_SIZE 4
+
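+/*
+ * Illustrative, assuming the MPI-style SGE layout implied by
+ * LEAPRAID_SGE_FLG_SHIFT: the flags occupy the top byte of a simple
+ * element's FlagsLength word, so the last 64-bit simple SGE describing a
+ * buffer of len bytes could be encoded as
+ *
+ *	flags_length = ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ *			 LEAPRAID_SGE_FLG_LAST_ONE |
+ *			 LEAPRAID_SGE_FLG_EOB |
+ *			 LEAPRAID_SGE_FLG_EOL |
+ *			 LEAPRAID_SGE_FLG_64) <<
+ *			LEAPRAID_SGE_FLG_SHIFT) | len;
+ */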
+/* page and extended page types */
+#define LEAPRAID_CFG_PT_IO_UNIT 0x00
+#define LEAPRAID_CFG_PT_ADAPTER 0x01
+#define LEAPRAID_CFG_PT_BIOS 0x02
+#define LEAPRAID_CFG_PT_RAID_VOLUME 0x08
+#define LEAPRAID_CFG_PT_RAID_PHYSDISK 0x0A
+#define LEAPRAID_CFG_PT_EXTENDED 0x0F
+#define LEAPRAID_CFG_EXTPT_SAS_IO_UNIT 0x10
+#define LEAPRAID_CFG_EXTPT_SAS_EXP 0x11
+#define LEAPRAID_CFG_EXTPT_SAS_DEV 0x12
+#define LEAPRAID_CFG_EXTPT_SAS_PHY 0x13
+#define LEAPRAID_CFG_EXTPT_ENC 0x15
+#define LEAPRAID_CFG_EXTPT_RAID_CONFIG 0x16
+
+/* config page address */
+#define LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP 0x00000000
+#define LEAPRAID_SAS_ENC_CFG_PGAD_HDL 0x10000000
+#define LEAPRAID_SAS_DEV_CFG_PGAD_HDL 0x20000000
+#define LEAPRAID_SAS_EXP_CFG_PGAD_HDL_PHY_NUM 0x10000000
+#define LEAPRAID_SAS_EXP_CFD_PGAD_HDL 0x20000000
+#define LEAPRAID_SAS_EXP_CFG_PGAD_PHYNUM_SHIFT 16
+#define LEAPRAID_RAID_VOL_CFG_PGAD_HDL 0x10000000
+#define LEAPRAID_SAS_PHY_CFG_PGAD_PHY_NUMBER 0x00000000
+#define LEAPRAID_PHYSDISK_CFG_PGAD_PHYSDISKNUM 0x10000000
+
+/* config page operations */
+#define LEAPRAID_CFG_ACT_PAGE_HEADER 0x00
+#define LEAPRAID_CFG_ACT_PAGE_READ_CUR 0x01
+#define LEAPRAID_CFG_ACT_PAGE_WRITE_CUR 0x02
+
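+/*
+ * Assumption based on the action codes above (not spelled out in this
+ * patch): config access is normally a two-step exchange -- a
+ * LEAPRAID_CFG_ACT_PAGE_HEADER request first returns the page header and
+ * length, then LEAPRAID_CFG_ACT_PAGE_READ_CUR or
+ * LEAPRAID_CFG_ACT_PAGE_WRITE_CUR transfers the page itself, addressed via
+ * the LEAPRAID_*_CFG_PGAD_* forms above.
+ */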
+/* bios pages */
+#define LEAPRAID_CFG_PAGE_NUM_BIOS2 0x2
+#define LEAPRAID_CFG_PAGE_NUM_BIOS3 0x3
+
+/* sas device pages */
+#define LEAPRAID_CFG_PAGE_NUM_DEV0 0x0
+
+/* sas device page 0 flags */
+#define LEAPRAID_SAS_DEV_P0_FLG_FP_CAP 0x2000
+#define LEAPRAID_SAS_DEV_P0_FLG_SATA_SMART 0x0040
+#define LEAPRAID_SAS_DEV_P0_FLG_ENC_LEVEL_VALID 0x0002
+#define LEAPRAID_SAS_DEV_P0_FLG_DEV_PRESENT 0x0001
+
+/* sas IO unit pages */
+#define LEAPRAID_CFG_PAGE_NUM_IOUNIT0 0x0
+#define LEAPRAID_CFG_PAGE_NUM_IOUNIT1 0x1
+
+/* sas expander pages */
+#define LEAPRAID_CFG_PAGE_NUM_EXP0 0x0
+#define LEAPRAID_CFG_PAGE_NUM_EXP1 0x1
+
+/* sas enclosure page */
+#define LEAPRAID_CFG_PAGE_NUM_ENC0 0x0
+
+/* sas phy page */
+#define LEAPRAID_CFG_PAGE_NUM_PHY0 0x0
+
+/* raid volume pages */
+#define LEAPRAID_CFG_PAGE_NUM_VOL0 0x0
+#define LEAPRAID_CFG_PAGE_NUM_VOL1 0x1
+
+/* physical disk page */
+#define LEAPRAID_CFG_PAGE_NUM_PD0 0x0
+
+/* adapter page */
+#define LEAPRAID_CFG_PAGE_NUM_ADAPTER1 0x1
+
+#define LEAPRAID_CFG_UNIT_SIZE 4
+
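+/*
+ * Presumably (by analogy with MPI config pages, not stated here) page
+ * lengths are counted in LEAPRAID_CFG_UNIT_SIZE 4-byte units, i.e.
+ * bytes = PageLength * LEAPRAID_CFG_UNIT_SIZE.
+ */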
+/* raid volume type and state */
+#define LEAPRAID_VOL_STATE_MISSING 0x00
+#define LEAPRAID_VOL_STATE_FAILED 0x01
+#define LEAPRAID_VOL_STATE_INITIALIZING 0x02
+#define LEAPRAID_VOL_STATE_ONLINE 0x03
+#define LEAPRAID_VOL_STATE_DEGRADED 0x04
+#define LEAPRAID_VOL_STATE_OPTIMAL 0x05
+#define LEAPRAID_VOL_TYPE_RAID0 0x00
+#define LEAPRAID_VOL_TYPE_RAID1E 0x01
+#define LEAPRAID_VOL_TYPE_RAID1 0x02
+#define LEAPRAID_VOL_TYPE_RAID10 0x05
+#define LEAPRAID_VOL_TYPE_UNKNOWN 0xFF
+
+/* raid volume element flags */
+#define LEAPRAID_RAIDCFG_P0_EFLG_MASK_ELEMENT_TYPE 0x000F
+#define LEAPRAID_RAIDCFG_P0_EFLG_VOL_PHYS_DISK_ELEMENT 0x0001
+#define LEAPRAID_RAIDCFG_P0_EFLG_HOT_SPARE_ELEMENT 0x0002
+#define LEAPRAID_RAIDCFG_P0_EFLG_OCE_ELEMENT 0x0003
+
+/* raid action */
+#define LEAPRAID_RAID_ACT_SYSTEM_SHUTDOWN_INITIATED 0x20
+#define LEAPRAID_RAID_ACT_PHYSDISK_HIDDEN 0x24
+
+/* sas negotiated link rates */
+#define LEAPRAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL 0x0F
+#define LEAPRAID_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE 0x00
+#define LEAPRAID_SAS_NEG_LINK_RATE_PHY_DISABLED 0x01
+#define LEAPRAID_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED 0x02
+#define LEAPRAID_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE 0x03
+#define LEAPRAID_SAS_NEG_LINK_RATE_PORT_SELECTOR 0x04
+#define LEAPRAID_SAS_NEG_LINK_RATE_SMP_RESETTING 0x05
+
+#define LEAPRAID_SAS_NEG_LINK_RATE_1_5 0x08
+#define LEAPRAID_SAS_NEG_LINK_RATE_3_0 0x09
+#define LEAPRAID_SAS_NEG_LINK_RATE_6_0 0x0A
+#define LEAPRAID_SAS_NEG_LINK_RATE_12_0 0x0B
+
+#define LEAPRAID_SAS_PRATE_MIN_RATE_MASK 0x0F
+#define LEAPRAID_SAS_HWRATE_MIN_RATE_MASK 0x0F
+
+/* scsi IO control bits */
+#define LEAPRAID_SCSIIO_CTRL_ADDCDBLEN_SHIFT 26
+#define LEAPRAID_SCSIIO_CTRL_NODATATRANSFER 0x00000000
+#define LEAPRAID_SCSIIO_CTRL_WRITE 0x01000000
+#define LEAPRAID_SCSIIO_CTRL_READ 0x02000000
+#define LEAPRAID_SCSIIO_CTRL_BIDIRECTIONAL 0x03000000
+#define LEAPRAID_SCSIIO_CTRL_SIMPLEQ 0x00000000
+#define LEAPRAID_SCSIIO_CTRL_ORDEREDQ 0x00000200
+#define LEAPRAID_SCSIIO_CTRL_CMDPRI 0x00000800
+
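+/*
+ * Minimal sketch, assuming the field layout implied by the shifts above:
+ * the Control word of a SCSI IO request combines direction, queue type and
+ * any additional CDB length, e.g. for a simple-queue read (add_cdb_dwords
+ * being a hypothetical count of extra CDB dwords):
+ *
+ *	control = LEAPRAID_SCSIIO_CTRL_READ |
+ *		  LEAPRAID_SCSIIO_CTRL_SIMPLEQ |
+ *		  (add_cdb_dwords << LEAPRAID_SCSIIO_CTRL_ADDCDBLEN_SHIFT);
+ */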
+/* scsi state and status */
+#define LEAPRAID_SCSI_STATUS_BUSY 0x08
+#define LEAPRAID_SCSI_STATUS_RESERVATION_CONFLICT 0x18
+#define LEAPRAID_SCSI_STATUS_TASK_SET_FULL 0x28
+
+#define LEAPRAID_SCSI_STATE_RESPONSE_INFO_VALID 0x10
+#define LEAPRAID_SCSI_STATE_TERMINATED 0x08
+#define LEAPRAID_SCSI_STATE_NO_SCSI_STATUS 0x04
+#define LEAPRAID_SCSI_STATE_AUTOSENSE_FAILED 0x02
+#define LEAPRAID_SCSI_STATE_AUTOSENSE_VALID 0x01
+
+/* scsi task management defines */
+#define LEAPRAID_TM_TASKTYPE_ABORT_TASK 0x01
+#define LEAPRAID_TM_TASKTYPE_ABRT_TASK_SET 0x02
+#define LEAPRAID_TM_TASKTYPE_TARGET_RESET 0x03
+#define LEAPRAID_TM_TASKTYPE_LOGICAL_UNIT_RESET 0x05
+#define LEAPRAID_TM_TASKTYPE_CLEAR_TASK_SET 0x06
+#define LEAPRAID_TM_TASKTYPE_QUERY_TASK 0x07
+#define LEAPRAID_TM_TASKTYPE_CLEAR_ACA 0x08
+#define LEAPRAID_TM_TASKTYPE_QUERY_TASK_SET 0x09
+#define LEAPRAID_TM_TASKTYPE_QUERY_ASYNC_EVENT 0x0A
+
+#define LEAPRAID_TM_MSGFLAGS_LINK_RESET 0x00
+#define LEAPRAID_TM_RSP_INVALID_FRAME 0x02
+#define LEAPRAID_TM_RSP_TM_SUCCEEDED 0x08
+#define LEAPRAID_TM_RSP_IO_QUEUED_ON_ADAPTER 0x80
+
+/* scsi sep request defines */
+#define LEAPRAID_SEP_REQ_ACT_WRITE_STATUS 0x00
+#define LEAPRAID_SEP_REQ_FLG_DEVHDL_ADDRESS 0x00
+#define LEAPRAID_SEP_REQ_FLG_ENCLOSURE_SLOT_ADDRESS 0x01
+#define LEAPRAID_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT 0x00000040
+
+/* the capabilities of the adapter */
+#define LEAPRAID_ADAPTER_FEATURES_CAP_ATOMIC_REQ 0x00080000
+#define LEAPRAID_ADAPTER_FEATURES_CAP_RDPQ_ARRAY_CAPABLE 0x00040000
+#define LEAPRAID_ADAPTER_FEATURES_CAP_EVENT_REPLAY 0x00002000
+#define LEAPRAID_ADAPTER_FEATURES_CAP_INTEGRATED_RAID 0x00001000
+
+/* event code definitions for the firmware */
+#define LEAPRAID_EVT_SAS_DEV_STATUS_CHANGE 0x000F
+#define LEAPRAID_EVT_SAS_DISCOVERY 0x0016
+#define LEAPRAID_EVT_SAS_TOPO_CHANGE_LIST 0x001C
+#define LEAPRAID_EVT_SAS_ENCL_DEV_STATUS_CHANGE 0x001D
+#define LEAPRAID_EVT_IR_CHANGE 0x0020
+#define LEAPRAID_EVT_TURN_ON_PFA_LED 0xFFFC
+#define LEAPRAID_EVT_SCAN_DEV_DONE 0xFFFD
+#define LEAPRAID_EVT_REMOVE_DEAD_DEV 0xFFFF
+#define LEAPRAID_MAX_EVENT_NUM 128
+
+#define LEAPRAID_EVT_SAS_DEV_STAT_RC_INTERNAL_DEV_RESET 0x08
+#define LEAPRAID_EVT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET 0x0E
+
+/* raid configuration change event */
+#define LEAPRAID_EVT_IR_RC_VOLUME_ADD 0x01
+#define LEAPRAID_EVT_IR_RC_VOLUME_DELETE 0x02
+#define LEAPRAID_EVT_IR_RC_PD_HIDDEN_TO_ADD 0x03
+#define LEAPRAID_EVT_IR_RC_PD_UNHIDDEN_TO_DELETE 0x04
+#define LEAPRAID_EVT_IR_RC_PD_CREATED_TO_HIDE 0x05
+#define LEAPRAID_EVT_IR_RC_PD_DELETED_TO_EXPOSE 0x06
+
+/* sas topology change event */
+#define LEAPRAID_EVT_SAS_TOPO_ES_NO_EXPANDER 0x00
+#define LEAPRAID_EVT_SAS_TOPO_ES_ADDED 0x01
+#define LEAPRAID_EVT_SAS_TOPO_ES_NOT_RESPONDING 0x02
+#define LEAPRAID_EVT_SAS_TOPO_ES_RESPONDING 0x03
+
+#define LEAPRAID_EVT_SAS_TOPO_RC_MASK 0x0F
+#define LEAPRAID_EVT_SAS_TOPO_RC_CLEAR_MASK 0xF0
+#define LEAPRAID_EVT_SAS_TOPO_RC_TARG_ADDED 0x01
+#define LEAPRAID_EVT_SAS_TOPO_RC_TARG_NOT_RESPONDING 0x02
+#define LEAPRAID_EVT_SAS_TOPO_RC_PHY_CHANGED 0x03
+
+/* sas discovery event defines */
+#define LEAPRAID_EVT_SAS_DISC_RC_STARTED 0x01
+#define LEAPRAID_EVT_SAS_DISC_RC_COMPLETED 0x02
+
+/* enclosure device status change event */
+#define LEAPRAID_EVT_SAS_ENCL_RC_ADDED 0x01
+#define LEAPRAID_EVT_SAS_ENCL_RC_NOT_RESPONDING 0x02
+
+/* device type and identifiers */
+#define LEAPRAID_DEVTYP_SEP 0x00004000
+#define LEAPRAID_DEVTYP_SSP_TGT 0x00000400
+#define LEAPRAID_DEVTYP_STP_TGT 0x00000200
+#define LEAPRAID_DEVTYP_SMP_TGT 0x00000100
+#define LEAPRAID_DEVTYP_SATA_DEV 0x00000080
+#define LEAPRAID_DEVTYP_SSP_INIT 0x00000040
+#define LEAPRAID_DEVTYP_STP_INIT 0x00000020
+#define LEAPRAID_DEVTYP_SMP_INIT 0x00000010
+#define LEAPRAID_DEVTYP_SATA_HOST 0x00000008
+
+#define LEAPRAID_DEVTYP_MASK_DEV_TYPE 0x00000007
+#define LEAPRAID_DEVTYP_NO_DEV 0x00000000
+#define LEAPRAID_DEVTYP_END_DEV 0x00000001
+#define LEAPRAID_DEVTYP_EDGE_EXPANDER 0x00000002
+#define LEAPRAID_DEVTYP_FANOUT_EXPANDER 0x00000003
+
+/* sas control operation */
+#define LEAPRAID_SAS_OP_PHY_LINK_RESET 0x06
+#define LEAPRAID_SAS_OP_PHY_HARD_RESET 0x07
+#define LEAPRAID_SAS_OP_SET_PARAMETER 0x0F
+
+/* boot device defines */
+#define LEAPRAID_BOOTDEV_FORM_MASK 0x0F
+#define LEAPRAID_BOOTDEV_FORM_NONE 0x00
+#define LEAPRAID_BOOTDEV_FORM_SAS_WWID 0x05
+#define LEAPRAID_BOOTDEV_FORM_ENC_SLOT 0x06
+#define LEAPRAID_BOOTDEV_FORM_DEV_NAME 0x07
+
+/**
+ * struct leapraid_reg_base - Register layout of the LeapRAID controller
+ *
+ * @db: Doorbell register used to signal commands or status to firmware
+ * @ws: Write sequence register for synchronizing doorbell operations
+ * @host_diag: Diagnostic register used for status or debug reporting
+ * @r1: Reserved
+ * @host_int_status: Interrupt status register reporting active interrupts
+ * @host_int_mask: Interrupt mask register enabling or disabling sources
+ * @r2: Reserved
+ * @rep_msg_host_idx: Reply message index for the next available reply slot
+ * @r3: Reserved
+ * @debug_log: DebugLog registers for firmware debug and diagnostic output
+ * @r4: Reserved
+ * @atomic_req_desc_post: Atomic register for single descriptor posting
+ * @adapter_log_buf_pos: Adapter log buffer write position
+ * @host_log_buf_pos: Host log buffer write position
+ * @r5: Reserved
+ * @rep_post_reg_idx: Array of reply post index registers, one per queue.
+ * The number of entries is defined by
+ * REP_POST_HOST_IDX_REG_CNT.
+ */
+struct leapraid_reg_base {
+ __le32 db;
+ __le32 ws;
+ __le32 host_diag;
+ __le32 r1[9];
+ __le32 host_int_status;
+ __le32 host_int_mask;
+ __le32 r2[4];
+ __le32 rep_msg_host_idx;
+ __le32 r3[13];
+ __le32 debug_log[LEAPRAID_DEBUGLOG_SZ_MAX];
+ __le32 r4[2];
+ __le32 atomic_req_desc_post;
+ __le32 adapter_log_buf_pos;
+ __le32 host_log_buf_pos;
+ __le32 r5[142];
+ struct leapraid_rep_post_reg_idx {
+ __le32 idx;
+ __le32 r1;
+ __le32 r2;
+ __le32 r3;
+ } rep_post_reg_idx[REP_POST_HOST_IDX_REG_CNT];
+} __packed;
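+
+/*
+ * Illustrative access pattern (not taken from this driver): a mapped
+ * BAR would be read and written through this layout, e.g.
+ *
+ *	u32 sts = readl(&regs->host_int_status);
+ *	writel(val, &regs->rep_post_reg_idx[q].idx);
+ *
+ * where "regs" is an ioremap()'d struct leapraid_reg_base pointer and
+ * "q" indexes a reply queue.
+ */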
+
+/**
+ * struct leapraid_atomic_req_desc - Atomic request descriptor
+ *
+ * @flg: Descriptor flag indicating the type of request (e.g. SCSI I/O)
+ * @msix_idx: MSI-X vector index used for interrupt routing
+ * @taskid: Unique task identifier associated with this request
+ */
+struct leapraid_atomic_req_desc {
+ u8 flg;
+ u8 msix_idx;
+ __le16 taskid;
+};
+
+/**
+ * union leapraid_rep_desc_union - Unified reply descriptor format
+ *
+ * @dflt_rep: Default reply descriptor containing basic completion info
+ * @dflt_rep.rep_flg: Reply flag indicating reply type or status
+ * @dflt_rep.msix_idx: MSI-X index for interrupt routing
+ * @dflt_rep.taskid: Task identifier matching the submitted request
+ * @dflt_rep.r1: Reserved
+ *
+ * @addr_rep: Address reply descriptor used when firmware returns a
+ * memory address associated with the reply
+ * @addr_rep.rep_flg: Reply flag indicating reply type or status
+ * @addr_rep.msix_idx: MSI-X index for interrupt routing
+ * @addr_rep.taskid: Task identifier matching the submitted request
+ * @addr_rep.rep_frame_addr: Physical address of the reply frame
+ *
+ * @words: Raw 64-bit representation of the reply descriptor
+ * @u: Alternative access using 32-bit low/high words
+ * @u.low: Lower 32 bits of the descriptor
+ * @u.high: Upper 32 bits of the descriptor
+ */
+union leapraid_rep_desc_union {
+ struct leapraid_rep_desc {
+ u8 rep_flg;
+ u8 msix_idx;
+ __le16 taskid;
+ u8 r1[4];
+ } dflt_rep;
+ struct leapraid_add_rep_desc {
+ u8 rep_flg;
+ u8 msix_idx;
+ __le16 taskid;
+ __le32 rep_frame_addr;
+ } addr_rep;
+ __le64 words;
+ struct {
+ u32 low;
+ u32 high;
+ } u;
+} __packed __aligned(4);
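+
+/*
+ * Illustrative consumption sketch (an assumed convention, mirroring
+ * other MPT-style fusion drivers): a reply post queue slot whose raw
+ * 64-bit value is all ones is treated as "no new reply", e.g.
+ *
+ *	union leapraid_rep_desc_union d = queue[idx];
+ *
+ *	if (le64_to_cpu(d.words) == ULLONG_MAX)
+ *		break;
+ *
+ * i.e. the slot has not been written by firmware yet. Whether LeapRAID
+ * follows this convention is an assumption here.
+ */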
+
+/**
+ * struct leapraid_req - Generic request header
+ *
+ * @func_dep1: Function-dependent parameter (low 16 bits)
+ * @r1: Reserved
+ * @func: Function code identifying the command type
+ * @r2: Reserved
+ */
+struct leapraid_req {
+ __le16 func_dep1;
+ u8 r1;
+ u8 func;
+ u8 r2[8];
+};
+
+/**
+ * struct leapraid_rep - Generic reply header
+ *
+ * @r1: Reserved
+ * @msg_len: Length of the reply message in bytes
+ * @function: Function code corresponding to the request
+ * @r2: Reserved
+ * @adapter_status: Status code reported by the adapter
+ * @r3: Reserved
+ */
+struct leapraid_rep {
+ u8 r1[2];
+ u8 msg_len;
+ u8 function;
+ u8 r2[10];
+ __le16 adapter_status;
+ u8 r3[4];
+};
+
+/**
+ * struct leapraid_sge_simple32 - 32-bit simple scatter-gather entry
+ *
+ * @flg_and_len: Combined field for flags and segment length
+ * @addr: 32-bit physical address of the data buffer
+ */
+struct leapraid_sge_simple32 {
+ __le32 flg_and_len;
+ __le32 addr;
+};
+
+/**
+ * struct leapraid_sge_simple64 - 64-bit simple scatter-gather entry
+ *
+ * @flg_and_len: Combined field for flags and segment length
+ * @addr: 64-bit physical address of the data buffer
+ */
+struct leapraid_sge_simple64 {
+ __le32 flg_and_len;
+ __le64 addr;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_sge_simple_union - Unified 32/64-bit SGE representation
+ *
+ * @flg_and_len: Combined field for flags and segment length
+ * @u.addr32: 32-bit address field
+ * @u.addr64: 64-bit address field
+ */
+struct leapraid_sge_simple_union {
+ __le32 flg_and_len;
+ union {
+ __le32 addr32;
+ __le64 addr64;
+ } u;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_sge_chain_union - Chained scatter-gather entry
+ *
+ * @len: Length of the chain descriptor
+ * @next_chain_offset: Offset to the next SGE chain
+ * @flg: Flags indicating chain or termination properties
+ * @u.addr32: 32-bit physical address
+ * @u.addr64: 64-bit physical address
+ */
+struct leapraid_sge_chain_union {
+ __le16 len;
+ u8 next_chain_offset;
+ u8 flg;
+ union {
+ __le32 addr32;
+ __le64 addr64;
+ } u;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_ieee_sge_simple32 - IEEE 32-bit simple SGE format
+ *
+ * @addr: 32-bit physical address of the data buffer
+ * @flg_and_len: Combined field for flags and data length
+ */
+struct leapraid_ieee_sge_simple32 {
+ __le32 addr;
+ __le32 flg_and_len;
+};
+
+/**
+ * struct leapraid_ieee_sge_simple64 - IEEE 64-bit simple SGE format
+ *
+ * @addr: 64-bit physical address of the data buffer
+ * @len: Length of the data segment
+ * @r1: Reserved
+ * @flg: Flags indicating transfer properties
+ */
+struct leapraid_ieee_sge_simple64 {
+ __le64 addr;
+ __le32 len;
+ u8 r1[3];
+ u8 flg;
+} __packed __aligned(4);
+
+/**
+ * union leapraid_ieee_sge_simple_union - Unified IEEE SGE format
+ *
+ * @simple32: IEEE 32-bit simple SGE entry
+ * @simple64: IEEE 64-bit simple SGE entry
+ */
+union leapraid_ieee_sge_simple_union {
+ struct leapraid_ieee_sge_simple32 simple32;
+ struct leapraid_ieee_sge_simple64 simple64;
+};
+
+/**
+ * union leapraid_ieee_sge_chain_union - Unified IEEE SGE chain format
+ *
+ * @chain32: IEEE 32-bit chain SGE entry
+ * @chain64: IEEE 64-bit chain SGE entry
+ */
+union leapraid_ieee_sge_chain_union {
+ struct leapraid_ieee_sge_simple32 chain32;
+ struct leapraid_ieee_sge_simple64 chain64;
+};
+
+/**
+ * struct leapraid_chain64_ieee_sg - 64-bit IEEE chain SGE descriptor
+ *
+ * @addr: Physical address of the next chain segment
+ * @len: Length of the current SGE
+ * @r1: Reserved
+ * @next_chain_offset: Offset to the next chain element
+ * @flg: Flags that describe SGE attributes
+ */
+struct leapraid_chain64_ieee_sg {
+ __le64 addr;
+ __le32 len;
+ u8 r1[2];
+ u8 next_chain_offset;
+ u8 flg;
+} __packed __aligned(4);
+
+/**
+ * union leapraid_ieee_sge_io_union - IEEE-style SGE union for I/O
+ *
+ * @ieee_simple: Simple IEEE SGE descriptor
+ * @ieee_chain: IEEE chain SGE descriptor
+ */
+union leapraid_ieee_sge_io_union {
+ struct leapraid_ieee_sge_simple64 ieee_simple;
+ struct leapraid_chain64_ieee_sg ieee_chain;
+};
+
+/**
+ * union leapraid_simple_sge_union - Union of simple SGE descriptors
+ *
+ * @leapio_simple: LeapIO-style simple SGE
+ * @ieee_simple: IEEE-style simple SGE
+ */
+union leapraid_simple_sge_union {
+ struct leapraid_sge_simple_union leapio_simple;
+ union leapraid_ieee_sge_simple_union ieee_simple;
+};
+
+/**
+ * union leapraid_sge_io_union - Combined SGE union for all I/O types
+ *
+ * @leapio_simple: LeapIO simple SGE format
+ * @leapio_chain: LeapIO chain SGE format
+ * @ieee_simple: IEEE simple SGE format
+ * @ieee_chain: IEEE chain SGE format
+ */
+union leapraid_sge_io_union {
+ struct leapraid_sge_simple_union leapio_simple;
+ struct leapraid_sge_chain_union leapio_chain;
+ union leapraid_ieee_sge_simple_union ieee_simple;
+ union leapraid_ieee_sge_chain_union ieee_chain;
+};
+
+/**
+ * struct leapraid_cfg_pg_header - Standard configuration page header
+ *
+ * @r1: Reserved
+ * @page_len: Length of the page in 4-byte units
+ * @page_num: Page number
+ * @page_type: Page type
+ */
+struct leapraid_cfg_pg_header {
+ u8 r1;
+ u8 page_len;
+ u8 page_num;
+ u8 page_type;
+};
+
+/**
+ * struct leapraid_cfg_ext_pg_header - Extended configuration page header
+ *
+ * @r1: Reserved
+ * @r2: Reserved
+ * @page_num: Page number
+ * @page_type: Page type
+ * @ext_page_len: Extended page length
+ * @ext_page_type: Extended page type
+ * @r3: Reserved
+ */
+struct leapraid_cfg_ext_pg_header {
+ u8 r1;
+ u8 r2;
+ u8 page_num;
+ u8 page_type;
+ __le16 ext_page_len;
+ u8 ext_page_type;
+ u8 r3;
+};
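+
+/*
+ * Page lengths in these headers are in LEAPRAID_CFG_UNIT_SIZE (4-byte)
+ * units, so the byte size of an extended page would be computed as
+ *
+ *	bytes = le16_to_cpu(hdr->ext_page_len) * LEAPRAID_CFG_UNIT_SIZE;
+ */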
+
+/**
+ * struct leapraid_cfg_req - Configuration request message
+ *
+ * @action: Requested action type
+ * @sgl_flag: SGL flag field
+ * @chain_offset: Offset to next chain SGE
+ * @func: Function code
+ * @ext_page_len: Extended page length
+ * @ext_page_type: Extended page type
+ * @msg_flag: Message flags
+ * @r1: Reserved
+ * @header: Configuration page header
+ * @page_addr: Address of the page buffer
+ * @page_buf_sge: SGE describing the page buffer
+ */
+struct leapraid_cfg_req {
+ u8 action;
+ u8 sgl_flag;
+ u8 chain_offset;
+ u8 func;
+ __le16 ext_page_len;
+ u8 ext_page_type;
+ u8 msg_flag;
+ u8 r1[12];
+ struct leapraid_cfg_pg_header header;
+ __le32 page_addr;
+ union leapraid_sge_io_union page_buf_sge;
+};
+
+/**
+ * struct leapraid_cfg_rep - Configuration reply message
+ *
+ * @action: Action type from the request
+ * @r1: Reserved
+ * @msg_len: Message length in bytes
+ * @func: Function code
+ * @ext_page_len: Extended page length
+ * @ext_page_type: Extended page type
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @adapter_status: Adapter status code
+ * @r3: Reserved
+ * @header: Configuration page header
+ */
+struct leapraid_cfg_rep {
+ u8 action;
+ u8 r1;
+ u8 msg_len;
+ u8 func;
+ __le16 ext_page_len;
+ u8 ext_page_type;
+ u8 msg_flag;
+ u8 r2[6];
+ __le16 adapter_status;
+ u8 r3[4];
+ struct leapraid_cfg_pg_header header;
+};
+
+/**
+ * struct leapraid_boot_dev_format_sas_wwid - Boot device identified by wwid
+ *
+ * @sas_addr: SAS address of the device
+ * @lun: Logical unit number
+ * @r1: Reserved
+ */
+struct leapraid_boot_dev_format_sas_wwid {
+ __le64 sas_addr;
+ u8 lun[8];
+ u8 r1[8];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_boot_dev_format_enc_slot - Boot device identified by enclosure slot
+ *
+ * @enc_lid: Enclosure logical ID
+ * @r1: Reserved
+ * @slot_num: Slot number in the enclosure
+ * @r2: Reserved
+ */
+struct leapraid_boot_dev_format_enc_slot {
+ __le64 enc_lid;
+ u8 r1[8];
+ __le16 slot_num;
+ u8 r2[6];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_boot_dev_format_dev_name - Boot device by device name
+ *
+ * @dev_name: Device name identifier
+ * @lun: Logical unit number
+ * @r1: Reserved
+ */
+struct leapraid_boot_dev_format_dev_name {
+ __le64 dev_name;
+ u8 lun[8];
+ u8 r1[8];
+} __packed __aligned(4);
+
+/**
+ * union leapraid_boot_dev_format - Boot device format union
+ *
+ * @sas_wwid: Format using SAS WWID and LUN
+ * @enc_slot: Format using enclosure slot and ID
+ * @dev_name: Format using device name and LUN
+ */
+union leapraid_boot_dev_format {
+ struct leapraid_boot_dev_format_sas_wwid sas_wwid;
+ struct leapraid_boot_dev_format_enc_slot enc_slot;
+ struct leapraid_boot_dev_format_dev_name dev_name;
+};
+
+/**
+ * struct leapraid_bios_page2 - BIOS configuration page 2
+ *
+ * @header: Configuration page header
+ * @r1: Reserved
+ * @requested_boot_dev_form: Format type of the requested boot device
+ * @r2: Reserved
+ * @requested_boot_dev: Boot device requested by BIOS or user
+ * @requested_alt_boot_dev_form: Format of the alternate boot device
+ * @r3: Reserved
+ * @requested_alt_boot_dev: Alternate boot device requested
+ * @current_boot_dev_form: Format type of the active boot device
+ * @r4: Reserved
+ * @current_boot_dev: Currently active boot device in use
+ */
+struct leapraid_bios_page2 {
+ struct leapraid_cfg_pg_header header;
+ u8 r1[24];
+ u8 requested_boot_dev_form;
+ u8 r2[3];
+ union leapraid_boot_dev_format requested_boot_dev;
+ u8 requested_alt_boot_dev_form;
+ u8 r3[3];
+ union leapraid_boot_dev_format requested_alt_boot_dev;
+ u8 current_boot_dev_form;
+ u8 r4[3];
+ union leapraid_boot_dev_format current_boot_dev;
+};
+
+/**
+ * struct leapraid_bios_page3 - BIOS configuration page 3
+ *
+ * @header: Configuration page header
+ * @r1: Reserved
+ * @bios_version: BIOS firmware version number
+ * @r2: Reserved
+ */
+struct leapraid_bios_page3 {
+ struct leapraid_cfg_pg_header header;
+ u8 r1[4];
+ __le32 bios_version;
+ u8 r2[84];
+};
+
+/**
+ * struct leapraid_raidvol0_phys_disk - Physical disk in RAID volume
+ *
+ * @r1: Reserved
+ * @phys_disk_num: Physical disk number within the RAID volume
+ * @r2: Reserved
+ */
+struct leapraid_raidvol0_phys_disk {
+ u8 r1[2];
+ u8 phys_disk_num;
+ u8 r2;
+};
+
+/**
+ * struct leapraid_raidvol_p0 - RAID volume configuration page 0
+ *
+ * @header: Configuration page header
+ * @dev_hdl: Device handle for the RAID volume
+ * @volume_state: State of the RAID volume
+ * @volume_type: RAID type
+ * @r1: Reserved
+ * @num_phys_disks: Number of physical disks in the volume
+ * @r2: Reserved
+ * @phys_disk: Array of physical disks in this volume
+ */
+struct leapraid_raidvol_p0 {
+ struct leapraid_cfg_pg_header header;
+ __le16 dev_hdl;
+ u8 volume_state;
+ u8 volume_type;
+ u8 r1[28];
+ u8 num_phys_disks;
+ u8 r2[3];
+ struct leapraid_raidvol0_phys_disk phys_disk[];
+};
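+
+/*
+ * The trailing phys_disk[] array makes this a variable-length page; an
+ * illustrative allocation (assumed usage, not taken from this driver):
+ *
+ *	sz = struct_size(vol_pg0, phys_disk, num_disks);
+ *	vol_pg0 = kzalloc(sz, GFP_KERNEL);
+ */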
+
+/**
+ * struct leapraid_raidvol_p1 - RAID volume configuration page 1
+ *
+ * @header: Configuration page header
+ * @dev_hdl: Device handle of the RAID volume
+ * @r1: Reserved
+ * @wwid: World-wide identifier for the volume
+ * @r2: Reserved
+ */
+struct leapraid_raidvol_p1 {
+ struct leapraid_cfg_pg_header header;
+ __le16 dev_hdl;
+ u8 r1[42];
+ __le64 wwid;
+ u8 r2[8];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_raidpd_p0 - Physical disk configuration page 0
+ *
+ * @header: Configuration page header
+ * @dev_hdl: Device handle of the physical disk
+ * @r1: Reserved
+ * @phys_disk_num: Physical disk number
+ * @r2: Reserved
+ */
+struct leapraid_raidpd_p0 {
+ struct leapraid_cfg_pg_header header;
+ __le16 dev_hdl;
+ u8 r1;
+ u8 phys_disk_num;
+ u8 r2[112];
+};
+
+/**
+ * struct leapraid_sas_io_unit0_phy_info - PHY info for SAS I/O unit
+ *
+ * @port: Port number the PHY belongs to
+ * @port_flg: Flags describing port status
+ * @phy_flg: Flags describing PHY status
+ * @neg_link_rate: Negotiated link rate of the PHY
+ * @controller_phy_dev_info: Controller PHY device info
+ * @attached_dev_hdl: Handle of attached device
+ * @controller_dev_hdl: Handle of the controller device
+ * @r1: Reserved
+ */
+struct leapraid_sas_io_unit0_phy_info {
+ u8 port;
+ u8 port_flg;
+ u8 phy_flg;
+ u8 neg_link_rate;
+ __le32 controller_phy_dev_info;
+ __le16 attached_dev_hdl;
+ __le16 controller_dev_hdl;
+ u8 r1[8];
+};
+
+/**
+ * struct leapraid_sas_io_unit_p0 - SAS I/O unit configuration page 0
+ *
+ * @header: Extended configuration page header
+ * @r1: Reserved
+ * @phy_num: Number of PHYs in this unit
+ * @r2: Reserved
+ * @phy_info: Array of PHY information
+ */
+struct leapraid_sas_io_unit_p0 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 r1[4];
+ u8 phy_num;
+ u8 r2[3];
+ struct leapraid_sas_io_unit0_phy_info phy_info[];
+};
+
+/**
+ * struct leapraid_sas_io_unit1_phy_info - Placeholder for SAS unit page 1 PHY
+ *
+ * @r1: Reserved
+ */
+struct leapraid_sas_io_unit1_phy_info {
+ u8 r1[12];
+};
+
+/**
+ * struct leapraid_sas_io_unit_page1 - SAS I/O unit configuration page 1
+ *
+ * @header: Extended configuration page header
+ * @r1: Reserved
+ * @narrowport_max_queue_depth: Maximum queue depth for narrow ports
+ * @r2: Reserved
+ * @wideport_max_queue_depth: Maximum queue depth for wide ports
+ * @r3: Reserved
+ * @sata_max_queue_depth: Maximum SATA queue depth
+ * @r4: Reserved
+ * @phy_info: Array of PHY info structures
+ */
+struct leapraid_sas_io_unit_page1 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 r1[2];
+ __le16 narrowport_max_queue_depth;
+ u8 r2[2];
+ __le16 wideport_max_queue_depth;
+ u8 r3;
+ u8 sata_max_queue_depth;
+ u8 r4[2];
+ struct leapraid_sas_io_unit1_phy_info phy_info[];
+};
+
+/**
+ * struct leapraid_exp_p0 - SAS expander page 0
+ *
+ * @header: Extended page header
+ * @physical_port: Physical port number
+ * @r1: Reserved
+ * @enc_hdl: Enclosure handle
+ * @sas_address: SAS address of the expander
+ * @r2: Reserved
+ * @dev_hdl: Device handle of this expander
+ * @parent_dev_hdl: Device handle of parent expander
+ * @r3: Reserved
+ * @phy_num: Number of PHYs
+ * @r4: Reserved
+ */
+struct leapraid_exp_p0 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 physical_port;
+ u8 r1;
+ __le16 enc_hdl;
+ __le64 sas_address;
+ u8 r2[4];
+ __le16 dev_hdl;
+ __le16 parent_dev_hdl;
+ u8 r3[4];
+ u8 phy_num;
+ u8 r4[27];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_exp_p1 - SAS expander page 1
+ *
+ * @header: Extended page header
+ * @r1: Reserved
+ * @p_link_rate: PHY link rate
+ * @hw_link_rate: Hardware supported link rate
+ * @attached_dev_hdl: Attached device handle
+ * @r2: Reserved
+ * @neg_link_rate: Negotiated link rate
+ * @r3: Reserved
+ */
+struct leapraid_exp_p1 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 r1[8];
+ u8 p_link_rate;
+ u8 hw_link_rate;
+ __le16 attached_dev_hdl;
+ u8 r2[11];
+ u8 neg_link_rate;
+ u8 r3[12];
+};
+
+/**
+ * struct leapraid_sas_dev_p0 - SAS device page 0
+ *
+ * @header: Extended configuration page header
+ * @slot: Slot number
+ * @enc_hdl: Enclosure handle
+ * @sas_address: SAS address
+ * @parent_dev_hdl: Parent device handle
+ * @phy_num: Number of PHYs
+ * @r1: Reserved
+ * @dev_hdl: Device handle
+ * @r2: Reserved
+ * @dev_info: Device information
+ * @flg: Flags
+ * @physical_port: Physical port number
+ * @max_port_connections: Maximum port connections
+ * @dev_name: Device name
+ * @port_groups: Number of port groups
+ * @r3: Reserved
+ * @enc_level: Enclosure level
+ * @connector_name: Connector identifier
+ * @r4: Reserved
+ */
+struct leapraid_sas_dev_p0 {
+ struct leapraid_cfg_ext_pg_header header;
+ __le16 slot;
+ __le16 enc_hdl;
+ __le64 sas_address;
+ __le16 parent_dev_hdl;
+ u8 phy_num;
+ u8 r1;
+ __le16 dev_hdl;
+ u8 r2[2];
+ __le32 dev_info;
+ __le16 flg;
+ u8 physical_port;
+ u8 max_port_connections;
+ __le64 dev_name;
+ u8 port_groups;
+ u8 r3[2];
+ u8 enc_level;
+ u8 connector_name[4];
+ u8 r4[4];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_sas_phy_p0 - SAS PHY configuration page 0
+ *
+ * @header: Extended configuration page header
+ * @r1: Reserved
+ * @attached_dev_hdl: Handle of attached device
+ * @r2: Reserved
+ * @p_link_rate: PHY link rate
+ * @hw_link_rate: Hardware supported link rate
+ * @r3: Reserved
+ * @phy_info: PHY information
+ * @neg_link_rate: Negotiated link rate
+ * @r4: Reserved
+ */
+struct leapraid_sas_phy_p0 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 r1[4];
+ __le16 attached_dev_hdl;
+ u8 r2[6];
+ u8 p_link_rate;
+ u8 hw_link_rate;
+ u8 r3[2];
+ __le32 phy_info;
+ u8 neg_link_rate;
+ u8 r4[3];
+};
+
+/**
+ * struct leapraid_enc_p0 - SAS enclosure page 0
+ *
+ * @header: Extended configuration page header
+ * @r1: Reserved
+ * @enc_lid: Enclosure logical ID
+ * @r2: Reserved
+ * @enc_hdl: Enclosure handle
+ * @r3: Reserved
+ */
+struct leapraid_enc_p0 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 r1[4];
+ __le64 enc_lid;
+ u8 r2[2];
+ __le16 enc_hdl;
+ u8 r3[15];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_raid_cfg_p0_element - RAID configuration element
+ *
+ * @element_flg: Element flags
+ * @vol_dev_hdl: Volume device handle
+ * @r1: Reserved
+ * @phys_disk_dev_hdl: Physical disk device handle
+ */
+struct leapraid_raid_cfg_p0_element {
+ __le16 element_flg;
+ __le16 vol_dev_hdl;
+ u8 r1[2];
+ __le16 phys_disk_dev_hdl;
+};
+
+/**
+ * struct leapraid_raid_cfg_p0 - RAID configuration page 0
+ *
+ * @header: Extended configuration page header
+ * @r1: Reserved
+ * @cfg_num: Configuration number
+ * @r2: Reserved
+ * @elements_num: Number of RAID elements
+ * @r3: Reserved
+ * @cfg_element: Array of RAID elements
+ */
+struct leapraid_raid_cfg_p0 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 r1[3];
+ u8 cfg_num;
+ u8 r2[32];
+ u8 elements_num;
+ u8 r3[3];
+ struct leapraid_raid_cfg_p0_element cfg_element[];
+};
+
+/**
+ * union leapraid_mpi_scsi_io_cdb_union - SCSI I/O CDB or simple SGE
+ *
+ * @cdb32: 32-byte SCSI command descriptor block
+ * @sge: Simple SGE format
+ */
+union leapraid_mpi_scsi_io_cdb_union {
+ u8 cdb32[32];
+ struct leapraid_sge_simple_union sge;
+};
+
+/**
+ * struct leapraid_mpi_scsiio_req - MPI SCSI I/O request
+ *
+ * @dev_hdl: Device handle for the target
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r1: Reserved
+ * @msg_flg: Message flags
+ * @r2: Reserved
+ * @sense_buffer_low_add: Lower 32-bit address of sense buffer
+ * @dma_flag: DMA flags
+ * @r3: Reserved
+ * @sense_buffer_len: Sense buffer length
+ * @r4: Reserved
+ * @sgl_offset0: First SGL offset
+ * @sgl_offset1: Second SGL offset
+ * @sgl_offset2: Third SGL offset
+ * @sgl_offset3: Fourth SGL offset
+ * @skip_count: Bytes to skip before transfer
+ * @data_len: Length of data transfer
+ * @bi_dir_data_len: Bi-directional transfer length
+ * @io_flg: I/O flags
+ * @eedp_flag: EEDP flags
+ * @eedp_block_size: EEDP block size
+ * @r5: Reserved
+ * @secondary_ref_tag: Secondary reference tag
+ * @secondary_app_tag: Secondary application tag
+ * @app_tag_trans_mask: Application tag mask
+ * @lun: Logical Unit Number
+ * @ctrl: Control flags
+ * @cdb: SCSI Command Descriptor Block or simple SGE
+ * @sgl: Scatter-gather list
+ */
+struct leapraid_mpi_scsiio_req {
+ __le16 dev_hdl;
+ u8 chain_offset;
+ u8 func;
+ u8 r1[3];
+ u8 msg_flg;
+ u8 r2[4];
+ __le32 sense_buffer_low_add;
+ u8 dma_flag;
+ u8 r3;
+ u8 sense_buffer_len;
+ u8 r4;
+ u8 sgl_offset0;
+ u8 sgl_offset1;
+ u8 sgl_offset2;
+ u8 sgl_offset3;
+ __le32 skip_count;
+ __le32 data_len;
+ __le32 bi_dir_data_len;
+ __le16 io_flg;
+ __le16 eedp_flag;
+ __le16 eedp_block_size;
+ u8 r5[2];
+ __le32 secondary_ref_tag;
+ __le16 secondary_app_tag;
+ __le16 app_tag_trans_mask;
+ u8 lun[8];
+ __le32 ctrl;
+ union leapraid_mpi_scsi_io_cdb_union cdb;
+ union leapraid_sge_io_union sgl;
+};
+
+/**
+ * union leapraid_scsi_io_cdb_union - SCSI I/O CDB or IEEE simple SGE
+ *
+ * @cdb32: 32-byte SCSI CDB
+ * @sge: IEEE simple 64-bit SGE
+ */
+union leapraid_scsi_io_cdb_union {
+ u8 cdb32[32];
+ struct leapraid_ieee_sge_simple64 sge;
+};
+
+/**
+ * struct leapraid_scsiio_req - SCSI I/O request
+ *
+ * @dev_hdl: Device handle
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r1: Reserved
+ * @msg_flg: Message flags
+ * @r2: Reserved
+ * @sense_buffer_low_add: Lower 32-bit address of sense buffer
+ * @dma_flag: DMA flag
+ * @r3: Reserved
+ * @sense_buffer_len: Sense buffer length
+ * @r4: Reserved
+ * @sgl_offset0: First SGL offset
+ * @sgl_offset1: Second SGL offset
+ * @sgl_offset2: Third SGL offset
+ * @sgl_offset3: Fourth SGL offset
+ * @skip_count: Bytes to skip before transfer
+ * @data_len: Length of data transfer
+ * @bi_dir_data_len: Bi-directional transfer length
+ * @io_flg: I/O flags
+ * @eedp_flag: EEDP flags
+ * @eedp_block_size: EEDP block size
+ * @r5: Reserved
+ * @secondary_ref_tag: Secondary reference tag
+ * @secondary_app_tag: Secondary application tag
+ * @app_tag_trans_mask: Application tag mask
+ * @lun: Logical Unit Number
+ * @ctrl: Control flags
+ * @cdb: SCSI Command Descriptor Block or simple SGE
+ * @sgl: Scatter-gather list
+ */
+struct leapraid_scsiio_req {
+ __le16 dev_hdl;
+ u8 chain_offset;
+ u8 func;
+ u8 r1[3];
+ u8 msg_flg;
+ u8 r2[4];
+ __le32 sense_buffer_low_add;
+ u8 dma_flag;
+ u8 r3;
+ u8 sense_buffer_len;
+ u8 r4;
+ u8 sgl_offset0;
+ u8 sgl_offset1;
+ u8 sgl_offset2;
+ u8 sgl_offset3;
+ __le32 skip_count;
+ __le32 data_len;
+ __le32 bi_dir_data_len;
+ __le16 io_flg;
+ __le16 eedp_flag;
+ __le16 eedp_block_size;
+ u8 r5[2];
+ __le32 secondary_ref_tag;
+ __le16 secondary_app_tag;
+ __le16 app_tag_trans_mask;
+ u8 lun[8];
+ __le32 ctrl;
+ union leapraid_scsi_io_cdb_union cdb;
+ union leapraid_ieee_sge_io_union sgl;
+};
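+
+/*
+ * As in other MPT-style request layouts, a command of up to 32 bytes
+ * is assumed to be carried inline in cdb.cdb32, with the SGE form of
+ * the union used when the CDB is referenced indirectly instead.
+ */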
+
+/**
+ * struct leapraid_scsiio_rep - SCSI I/O response
+ *
+ * @dev_hdl: Device handle
+ * @msg_len: Length of response message
+ * @func: Function code
+ * @r1: Reserved
+ * @msg_flg: Message flags
+ * @r2: Reserved
+ * @scsi_status: SCSI status
+ * @scsi_state: SCSI state
+ * @adapter_status: Adapter status
+ * @r3: Reserved
+ * @transfer_count: Number of bytes transferred
+ * @sense_count: Number of sense bytes
+ * @resp_info: Additional response info
+ * @task_tag: Task identifier
+ * @scsi_status_qualifier: SCSI status qualifier
+ * @bi_dir_trans_count: Bi-directional transfer count
+ * @r4: Reserved
+ */
+struct leapraid_scsiio_rep {
+ __le16 dev_hdl;
+ u8 msg_len;
+ u8 func;
+ u8 r1[3];
+ u8 msg_flg;
+ u8 r2[4];
+ u8 scsi_status;
+ u8 scsi_state;
+ __le16 adapter_status;
+ u8 r3[4];
+ __le32 transfer_count;
+ __le32 sense_count;
+ __le32 resp_info;
+ __le16 task_tag;
+ __le16 scsi_status_qualifier;
+ __le32 bi_dir_trans_count;
+ __le32 r4[3];
+};
+
+/**
+ * struct leapraid_scsi_tm_req - SCSI Task Management request
+ *
+ * @dev_hdl: Device handle
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r1: Reserved
+ * @task_type: Task management function type
+ * @r2: Reserved
+ * @msg_flg: Message flags
+ * @r3: Reserved
+ * @lun: Logical Unit Number
+ * @r4: Reserved
+ * @task_mid: Task identifier
+ * @r5: Reserved
+ */
+struct leapraid_scsi_tm_req {
+ __le16 dev_hdl;
+ u8 chain_offset;
+ u8 func;
+ u8 r1;
+ u8 task_type;
+ u8 r2;
+ u8 msg_flg;
+ u8 r3[4];
+ u8 lun[8];
+ u8 r4[28];
+ __le16 task_mid;
+ u8 r5[2];
+};
+
+/**
+ * struct leapraid_scsi_tm_rep - SCSI Task Management response
+ *
+ * @dev_hdl: Device handle
+ * @msg_len: Length of response message
+ * @func: Function code
+ * @resp_code: Response code
+ * @task_type: Task management type
+ * @r1: Reserved
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @adapter_status: Adapter status
+ * @r3: Reserved
+ * @termination_count: Count of terminated tasks
+ * @response_info: Additional response info
+ */
+struct leapraid_scsi_tm_rep {
+ __le16 dev_hdl;
+ u8 msg_len;
+ u8 func;
+ u8 resp_code;
+ u8 task_type;
+ u8 r1;
+ u8 msg_flag;
+ u8 r2[6];
+ __le16 adapter_status;
+ u8 r3[4];
+ __le32 termination_count;
+ __le32 response_info;
+};
+
+/**
+ * struct leapraid_sep_req - SEP (SCSI Enclosure Processor) request
+ *
+ * @dev_hdl: Device handle
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @act: Action to perform
+ * @flg: Flags
+ * @r1: Reserved
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @slot_status: Slot status
+ * @r3: Reserved
+ * @slot: Slot number
+ * @enc_hdl: Enclosure handle
+ */
+struct leapraid_sep_req {
+ __le16 dev_hdl;
+ u8 chain_offset;
+ u8 func;
+ u8 act;
+ u8 flg;
+ u8 r1;
+ u8 msg_flag;
+ u8 r2[4];
+ __le32 slot_status;
+ u8 r3[12];
+ __le16 slot;
+ __le16 enc_hdl;
+};
+
+/**
+ * struct leapraid_sep_rep - SEP response
+ *
+ * @dev_hdl: Device handle
+ * @msg_len: Message length
+ * @func: Function code
+ * @act: Action performed
+ * @flg: Flags
+ * @r1: Reserved
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @adapter_status: Adapter status
+ * @r3: Reserved
+ * @slot_status: Slot status
+ * @r4: Reserved
+ * @slot: Slot number
+ * @enc_hdl: Enclosure handle
+ */
+struct leapraid_sep_rep {
+ __le16 dev_hdl;
+ u8 msg_len;
+ u8 func;
+ u8 act;
+ u8 flg;
+ u8 r1;
+ u8 msg_flag;
+ u8 r2[6];
+ __le16 adapter_status;
+ u8 r3[4];
+ __le32 slot_status;
+ u8 r4[4];
+ __le16 slot;
+ __le16 enc_hdl;
+};
+
+/**
+ * struct leapraid_adapter_init_req - Adapter initialization request
+ *
+ * @who_init: Initiator of the initialization
+ * @r1: Reserved
+ * @chain_offset: Chain offset
+ * @func: Function code
+ * @r2: Reserved
+ * @msg_flg: Message flags
+ * @driver_ver: Driver version
+ * @msg_ver: Message version
+ * @header_ver: Header version
+ * @host_buf_addr: Host buffer address (non adapter-ref)
+ * @r4: Reserved
+ * @host_buf_size: Host buffer size (non adapter-ref)
+ * @host_msix_vectors: Number of host MSI-X vectors
+ * @r6: Reserved
+ * @req_frame_size: Request frame size
+ * @rep_desc_qd: Reply descriptor queue depth
+ * @rep_msg_qd: Reply message queue depth
+ * @sense_buffer_add_high: High 32-bit of sense buffer address
+ * @rep_msg_dma_high: High 32-bit of reply message DMA address
+ * @task_desc_base_addr: Base address of task descriptors
+ * @rep_desc_q_arr_addr: Address of reply descriptor queue array
+ * @rep_msg_addr_dma: Reply message DMA address
+ * @time_stamp: Timestamp
+ */
+struct leapraid_adapter_init_req {
+ u8 who_init;
+ u8 r1;
+ u8 chain_offset;
+ u8 func;
+ u8 r2[3];
+ u8 msg_flg;
+ __le32 driver_ver;
+ __le16 msg_ver;
+ __le16 header_ver;
+ __le32 host_buf_addr;
+ u8 r4[2];
+ u8 host_buf_size;
+ u8 host_msix_vectors;
+ u8 r6[2];
+ __le16 req_frame_size;
+ __le16 rep_desc_qd;
+ __le16 rep_msg_qd;
+ __le32 sense_buffer_add_high;
+ __le32 rep_msg_dma_high;
+ __le64 task_desc_base_addr;
+ __le64 rep_desc_q_arr_addr;
+ __le64 rep_msg_addr_dma;
+ __le64 time_stamp;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_rep_desc_q_arr - Reply descriptor queue array
+ *
+ * @rep_desc_base_addr: Base address of the reply descriptors
+ * @r1: Reserved
+ */
+struct leapraid_rep_desc_q_arr {
+ __le64 rep_desc_base_addr;
+ __le64 r1;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_adapter_init_rep - Adapter initialization reply
+ *
+ * @who_init: Initiator of the initialization
+ * @r1: Reserved
+ * @msg_len: Length of reply message
+ * @func: Function code
+ * @r2: Reserved
+ * @msg_flag: Message flags
+ * @r3: Reserved
+ * @adapter_status: Adapter status
+ * @r4: Reserved
+ */
+struct leapraid_adapter_init_rep {
+ u8 who_init;
+ u8 r1;
+ u8 msg_len;
+ u8 func;
+ u8 r2[3];
+ u8 msg_flag;
+ u8 r3[6];
+ __le16 adapter_status;
+ u8 r4[4];
+};
+
+/**
+ * struct leapraid_adapter_log_req - Adapter log request
+ *
+ * @action: Action code
+ * @type: Log type
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r1: Reserved
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @mbox: Mailbox for command-specific parameters
+ * @sge: Scatter-gather entry for data buffer
+ */
+struct leapraid_adapter_log_req {
+ u8 action;
+ u8 type;
+ u8 chain_offset;
+ u8 func;
+ u8 r1[3];
+ u8 msg_flag;
+ u8 r2[4];
+ union {
+ u8 b[12];
+ __le16 s[6];
+ __le32 w[3];
+ } mbox;
+ struct leapraid_sge_simple64 sge;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_adapter_log_rep - Adapter log reply
+ *
+ * @action: Action code echoed
+ * @type: Log type echoed
+ * @msg_len: Length of message
+ * @func: Function code
+ * @r1: Reserved
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @adapter_status: Status returned by adapter
+ */
+struct leapraid_adapter_log_rep {
+ u8 action;
+ u8 type;
+ u8 msg_len;
+ u8 func;
+ u8 r1[3];
+ u8 msg_flag;
+ u8 r2[6];
+ __le16 adapter_status;
+};
+
+/**
+ * struct leapraid_adapter_features_req - Request adapter features
+ *
+ * @r1: Reserved
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r2: Reserved
+ * @msg_flag: Message flags
+ * @r3: Reserved
+ */
+struct leapraid_adapter_features_req {
+ u8 r1[2];
+ u8 chain_offset;
+ u8 func;
+ u8 r2[3];
+ u8 msg_flag;
+ u8 r3[4];
+};
+
+/**
+ * struct leapraid_adapter_features_rep - Adapter features reply
+ *
+ * @msg_ver: Message version
+ * @msg_len: Length of reply message
+ * @func: Function code
+ * @header_ver: Header version
+ * @r1: Reserved
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @adapter_status: Adapter status
+ * @r3: Reserved
+ * @sata_max_qdepth: Max SATA queue depth
+ * @who_init: Who initialized the adapter
+ * @r4: Reserved
+ * @max_msix_vectors: Max MSI-X vectors supported
+ * @req_slot: Number of request slots
+ * @r5: Reserved
+ * @adapter_caps: Adapter capabilities
+ * @fw_version: Firmware version
+ * @sas_wide_max_qdepth: Max wide SAS queue depth
+ * @sas_narrow_max_qdepth: Max narrow SAS queue depth
+ * @r6: Reserved
+ * @hp_slot: Number of high-priority slots
+ * @r7: Reserved
+ * @max_volumes: Maximum supported volumes
+ * @max_dev_hdl: Maximum device handle
+ * @r8: Reserved
+ * @min_dev_hdl: Minimum device handle
+ * @r9: Reserved
+ */
+struct leapraid_adapter_features_rep {
+	__le16 msg_ver;
+	u8 msg_len;
+	u8 func;
+	__le16 header_ver;
+	u8 r1;
+	u8 msg_flag;
+	u8 r2[6];
+	__le16 adapter_status;
+ u8 r3[4];
+ u8 sata_max_qdepth;
+ u8 who_init;
+ u8 r4;
+ u8 max_msix_vectors;
+ __le16 req_slot;
+ u8 r5[2];
+ __le32 adapter_caps;
+ __le32 fw_version;
+ __le16 sas_wide_max_qdepth;
+ __le16 sas_narrow_max_qdepth;
+ u8 r6[10];
+ __le16 hp_slot;
+ u8 r7[3];
+ u8 max_volumes;
+ __le16 max_dev_hdl;
+ u8 r8[2];
+ __le16 min_dev_hdl;
+ u8 r9[6];
+};
+
+/**
+ * struct leapraid_scan_dev_req - Request to scan devices
+ *
+ * @r1: Reserved
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r2: Reserved
+ * @msg_flag: Message flags
+ * @r3: Reserved
+ */
+struct leapraid_scan_dev_req {
+ u8 r1[2];
+ u8 chain_offset;
+ u8 func;
+ u8 r2[3];
+ u8 msg_flag;
+ u8 r3[4];
+};
+
+/**
+ * struct leapraid_scan_dev_rep - Scan devices reply
+ *
+ * @r1: Reserved
+ * @msg_len: Length of message
+ * @func: Function code
+ * @r2: Reserved
+ * @msg_flag: Message flags
+ * @r3: Reserved
+ * @adapter_status: Adapter status
+ * @r4: Reserved
+ */
+struct leapraid_scan_dev_rep {
+ u8 r1[2];
+ u8 msg_len;
+ u8 func;
+ u8 r2[3];
+ u8 msg_flag;
+ u8 r3[6];
+ __le16 adapter_status;
+ u8 r4[4];
+};
+
+/**
+ * struct leapraid_evt_notify_req - Event notification request
+ *
+ * @r1: Reserved
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r2: Reserved
+ * @msg_flag: Message flags
+ * @r3: Reserved
+ * @evt_masks: Event masks to enable notifications
+ * @r4: Reserved
+ */
+struct leapraid_evt_notify_req {
+ u8 r1[2];
+ u8 chain_offset;
+ u8 func;
+ u8 r2[3];
+ u8 msg_flag;
+ u8 r3[12];
+ __le32 evt_masks[4];
+ u8 r4[8];
+};
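+
+/*
+ * evt_masks[] carries one bit per event code: 4 x 32 bits matches
+ * LEAPRAID_MAX_EVENT_NUM (128). Under that assumption, event N maps to
+ * bit (N % 32) of evt_masks[N / 32].
+ */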
+
+/**
+ * struct leapraid_evt_notify_rep - Event notification reply
+ *
+ * @evt_data_len: Length of event data
+ * @msg_len: Length of message
+ * @func: Function code
+ * @r1: Reserved
+ * @r2: Reserved
+ * @msg_flag: Message flags
+ * @r3: Reserved
+ * @adapter_status: Adapter status
+ * @r4: Reserved
+ * @evt: Event code
+ * @r5: Reserved
+ * @evt_data: Event data array
+ */
+struct leapraid_evt_notify_rep {
+ __le16 evt_data_len;
+ u8 msg_len;
+ u8 func;
+ u8 r1[2];
+ u8 r2;
+ u8 msg_flag;
+ u8 r3[6];
+ __le16 adapter_status;
+ u8 r4[4];
+ __le16 evt;
+ u8 r5[6];
+ __le32 evt_data[];
+};
+
+/**
+ * struct leapraid_evt_data_sas_dev_status_change - SAS device status change
+ *
+ * @task_tag: Task identifier
+ * @reason_code: Reason for status change
+ * @physical_port: Physical port number
+ * @r1: Reserved
+ * @dev_hdl: Device handle
+ * @r2: Reserved
+ * @sas_address: SAS address of device
+ * @lun: Logical Unit Number
+ */
+struct leapraid_evt_data_sas_dev_status_change {
+ __le16 task_tag;
+ u8 reason_code;
+ u8 physical_port;
+ u8 r1[2];
+ __le16 dev_hdl;
+ u8 r2[4];
+ __le64 sas_address;
+ u8 lun[8];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_evt_data_ir_change - IR (Integrated RAID) change event data
+ *
+ * @r1: Reserved
+ * @reason_code: Reason for IR change
+ * @r2: Reserved
+ * @vol_dev_hdl: Volume device handle
+ * @phys_disk_dev_hdl: Physical disk device handle
+ */
+struct leapraid_evt_data_ir_change {
+ u8 r1;
+ u8 reason_code;
+ u8 r2[2];
+ __le16 vol_dev_hdl;
+ __le16 phys_disk_dev_hdl;
+};
+
+/**
+ * struct leapraid_evt_data_sas_disc - SAS discovery event data
+ *
+ * @r1: Reserved
+ * @reason_code: Reason for discovery event
+ * @physical_port: Physical port number where event occurred
+ * @r2: Reserved
+ */
+struct leapraid_evt_data_sas_disc {
+ u8 r1;
+ u8 reason_code;
+ u8 physical_port;
+ u8 r2[5];
+};
+
+/**
+ * struct leapraid_evt_sas_topo_phy_entry - SAS topology PHY entry
+ *
+ * @attached_dev_hdl: Device handle attached to PHY
+ * @link_rate: Current link rate
+ * @phy_status: PHY status flags
+ */
+struct leapraid_evt_sas_topo_phy_entry {
+ __le16 attached_dev_hdl;
+ u8 link_rate;
+ u8 phy_status;
+};
+
+/**
+ * struct leapraid_evt_data_sas_topo_change_list - SAS topology change list
+ *
+ * @encl_hdl: Enclosure handle
+ * @exp_dev_hdl: Expander device handle
+ * @num_phys: Number of PHYs in this entry
+ * @r1: Reserved
+ * @entry_num: Entry index
+ * @start_phy_num: Start PHY number
+ * @exp_status: Expander status
+ * @physical_port: Physical port number
+ * @phy: Array of SAS PHY entries
+ */
+struct leapraid_evt_data_sas_topo_change_list {
+ __le16 encl_hdl;
+ __le16 exp_dev_hdl;
+ u8 num_phys;
+ u8 r1[3];
+ u8 entry_num;
+ u8 start_phy_num;
+ u8 exp_status;
+ u8 physical_port;
+ struct leapraid_evt_sas_topo_phy_entry phy[];
+};
+
+/**
+ * struct leapraid_evt_data_sas_enc_dev_status_change - SAS enclosure device status
+ *
+ * @enc_hdl: Enclosure handle
+ * @reason_code: Reason code for status change
+ * @physical_port: Physical port number
+ * @encl_logical_id: Enclosure logical ID
+ * @num_slots: Number of slots in enclosure
+ * @start_slot: First affected slot
+ * @phy_bits: Bitmap of affected PHYs
+ */
+struct leapraid_evt_data_sas_enc_dev_status_change {
+ __le16 enc_hdl;
+ u8 reason_code;
+ u8 physical_port;
+ __le64 encl_logical_id;
+ __le16 num_slots;
+ __le16 start_slot;
+ __le32 phy_bits;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_io_unit_ctrl_req - IO unit control request
+ *
+ * @op: Operation code
+ * @r1: Reserved
+ * @chain_offset: SGE chain offset
+ * @func: Function code
+ * @dev_hdl: Device handle
+ * @adapter_para: Adapter parameter selector
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @phy_num: PHY number
+ * @r3: Reserved
+ * @adapter_para_value: Value for adapter parameter
+ * @adapter_para_value2: Optional second parameter value
+ * @r4: Reserved
+ */
+struct leapraid_io_unit_ctrl_req {
+ u8 op;
+ u8 r1;
+ u8 chain_offset;
+ u8 func;
+	__le16 dev_hdl;
+ u8 adapter_para;
+ u8 msg_flag;
+ u8 r2[6];
+ u8 phy_num;
+ u8 r3[17];
+ __le32 adapter_para_value;
+ __le32 adapter_para_value2;
+ u8 r4[4];
+};
+
+/**
+ * struct leapraid_io_unit_ctrl_rep - IO unit control reply
+ *
+ * @op: Operation code echoed
+ * @r1: Reserved
+ * @func: Function code
+ * @dev_hdl: Device handle
+ * @r2: Reserved
+ */
+struct leapraid_io_unit_ctrl_rep {
+ u8 op;
+ u8 r1[2];
+ u8 func;
+ __le16 dev_hdl;
+ u8 r2[14];
+};
+
+/**
+ * struct leapraid_raid_act_req - RAID action request
+ *
+ * @act: RAID action code
+ * @r1: Reserved
+ * @func: Function code
+ * @r2: Reserved
+ * @phys_disk_num: Number of physical disks involved
+ * @r3: Reserved
+ * @action_data_sge: SGE describing action-specific data
+ */
+struct leapraid_raid_act_req {
+ u8 act;
+ u8 r1[2];
+ u8 func;
+ u8 r2[2];
+ u8 phys_disk_num;
+ u8 r3[13];
+ struct leapraid_sge_simple_union action_data_sge;
+};
+
+/**
+ * struct leapraid_raid_act_rep - RAID action reply
+ *
+ * @act: RAID action code echoed
+ * @r1: Reserved
+ * @func: Function code
+ * @vol_dev_hdl: Volume device handle
+ * @r2: Reserved
+ * @adapter_status: Status returned by adapter
+ * @r3: Reserved
+ */
+struct leapraid_raid_act_rep {
+ u8 act;
+ u8 r1[2];
+ u8 func;
+ __le16 vol_dev_hdl;
+ u8 r2[8];
+ __le16 adapter_status;
+ u8 r3[76];
+};
+
+/**
+ * struct leapraid_smp_passthrough_req - SMP passthrough request
+ *
+ * @passthrough_flg: Passthrough flags
+ * @physical_port: Target PHY port
+ * @r1: Reserved
+ * @func: Function code
+ * @req_data_len: Request data length
+ * @r2: Reserved
+ * @sas_address: SAS address of target device
+ * @r3: Reserved
+ * @sgl: Scatter-gather list describing request buffer
+ */
+struct leapraid_smp_passthrough_req {
+ u8 passthrough_flg;
+ u8 physical_port;
+ u8 r1;
+ u8 func;
+ __le16 req_data_len;
+ u8 r2[10];
+ __le64 sas_address;
+ u8 r3[8];
+ union leapraid_simple_sge_union sgl;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_smp_passthrough_rep - SMP passthrough reply
+ *
+ * @passthrough_flg: Passthrough flags echoed
+ * @physical_port: Target PHY port
+ * @r1: Reserved
+ * @func: Function code
+ * @resp_data_len: Length of response data
+ * @r2: Reserved
+ * @adapter_status: Adapter status
+ * @r3: Reserved
+ */
+struct leapraid_smp_passthrough_rep {
+ u8 passthrough_flg;
+ u8 physical_port;
+ u8 r1;
+ u8 func;
+ __le16 resp_data_len;
+ u8 r2[8];
+ __le16 adapter_status;
+ u8 r3[12];
+};
+
+/**
+ * struct leapraid_sas_io_unit_ctrl_req - SAS IO unit control request
+ *
+ * @op: Operation code
+ * @r1: Reserved
+ * @func: Function code
+ * @dev_hdl: Device handle
+ * @r2: Reserved
+ */
+struct leapraid_sas_io_unit_ctrl_req {
+ u8 op;
+ u8 r1[2];
+ u8 func;
+ __le16 dev_hdl;
+ u8 r2[38];
+};
+
+#endif /* LEAPRAID_H */
diff --git a/drivers/scsi/leapraid/leapraid_app.c b/drivers/scsi/leapraid/leapraid_app.c
new file mode 100644
index 000000000000..f838bd5aa20e
--- /dev/null
+++ b/drivers/scsi/leapraid/leapraid_app.c
@@ -0,0 +1,675 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 LeapIO Tech Inc.
+ *
+ * LeapRAID Storage and RAID Controller driver.
+ */
+
+#include <linux/compat.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+
+#include "leapraid_func.h"
+
+/* ioctl device file */
+#define LEAPRAID_DEV_NAME "leapraid_ctl"
+
+/* ioctl version */
+#define LEAPRAID_IOCTL_VERSION 0x07
+
+/* ioctl command */
+#define LEAPRAID_ADAPTER_INFO 17
+#define LEAPRAID_COMMAND 20
+#define LEAPRAID_EVENTQUERY 21
+#define LEAPRAID_EVENTREPORT 23
+
+/**
+ * struct leapraid_ioctl_header - IOCTL command header
+ * @adapter_id : Adapter identifier
+ * @port_number: Port identifier
+ * @max_data_size: Maximum data size for transfer
+ */
+struct leapraid_ioctl_header {
+ u32 adapter_id;
+ u32 port_number;
+ u32 max_data_size;
+};
+
+/**
+ * struct leapraid_ioctl_diag_reset - Diagnostic reset request
+ * @hdr: Common IOCTL header
+ */
+struct leapraid_ioctl_diag_reset {
+ struct leapraid_ioctl_header hdr;
+};
+
+/**
+ * struct leapraid_ioctl_pci_info - PCI device information
+ * @u: Union holding PCI bus/device/function information
+ * @u.bits.dev: PCI device number
+ * @u.bits.func: PCI function number
+ * @u.bits.bus: PCI bus number
+ * @u.word: Combined representation of PCI BDF
+ * @seg_id: PCI segment identifier
+ */
+struct leapraid_ioctl_pci_info {
+ union {
+ struct {
+ u32 dev:5;
+ u32 func:3;
+ u32 bus:24;
+ } bits;
+ u32 word;
+ } u;
+ u32 seg_id;
+};
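+
+/*
+ * Example encoding: PCI address 0000:01:02.3 is reported as
+ * u.bits = { .dev = 2, .func = 3, .bus = 1 } with seg_id = 0,
+ * matching how leapraid_ctl_get_adapter_info() fills these fields.
+ */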
+
+/**
+ * struct leapraid_ioctl_adapter_info - Adapter information for IOCTL
+ * @hdr: IOCTL header
+ * @adapter_type: Adapter type identifier
+ * @port_number: Port number
+ * @pci_id: PCI device ID
+ * @revision: Revision number
+ * @sub_dev: Subsystem device ID
+ * @sub_vendor: Subsystem vendor ID
+ * @r0: Reserved
+ * @fw_ver: Firmware version
+ * @bios_ver: BIOS version
+ * @driver_ver: Driver version
+ * @r1: Reserved
+ * @scsi_id: SCSI ID
+ * @r2: Reserved
+ * @pci_info: PCI information structure
+ */
+struct leapraid_ioctl_adapter_info {
+ struct leapraid_ioctl_header hdr;
+ u32 adapter_type;
+ u32 port_number;
+ u32 pci_id;
+ u32 revision;
+ u32 sub_dev;
+ u32 sub_vendor;
+ u32 r0;
+ u32 fw_ver;
+ u32 bios_ver;
+ u8 driver_ver[32];
+ u8 r1;
+ u8 scsi_id;
+ u16 r2;
+ struct leapraid_ioctl_pci_info pci_info;
+};
+
+/**
+ * struct leapraid_ioctl_command - IOCTL command structure
+ * @hdr: IOCTL header
+ * @timeout: Command timeout
+ * @rep_msg_buf_ptr: User pointer to reply message buffer
+ * @c2h_buf_ptr: User pointer to card-to-host data buffer
+ * @h2c_buf_ptr: User pointer to host-to-card data buffer
+ * @sense_data_ptr: User pointer to sense data buffer
+ * @max_rep_bytes: Maximum reply bytes
+ * @c2h_size: Card-to-host data size
+ * @h2c_size: Host-to-card data size
+ * @max_sense_bytes: Maximum sense data bytes
+ * @data_sge_offset: Data SGE offset
+ * @mf: Message frame data (flexible array)
+ */
+struct leapraid_ioctl_command {
+ struct leapraid_ioctl_header hdr;
+ u32 timeout;
+ void __user *rep_msg_buf_ptr;
+ void __user *c2h_buf_ptr;
+ void __user *h2c_buf_ptr;
+ void __user *sense_data_ptr;
+ u32 max_rep_bytes;
+ u32 c2h_size;
+ u32 h2c_size;
+ u32 max_sense_bytes;
+ u32 data_sge_offset;
+ u8 mf[];
+};
+
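+/*
+ * Find the adapter matching an ioctl adapter id. The list lock is
+ * dropped before the adapter is returned, so callers are assumed to be
+ * serialized against hot removal elsewhere (e.g. via pci_access_lock).
+ */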
+static struct leapraid_adapter *leapraid_ctl_lookup_adapter(int adapter_id)
+{
+ struct leapraid_adapter *adapter;
+
+ spin_lock(&leapraid_adapter_lock);
+ list_for_each_entry(adapter, &leapraid_adapter_list, list) {
+ if (adapter->adapter_attr.id == adapter_id) {
+ spin_unlock(&leapraid_adapter_lock);
+ return adapter;
+ }
+ }
+ spin_unlock(&leapraid_adapter_lock);
+
+ return NULL;
+}
+
+static void leapraid_cli_scsiio_cmd(struct leapraid_adapter *adapter,
+ struct leapraid_req *ctl_sp_mpi_req, u16 taskid,
+ dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size,
+ u16 dev_hdl, void *psge)
+{
+ struct leapraid_mpi_scsiio_req *scsiio_request =
+ (struct leapraid_mpi_scsiio_req *)ctl_sp_mpi_req;
+
+ scsiio_request->sense_buffer_len = SCSI_SENSE_BUFFERSIZE;
+ scsiio_request->sense_buffer_low_add =
+ leapraid_get_sense_buffer_dma(adapter, taskid);
+ memset((void *)(&adapter->driver_cmds.ctl_cmd.sense),
+ 0, SCSI_SENSE_BUFFERSIZE);
+ leapraid_build_ieee_sg(adapter, psge, h2c_dma_addr,
+ h2c_size, c2h_dma_addr, c2h_size);
+ if (scsiio_request->func == LEAPRAID_FUNC_SCSIIO_REQ)
+ leapraid_fire_scsi_io(adapter, taskid, dev_hdl);
+ else
+ leapraid_fire_task(adapter, taskid);
+}
+
+static void leapraid_ctl_smp_passthrough_cmd(struct leapraid_adapter *adapter,
+ struct leapraid_req *ctl_sp_mpi_req,
+ u16 taskid,
+ dma_addr_t h2c_dma_addr,
+ size_t h2c_size,
+ dma_addr_t c2h_dma_addr,
+ size_t c2h_size,
+ void *psge, void *h2c)
+{
+ struct leapraid_smp_passthrough_req *smp_pt_req =
+ (struct leapraid_smp_passthrough_req *)ctl_sp_mpi_req;
+ u8 *data;
+
+ if (!adapter->adapter_attr.enable_mp)
+ smp_pt_req->physical_port = LEAPRAID_DISABLE_MP_PORT_ID;
+ if (smp_pt_req->passthrough_flg & LEAPRAID_SMP_PT_FLAG_SGL_PTR)
+ data = (u8 *)&smp_pt_req->sgl;
+ else
+ data = h2c;
+
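+	/*
+	 * Byte 1 of the SMP frame is the function and byte 10 the phy
+	 * operation; operations 1 and 2 are assumed to be link/hard
+	 * resets, so mark the adapter as link-resetting for their
+	 * duration.
+	 */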
+ if (data[1] == LEAPRAID_SMP_FN_REPORT_PHY_ERR_LOG &&
+ (data[10] == 1 || data[10] == 2))
+ adapter->reset_desc.adapter_link_resetting = true;
+ leapraid_build_ieee_sg(adapter, psge, h2c_dma_addr,
+ h2c_size, c2h_dma_addr, c2h_size);
+ leapraid_fire_task(adapter, taskid);
+}
+
+static void leapraid_ctl_fire_ieee_cmd(struct leapraid_adapter *adapter,
+ dma_addr_t h2c_dma_addr,
+ size_t h2c_size,
+ dma_addr_t c2h_dma_addr,
+ size_t c2h_size,
+ void *psge, u16 taskid)
+{
+ leapraid_build_ieee_sg(adapter, psge, h2c_dma_addr, h2c_size,
+ c2h_dma_addr, c2h_size);
+ leapraid_fire_task(adapter, taskid);
+}
+
+static void leapraid_ctl_sata_passthrough_cmd(struct leapraid_adapter *adapter,
+ dma_addr_t h2c_dma_addr,
+ size_t h2c_size,
+ dma_addr_t c2h_dma_addr,
+ size_t c2h_size,
+ void *psge, u16 taskid)
+{
+ leapraid_ctl_fire_ieee_cmd(adapter, h2c_dma_addr,
+ h2c_size, c2h_dma_addr,
+ c2h_size, psge, taskid);
+}
+
+static void leapraid_ctl_load_fw_cmd(struct leapraid_adapter *adapter,
+ dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size,
+ void *psge, u16 taskid)
+{
+ leapraid_ctl_fire_ieee_cmd(adapter, h2c_dma_addr,
+ h2c_size, c2h_dma_addr,
+ c2h_size, psge, taskid);
+}
+
+static void leapraid_ctl_fire_mpi_cmd(struct leapraid_adapter *adapter,
+ dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size,
+ void *psge, u16 taskid)
+{
+ leapraid_build_mpi_sg(adapter, psge, h2c_dma_addr,
+ h2c_size, c2h_dma_addr, c2h_size);
+ leapraid_fire_task(adapter, taskid);
+}
+
+static void leapraid_ctl_sas_io_unit_ctrl_cmd(struct leapraid_adapter *adapter,
+ struct leapraid_req *ctl_sp_mpi_req,
+ dma_addr_t h2c_dma_addr,
+ size_t h2c_size,
+ dma_addr_t c2h_dma_addr,
+ size_t c2h_size,
+ void *psge, u16 taskid)
+{
+ struct leapraid_sas_io_unit_ctrl_req *sas_io_unit_ctrl_req =
+ (struct leapraid_sas_io_unit_ctrl_req *)ctl_sp_mpi_req;
+
+ if (sas_io_unit_ctrl_req->op == LEAPRAID_SAS_OP_PHY_HARD_RESET ||
+ sas_io_unit_ctrl_req->op == LEAPRAID_SAS_OP_PHY_LINK_RESET)
+ adapter->reset_desc.adapter_link_resetting = true;
+ leapraid_ctl_fire_mpi_cmd(adapter, h2c_dma_addr,
+ h2c_size, c2h_dma_addr,
+ c2h_size, psge, taskid);
+}
+
+static long leapraid_ctl_do_command(struct leapraid_adapter *adapter,
+ struct leapraid_ioctl_command *karg,
+ void __user *mf)
+{
+ struct leapraid_req *leap_mpi_req = NULL;
+ struct leapraid_req *ctl_sp_mpi_req = NULL;
+ u16 taskid;
+ void *h2c = NULL;
+ size_t h2c_size = 0;
+ dma_addr_t h2c_dma_addr = 0;
+ void *c2h = NULL;
+ size_t c2h_size = 0;
+ dma_addr_t c2h_dma_addr = 0;
+ void *psge;
+ unsigned long timeout;
+ u16 dev_hdl = LEAPRAID_INVALID_DEV_HANDLE;
+ bool issue_reset = false;
+ u32 sz;
+ long rc = 0;
+
+ rc = leapraid_check_adapter_is_op(adapter);
+ if (rc)
+ goto out;
+
+ leap_mpi_req = kzalloc(LEAPRAID_REQUEST_SIZE, GFP_KERNEL);
+ if (!leap_mpi_req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
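+	/*
+	 * data_sge_offset is in LEAPRAID_SGE_OFFSET_SIZE units; reject
+	 * values whose byte equivalent exceeds one request frame, and
+	 * values large enough to overflow the multiplication.
+	 */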
+ if (karg->data_sge_offset * LEAPRAID_SGE_OFFSET_SIZE > LEAPRAID_REQUEST_SIZE ||
+ karg->data_sge_offset > ((UINT_MAX) / LEAPRAID_SGE_OFFSET_SIZE)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (copy_from_user(leap_mpi_req, mf,
+ karg->data_sge_offset * LEAPRAID_SGE_OFFSET_SIZE)) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ taskid = adapter->driver_cmds.ctl_cmd.taskid;
+
+ adapter->driver_cmds.ctl_cmd.status = LEAPRAID_CMD_PENDING;
+ memset((void *)(&adapter->driver_cmds.ctl_cmd.reply), 0,
+ LEAPRAID_REPLY_SIEZ);
+ ctl_sp_mpi_req = leapraid_get_task_desc(adapter, taskid);
+ memset(ctl_sp_mpi_req, 0, LEAPRAID_REQUEST_SIZE);
+ memcpy(ctl_sp_mpi_req,
+ leap_mpi_req,
+ karg->data_sge_offset * LEAPRAID_SGE_OFFSET_SIZE);
+
+ if (ctl_sp_mpi_req->func == LEAPRAID_FUNC_SCSIIO_REQ ||
+ ctl_sp_mpi_req->func == LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH ||
+ ctl_sp_mpi_req->func == LEAPRAID_FUNC_SATA_PASSTHROUGH) {
+ dev_hdl = le16_to_cpu(ctl_sp_mpi_req->func_dep1);
+ if (!dev_hdl || dev_hdl > adapter->adapter_attr.features.max_dev_handle) {
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
+	if (WARN_ON(ctl_sp_mpi_req->func == LEAPRAID_FUNC_SCSI_TMF)) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+ h2c_size = karg->h2c_size;
+ c2h_size = karg->c2h_size;
+ if (h2c_size) {
+ h2c = dma_alloc_coherent(&adapter->pdev->dev, h2c_size,
+ &h2c_dma_addr, GFP_ATOMIC);
+ if (!h2c) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (copy_from_user(h2c, karg->h2c_buf_ptr, h2c_size)) {
+ rc = -EFAULT;
+ goto out;
+ }
+ }
+ if (c2h_size) {
+ c2h = dma_alloc_coherent(&adapter->pdev->dev,
+ c2h_size, &c2h_dma_addr, GFP_ATOMIC);
+ if (!c2h) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ }
+
+ psge = (void *)ctl_sp_mpi_req + (karg->data_sge_offset *
+ LEAPRAID_SGE_OFFSET_SIZE);
+ init_completion(&adapter->driver_cmds.ctl_cmd.done);
+
+ switch (ctl_sp_mpi_req->func) {
+ case LEAPRAID_FUNC_SCSIIO_REQ:
+ case LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH:
+ if (test_bit(dev_hdl, (unsigned long *)adapter->dev_topo.dev_removing)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ leapraid_cli_scsiio_cmd(adapter, ctl_sp_mpi_req, taskid,
+ h2c_dma_addr, h2c_size,
+ c2h_dma_addr, c2h_size,
+ dev_hdl, psge);
+ break;
+ case LEAPRAID_FUNC_SMP_PASSTHROUGH:
+ if (!h2c) {
+ rc = -EINVAL;
+ goto out;
+ }
+ leapraid_ctl_smp_passthrough_cmd(adapter,
+ ctl_sp_mpi_req, taskid,
+ h2c_dma_addr, h2c_size,
+ c2h_dma_addr, c2h_size,
+ psge, h2c);
+ break;
+ case LEAPRAID_FUNC_SATA_PASSTHROUGH:
+ if (test_bit(dev_hdl, (unsigned long *)adapter->dev_topo.dev_removing)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ leapraid_ctl_sata_passthrough_cmd(adapter, h2c_dma_addr,
+ h2c_size, c2h_dma_addr,
+ c2h_size, psge, taskid);
+ break;
+ case LEAPRAID_FUNC_FW_DOWNLOAD:
+ case LEAPRAID_FUNC_FW_UPLOAD:
+ leapraid_ctl_load_fw_cmd(adapter, h2c_dma_addr,
+ h2c_size, c2h_dma_addr,
+ c2h_size, psge, taskid);
+ break;
+ case LEAPRAID_FUNC_SAS_IO_UNIT_CTRL:
+ leapraid_ctl_sas_io_unit_ctrl_cmd(adapter, ctl_sp_mpi_req,
+ h2c_dma_addr, h2c_size,
+ c2h_dma_addr, c2h_size,
+ psge, taskid);
+ break;
+ default:
+ leapraid_ctl_fire_mpi_cmd(adapter, h2c_dma_addr,
+ h2c_size, c2h_dma_addr,
+ c2h_size, psge, taskid);
+ break;
+ }
+
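+	/* enforce the minimum ioctl timeout; karg->timeout is in seconds */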
+ timeout = karg->timeout;
+ if (timeout < LEAPRAID_CTL_CMD_TIMEOUT)
+ timeout = LEAPRAID_CTL_CMD_TIMEOUT;
+ wait_for_completion_timeout(&adapter->driver_cmds.ctl_cmd.done,
+ timeout * HZ);
+
+ if ((leap_mpi_req->func == LEAPRAID_FUNC_SMP_PASSTHROUGH ||
+ leap_mpi_req->func == LEAPRAID_FUNC_SAS_IO_UNIT_CTRL) &&
+ adapter->reset_desc.adapter_link_resetting) {
+ adapter->reset_desc.adapter_link_resetting = false;
+ }
+ if (!(adapter->driver_cmds.ctl_cmd.status & LEAPRAID_CMD_DONE)) {
+ issue_reset =
+ leapraid_check_reset(
+ adapter->driver_cmds.ctl_cmd.status);
+ goto reset;
+ }
+
+ if (c2h_size) {
+ if (copy_to_user(karg->c2h_buf_ptr, c2h, c2h_size)) {
+ rc = -ENODATA;
+ goto out;
+ }
+ }
+ if (karg->max_rep_bytes) {
+ sz = min_t(u32, karg->max_rep_bytes, LEAPRAID_REPLY_SIEZ);
+ if (copy_to_user(karg->rep_msg_buf_ptr,
+ (void *)&adapter->driver_cmds.ctl_cmd.reply,
+ sz)) {
+ rc = -ENODATA;
+ goto out;
+ }
+ }
+
+ if (karg->max_sense_bytes &&
+ (leap_mpi_req->func == LEAPRAID_FUNC_SCSIIO_REQ ||
+ leap_mpi_req->func == LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH)) {
+ if (!karg->sense_data_ptr)
+ goto out;
+
+ sz = min_t(u32, karg->max_sense_bytes, SCSI_SENSE_BUFFERSIZE);
+ if (copy_to_user(karg->sense_data_ptr,
+ (void *)&adapter->driver_cmds.ctl_cmd.sense,
+ sz)) {
+ rc = -ENODATA;
+ goto out;
+ }
+ }
+reset:
+ if (issue_reset) {
+ rc = -ENODATA;
+ if (leap_mpi_req->func == LEAPRAID_FUNC_SCSIIO_REQ ||
+ leap_mpi_req->func == LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH ||
+ leap_mpi_req->func == LEAPRAID_FUNC_SATA_PASSTHROUGH) {
+ dev_err(&adapter->pdev->dev,
+ "fire tgt reset: hdl=0x%04x\n",
+ le16_to_cpu(leap_mpi_req->func_dep1));
+ leapraid_issue_locked_tm(adapter,
+ le16_to_cpu(leap_mpi_req->func_dep1), 0, 0, 0,
+ LEAPRAID_TM_TASKTYPE_TARGET_RESET, taskid,
+ LEAPRAID_TM_MSGFLAGS_LINK_RESET);
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, FULL_RESET);
+ }
+ }
+out:
+ if (c2h)
+ dma_free_coherent(&adapter->pdev->dev, c2h_size,
+ c2h, c2h_dma_addr);
+ if (h2c)
+ dma_free_coherent(&adapter->pdev->dev, h2c_size,
+ h2c, h2c_dma_addr);
+ kfree(leap_mpi_req);
+ adapter->driver_cmds.ctl_cmd.status = LEAPRAID_CMD_NOT_USED;
+ return rc;
+}
+
+static long leapraid_ctl_get_adapter_info(struct leapraid_adapter *adapter,
+ void __user *arg)
+{
+ struct leapraid_ioctl_adapter_info *karg;
+ ssize_t __maybe_unused ret;
+ u8 revision;
+
+ karg = kzalloc(sizeof(*karg), GFP_KERNEL);
+ if (!karg)
+ return -ENOMEM;
+
+ pci_read_config_byte(adapter->pdev, PCI_CLASS_REVISION, &revision);
+ karg->revision = revision;
+ karg->pci_id = adapter->pdev->device;
+ karg->sub_dev = adapter->pdev->subsystem_device;
+ karg->sub_vendor = adapter->pdev->subsystem_vendor;
+ karg->pci_info.u.bits.bus = adapter->pdev->bus->number;
+ karg->pci_info.u.bits.dev = PCI_SLOT(adapter->pdev->devfn);
+ karg->pci_info.u.bits.func = PCI_FUNC(adapter->pdev->devfn);
+ karg->pci_info.seg_id = pci_domain_nr(adapter->pdev->bus);
+ karg->fw_ver = adapter->adapter_attr.features.fw_version;
+ ret = strscpy(karg->driver_ver, LEAPRAID_DRIVER_NAME,
+ sizeof(karg->driver_ver));
+ strcat(karg->driver_ver, "-");
+ strcat(karg->driver_ver, LEAPRAID_DRIVER_VERSION);
+ karg->adapter_type = LEAPRAID_IOCTL_VERSION;
+ karg->bios_ver = adapter->adapter_attr.bios_version;
+ if (copy_to_user(arg, karg,
+ sizeof(struct leapraid_ioctl_adapter_info))) {
+ kfree(karg);
+ return -EFAULT;
+ }
+
+ kfree(karg);
+ return 0;
+}
+
+static long leapraid_ctl_ioctl_main(struct file *file, unsigned int cmd,
+ void __user *arg, u8 compat)
+{
+ struct leapraid_ioctl_header ioctl_header;
+ struct leapraid_adapter *adapter;
+ long rc = -ENOIOCTLCMD;
+ int count;
+
+ if (copy_from_user(&ioctl_header, (char __user *)arg,
+ sizeof(struct leapraid_ioctl_header)))
+ return -EFAULT;
+
+ adapter = leapraid_ctl_lookup_adapter(ioctl_header.adapter_id);
+ if (!adapter)
+ return -EFAULT;
+
+ mutex_lock(&adapter->access_ctrl.pci_access_lock);
+
+ rc = leapraid_check_adapter_is_op(adapter);
+ if (rc)
+ goto out;
+
+ count = LEAPRAID_WAIT_SHOST_RECOVERY;
+ while (count--) {
+ if (!adapter->access_ctrl.shost_recovering)
+ break;
+ ssleep(1);
+ }
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering ||
+ adapter->scan_dev_desc.driver_loading ||
+ adapter->access_ctrl.host_removing) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if (file->f_flags & O_NONBLOCK) {
+ if (!mutex_trylock(&adapter->driver_cmds.ctl_cmd.mutex)) {
+ rc = -EAGAIN;
+ goto out;
+ }
+ } else if (mutex_lock_interruptible(&adapter->driver_cmds.ctl_cmd.mutex)) {
+ rc = -ERESTARTSYS;
+ goto out;
+ }
+
+ switch (_IOC_NR(cmd)) {
+ case LEAPRAID_ADAPTER_INFO:
+ if (_IOC_SIZE(cmd) == sizeof(struct leapraid_ioctl_adapter_info))
+ rc = leapraid_ctl_get_adapter_info(adapter, arg);
+ break;
+ case LEAPRAID_COMMAND:
+ {
+ struct leapraid_ioctl_command __user *uarg;
+ struct leapraid_ioctl_command karg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ rc = -EFAULT;
+ break;
+ }
+
+ if (karg.hdr.adapter_id != ioctl_header.adapter_id) {
+ rc = -EINVAL;
+ break;
+ }
+
+ if (_IOC_SIZE(cmd) == sizeof(struct leapraid_ioctl_command)) {
+ uarg = arg;
+ rc = leapraid_ctl_do_command(adapter, &karg,
+ &uarg->mf);
+ }
+ break;
+ }
+ case LEAPRAID_EVENTQUERY:
+ case LEAPRAID_EVENTREPORT:
+ rc = 0;
+ break;
+ default:
+ pr_err("unknown ioctl opcode=0x%08x\n", cmd);
+ break;
+ }
+ mutex_unlock(&adapter->driver_cmds.ctl_cmd.mutex);
+
+out:
+ mutex_unlock(&adapter->access_ctrl.pci_access_lock);
+ return rc;
+}
+
+static long leapraid_ctl_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return leapraid_ctl_ioctl_main(file, cmd,
+ (void __user *)arg, 0);
+}
+
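+/*
+ * Map the firmware log buffer of the first registered adapter into
+ * user space; the requested length is validated against the log
+ * buffer size plus its reserve area before remapping.
+ */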
+static int leapraid_fw_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct leapraid_adapter *adapter;
+ unsigned long length;
+ unsigned long pfn;
+
+ length = vma->vm_end - vma->vm_start;
+
+ adapter = list_first_entry(&leapraid_adapter_list,
+ struct leapraid_adapter, list);
+
+ if (length > (LEAPRAID_SYS_LOG_BUF_SIZE +
+ LEAPRAID_SYS_LOG_BUF_RESERVE)) {
+ dev_err(&adapter->pdev->dev,
+ "requested mapping size is too large!\n");
+ return -EINVAL;
+ }
+
+ if (!adapter->fw_log_desc.fw_log_buffer) {
+ dev_err(&adapter->pdev->dev, "no log buffer!\n");
+ return -EINVAL;
+ }
+
+ pfn = virt_to_phys(adapter->fw_log_desc.fw_log_buffer) >> PAGE_SHIFT;
+
+ if (remap_pfn_range(vma, vma->vm_start, pfn, length,
+ vma->vm_page_prot)) {
+ dev_err(&adapter->pdev->dev,
+ "failed to map memory to user space!\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static const struct file_operations leapraid_ctl_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = leapraid_ctl_ioctl,
+ .mmap = leapraid_fw_mmap,
+};
+
+static struct miscdevice leapraid_ctl_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = LEAPRAID_DEV_NAME,
+ .fops = &leapraid_ctl_fops,
+};
+
+void leapraid_ctl_init(void)
+{
+ if (misc_register(&leapraid_ctl_dev) < 0)
+ pr_err("%s can't register misc device\n", LEAPRAID_DRIVER_NAME);
+}
+
+void leapraid_ctl_exit(void)
+{
+ misc_deregister(&leapraid_ctl_dev);
+}
diff --git a/drivers/scsi/leapraid/leapraid_func.c b/drivers/scsi/leapraid/leapraid_func.c
new file mode 100644
index 000000000000..2fc142fff982
--- /dev/null
+++ b/drivers/scsi/leapraid/leapraid_func.c
@@ -0,0 +1,8264 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 LeapIO Tech Inc.
+ *
+ * LeapRAID Storage and RAID Controller driver.
+ */
+
+#include <linux/module.h>
+
+#include "leapraid_func.h"
+
+static int msix_disable;
+module_param(msix_disable, int, 0444);
+MODULE_PARM_DESC(msix_disable,
+ "disable msix routed interrupts (default=0)");
+
+static int smart_poll;
+module_param(smart_poll, int, 0444);
+MODULE_PARM_DESC(smart_poll,
+ "check SATA drive health via SMART polling: (default=0)");
+
+static int interrupt_mode;
+module_param(interrupt_mode, int, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+ "intr mode: 0 for MSI-X, 1 for MSI, 2 for legacy. (default=0)");
+
+static int poll_queues;
+module_param(poll_queues, int, 0444);
+MODULE_PARM_DESC(poll_queues,
+ "specifies the number of queues for io_uring poll mode.");
+static int max_msix_vectors = -1;
+module_param(max_msix_vectors, int, 0444);
+MODULE_PARM_DESC(max_msix_vectors, " max msix vectors");
+
+static void leapraid_remove_device(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev);
+static void leapraid_set_led(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev, bool on);
+static void leapraid_ublk_io_dev(struct leapraid_adapter *adapter,
+ u64 sas_address,
+ struct leapraid_card_port *port);
+static int leapraid_make_adapter_available(struct leapraid_adapter *adapter);
+static int leapraid_fw_log_init(struct leapraid_adapter *adapter);
+static int leapraid_make_adapter_ready(struct leapraid_adapter *adapter,
+ enum reset_type type);
+
+static inline bool leapraid_is_end_dev(u32 dev_type)
+{
+ return (dev_type & LEAPRAID_DEVTYP_END_DEV) &&
+ ((dev_type & LEAPRAID_DEVTYP_SSP_TGT) ||
+ (dev_type & LEAPRAID_DEVTYP_STP_TGT) ||
+ (dev_type & LEAPRAID_DEVTYP_SATA_DEV));
+}
+
+bool leapraid_pci_removed(struct leapraid_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u32 vendor_id;
+
+ if (pci_bus_read_config_dword(pdev->bus, pdev->devfn, PCI_VENDOR_ID,
+ &vendor_id))
+ return true;
+
+ return ((vendor_id & LEAPRAID_PCI_VENDOR_ID_MASK) !=
+ LEAPRAID_VENDOR_ID);
+}
+
+static bool leapraid_pci_active(struct leapraid_adapter *adapter)
+{
+ return !(adapter->access_ctrl.pcie_recovering ||
+ leapraid_pci_removed(adapter));
+}
+
+void *leapraid_get_reply_vaddr(struct leapraid_adapter *adapter, u32 rep_paddr)
+{
+ if (!rep_paddr)
+ return NULL;
+
+ return adapter->mem_desc.rep_msg +
+ (rep_paddr - (u32)adapter->mem_desc.rep_msg_dma);
+}
+
+void *leapraid_get_task_desc(struct leapraid_adapter *adapter, u16 taskid)
+{
+ return (void *)(adapter->mem_desc.task_desc +
+ (taskid * LEAPRAID_REQUEST_SIZE));
+}
+
+void *leapraid_get_sense_buffer(struct leapraid_adapter *adapter, u16 taskid)
+{
+ return (void *)(adapter->mem_desc.sense_data +
+ ((taskid - 1) * SCSI_SENSE_BUFFERSIZE));
+}
+
+__le32 leapraid_get_sense_buffer_dma(struct leapraid_adapter *adapter,
+ u16 taskid)
+{
+ return cpu_to_le32(adapter->mem_desc.sense_data_dma +
+ ((taskid - 1) * SCSI_SENSE_BUFFERSIZE));
+}
+
+void leapraid_mask_int(struct leapraid_adapter *adapter)
+{
+ u32 reg;
+
+ adapter->mask_int = true;
+ reg = leapraid_readl(&adapter->iomem_base->host_int_mask);
+	reg |= LEAPRAID_TO_SYS_DB_MASK | LEAPRAID_REPLY_INT_MASK |
+	       LEAPRAID_RESET_IRQ_MASK;
+ writel(reg, &adapter->iomem_base->host_int_mask);
+ leapraid_readl(&adapter->iomem_base->host_int_mask);
+}
+
+void leapraid_unmask_int(struct leapraid_adapter *adapter)
+{
+ u32 reg;
+
+ reg = leapraid_readl(&adapter->iomem_base->host_int_mask);
+ reg &= ~LEAPRAID_REPLY_INT_MASK;
+ writel(reg, &adapter->iomem_base->host_int_mask);
+ adapter->mask_int = false;
+}
+
+static void leapraid_flush_io_and_panic(struct leapraid_adapter *adapter)
+{
+ adapter->access_ctrl.adapter_thermal_alert = true;
+ leapraid_smart_polling_stop(adapter);
+ leapraid_fw_log_stop(adapter);
+ leapraid_mq_polling_pause(adapter);
+ leapraid_clean_active_scsi_cmds(adapter);
+}
+
+static void leapraid_check_panic_needed(struct leapraid_adapter *adapter,
+ u32 db, u32 adapter_state)
+{
+ bool fault_1 = adapter_state == LEAPRAID_DB_MASK;
+ bool fault_2 = (adapter_state == LEAPRAID_DB_FAULT) &&
+ ((db & LEAPRAID_DB_DATA_MASK) == LEAPRAID_DB_OVER_TEMPERATURE);
+
+ if (!fault_1 && !fault_2)
+ return;
+
+ if (fault_1)
+ pr_err("%s, doorbell status 0xFFFF!\n", __func__);
+ else
+ pr_err("%s, adapter overheating detected!\n", __func__);
+
+ leapraid_flush_io_and_panic(adapter);
+ panic("%s overheating detected, panic now!!!\n", __func__);
+}
+
+u32 leapraid_get_adapter_state(struct leapraid_adapter *adapter)
+{
+ u32 db;
+ u32 adapter_state;
+
+ db = leapraid_readl(&adapter->iomem_base->db);
+ adapter_state = db & LEAPRAID_DB_MASK;
+ leapraid_check_panic_needed(adapter, db, adapter_state);
+ return adapter_state;
+}
+
+static bool leapraid_wait_adapter_ready(struct leapraid_adapter *adapter)
+{
+ u32 cur_state;
+ u32 cnt = LEAPRAID_ADAPTER_READY_MAX_RETRY;
+
+ do {
+ cur_state = leapraid_get_adapter_state(adapter);
+ if (cur_state == LEAPRAID_DB_READY)
+ return true;
+ if (cur_state == LEAPRAID_DB_FAULT)
+ break;
+ usleep_range(LEAPRAID_ADAPTER_READY_SLEEP_MIN_US,
+ LEAPRAID_ADAPTER_READY_SLEEP_MAX_US);
+ } while (--cnt);
+
+ return false;
+}
+
+static int leapraid_db_wait_int_host(struct leapraid_adapter *adapter)
+{
+ u32 cnt = LEAPRAID_DB_WAIT_MAX_RETRY;
+
+ do {
+ if (leapraid_readl(&adapter->iomem_base->host_int_status) &
+ LEAPRAID_ADAPTER2HOST_DB_STATUS)
+ return 0;
+ udelay(LEAPRAID_DB_WAIT_DELAY_US);
+ } while (--cnt);
+
+ return -EFAULT;
+}
+
+static int leapraid_db_wait_ack_and_clear_int(struct leapraid_adapter *adapter)
+{
+ u32 adapter_state;
+ u32 int_status;
+ u32 cnt;
+
+ cnt = LEAPRAID_ADAPTER_READY_MAX_RETRY;
+ do {
+ int_status =
+ leapraid_readl(&adapter->iomem_base->host_int_status);
+ if (!(int_status & LEAPRAID_HOST2ADAPTER_DB_STATUS)) {
+ return 0;
+ } else if (int_status & LEAPRAID_ADAPTER2HOST_DB_STATUS) {
+ adapter_state = leapraid_get_adapter_state(adapter);
+ if (adapter_state == LEAPRAID_DB_FAULT)
+ return -EFAULT;
+ } else if (int_status == 0xFFFFFFFF) {
+ goto out;
+ }
+
+ usleep_range(LEAPRAID_ADAPTER_READY_SLEEP_MIN_US,
+ LEAPRAID_ADAPTER_READY_SLEEP_MAX_US);
+ } while (--cnt);
+
+out:
+ return -EFAULT;
+}
+
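+/*
+ * Doorbell handshake: post the request length, then write the request
+ * one dword at a time through the doorbell register, waiting for the
+ * adapter to ack each dword; the reply is read back 16 bits per
+ * doorbell interrupt, clearing the interrupt status after each read.
+ */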
+static int leapraid_handshake_func(struct leapraid_adapter *adapter,
+ int req_bytes, u32 *req,
+ int rep_bytes, u16 *rep)
+{
+ int failed, i;
+
+ if ((leapraid_readl(&adapter->iomem_base->db) &
+ LEAPRAID_DB_USED)) {
+ dev_err(&adapter->pdev->dev, "doorbell used\n");
+ return -EFAULT;
+ }
+
+ if (leapraid_readl(&adapter->iomem_base->host_int_status) &
+ LEAPRAID_ADAPTER2HOST_DB_STATUS)
+ writel(0, &adapter->iomem_base->host_int_status);
+
+ writel(((LEAPRAID_FUNC_HANDSHAKE << LEAPRAID_DB_FUNC_SHIFT) |
+ ((req_bytes / LEAPRAID_DWORDS_BYTE_SIZE) <<
+ LEAPRAID_DB_ADD_DWORDS_SHIFT)),
+ &adapter->iomem_base->db);
+
+ if (leapraid_db_wait_int_host(adapter)) {
+ dev_err(&adapter->pdev->dev, "%d:wait db interrupt timeout\n",
+ __LINE__);
+ return -EFAULT;
+ }
+
+ writel(0, &adapter->iomem_base->host_int_status);
+
+ if (leapraid_db_wait_ack_and_clear_int(adapter)) {
+ dev_err(&adapter->pdev->dev, "%d:wait ack failure\n",
+ __LINE__);
+ return -EFAULT;
+ }
+
+ for (i = 0, failed = 0;
+ i < req_bytes / LEAPRAID_DWORDS_BYTE_SIZE && !failed;
+ i++) {
+ writel((u32)(req[i]), &adapter->iomem_base->db);
+ if (leapraid_db_wait_ack_and_clear_int(adapter))
+ failed = 1;
+ }
+ if (failed) {
+ dev_err(&adapter->pdev->dev, "%d:wait ack failure\n",
+ __LINE__);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < rep_bytes / LEAPRAID_WORD_BYTE_SIZE; i++) {
+ if (leapraid_db_wait_int_host(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%d:wait db interrupt timeout\n", __LINE__);
+ return -EFAULT;
+ }
+ rep[i] = (u16)(leapraid_readl(&adapter->iomem_base->db)
+ & LEAPRAID_DB_DATA_MASK);
+ writel(0, &adapter->iomem_base->host_int_status);
+ }
+
+ if (leapraid_db_wait_int_host(adapter)) {
+ dev_err(&adapter->pdev->dev, "%d:wait db interrupt timeout\n",
+ __LINE__);
+ return -EFAULT;
+ }
+
+ writel(0, &adapter->iomem_base->host_int_status);
+
+ return 0;
+}
+
+int leapraid_check_adapter_is_op(struct leapraid_adapter *adapter)
+{
+ int wait_count = LEAPRAID_DB_WAIT_OPERATIONAL;
+
+ do {
+ if (leapraid_pci_removed(adapter))
+ return -EFAULT;
+
+ if (leapraid_get_adapter_state(adapter) ==
+ LEAPRAID_DB_OPERATIONAL)
+ return 0;
+
+ dev_info(&adapter->pdev->dev,
+ "waiting for adapter to become op status(cnt=%d)\n",
+ LEAPRAID_DB_WAIT_OPERATIONAL - wait_count);
+
+ ssleep(1);
+ } while (--wait_count);
+
+ dev_err(&adapter->pdev->dev,
+ "adapter failed to become op state, last state=%d\n",
+ leapraid_get_adapter_state(adapter));
+
+ return -EFAULT;
+}
+
+struct leapraid_io_req_tracker *leapraid_get_io_tracker_from_taskid(
+ struct leapraid_adapter *adapter, u16 taskid)
+{
+ struct scsi_cmnd *scmd;
+
+ if (WARN_ON(!taskid))
+ return NULL;
+
+ if (WARN_ON(taskid > adapter->shost->can_queue))
+ return NULL;
+
+ scmd = leapraid_get_scmd_from_taskid(adapter, taskid);
+ if (scmd)
+ return leapraid_get_scmd_priv(scmd);
+
+ return NULL;
+}
+
+static u8 leapraid_get_cb_idx(struct leapraid_adapter *adapter, u16 taskid)
+{
+ struct leapraid_driver_cmd *sp_cmd;
+ u8 cb_idx = 0xFF;
+
+ if (WARN_ON(!taskid))
+ return cb_idx;
+
+ list_for_each_entry(sp_cmd, &adapter->driver_cmds.special_cmd_list,
+ list)
+ if (taskid == sp_cmd->taskid ||
+ taskid == sp_cmd->hp_taskid ||
+ taskid == sp_cmd->inter_taskid)
+ return sp_cmd->cb_idx;
+
+ WARN_ON(cb_idx == 0xFF);
+ return cb_idx;
+}
+
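+/*
+ * Rebuild the block layer unique tag from the hardware queue number
+ * saved at submission time, then verify that the scsi_cmnd found via
+ * scsi_host_find_tag() still owns this taskid.
+ */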
+struct scsi_cmnd *leapraid_get_scmd_from_taskid(
+ struct leapraid_adapter *adapter, u16 taskid)
+{
+ struct leapraid_scsiio_req *leap_mpi_req;
+ struct leapraid_io_req_tracker *st;
+ struct scsi_cmnd *scmd;
+ u32 uniq_tag;
+
+ if (taskid <= 0 || taskid > adapter->shost->can_queue)
+ return NULL;
+
+ uniq_tag = adapter->mem_desc.taskid_to_uniq_tag[taskid - 1] <<
+ BLK_MQ_UNIQUE_TAG_BITS | (taskid - 1);
+ leap_mpi_req = leapraid_get_task_desc(adapter, taskid);
+ if (!leap_mpi_req->dev_hdl)
+ return NULL;
+
+ scmd = scsi_host_find_tag(adapter->shost, uniq_tag);
+ if (scmd) {
+ st = leapraid_get_scmd_priv(scmd);
+ if (st && st->taskid == taskid)
+ return scmd;
+ }
+
+ return NULL;
+}
+
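+/*
+ * The taskid is simply the blk-mq tag plus one; the hardware queue
+ * number is recorded so the unique tag can be reconstructed when the
+ * command is later looked up by taskid.
+ */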
+u16 leapraid_alloc_scsiio_taskid(struct leapraid_adapter *adapter,
+ struct scsi_cmnd *scmd)
+{
+ struct leapraid_io_req_tracker *request;
+ u16 taskid;
+	u32 tag;
+ u32 unique_tag;
+
+ unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+ tag = blk_mq_unique_tag_to_tag(unique_tag);
+ adapter->mem_desc.taskid_to_uniq_tag[tag] =
+ blk_mq_unique_tag_to_hwq(unique_tag);
+
+ request = leapraid_get_scmd_priv(scmd);
+ taskid = tag + 1;
+ request->taskid = taskid;
+ request->scmd = scmd;
+ return taskid;
+}
+
+static void leapraid_check_pending_io(struct leapraid_adapter *adapter)
+{
+ if (adapter->access_ctrl.shost_recovering &&
+ adapter->reset_desc.pending_io_cnt) {
+ if (adapter->reset_desc.pending_io_cnt == 1)
+ wake_up(&adapter->reset_desc.reset_wait_queue);
+ adapter->reset_desc.pending_io_cnt--;
+ }
+}
+
+static void leapraid_clear_io_tracker(struct leapraid_adapter *adapter,
+ struct leapraid_io_req_tracker *io_tracker)
+{
+ if (!io_tracker)
+ return;
+
+ if (WARN_ON(io_tracker->taskid == 0))
+ return;
+
+ io_tracker->scmd = NULL;
+}
+
+static bool leapraid_is_fixed_taskid(struct leapraid_adapter *adapter,
+ u16 taskid)
+{
+ return (taskid == adapter->driver_cmds.ctl_cmd.taskid ||
+ taskid == adapter->driver_cmds.driver_scsiio_cmd.taskid ||
+ taskid == adapter->driver_cmds.tm_cmd.hp_taskid ||
+ taskid == adapter->driver_cmds.ctl_cmd.hp_taskid ||
+ taskid == adapter->driver_cmds.scan_dev_cmd.inter_taskid ||
+ taskid == adapter->driver_cmds.timestamp_sync_cmd.inter_taskid ||
+ taskid == adapter->driver_cmds.raid_action_cmd.inter_taskid ||
+ taskid == adapter->driver_cmds.transport_cmd.inter_taskid ||
+ taskid == adapter->driver_cmds.cfg_op_cmd.inter_taskid ||
+ taskid == adapter->driver_cmds.enc_cmd.inter_taskid ||
+ taskid == adapter->driver_cmds.notify_event_cmd.inter_taskid);
+}
+
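+/*
+ * Release a taskid after completion: fixed driver-internal taskids
+ * are never recycled here, while SCSI I/O taskids get their task
+ * descriptor and request tracker cleared and any reset waiter woken.
+ */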
+void leapraid_free_taskid(struct leapraid_adapter *adapter, u16 taskid)
+{
+ struct leapraid_io_req_tracker *io_tracker;
+ void *task_desc;
+
+ if (leapraid_is_fixed_taskid(adapter, taskid))
+ return;
+
+ if (taskid <= adapter->shost->can_queue) {
+ io_tracker = leapraid_get_io_tracker_from_taskid(adapter,
+ taskid);
+ if (!io_tracker) {
+ leapraid_check_pending_io(adapter);
+ return;
+ }
+
+ task_desc = leapraid_get_task_desc(adapter, taskid);
+ memset(task_desc, 0, LEAPRAID_REQUEST_SIZE);
+ leapraid_clear_io_tracker(adapter, io_tracker);
+ leapraid_check_pending_io(adapter);
+ adapter->mem_desc.taskid_to_uniq_tag[taskid - 1] = 0xFFFF;
+ }
+}
+
+static u8 leapraid_get_msix_idx(struct leapraid_adapter *adapter,
+ struct scsi_cmnd *scmd)
+{
+ if (scmd && adapter->shost->nr_hw_queues > 1) {
+ u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+
+ return blk_mq_unique_tag_to_hwq(tag);
+ }
+ return adapter->notification_desc.msix_cpu_map[raw_smp_processor_id()];
+}
+
+static u8 leapraid_get_and_set_msix_idx_from_taskid(
+ struct leapraid_adapter *adapter, u16 taskid)
+{
+ struct leapraid_io_req_tracker *io_tracker = NULL;
+
+ if (taskid <= adapter->shost->can_queue)
+ io_tracker = leapraid_get_io_tracker_from_taskid(adapter,
+ taskid);
+
+ if (!io_tracker)
+ return leapraid_get_msix_idx(adapter, NULL);
+
+ io_tracker->msix_io = leapraid_get_msix_idx(adapter, io_tracker->scmd);
+
+ return io_tracker->msix_io;
+}
+
+void leapraid_fire_scsi_io(struct leapraid_adapter *adapter, u16 taskid,
+ u16 handle)
+{
+ struct leapraid_atomic_req_desc desc;
+
+ desc.flg = LEAPRAID_REQ_DESC_FLG_SCSI_IO;
+ desc.msix_idx = leapraid_get_and_set_msix_idx_from_taskid(adapter,
+ taskid);
+ desc.taskid = cpu_to_le16(taskid);
+ writel((__force u32)cpu_to_le32(*((u32 *)&desc)),
+ &adapter->iomem_base->atomic_req_desc_post);
+}
+
+void leapraid_fire_hpr_task(struct leapraid_adapter *adapter, u16 taskid,
+ u16 msix_task)
+{
+ struct leapraid_atomic_req_desc desc;
+
+ desc.flg = LEAPRAID_REQ_DESC_FLG_HPR;
+ desc.msix_idx = msix_task;
+ desc.taskid = cpu_to_le16(taskid);
+ writel((__force u32)cpu_to_le32(*((u32 *)&desc)),
+ &adapter->iomem_base->atomic_req_desc_post);
+}
+
+void leapraid_fire_task(struct leapraid_adapter *adapter, u16 taskid)
+{
+ struct leapraid_atomic_req_desc desc;
+
+ desc.flg = LEAPRAID_REQ_DESC_FLG_DFLT_TYPE;
+ desc.msix_idx = leapraid_get_and_set_msix_idx_from_taskid(adapter,
+ taskid);
+ desc.taskid = cpu_to_le16(taskid);
+ writel((__force u32)cpu_to_le32(*((u32 *)&desc)),
+ &adapter->iomem_base->atomic_req_desc_post);
+}
+
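+/*
+ * Flush every outstanding SCSI command back to the midlayer: commands
+ * complete with DID_NO_CONNECT when the adapter is gone (or removing,
+ * overheating, or a reset has failed) and with DID_RESET otherwise.
+ */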
+void leapraid_clean_active_scsi_cmds(struct leapraid_adapter *adapter)
+{
+ struct leapraid_io_req_tracker *io_tracker;
+ struct scsi_cmnd *scmd;
+ u16 taskid;
+
+ for (taskid = 1; taskid <= adapter->shost->can_queue; taskid++) {
+ scmd = leapraid_get_scmd_from_taskid(adapter, taskid);
+ if (!scmd)
+ continue;
+
+ io_tracker = leapraid_get_scmd_priv(scmd);
+ if (io_tracker && io_tracker->taskid == 0)
+ continue;
+
+ scsi_dma_unmap(scmd);
+ leapraid_clear_io_tracker(adapter, io_tracker);
+ if (!leapraid_pci_active(adapter) ||
+ adapter->reset_desc.adapter_reset_results != 0 ||
+ adapter->access_ctrl.adapter_thermal_alert ||
+ adapter->access_ctrl.host_removing)
+ scmd->result = DID_NO_CONNECT << 16;
+ else
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ scsi_done(scmd);
+ }
+}
+
+static void leapraid_clean_active_driver_cmd(
+ struct leapraid_driver_cmd *driver_cmd)
+{
+ if (driver_cmd->status & LEAPRAID_CMD_PENDING) {
+ driver_cmd->status |= LEAPRAID_CMD_RESET;
+ complete(&driver_cmd->done);
+ }
+}
+
+static void leapraid_clean_active_driver_cmds(struct leapraid_adapter *adapter)
+{
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.timestamp_sync_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.raid_action_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.driver_scsiio_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.tm_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.transport_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.enc_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.notify_event_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.cfg_op_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.ctl_cmd);
+
+ if (adapter->driver_cmds.scan_dev_cmd.status & LEAPRAID_CMD_PENDING) {
+ adapter->scan_dev_desc.scan_dev_failed = true;
+ adapter->driver_cmds.scan_dev_cmd.status |= LEAPRAID_CMD_RESET;
+ if (adapter->scan_dev_desc.driver_loading) {
+ adapter->scan_dev_desc.scan_start_failed =
+ LEAPRAID_ADAPTER_STATUS_INTERNAL_ERROR;
+ adapter->scan_dev_desc.scan_start = false;
+ } else {
+ complete(&adapter->driver_cmds.scan_dev_cmd.done);
+ }
+ }
+}
+
+static void leapraid_clean_active_cmds(struct leapraid_adapter *adapter)
+{
+ leapraid_clean_active_driver_cmds(adapter);
+ memset(adapter->dev_topo.pending_dev_add, 0,
+ adapter->dev_topo.pending_dev_add_sz);
+ memset(adapter->dev_topo.dev_removing, 0,
+ adapter->dev_topo.dev_removing_sz);
+ leapraid_clean_active_fw_evt(adapter);
+ leapraid_clean_active_scsi_cmds(adapter);
+}
+
+static void leapraid_tgt_not_responding(struct leapraid_adapter *adapter,
+ u16 hdl)
+{
+ struct leapraid_starget_priv *starget_priv = NULL;
+ struct leapraid_sas_dev *sas_dev = NULL;
+ unsigned long flags = 0;
+ u32 adapter_state = 0;
+
+ if (adapter->access_ctrl.pcie_recovering)
+ return;
+
+ adapter_state = leapraid_get_adapter_state(adapter);
+ if (adapter_state != LEAPRAID_DB_OPERATIONAL)
+ return;
+
+ if (test_bit(hdl, (unsigned long *)adapter->dev_topo.pd_hdls))
+ return;
+
+ clear_bit(hdl, (unsigned long *)adapter->dev_topo.pending_dev_add);
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ if (sas_dev && sas_dev->starget && sas_dev->starget->hostdata) {
+ starget_priv = sas_dev->starget->hostdata;
+ starget_priv->deleted = true;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (starget_priv)
+ starget_priv->hdl = LEAPRAID_INVALID_DEV_HANDLE;
+
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+}
+
+static void leapraid_tgt_rst_send(struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_starget_priv *starget_priv = NULL;
+ struct leapraid_sas_dev *sas_dev = NULL;
+ struct leapraid_card_port *port = NULL;
+ u64 sas_address = 0;
+ unsigned long flags;
+ u32 adapter_state;
+
+ if (adapter->access_ctrl.pcie_recovering)
+ return;
+
+ adapter_state = leapraid_get_adapter_state(adapter);
+ if (adapter_state != LEAPRAID_DB_OPERATIONAL)
+ return;
+
+ if (test_bit(hdl, (unsigned long *)adapter->dev_topo.pd_hdls))
+ return;
+
+ clear_bit(hdl, (unsigned long *)adapter->dev_topo.pending_dev_add);
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ if (sas_dev && sas_dev->starget && sas_dev->starget->hostdata) {
+ starget_priv = sas_dev->starget->hostdata;
+ starget_priv->deleted = true;
+ sas_address = sas_dev->sas_addr;
+ port = sas_dev->card_port;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (starget_priv) {
+ leapraid_ublk_io_dev(adapter, sas_address, port);
+ starget_priv->hdl = LEAPRAID_INVALID_DEV_HANDLE;
+ }
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+}
+
+static inline void leapraid_single_mpi_sg_append(struct leapraid_adapter *adapter,
+ void *sge, u32 flag_and_len,
+ dma_addr_t dma_addr)
+{
+ if (adapter->adapter_attr.use_32_dma_mask) {
+ ((struct leapraid_sge_simple32 *)sge)->flg_and_len =
+ cpu_to_le32(flag_and_len |
+ (LEAPRAID_SGE_FLG_32 |
+ LEAPRAID_SGE_FLG_SYSTEM_ADDR) <<
+ LEAPRAID_SGE_FLG_SHIFT);
+ ((struct leapraid_sge_simple32 *)sge)->addr =
+ cpu_to_le32(dma_addr);
+ } else {
+ ((struct leapraid_sge_simple64 *)sge)->flg_and_len =
+ cpu_to_le32(flag_and_len |
+ (LEAPRAID_SGE_FLG_64 |
+ LEAPRAID_SGE_FLG_SYSTEM_ADDR) <<
+ LEAPRAID_SGE_FLG_SHIFT);
+ ((struct leapraid_sge_simple64 *)sge)->addr =
+ cpu_to_le64(dma_addr);
+ }
+}
+
+static inline void leapraid_single_ieee_sg_append(void *sge, u8 flag,
+ u8 next_chain_offset,
+ u32 len,
+ dma_addr_t dma_addr)
+{
+ ((struct leapraid_chain64_ieee_sg *)sge)->flg = flag;
+ ((struct leapraid_chain64_ieee_sg *)sge)->next_chain_offset =
+ next_chain_offset;
+ ((struct leapraid_chain64_ieee_sg *)sge)->len = cpu_to_le32(len);
+ ((struct leapraid_chain64_ieee_sg *)sge)->addr = cpu_to_le64(dma_addr);
+}
+
+static void leapraid_build_nodata_mpi_sg(struct leapraid_adapter *adapter,
+ void *sge)
+{
+ leapraid_single_mpi_sg_append(adapter,
+ sge,
+ (u32)((LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL |
+ LEAPRAID_SGE_FLG_SIMPLE_ONE) <<
+ LEAPRAID_SGE_FLG_SHIFT),
+ -1);
+}
+
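+/*
+ * Build a one- or two-element MPI SGL: a single simple SGE for a
+ * unidirectional transfer, two simple SGEs (host-to-card first) for
+ * bidirectional transfers, or a zero-length end-of-list entry when
+ * there is no data to move.
+ */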
+void leapraid_build_mpi_sg(struct leapraid_adapter *adapter, void *sge,
+ dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size)
+{
+ if (h2c_size && !c2h_size) {
+ leapraid_single_mpi_sg_append(adapter,
+ sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL |
+ LEAPRAID_SGE_FLG_H2C) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ h2c_size,
+ h2c_dma_addr);
+ } else if (!h2c_size && c2h_size) {
+ leapraid_single_mpi_sg_append(adapter,
+ sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ c2h_size,
+ c2h_dma_addr);
+ } else if (h2c_size && c2h_size) {
+ leapraid_single_mpi_sg_append(adapter,
+ sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_H2C) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ h2c_size,
+ h2c_dma_addr);
+ if (adapter->adapter_attr.use_32_dma_mask)
+ sge += sizeof(struct leapraid_sge_simple32);
+ else
+ sge += sizeof(struct leapraid_sge_simple64);
+ leapraid_single_mpi_sg_append(adapter,
+ sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ c2h_size,
+ c2h_dma_addr);
+	} else {
+		leapraid_build_nodata_mpi_sg(adapter, sge);
+	}
+}
+
+void leapraid_build_ieee_nodata_sg(struct leapraid_adapter *adapter, void *sge)
+{
+ leapraid_single_ieee_sg_append(sge,
+ (LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR |
+ LEAPRAID_IEEE_SGE_FLG_EOL),
+ 0, 0, -1);
+}
+
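+/*
+ * Build the IEEE SGL for a SCSI command. Scatterlists short enough to
+ * fit inline go directly into the request frame; longer lists place
+ * one simple SGE inline and continue through per-request chain
+ * segments, each ending either in the final simple SGE or in a chain
+ * SGE pointing at the next segment.
+ */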
+int leapraid_build_scmd_ieee_sg(struct leapraid_adapter *adapter,
+ struct scsi_cmnd *scmd, u16 taskid)
+{
+ struct leapraid_scsiio_req *scsiio_req;
+ struct leapraid_io_req_tracker *io_tracker;
+ struct scatterlist *scmd_sg_cur;
+ int sg_entries_left;
+ void *sg_entry_cur;
+ void *host_chain;
+ dma_addr_t host_chain_dma;
+ u8 host_chain_cursor;
+ u32 sg_entries_in_cur_seg;
+ u32 chain_offset_in_cur_seg;
+ u32 chain_len_in_cur_seg;
+
+ io_tracker = leapraid_get_scmd_priv(scmd);
+ scsiio_req = leapraid_get_task_desc(adapter, taskid);
+ scmd_sg_cur = scsi_sglist(scmd);
+ sg_entries_left = scsi_dma_map(scmd);
+ if (sg_entries_left < 0)
+ return -ENOMEM;
+ sg_entry_cur = &scsiio_req->sgl;
+ if (sg_entries_left <= LEAPRAID_SGL_INLINE_THRESHOLD)
+ goto fill_last_seg;
+
+ scsiio_req->chain_offset = LEAPRAID_CHAIN_OFFSET_DWORDS;
+ leapraid_single_ieee_sg_append(sg_entry_cur,
+ LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR,
+ 0, sg_dma_len(scmd_sg_cur),
+ sg_dma_address(scmd_sg_cur));
+ scmd_sg_cur = sg_next(scmd_sg_cur);
+ sg_entry_cur += LEAPRAID_IEEE_SGE64_ENTRY_SIZE;
+ sg_entries_left--;
+
+ host_chain_cursor = 0;
+ host_chain = io_tracker->chain +
+ host_chain_cursor * LEAPRAID_CHAIN_SEG_SIZE;
+ host_chain_dma = io_tracker->chain_dma +
+ host_chain_cursor * LEAPRAID_CHAIN_SEG_SIZE;
+ host_chain_cursor += 1;
+ for (;;) {
+ sg_entries_in_cur_seg =
+ (sg_entries_left <= LEAPRAID_MAX_SGES_IN_CHAIN) ?
+ sg_entries_left : LEAPRAID_MAX_SGES_IN_CHAIN;
+ chain_offset_in_cur_seg =
+ (sg_entries_left == (int)sg_entries_in_cur_seg) ?
+ 0 : sg_entries_in_cur_seg;
+ chain_len_in_cur_seg = sg_entries_in_cur_seg *
+ LEAPRAID_IEEE_SGE64_ENTRY_SIZE;
+ if (chain_offset_in_cur_seg)
+ chain_len_in_cur_seg += LEAPRAID_IEEE_SGE64_ENTRY_SIZE;
+
+ leapraid_single_ieee_sg_append(sg_entry_cur,
+ LEAPRAID_IEEE_SGE_FLG_CHAIN_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR,
+ chain_offset_in_cur_seg, chain_len_in_cur_seg,
+ host_chain_dma);
+ sg_entry_cur = host_chain;
+ if (!chain_offset_in_cur_seg)
+ goto fill_last_seg;
+
+ while (sg_entries_in_cur_seg) {
+ leapraid_single_ieee_sg_append(sg_entry_cur,
+ LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR,
+ 0, sg_dma_len(scmd_sg_cur),
+ sg_dma_address(scmd_sg_cur));
+ scmd_sg_cur = sg_next(scmd_sg_cur);
+ sg_entry_cur += LEAPRAID_IEEE_SGE64_ENTRY_SIZE;
+ sg_entries_left--;
+ sg_entries_in_cur_seg--;
+ }
+ host_chain = io_tracker->chain +
+ host_chain_cursor * LEAPRAID_CHAIN_SEG_SIZE;
+ host_chain_dma = io_tracker->chain_dma +
+ host_chain_cursor * LEAPRAID_CHAIN_SEG_SIZE;
+ host_chain_cursor += 1;
+ }
+
+fill_last_seg:
+ while (sg_entries_left > 0) {
+		u32 flags = LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+			    LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR;
+
+		if (sg_entries_left == 1)
+ flags |= LEAPRAID_IEEE_SGE_FLG_EOL;
+ leapraid_single_ieee_sg_append(sg_entry_cur, flags,
+ 0, sg_dma_len(scmd_sg_cur),
+ sg_dma_address(scmd_sg_cur));
+ scmd_sg_cur = sg_next(scmd_sg_cur);
+ sg_entry_cur += LEAPRAID_IEEE_SGE64_ENTRY_SIZE;
+ sg_entries_left--;
+ }
+ return 0;
+}
+
+void leapraid_build_ieee_sg(struct leapraid_adapter *adapter, void *sge,
+ dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size)
+{
+ if (h2c_size && !c2h_size) {
+ leapraid_single_ieee_sg_append(sge,
+ LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_EOL |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR,
+ 0,
+ h2c_size,
+ h2c_dma_addr);
+ } else if (!h2c_size && c2h_size) {
+ leapraid_single_ieee_sg_append(sge,
+ LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_EOL |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR,
+ 0,
+ c2h_size,
+ c2h_dma_addr);
+ } else if (h2c_size && c2h_size) {
+ leapraid_single_ieee_sg_append(sge,
+ LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR,
+ 0,
+ h2c_size,
+ h2c_dma_addr);
+ sge += LEAPRAID_IEEE_SGE64_ENTRY_SIZE;
+ leapraid_single_ieee_sg_append(sge,
+ LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR |
+ LEAPRAID_IEEE_SGE_FLG_EOL,
+ 0,
+ c2h_size,
+ c2h_dma_addr);
+	} else {
+		leapraid_build_ieee_nodata_sg(adapter, sge);
+	}
+}
+
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_from_tgt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_starget_priv *tgt_priv)
+{
+ assert_spin_locked(&adapter->dev_topo.sas_dev_lock);
+ if (tgt_priv->sas_dev)
+ leapraid_sdev_get(tgt_priv->sas_dev);
+
+ return tgt_priv->sas_dev;
+}
+
+struct leapraid_sas_dev *leapraid_get_sas_dev_from_tgt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_starget_priv *tgt_priv)
+{
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_from_tgt(adapter, tgt_priv);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return sas_dev;
+}
+
+static struct leapraid_card_port *leapraid_get_port_by_id(
+ struct leapraid_adapter *adapter,
+ u8 port_id, bool skip_dirty)
+{
+ struct leapraid_card_port *port;
+ struct leapraid_card_port *dirty_port = NULL;
+
+ if (!adapter->adapter_attr.enable_mp)
+ port_id = LEAPRAID_DISABLE_MP_PORT_ID;
+
+ list_for_each_entry(port, &adapter->dev_topo.card_port_list, list) {
+ if (port->port_id != port_id)
+ continue;
+
+ if (!(port->flg & LEAPRAID_CARD_PORT_FLG_DIRTY))
+ return port;
+
+ if (skip_dirty && !dirty_port)
+ dirty_port = port;
+ }
+
+ if (dirty_port)
+ return dirty_port;
+
+ if (unlikely(!adapter->adapter_attr.enable_mp)) {
+ port = kzalloc(sizeof(*port), GFP_ATOMIC);
+ if (!port)
+ return NULL;
+
+ port->port_id = LEAPRAID_DISABLE_MP_PORT_ID;
+ list_add_tail(&port->list, &adapter->dev_topo.card_port_list);
+ return port;
+ }
+
+ return NULL;
+}
+
+struct leapraid_vphy *leapraid_get_vphy_by_phy(struct leapraid_card_port *port,
+ u32 phy_seq_num)
+{
+ struct leapraid_vphy *vphy;
+
+ if (!port || !port->vphys_mask)
+ return NULL;
+
+ list_for_each_entry(vphy, &port->vphys_list, list) {
+ if (vphy->phy_mask & BIT(phy_seq_num))
+ return vphy;
+ }
+
+ return NULL;
+}
+
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_by_addr_and_rphy(
+ struct leapraid_adapter *adapter, u64 sas_address,
+ struct sas_rphy *rphy)
+{
+ struct leapraid_sas_dev *sas_dev;
+
+ assert_spin_locked(&adapter->dev_topo.sas_dev_lock);
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_list, list)
+ if (sas_dev->sas_addr == sas_address &&
+ sas_dev->rphy == rphy) {
+ leapraid_sdev_get(sas_dev);
+ return sas_dev;
+ }
+
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_init_list,
+ list)
+ if (sas_dev->sas_addr == sas_address &&
+ sas_dev->rphy == rphy) {
+ leapraid_sdev_get(sas_dev);
+ return sas_dev;
+ }
+
+ return NULL;
+}
+
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_by_addr(
+ struct leapraid_adapter *adapter,
+ u64 sas_address, struct leapraid_card_port *port)
+{
+ struct leapraid_sas_dev *sas_dev;
+
+ if (!port)
+ return NULL;
+
+ assert_spin_locked(&adapter->dev_topo.sas_dev_lock);
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_list, list)
+ if (sas_dev->sas_addr == sas_address &&
+ sas_dev->card_port == port) {
+ leapraid_sdev_get(sas_dev);
+ return sas_dev;
+ }
+
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_init_list,
+ list)
+ if (sas_dev->sas_addr == sas_address &&
+ sas_dev->card_port == port) {
+ leapraid_sdev_get(sas_dev);
+ return sas_dev;
+ }
+
+ return NULL;
+}
+
+struct leapraid_sas_dev *leapraid_get_sas_dev_by_addr(
+ struct leapraid_adapter *adapter,
+ u64 sas_address, struct leapraid_card_port *port)
+{
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+
+ if (!port)
+ return NULL;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(adapter, sas_address,
+ port);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return sas_dev;
+}
+
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_by_hdl(
+ struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_sas_dev *sas_dev;
+
+ assert_spin_locked(&adapter->dev_topo.sas_dev_lock);
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_list, list)
+ if (sas_dev->hdl == hdl) {
+ leapraid_sdev_get(sas_dev);
+ return sas_dev;
+ }
+
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_init_list,
+ list)
+ if (sas_dev->hdl == hdl) {
+ leapraid_sdev_get(sas_dev);
+ return sas_dev;
+ }
+
+ return NULL;
+}
+
+struct leapraid_sas_dev *leapraid_get_sas_dev_by_hdl(
+ struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return sas_dev;
+}
+
+void leapraid_sas_dev_remove(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev)
+{
+ unsigned long flags;
+ bool del_from_list;
+
+ if (!sas_dev)
+ return;
+
+ del_from_list = false;
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ if (!list_empty(&sas_dev->list)) {
+ list_del_init(&sas_dev->list);
+ del_from_list = true;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (del_from_list)
+ leapraid_sdev_put(sas_dev);
+}
+
+static void leapraid_sas_dev_remove_by_hdl(struct leapraid_adapter *adapter,
+ u16 hdl)
+{
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ bool del_from_list;
+
+ if (adapter->access_ctrl.shost_recovering)
+ return;
+
+ del_from_list = false;
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ if (sas_dev) {
+ if (!list_empty(&sas_dev->list)) {
+ list_del_init(&sas_dev->list);
+ del_from_list = true;
+ leapraid_sdev_put(sas_dev);
+ }
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (del_from_list) {
+ leapraid_remove_device(adapter, sas_dev);
+ leapraid_sdev_put(sas_dev);
+ }
+}
+
+void leapraid_sas_dev_remove_by_sas_address(struct leapraid_adapter *adapter,
+ u64 sas_address,
+ struct leapraid_card_port *port)
+{
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ bool del_from_list;
+
+ if (adapter->access_ctrl.shost_recovering)
+ return;
+
+ del_from_list = false;
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(adapter, sas_address,
+ port);
+ if (sas_dev) {
+ if (!list_empty(&sas_dev->list)) {
+ list_del_init(&sas_dev->list);
+ del_from_list = true;
+ leapraid_sdev_put(sas_dev);
+ }
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (del_from_list) {
+ leapraid_remove_device(adapter, sas_dev);
+ leapraid_sdev_put(sas_dev);
+ }
+}
+
+struct leapraid_raid_volume *leapraid_raid_volume_find_by_id(
+ struct leapraid_adapter *adapter, uint id, uint channel)
+{
+ struct leapraid_raid_volume *raid_volume;
+
+ list_for_each_entry(raid_volume, &adapter->dev_topo.raid_volume_list,
+ list) {
+ if (raid_volume->id == id &&
+ raid_volume->channel == channel) {
+ return raid_volume;
+ }
+ }
+
+ return NULL;
+}
+
+struct leapraid_raid_volume *leapraid_raid_volume_find_by_hdl(
+ struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_raid_volume *raid_volume;
+
+ list_for_each_entry(raid_volume, &adapter->dev_topo.raid_volume_list,
+ list) {
+ if (raid_volume->hdl == hdl)
+ return raid_volume;
+ }
+
+ return NULL;
+}
+
+static struct leapraid_raid_volume *leapraid_raid_volume_find_by_wwid(
+ struct leapraid_adapter *adapter, u64 wwid)
+{
+ struct leapraid_raid_volume *raid_volume;
+
+ list_for_each_entry(raid_volume, &adapter->dev_topo.raid_volume_list,
+ list) {
+ if (raid_volume->wwid == wwid)
+ return raid_volume;
+ }
+
+ return NULL;
+}
+
+static void leapraid_raid_volume_add(struct leapraid_adapter *adapter,
+ struct leapraid_raid_volume *raid_volume)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ list_add_tail(&raid_volume->list, &adapter->dev_topo.raid_volume_list);
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+}
+
+void leapraid_raid_volume_remove(struct leapraid_adapter *adapter,
+ struct leapraid_raid_volume *raid_volume)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ list_del(&raid_volume->list);
+ kfree(raid_volume);
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+}
+
+static struct leapraid_enc_node *leapraid_enc_find_by_hdl(
+ struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_enc_node *enc_dev;
+
+ list_for_each_entry(enc_dev, &adapter->dev_topo.enc_list, list) {
+ if (le16_to_cpu(enc_dev->pg0.enc_hdl) == hdl)
+ return enc_dev;
+ }
+
+ return NULL;
+}
+
+struct leapraid_topo_node *leapraid_exp_find_by_sas_address(
+ struct leapraid_adapter *adapter,
+ u64 sas_address, struct leapraid_card_port *port)
+{
+ struct leapraid_topo_node *sas_exp;
+
+ if (!port)
+ return NULL;
+
+ list_for_each_entry(sas_exp, &adapter->dev_topo.exp_list, list) {
+ if (sas_exp->sas_address == sas_address &&
+ sas_exp->card_port == port)
+ return sas_exp;
+ }
+
+ return NULL;
+}
+
+bool leapraid_scmd_find_by_tgt(struct leapraid_adapter *adapter, uint id,
+ uint channel)
+{
+ struct scsi_cmnd *scmd;
+ int taskid;
+
+ for (taskid = 1; taskid <= adapter->shost->can_queue; taskid++) {
+ scmd = leapraid_get_scmd_from_taskid(adapter, taskid);
+ if (!scmd)
+ continue;
+
+ if (scmd->device->id == id && scmd->device->channel == channel)
+ return true;
+ }
+
+ return false;
+}
+
+bool leapraid_scmd_find_by_lun(struct leapraid_adapter *adapter, uint id,
+ unsigned int lun, uint channel)
+{
+ struct scsi_cmnd *scmd;
+ int taskid;
+
+ for (taskid = 1; taskid <= adapter->shost->can_queue; taskid++) {
+ scmd = leapraid_get_scmd_from_taskid(adapter, taskid);
+ if (!scmd)
+ continue;
+
+ if (scmd->device->id == id &&
+ scmd->device->channel == channel &&
+ scmd->device->lun == lun)
+ return true;
+ }
+
+ return false;
+}
+
+static struct leapraid_topo_node *leapraid_exp_find_by_hdl(
+ struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_topo_node *sas_exp;
+
+ list_for_each_entry(sas_exp, &adapter->dev_topo.exp_list, list) {
+ if (sas_exp->hdl == hdl)
+ return sas_exp;
+ }
+
+ return NULL;
+}
+
+static enum leapraid_card_port_checking_flg leapraid_get_card_port_feature(
+ struct leapraid_card_port *old_card_port,
+ struct leapraid_card_port *card_port,
+ struct leapraid_card_port_feature *feature)
+{
+ feature->dirty_flg =
+ old_card_port->flg & LEAPRAID_CARD_PORT_FLG_DIRTY;
+ feature->same_addr =
+ old_card_port->sas_address == card_port->sas_address;
+ feature->exact_phy =
+ old_card_port->phy_mask == card_port->phy_mask;
+ feature->phy_overlap =
+ old_card_port->phy_mask & card_port->phy_mask;
+ feature->same_port =
+ old_card_port->port_id == card_port->port_id;
+ feature->cur_chking_old_port = old_card_port;
+
+ if (!feature->dirty_flg || !feature->same_addr)
+ return CARD_PORT_SKIP_CHECKING;
+
+ return CARD_PORT_FURTHER_CHECKING_NEEDED;
+}
+
+static int leapraid_process_card_port_feature(
+ struct leapraid_card_port_feature *feature)
+{
+ struct leapraid_card_port *old_card_port;
+
+ old_card_port = feature->cur_chking_old_port;
+ if (feature->exact_phy) {
+ feature->checking_state = SAME_PORT_WITH_NOTHING_CHANGED;
+ feature->expected_old_port = old_card_port;
+ return 1;
+ } else if (feature->phy_overlap) {
+ if (feature->same_port) {
+ feature->checking_state =
+ SAME_PORT_WITH_PARTIALLY_CHANGED_PHYS;
+ feature->expected_old_port = old_card_port;
+ } else if (feature->checking_state !=
+ SAME_PORT_WITH_PARTIALLY_CHANGED_PHYS) {
+ feature->checking_state =
+ SAME_ADDR_WITH_PARTIALLY_CHANGED_PHYS;
+ feature->expected_old_port = old_card_port;
+ }
+ } else {
+ if (feature->checking_state !=
+ SAME_PORT_WITH_PARTIALLY_CHANGED_PHYS &&
+ feature->checking_state !=
+ SAME_ADDR_WITH_PARTIALLY_CHANGED_PHYS) {
+ feature->checking_state = SAME_ADDR_ONLY;
+ feature->expected_old_port = old_card_port;
+ feature->same_addr_port_count++;
+ }
+ }
+
+ return 0;
+}
+
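+/*
+ * Compare a freshly discovered card port against the ports marked
+ * dirty before rescan and classify the match: identical phy mask,
+ * partially changed phys on the same or another port id, or an
+ * address-only match (counting how many old ports share the address).
+ */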
+static int leapraid_check_card_port(struct leapraid_adapter *adapter,
+ struct leapraid_card_port *card_port,
+ struct leapraid_card_port **expected_card_port,
+ int *count)
+{
+ struct leapraid_card_port *old_card_port;
+ struct leapraid_card_port_feature feature;
+
+ *expected_card_port = NULL;
+ memset(&feature, 0, sizeof(struct leapraid_card_port_feature));
+ feature.expected_old_port = NULL;
+ feature.same_addr_port_count = 0;
+ feature.checking_state = NEW_CARD_PORT;
+
+ list_for_each_entry(old_card_port, &adapter->dev_topo.card_port_list,
+ list) {
+ if (leapraid_get_card_port_feature(old_card_port, card_port,
+ &feature))
+ continue;
+
+ if (leapraid_process_card_port_feature(&feature))
+ break;
+ }
+
+ if (feature.checking_state == SAME_ADDR_ONLY)
+ *count = feature.same_addr_port_count;
+
+ *expected_card_port = feature.expected_old_port;
+ return feature.checking_state;
+}
+
+static void leapraid_del_phy_part_of_anther_port(
+ struct leapraid_adapter *adapter,
+ struct leapraid_card_port *card_port_table, int index,
+ u8 port_count, int offset)
+{
+ struct leapraid_topo_node *card_topo_node;
+ bool found = false;
+ int i;
+
+ card_topo_node = &adapter->dev_topo.card;
+ for (i = 0; i < port_count; i++) {
+ if (i == index)
+ continue;
+
+ if (card_port_table[i].phy_mask & BIT(offset)) {
+ leapraid_transport_detach_phy_to_port(adapter,
+ card_topo_node,
+ &card_topo_node->card_phy[offset]);
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ card_port_table[index].phy_mask |= BIT(offset);
+}
+
+static void leapraid_add_or_del_phys_from_existing_port(
+ struct leapraid_adapter *adapter,
+ struct leapraid_card_port *card_port,
+ struct leapraid_card_port *card_port_table,
+ int index, u8 port_count)
+{
+ struct leapraid_topo_node *card_topo_node;
+ u32 phy_mask_diff;
+ u32 offset = 0;
+
+ card_topo_node = &adapter->dev_topo.card;
+ phy_mask_diff = card_port->phy_mask ^
+ card_port_table[index].phy_mask;
+ for (offset = 0; offset < adapter->dev_topo.card.phys_num; offset++) {
+ if (!(phy_mask_diff & BIT(offset)))
+ continue;
+
+ if (!(card_port_table[index].phy_mask & BIT(offset))) {
+ leapraid_del_phy_part_of_anther_port(adapter,
+ card_port_table,
+ index, port_count,
+ offset);
+ continue;
+ }
+
+ if (card_topo_node->card_phy[offset].phy_is_assigned)
+ leapraid_transport_detach_phy_to_port(adapter,
+ card_topo_node,
+ &card_topo_node->card_phy[offset]);
+
+ leapraid_transport_attach_phy_to_port(adapter,
+ card_topo_node, &card_topo_node->card_phy[offset],
+ card_port->sas_address,
+ card_port);
+ }
+}
+
+struct leapraid_sas_dev *leapraid_get_next_sas_dev_from_init_list(
+ struct leapraid_adapter *adapter)
+{
+ struct leapraid_sas_dev *sas_dev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ if (!list_empty(&adapter->dev_topo.sas_dev_init_list)) {
+ sas_dev = list_first_entry(&adapter->dev_topo.sas_dev_init_list,
+ struct leapraid_sas_dev, list);
+ leapraid_sdev_get(sas_dev);
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return sas_dev;
+}
+
+static bool leapraid_check_boot_dev_internal(u64 sas_address, u64 dev_name,
+ u64 enc_lid, u16 slot,
+ struct leapraid_boot_dev *boot_dev,
+ u8 form)
+{
+ if (!boot_dev)
+ return false;
+
+ switch (form & LEAPRAID_BOOTDEV_FORM_MASK) {
+ case LEAPRAID_BOOTDEV_FORM_SAS_WWID:
+ if (!sas_address)
+ return false;
+
+ return sas_address ==
+ le64_to_cpu(((struct leapraid_boot_dev_format_sas_wwid *)(
+ boot_dev->pg_dev))->sas_addr);
+ case LEAPRAID_BOOTDEV_FORM_ENC_SLOT:
+ if (!enc_lid)
+ return false;
+
+ return (enc_lid == le64_to_cpu(((struct leapraid_boot_dev_format_enc_slot *)(
+ boot_dev->pg_dev))->enc_lid) &&
+ slot == le16_to_cpu(((struct leapraid_boot_dev_format_enc_slot *)(
+ boot_dev->pg_dev))->slot_num));
+ case LEAPRAID_BOOTDEV_FORM_DEV_NAME:
+ if (!dev_name)
+ return false;
+
+ return dev_name == le64_to_cpu(((struct leapraid_boot_dev_format_dev_name *)(
+ boot_dev->pg_dev))->dev_name);
+ case LEAPRAID_BOOTDEV_FORM_NONE:
+ default:
+ return false;
+ }
+}
+
+static void leapraid_try_set_boot_dev(struct leapraid_boot_dev *boot_dev,
+ u64 sas_addr, u64 dev_name,
+ u64 enc_lid, u16 slot,
+ void *dev, u32 chnl)
+{
+ bool matched = false;
+
+ if (boot_dev->dev)
+ return;
+
+ matched = leapraid_check_boot_dev_internal(sas_addr, dev_name, enc_lid,
+ slot, boot_dev,
+ boot_dev->form);
+ if (matched) {
+ boot_dev->dev = dev;
+ boot_dev->chnl = chnl;
+ }
+}
+
+static void leapraid_check_boot_dev(struct leapraid_adapter *adapter,
+ void *dev, u32 chnl)
+{
+ u64 sas_addr = 0;
+ u64 dev_name = 0;
+ u64 enc_lid = 0;
+ u16 slot = 0;
+
+ if (!adapter->scan_dev_desc.driver_loading)
+ return;
+
+ switch (chnl) {
+ case RAID_CHANNEL:
+ {
+ struct leapraid_raid_volume *raid_volume =
+ (struct leapraid_raid_volume *)dev;
+
+ sas_addr = raid_volume->wwid;
+ break;
+ }
+ default:
+ {
+ struct leapraid_sas_dev *sas_dev =
+ (struct leapraid_sas_dev *)dev;
+ sas_addr = sas_dev->sas_addr;
+ dev_name = sas_dev->dev_name;
+ enc_lid = sas_dev->enc_lid;
+ slot = sas_dev->slot;
+ break;
+ }
+ }
+
+ leapraid_try_set_boot_dev(&adapter->boot_devs.requested_boot_dev,
+ sas_addr, dev_name, enc_lid,
+ slot, dev, chnl);
+ leapraid_try_set_boot_dev(&adapter->boot_devs.requested_alt_boot_dev,
+ sas_addr, dev_name, enc_lid,
+ slot, dev, chnl);
+ leapraid_try_set_boot_dev(&adapter->boot_devs.current_boot_dev,
+ sas_addr, dev_name, enc_lid,
+ slot, dev, chnl);
+}
+
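+/*
+ * Copy the config request into the frame reserved for the internal
+ * config taskid, fire it, and wait (with timeout) for the reply.
+ */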
+static void leapraid_build_and_fire_cfg_req(struct leapraid_adapter *adapter,
+ struct leapraid_cfg_req *leap_mpi_cfgp_req,
+ struct leapraid_cfg_rep *leap_mpi_cfgp_rep)
+{
+ struct leapraid_cfg_req *local_leap_cfg_req;
+
+ memset(leap_mpi_cfgp_rep, 0, sizeof(struct leapraid_cfg_rep));
+ memset((void *)(&adapter->driver_cmds.cfg_op_cmd.reply), 0,
+ sizeof(struct leapraid_cfg_rep));
+ adapter->driver_cmds.cfg_op_cmd.status = LEAPRAID_CMD_PENDING;
+ local_leap_cfg_req = leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.cfg_op_cmd.inter_taskid);
+ memcpy(local_leap_cfg_req, leap_mpi_cfgp_req,
+ sizeof(struct leapraid_cfg_req));
+ init_completion(&adapter->driver_cmds.cfg_op_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.cfg_op_cmd.inter_taskid);
+ wait_for_completion_timeout(&adapter->driver_cmds.cfg_op_cmd.done,
+ LEAPRAID_CFG_OP_TIMEOUT * HZ);
+}
+
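+/*
+ * Issue a config page request with bounded retries: a command killed
+ * by a hard reset is retried, a timeout outside of shost/pcie
+ * recovery triggers a hard reset, and a successful READ_CURRENT reply
+ * is copied back into the caller's page buffer.
+ */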
+static int leapraid_req_cfg_func(struct leapraid_adapter *adapter,
+ struct leapraid_cfg_req *leap_mpi_cfgp_req,
+ struct leapraid_cfg_rep *leap_mpi_cfgp_rep,
+ void *target_cfg_pg, void *real_cfg_pg_addr,
+ u16 target_real_cfg_pg_sz)
+{
+ u32 adapter_status = UINT_MAX;
+ bool issue_reset = false;
+ u8 retry_cnt;
+ int rc;
+
+ retry_cnt = 0;
+ mutex_lock(&adapter->driver_cmds.cfg_op_cmd.mutex);
+retry:
+ if (retry_cnt) {
+ if (retry_cnt > LEAPRAID_CFG_REQ_RETRY_TIMES) {
+ rc = -EFAULT;
+ goto out;
+ }
+ dev_warn(&adapter->pdev->dev,
+ "cfg-req: retry request, cnt=%u\n", retry_cnt);
+ }
+
+ rc = leapraid_check_adapter_is_op(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev,
+ "cfg-req: adapter not operational\n");
+ goto out;
+ }
+
+ leapraid_build_and_fire_cfg_req(adapter, leap_mpi_cfgp_req,
+ leap_mpi_cfgp_rep);
+ if (!(adapter->driver_cmds.cfg_op_cmd.status & LEAPRAID_CMD_DONE)) {
+ retry_cnt++;
+ if (adapter->driver_cmds.cfg_op_cmd.status &
+ LEAPRAID_CMD_RESET) {
+ dev_warn(&adapter->pdev->dev,
+ "cfg-req: cmd gg due to hard reset\n");
+ goto retry;
+ }
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering) {
+ dev_err(&adapter->pdev->dev,
+ "cfg-req: cmd not done during %s, skip reset\n",
+ adapter->access_ctrl.shost_recovering ?
+ "shost recovery" : "pcie recovery");
+ issue_reset = false;
+ rc = -EFAULT;
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "cfg-req: cmd timeout, issuing hard reset\n");
+ issue_reset = true;
+ }
+
+ goto out;
+ }
+
+ if (adapter->driver_cmds.cfg_op_cmd.status &
+ LEAPRAID_CMD_REPLY_VALID) {
+ memcpy(leap_mpi_cfgp_rep,
+ (void *)(&adapter->driver_cmds.cfg_op_cmd.reply),
+ sizeof(struct leapraid_cfg_rep));
+ adapter_status = le16_to_cpu(
+ leap_mpi_cfgp_rep->adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (adapter_status == LEAPRAID_ADAPTER_STATUS_SUCCESS) {
+ if (target_cfg_pg && real_cfg_pg_addr &&
+ target_real_cfg_pg_sz)
+ if (leap_mpi_cfgp_req->action ==
+ LEAPRAID_CFG_ACT_PAGE_READ_CUR)
+ memcpy(target_cfg_pg,
+ real_cfg_pg_addr,
+ target_real_cfg_pg_sz);
+ } else {
+ if (adapter_status !=
+ LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_PAGE)
+ dev_err(&adapter->pdev->dev,
+ "cfg-rep: adapter_status=0x%x\n",
+ adapter_status);
+ rc = -EFAULT;
+ }
+ } else {
+ dev_err(&adapter->pdev->dev, "cfg-rep: reply invalid\n");
+ rc = -EFAULT;
+ }
+
+out:
+ adapter->driver_cmds.cfg_op_cmd.status = LEAPRAID_CMD_NOT_USED;
+ mutex_unlock(&adapter->driver_cmds.cfg_op_cmd.mutex);
+ if (issue_reset) {
+ if (adapter->scan_dev_desc.first_scan_dev_fired) {
+ dev_info(&adapter->pdev->dev,
+ "%s:%d cfg-req: failure, issuing reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, FULL_RESET);
+ rc = -EFAULT;
+ } else {
+ dev_warn(&adapter->pdev->dev,
+ "cfg-req: cmd gg during init, skip reset\n");
+ rc = -EFAULT;
+ }
+ }
+ return rc;
+}
+
+static int leapraid_request_cfg_pg_header(struct leapraid_adapter *adapter,
+ struct leapraid_cfg_req *leap_mpi_cfgp_req,
+ struct leapraid_cfg_rep *leap_mpi_cfgp_rep)
+{
+ return leapraid_req_cfg_func(adapter, leap_mpi_cfgp_req,
+ leap_mpi_cfgp_rep, NULL, NULL, 0);
+}
+
+static int leapraid_request_cfg_pg(struct leapraid_adapter *adapter,
+ struct leapraid_cfg_req *leap_mpi_cfgp_req,
+ struct leapraid_cfg_rep *leap_mpi_cfgp_rep,
+ void *target_cfg_pg, void *real_cfg_pg_addr,
+ u16 target_real_cfg_pg_sz)
+{
+ return leapraid_req_cfg_func(adapter, leap_mpi_cfgp_req,
+ leap_mpi_cfgp_rep, target_cfg_pg,
+ real_cfg_pg_addr, target_real_cfg_pg_sz);
+}
+
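+/*
+ * Read a config page in two steps: request the page header first,
+ * then echo the returned header back with a READ_CURRENT action into
+ * a DMA buffer sized from the reply, copying the result into the
+ * caller's buffer.
+ */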
+int leapraid_op_config_page(struct leapraid_adapter *adapter,
+ void *target_cfg_pg, union cfg_param_1 cfgp1,
+ union cfg_param_2 cfgp2,
+ enum config_page_action cfg_op)
+{
+ struct leapraid_cfg_req leap_mpi_cfgp_req;
+ struct leapraid_cfg_rep leap_mpi_cfgp_rep;
+ u16 real_cfg_pg_sz = 0;
+ void *real_cfg_pg_addr = NULL;
+ dma_addr_t real_cfg_pg_dma = 0;
+ u32 __page_size;
+ int rc;
+
+ memset(&leap_mpi_cfgp_req, 0, sizeof(struct leapraid_cfg_req));
+ leap_mpi_cfgp_req.func = LEAPRAID_FUNC_CONFIG_OP;
+ leap_mpi_cfgp_req.action = LEAPRAID_CFG_ACT_PAGE_HEADER;
+
+ switch (cfg_op) {
+ case GET_BIOS_PG3:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_BIOS;
+ leap_mpi_cfgp_req.header.page_num =
+ LEAPRAID_CFG_PAGE_NUM_BIOS3;
+ __page_size = sizeof(struct leapraid_bios_page3);
+ break;
+ case GET_BIOS_PG2:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_BIOS;
+ leap_mpi_cfgp_req.header.page_num =
+ LEAPRAID_CFG_PAGE_NUM_BIOS2;
+ __page_size = sizeof(struct leapraid_bios_page2);
+ break;
+ case GET_SAS_DEVICE_PG0:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type = LEAPRAID_CFG_EXTPT_SAS_DEV;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_DEV0;
+ __page_size = sizeof(struct leapraid_sas_dev_p0);
+ break;
+ case GET_SAS_IOUNIT_PG0:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type =
+ LEAPRAID_CFG_EXTPT_SAS_IO_UNIT;
+ leap_mpi_cfgp_req.header.page_num =
+ LEAPRAID_CFG_PAGE_NUM_IOUNIT0;
+ __page_size = cfgp1.size;
+ break;
+ case GET_SAS_IOUNIT_PG1:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type =
+ LEAPRAID_CFG_EXTPT_SAS_IO_UNIT;
+ leap_mpi_cfgp_req.header.page_num =
+ LEAPRAID_CFG_PAGE_NUM_IOUNIT1;
+ __page_size = cfgp1.size;
+ break;
+ case GET_SAS_EXPANDER_PG0:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type = LEAPRAID_CFG_EXTPT_SAS_EXP;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_EXP0;
+ __page_size = sizeof(struct leapraid_exp_p0);
+ break;
+ case GET_SAS_EXPANDER_PG1:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type = LEAPRAID_CFG_EXTPT_SAS_EXP;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_EXP1;
+ __page_size = sizeof(struct leapraid_exp_p1);
+ break;
+ case GET_SAS_ENCLOSURE_PG0:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type = LEAPRAID_CFG_EXTPT_ENC;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_ENC0;
+ __page_size = sizeof(struct leapraid_enc_p0);
+ break;
+ case GET_PHY_PG0:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type = LEAPRAID_CFG_EXTPT_SAS_PHY;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_PHY0;
+ __page_size = sizeof(struct leapraid_sas_phy_p0);
+ break;
+ case GET_RAID_VOLUME_PG0:
+ leap_mpi_cfgp_req.header.page_type =
+ LEAPRAID_CFG_PT_RAID_VOLUME;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_VOL0;
+ __page_size = cfgp1.size;
+ break;
+ case GET_RAID_VOLUME_PG1:
+ leap_mpi_cfgp_req.header.page_type =
+ LEAPRAID_CFG_PT_RAID_VOLUME;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_VOL1;
+ __page_size = sizeof(struct leapraid_raidvol_p1);
+ break;
+ case GET_PHY_DISK_PG0:
+ leap_mpi_cfgp_req.header.page_type =
+ LEAPRAID_CFG_PT_RAID_PHYSDISK;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_PD0;
+ __page_size = sizeof(struct leapraid_raidpd_p0);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "unsupported config page action=%d!\n", cfg_op);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ leapraid_build_nodata_mpi_sg(adapter,
+ &leap_mpi_cfgp_req.page_buf_sge);
+ rc = leapraid_request_cfg_pg_header(adapter,
+ &leap_mpi_cfgp_req,
+ &leap_mpi_cfgp_rep);
+ if (rc) {
+		dev_err(&adapter->pdev->dev,
+			"cfg-req: header failed rc=%d\n", rc);
+ goto out;
+ }
+
+ if (cfg_op == GET_SAS_DEVICE_PG0 ||
+ cfg_op == GET_SAS_EXPANDER_PG0 ||
+ cfg_op == GET_SAS_ENCLOSURE_PG0 ||
+ cfg_op == GET_RAID_VOLUME_PG1)
+ leap_mpi_cfgp_req.page_addr = cpu_to_le32(cfgp1.form |
+ cfgp2.handle);
+ else if (cfg_op == GET_PHY_DISK_PG0)
+ leap_mpi_cfgp_req.page_addr = cpu_to_le32(cfgp1.form |
+ cfgp2.form_specific);
+ else if (cfg_op == GET_RAID_VOLUME_PG0)
+ leap_mpi_cfgp_req.page_addr =
+ cpu_to_le32(cfgp2.handle |
+ LEAPRAID_RAID_VOL_CFG_PGAD_HDL);
+ else if (cfg_op == GET_SAS_EXPANDER_PG1)
+ leap_mpi_cfgp_req.page_addr =
+ cpu_to_le32(cfgp2.handle |
+ (cfgp1.phy_number <<
+ LEAPRAID_SAS_EXP_CFG_PGAD_PHYNUM_SHIFT) |
+ LEAPRAID_SAS_EXP_CFG_PGAD_HDL_PHY_NUM);
+ else if (cfg_op == GET_PHY_PG0)
+ leap_mpi_cfgp_req.page_addr = cpu_to_le32(cfgp1.phy_number |
+ LEAPRAID_SAS_PHY_CFG_PGAD_PHY_NUMBER);
+
+ leap_mpi_cfgp_req.action = LEAPRAID_CFG_ACT_PAGE_READ_CUR;
+
+ leap_mpi_cfgp_req.header.page_num = leap_mpi_cfgp_rep.header.page_num;
+ leap_mpi_cfgp_req.header.page_type =
+ leap_mpi_cfgp_rep.header.page_type;
+ leap_mpi_cfgp_req.header.page_len = leap_mpi_cfgp_rep.header.page_len;
+ leap_mpi_cfgp_req.ext_page_len = leap_mpi_cfgp_rep.ext_page_len;
+ leap_mpi_cfgp_req.ext_page_type = leap_mpi_cfgp_rep.ext_page_type;
+
+	real_cfg_pg_sz = (leap_mpi_cfgp_req.header.page_len) ?
+	    leap_mpi_cfgp_req.header.page_len * LEAPRAID_CFG_UNIT_SIZE :
+	    le16_to_cpu(leap_mpi_cfgp_rep.ext_page_len) * LEAPRAID_CFG_UNIT_SIZE;
+ real_cfg_pg_addr = dma_alloc_coherent(&adapter->pdev->dev,
+ real_cfg_pg_sz,
+ &real_cfg_pg_dma,
+ GFP_KERNEL);
+ if (!real_cfg_pg_addr) {
+ dev_err(&adapter->pdev->dev, "cfg-req: dma alloc failed\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
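+	/* action was set to READ_CUR above, so only the read branch runs here */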
+ if (leap_mpi_cfgp_req.action == LEAPRAID_CFG_ACT_PAGE_WRITE_CUR) {
+ leapraid_single_mpi_sg_append(adapter,
+ &leap_mpi_cfgp_req.page_buf_sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL |
+ LEAPRAID_SGE_FLG_H2C) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ real_cfg_pg_sz,
+ real_cfg_pg_dma);
+ memcpy(real_cfg_pg_addr, target_cfg_pg,
+ min_t(u16, real_cfg_pg_sz, __page_size));
+ } else {
+ memset(target_cfg_pg, 0, __page_size);
+ leapraid_single_mpi_sg_append(adapter,
+ &leap_mpi_cfgp_req.page_buf_sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ real_cfg_pg_sz,
+ real_cfg_pg_dma);
+ memset(real_cfg_pg_addr, 0,
+ min_t(u16, real_cfg_pg_sz, __page_size));
+ }
+
+ rc = leapraid_request_cfg_pg(adapter,
+ &leap_mpi_cfgp_req,
+ &leap_mpi_cfgp_rep,
+ target_cfg_pg,
+ real_cfg_pg_addr,
+ min_t(u16, real_cfg_pg_sz, __page_size));
+ if (rc) {
+ u32 adapter_status;
+
+ adapter_status = le16_to_cpu(leap_mpi_cfgp_rep.adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (adapter_status !=
+ LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_PAGE)
+ dev_err(&adapter->pdev->dev,
+ "cfg-req: rc=%d, pg_info: 0x%x, 0x%x, %d\n",
+ rc, leap_mpi_cfgp_req.header.page_type,
+ leap_mpi_cfgp_req.ext_page_type,
+ leap_mpi_cfgp_req.header.page_num);
+ }
+
+ if (real_cfg_pg_addr)
+ dma_free_coherent(&adapter->pdev->dev,
+ real_cfg_pg_sz,
+ real_cfg_pg_addr,
+ real_cfg_pg_dma);
+out:
+ return rc;
+}
+
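+/*
+ * Walk the RAID configuration pages (GET_NEXT_LOOP addressing) and scan
+ * each page's elements for @pd_hdl; on a match, return the owning volume
+ * handle through @vol_hdl.  A hot-spare element or an "invalid page"
+ * adapter status ends the search with *vol_hdl == 0.
+ */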
+static int leapraid_cfg_get_volume_hdl_dispatch(
+ struct leapraid_adapter *adapter,
+ struct leapraid_cfg_req *cfg_req,
+ struct leapraid_cfg_rep *cfg_rep,
+ struct leapraid_raid_cfg_p0 *raid_cfg_p0,
+ void *real_cfg_pg_addr,
+ u16 real_cfg_pg_sz,
+ u16 raid_cfg_p0_sz,
+ u16 pd_hdl, u16 *vol_hdl)
+{
+ u16 phys_disk_dev_hdl;
+ u16 adapter_status;
+ u16 element_type;
+ int config_num;
+ int rc, i;
+
+ config_num = 0xff;
+ while (true) {
+ cfg_req->page_addr =
+ cpu_to_le32(config_num +
+ LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP);
+ rc = leapraid_request_cfg_pg(
+ adapter, cfg_req, cfg_rep,
+ raid_cfg_p0, real_cfg_pg_addr,
+ min_t(u16, real_cfg_pg_sz, raid_cfg_p0_sz));
+ adapter_status = le16_to_cpu(cfg_rep->adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (rc) {
+ if (adapter_status ==
+ LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_PAGE) {
+ *vol_hdl = 0;
+ return 0;
+ }
+ return rc;
+ }
+
+ if (adapter_status != LEAPRAID_ADAPTER_STATUS_SUCCESS)
+ return -1;
+
+ for (i = 0; i < raid_cfg_p0->elements_num; i++) {
+ element_type =
+ le16_to_cpu(raid_cfg_p0->cfg_element[i].element_flg) &
+ LEAPRAID_RAIDCFG_P0_EFLG_MASK_ELEMENT_TYPE;
+
+ switch (element_type) {
+ case LEAPRAID_RAIDCFG_P0_EFLG_VOL_PHYS_DISK_ELEMENT:
+ case LEAPRAID_RAIDCFG_P0_EFLG_OCE_ELEMENT:
+ phys_disk_dev_hdl =
+ le16_to_cpu(raid_cfg_p0->cfg_element[i]
+ .phys_disk_dev_hdl);
+ if (phys_disk_dev_hdl == pd_hdl) {
+ *vol_hdl =
+ le16_to_cpu
+ (raid_cfg_p0->cfg_element[i]
+ .vol_dev_hdl);
+ return 0;
+ }
+ break;
+
+ case LEAPRAID_RAIDCFG_P0_EFLG_HOT_SPARE_ELEMENT:
+ *vol_hdl = 0;
+ return 0;
+ default:
+ break;
+ }
+ }
+ config_num = raid_cfg_p0->cfg_num;
+ }
+ return 0;
+}
+
+int leapraid_cfg_get_volume_hdl(struct leapraid_adapter *adapter,
+ u16 pd_hdl, u16 *vol_hdl)
+{
+ struct leapraid_raid_cfg_p0 *raid_cfg_p0 = NULL;
+ struct leapraid_cfg_req cfg_req;
+ struct leapraid_cfg_rep cfg_rep;
+ dma_addr_t real_cfg_pg_dma = 0;
+ void *real_cfg_pg_addr = NULL;
+ u16 real_cfg_pg_sz = 0;
+ int rc, raid_cfg_p0_sz;
+
+ *vol_hdl = 0;
+ memset(&cfg_req, 0, sizeof(struct leapraid_cfg_req));
+ cfg_req.func = LEAPRAID_FUNC_CONFIG_OP;
+ cfg_req.action = LEAPRAID_CFG_ACT_PAGE_HEADER;
+ cfg_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ cfg_req.ext_page_type = LEAPRAID_CFG_EXTPT_RAID_CONFIG;
+ cfg_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_VOL0;
+
+ leapraid_build_nodata_mpi_sg(adapter, &cfg_req.page_buf_sge);
+ rc = leapraid_request_cfg_pg_header(adapter, &cfg_req, &cfg_rep);
+ if (rc)
+ goto out;
+
+ cfg_req.action = LEAPRAID_CFG_ACT_PAGE_READ_CUR;
+ raid_cfg_p0_sz = le16_to_cpu(cfg_rep.ext_page_len) *
+ LEAPRAID_CFG_UNIT_SIZE;
+ raid_cfg_p0 = kmalloc(raid_cfg_p0_sz, GFP_KERNEL);
+ if (!raid_cfg_p0) {
+		rc = -ENOMEM;
+ goto out;
+ }
+
+ real_cfg_pg_sz = (cfg_req.header.page_len) ?
+ cfg_req.header.page_len * LEAPRAID_CFG_UNIT_SIZE :
+ le16_to_cpu(cfg_rep.ext_page_len) * LEAPRAID_CFG_UNIT_SIZE;
+
+ real_cfg_pg_addr = dma_alloc_coherent(&adapter->pdev->dev,
+ real_cfg_pg_sz, &real_cfg_pg_dma,
+ GFP_KERNEL);
+ if (!real_cfg_pg_addr) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ memset(raid_cfg_p0, 0, raid_cfg_p0_sz);
+ leapraid_single_mpi_sg_append(adapter,
+ &cfg_req.page_buf_sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ real_cfg_pg_sz,
+ real_cfg_pg_dma);
+ memset(real_cfg_pg_addr, 0,
+ min_t(u16, real_cfg_pg_sz, raid_cfg_p0_sz));
+
+ rc = leapraid_cfg_get_volume_hdl_dispatch(adapter,
+ &cfg_req, &cfg_rep,
+ raid_cfg_p0,
+ real_cfg_pg_addr,
+ real_cfg_pg_sz,
+ raid_cfg_p0_sz,
+ pd_hdl, vol_hdl);
+
+out:
+ if (real_cfg_pg_addr)
+ dma_free_coherent(&adapter->pdev->dev,
+ real_cfg_pg_sz, real_cfg_pg_addr,
+ real_cfg_pg_dma);
+ kfree(raid_cfg_p0);
+ return rc;
+}
+
+static int leapraid_get_adapter_phys(struct leapraid_adapter *adapter,
+ u8 *nr_phys)
+{
+ struct leapraid_sas_io_unit_p0 sas_io_unit_page0;
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ int rc = 0;
+
+ *nr_phys = 0;
+ cfgp1.size = sizeof(struct leapraid_sas_io_unit_p0);
+ rc = leapraid_op_config_page(adapter, &sas_io_unit_page0, cfgp1,
+ cfgp2, GET_SAS_IOUNIT_PG0);
+ if (rc)
+ return rc;
+
+ *nr_phys = sas_io_unit_page0.phy_num;
+
+ return 0;
+}
+
+static int leapraid_cfg_get_number_pds(struct leapraid_adapter *adapter,
+ u16 hdl, u8 *num_pds)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_raidvol_p0 raidvol_p0;
+ int rc;
+
+ *num_pds = 0;
+ cfgp1.size = sizeof(struct leapraid_raidvol_p0);
+ cfgp2.handle = hdl;
+ rc = leapraid_op_config_page(adapter, &raidvol_p0, cfgp1,
+ cfgp2, GET_RAID_VOLUME_PG0);
+
+ if (!rc)
+ *num_pds = raidvol_p0.num_phys_disks;
+
+ return rc;
+}
+
+int leapraid_cfg_get_volume_wwid(struct leapraid_adapter *adapter,
+ u16 vol_hdl, u64 *wwid)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_raidvol_p1 raidvol_p1;
+ int rc;
+
+ *wwid = 0;
+ cfgp1.form = LEAPRAID_RAID_VOL_CFG_PGAD_HDL;
+ cfgp2.handle = vol_hdl;
+ rc = leapraid_op_config_page(adapter, &raidvol_p1, cfgp1,
+ cfgp2, GET_RAID_VOLUME_PG1);
+ if (!rc)
+ *wwid = le64_to_cpu(raidvol_p1.wwid);
+
+ return rc;
+}
+
+static int leapraid_get_sas_io_unit_page0(struct leapraid_adapter *adapter,
+ struct leapraid_sas_io_unit_p0 *sas_io_unit_p0,
+ u16 sas_iou_pg0_sz)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+
+ cfgp1.size = sas_iou_pg0_sz;
+ return leapraid_op_config_page(adapter, sas_io_unit_p0, cfgp1,
+ cfgp2, GET_SAS_IOUNIT_PG0);
+}
+
+static int leapraid_get_sas_address(struct leapraid_adapter *adapter,
+ u16 hdl, u64 *sas_address)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_dev_p0 sas_dev_p0;
+
+ *sas_address = 0;
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = hdl;
+ if ((leapraid_op_config_page(adapter, &sas_dev_p0, cfgp1,
+ cfgp2, GET_SAS_DEVICE_PG0)))
+ return -ENXIO;
+
+ if (hdl <= adapter->dev_topo.card.phys_num &&
+ (!(le32_to_cpu(sas_dev_p0.dev_info) & LEAPRAID_DEVTYP_SEP)))
+ *sas_address = adapter->dev_topo.card.sas_address;
+ else
+ *sas_address = le64_to_cpu(sas_dev_p0.sas_address);
+
+ return 0;
+}
+
+int leapraid_get_volume_cap(struct leapraid_adapter *adapter,
+ struct leapraid_raid_volume *raid_volume)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_raidvol_p0 *raidvol_p0;
+ struct leapraid_sas_dev_p0 sas_dev_p0;
+ struct leapraid_raidpd_p0 raidpd_p0;
+ u8 num_pds;
+ u16 sz;
+
+ if ((leapraid_cfg_get_number_pds(adapter, raid_volume->hdl,
+ &num_pds)) || !num_pds)
+ return -EFAULT;
+
+ raid_volume->pd_num = num_pds;
+ sz = offsetof(struct leapraid_raidvol_p0, phys_disk) +
+ (num_pds * sizeof(struct leapraid_raidvol0_phys_disk));
+ raidvol_p0 = kzalloc(sz, GFP_KERNEL);
+ if (!raidvol_p0)
+ return -EFAULT;
+
+ cfgp1.size = sz;
+ cfgp2.handle = raid_volume->hdl;
+ if ((leapraid_op_config_page(adapter, raidvol_p0, cfgp1, cfgp2,
+ GET_RAID_VOLUME_PG0))) {
+ kfree(raidvol_p0);
+ return -EFAULT;
+ }
+
+ raid_volume->vol_type = raidvol_p0->volume_type;
+ cfgp1.form = LEAPRAID_PHYSDISK_CFG_PGAD_PHYSDISKNUM;
+ cfgp2.form_specific = raidvol_p0->phys_disk[0].phys_disk_num;
+ if (!(leapraid_op_config_page(adapter, &raidpd_p0, cfgp1, cfgp2,
+ GET_PHY_DISK_PG0))) {
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = le16_to_cpu(raidpd_p0.dev_hdl);
+ if (!(leapraid_op_config_page(adapter, &sas_dev_p0, cfgp1,
+ cfgp2, GET_SAS_DEVICE_PG0))) {
+ raid_volume->dev_info =
+ le32_to_cpu(sas_dev_p0.dev_info);
+ }
+ }
+
+ kfree(raidvol_p0);
+ return 0;
+}
+
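+/*
+ * Delayed work that mirrors the firmware log read/write positions between
+ * the host and adapter registers, then re-arms itself for as long as the
+ * log workqueue is alive (checked under the adapter reset lock).
+ */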
+static void leapraid_fw_log_work(struct work_struct *work)
+{
+ struct leapraid_adapter *adapter = container_of(work,
+ struct leapraid_adapter, fw_log_desc.fw_log_work.work);
+ struct leapraid_fw_log_info *infom;
+ unsigned long flags;
+
+ infom = (struct leapraid_fw_log_info *)(adapter->fw_log_desc.fw_log_buffer +
+ LEAPRAID_SYS_LOG_BUF_SIZE);
+
+ if (adapter->fw_log_desc.fw_log_init_flag == 0) {
+ infom->user_position =
+ leapraid_readl(&adapter->iomem_base->host_log_buf_pos);
+ infom->adapter_position =
+ leapraid_readl(&adapter->iomem_base->adapter_log_buf_pos);
+ adapter->fw_log_desc.fw_log_init_flag++;
+ }
+
+ writel(infom->user_position, &adapter->iomem_base->host_log_buf_pos);
+ infom->adapter_position =
+ leapraid_readl(&adapter->iomem_base->adapter_log_buf_pos);
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ if (adapter->fw_log_desc.fw_log_wq)
+ queue_delayed_work(adapter->fw_log_desc.fw_log_wq,
+ &adapter->fw_log_desc.fw_log_work,
+ msecs_to_jiffies(LEAPRAID_PCIE_LOG_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+}
+
+void leapraid_fw_log_stop(struct leapraid_adapter *adapter)
+{
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ if (!adapter->fw_log_desc.open_pcie_trace)
+ return;
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ wq = adapter->fw_log_desc.fw_log_wq;
+ adapter->fw_log_desc.fw_log_wq = NULL;
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+ if (wq) {
+ if (!cancel_delayed_work_sync(&adapter->fw_log_desc.fw_log_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+ }
+}
+
+void leapraid_fw_log_start(struct leapraid_adapter *adapter)
+{
+ unsigned long flags;
+
+ if (!adapter->fw_log_desc.open_pcie_trace)
+ return;
+
+ if (adapter->fw_log_desc.fw_log_wq)
+ return;
+
+ INIT_DELAYED_WORK(&adapter->fw_log_desc.fw_log_work,
+ leapraid_fw_log_work);
+ snprintf(adapter->fw_log_desc.fw_log_wq_name,
+ sizeof(adapter->fw_log_desc.fw_log_wq_name),
+ "poll_%s%u_fw_log",
+ LEAPRAID_DRIVER_NAME, adapter->adapter_attr.id);
+ adapter->fw_log_desc.fw_log_wq =
+ create_singlethread_workqueue(
+ adapter->fw_log_desc.fw_log_wq_name);
+ if (!adapter->fw_log_desc.fw_log_wq)
+ return;
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ if (adapter->fw_log_desc.fw_log_wq)
+ queue_delayed_work(adapter->fw_log_desc.fw_log_wq,
+ &adapter->fw_log_desc.fw_log_work,
+ msecs_to_jiffies(LEAPRAID_PCIE_LOG_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+}
+
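+/*
+ * Push the current host wall-clock time (in milliseconds) to the adapter
+ * via a SAS IO-unit control request, split across the two 32-bit
+ * parameter values.  A command timeout escalates to a full hard reset.
+ */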
+static void leapraid_timestamp_sync(struct leapraid_adapter *adapter)
+{
+ struct leapraid_io_unit_ctrl_req *io_unit_ctrl_req;
+ ktime_t current_time;
+ bool issue_reset = false;
+ u64 time_stamp = 0;
+
+ mutex_lock(&adapter->driver_cmds.timestamp_sync_cmd.mutex);
+ adapter->driver_cmds.timestamp_sync_cmd.status = LEAPRAID_CMD_PENDING;
+ io_unit_ctrl_req =
+ leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.timestamp_sync_cmd.inter_taskid);
+ memset(io_unit_ctrl_req, 0, sizeof(struct leapraid_io_unit_ctrl_req));
+ io_unit_ctrl_req->func = LEAPRAID_FUNC_SAS_IO_UNIT_CTRL;
+ io_unit_ctrl_req->op = LEAPRAID_SAS_OP_SET_PARAMETER;
+ io_unit_ctrl_req->adapter_para = LEAPRAID_SET_PARAMETER_SYNC_TIMESTAMP;
+
+ current_time = ktime_get_real();
+ time_stamp = ktime_to_ms(current_time);
+
+ io_unit_ctrl_req->adapter_para_value =
+ cpu_to_le32(time_stamp & 0xFFFFFFFF);
+ io_unit_ctrl_req->adapter_para_value2 =
+ cpu_to_le32(time_stamp >> 32);
+ init_completion(&adapter->driver_cmds.timestamp_sync_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.timestamp_sync_cmd.inter_taskid);
+ wait_for_completion_timeout(&adapter->driver_cmds.timestamp_sync_cmd.done,
+ LEAPRAID_TIMESTAMP_SYNC_CMD_TIMEOUT * HZ);
+ if (!(adapter->driver_cmds.timestamp_sync_cmd.status &
+ LEAPRAID_CMD_DONE))
+ issue_reset =
+ leapraid_check_reset(
+ adapter->driver_cmds.timestamp_sync_cmd.status);
+
+ if (issue_reset) {
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, FULL_RESET);
+ }
+
+ adapter->driver_cmds.timestamp_sync_cmd.status = LEAPRAID_CMD_NOT_USED;
+ mutex_unlock(&adapter->driver_cmds.timestamp_sync_cmd.mutex);
+}
+
+static bool leapraid_should_skip_fault_check(struct leapraid_adapter *adapter)
+{
+ unsigned long flags;
+ bool skip;
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ skip = adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering ||
+ adapter->access_ctrl.host_removing;
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+
+ return skip;
+}
+
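+/*
+ * Periodic fault poller: if the adapter has left the OPERATIONAL state,
+ * attempt a full hard reset; every LEAPRAID_TIMESTAMP_SYNC_INTERVAL
+ * polls it also re-syncs the firmware timestamp.  The work re-arms
+ * itself under the adapter reset lock.
+ */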
+static void leapraid_check_scheduled_fault_work(struct work_struct *work)
+{
+ struct leapraid_adapter *adapter;
+ unsigned long flags;
+ u32 adapter_state;
+ int rc;
+
+ adapter = container_of(work, struct leapraid_adapter,
+ reset_desc.fault_reset_work.work);
+
+ if (leapraid_should_skip_fault_check(adapter))
+ goto scheduled_timer;
+
+ adapter_state = leapraid_get_adapter_state(adapter);
+ if (adapter_state != LEAPRAID_DB_OPERATIONAL) {
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ rc = leapraid_hard_reset_handler(adapter, FULL_RESET);
+ dev_warn(&adapter->pdev->dev, "%s: hard reset: %s\n",
+ __func__, (rc == 0) ? "success" : "failed");
+
+ adapter_state = leapraid_get_adapter_state(adapter);
+ if (rc && adapter_state != LEAPRAID_DB_OPERATIONAL)
+ return;
+ }
+
+ if (++adapter->timestamp_sync_cnt >=
+ LEAPRAID_TIMESTAMP_SYNC_INTERVAL) {
+ adapter->timestamp_sync_cnt = 0;
+ leapraid_timestamp_sync(adapter);
+ }
+
+scheduled_timer:
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ if (adapter->reset_desc.fault_reset_wq)
+ queue_delayed_work(adapter->reset_desc.fault_reset_wq,
+ &adapter->reset_desc.fault_reset_work,
+ msecs_to_jiffies(LEAPRAID_FAULT_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+}
+
+void leapraid_check_scheduled_fault_start(struct leapraid_adapter *adapter)
+{
+ unsigned long flags;
+
+ if (adapter->reset_desc.fault_reset_wq)
+ return;
+
+ adapter->timestamp_sync_cnt = 0;
+ INIT_DELAYED_WORK(&adapter->reset_desc.fault_reset_work,
+ leapraid_check_scheduled_fault_work);
+ snprintf(adapter->reset_desc.fault_reset_wq_name,
+ sizeof(adapter->reset_desc.fault_reset_wq_name),
+ "poll_%s%u_status",
+ LEAPRAID_DRIVER_NAME, adapter->adapter_attr.id);
+ adapter->reset_desc.fault_reset_wq =
+ create_singlethread_workqueue(
+ adapter->reset_desc.fault_reset_wq_name);
+ if (!adapter->reset_desc.fault_reset_wq) {
+ dev_err(&adapter->pdev->dev,
+ "create single thread workqueue failed!\n");
+ return;
+ }
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ if (adapter->reset_desc.fault_reset_wq)
+ queue_delayed_work(adapter->reset_desc.fault_reset_wq,
+ &adapter->reset_desc.fault_reset_work,
+ msecs_to_jiffies(LEAPRAID_FAULT_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+}
+
+void leapraid_check_scheduled_fault_stop(struct leapraid_adapter *adapter)
+{
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ wq = adapter->reset_desc.fault_reset_wq;
+ adapter->reset_desc.fault_reset_wq = NULL;
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+
+ if (!wq)
+ return;
+
+ if (!cancel_delayed_work_sync(&adapter->reset_desc.fault_reset_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+}
+
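+/*
+ * Gate for driver-internal SCSI IO: refuse while PCIe or host recovery
+ * is in flight, while the adapter readiness check fails, and for handles
+ * that are invalid or queued for removal.
+ */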
+static bool leapraid_ready_for_scsi_io(struct leapraid_adapter *adapter,
+ u16 hdl)
+{
+ if (adapter->access_ctrl.pcie_recovering ||
+ adapter->access_ctrl.shost_recovering)
+ return false;
+
+ if (leapraid_check_adapter_is_op(adapter))
+ return false;
+
+ if (hdl == LEAPRAID_INVALID_DEV_HANDLE)
+ return false;
+
+ if (test_bit(hdl, (unsigned long *)adapter->dev_topo.dev_removing))
+ return false;
+
+ return true;
+}
+
+static int leapraid_dispatch_scsi_io(struct leapraid_adapter *adapter,
+ struct leapraid_scsi_cmd_desc *cmd_desc)
+{
+ struct scsi_device *sdev;
+ struct leapraid_sdev_priv *sdev_priv;
+ struct scsi_cmnd *scmd;
+ void *dma_buffer = NULL;
+ dma_addr_t dma_addr = 0;
+ u8 sdev_flg = 0;
+ bool issue_reset = false;
+ int rc = 0;
+
+ if (WARN_ON(!adapter->driver_cmds.internal_scmd))
+ return -EINVAL;
+
+ if (!leapraid_ready_for_scsi_io(adapter, cmd_desc->hdl))
+ return -EINVAL;
+
+ mutex_lock(&adapter->driver_cmds.driver_scsiio_cmd.mutex);
+ if (adapter->driver_cmds.driver_scsiio_cmd.status !=
+ LEAPRAID_CMD_NOT_USED) {
+ rc = -EAGAIN;
+ goto out;
+ }
+ adapter->driver_cmds.driver_scsiio_cmd.status = LEAPRAID_CMD_PENDING;
+
+ __shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (sdev_priv->starget_priv->hdl == cmd_desc->hdl &&
+ sdev_priv->lun == cmd_desc->lun) {
+ sdev_flg = 1;
+ break;
+ }
+ }
+
+ if (!sdev_flg) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (cmd_desc->data_length) {
+ dma_buffer = dma_alloc_coherent(&adapter->pdev->dev,
+ cmd_desc->data_length,
+ &dma_addr, GFP_ATOMIC);
+ if (!dma_buffer) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (cmd_desc->dir == DMA_TO_DEVICE)
+ memcpy(dma_buffer, cmd_desc->data_buffer,
+ cmd_desc->data_length);
+ }
+
+ scmd = adapter->driver_cmds.internal_scmd;
+ scmd->device = sdev;
+ scmd->cmd_len = cmd_desc->cdb_length;
+ memcpy(scmd->cmnd, cmd_desc->cdb, cmd_desc->cdb_length);
+ scmd->sc_data_direction = cmd_desc->dir;
+ scmd->sdb.length = cmd_desc->data_length;
+ scmd->sdb.table.nents = 1;
+ scmd->sdb.table.orig_nents = 1;
+ sg_init_one(scmd->sdb.table.sgl, dma_buffer, cmd_desc->data_length);
+ init_completion(&adapter->driver_cmds.driver_scsiio_cmd.done);
+ if (leapraid_queuecommand(adapter->shost, scmd)) {
+ adapter->driver_cmds.driver_scsiio_cmd.status &=
+ ~LEAPRAID_CMD_PENDING;
+ complete(&adapter->driver_cmds.driver_scsiio_cmd.done);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ wait_for_completion_timeout(&adapter->driver_cmds.driver_scsiio_cmd.done,
+ cmd_desc->time_out * HZ);
+
+ if (!(adapter->driver_cmds.driver_scsiio_cmd.status &
+ LEAPRAID_CMD_DONE)) {
+ issue_reset =
+ leapraid_check_reset(
+ adapter->driver_cmds.driver_scsiio_cmd.status);
+ rc = -ENODATA;
+ goto reset;
+ }
+
+ rc = adapter->driver_cmds.internal_scmd->result;
+ if (!rc && cmd_desc->dir == DMA_FROM_DEVICE)
+ memcpy(cmd_desc->data_buffer, dma_buffer,
+ cmd_desc->data_length);
+
+reset:
+ if (issue_reset) {
+ rc = -ENODATA;
+ dev_err(&adapter->pdev->dev, "fire tgt reset: hdl=0x%04x\n",
+ cmd_desc->hdl);
+ leapraid_issue_locked_tm(adapter, cmd_desc->hdl, 0, 0, 0,
+ LEAPRAID_TM_TASKTYPE_TARGET_RESET,
+ adapter->driver_cmds.driver_scsiio_cmd.taskid,
+ LEAPRAID_TM_MSGFLAGS_LINK_RESET);
+ }
+out:
+ if (dma_buffer)
+ dma_free_coherent(&adapter->pdev->dev,
+ cmd_desc->data_length, dma_buffer, dma_addr);
+ adapter->driver_cmds.driver_scsiio_cmd.status = LEAPRAID_CMD_NOT_USED;
+ mutex_unlock(&adapter->driver_cmds.driver_scsiio_cmd.mutex);
+ return rc;
+}
+
+static int leapraid_dispatch_logsense(struct leapraid_adapter *adapter,
+ u16 hdl, u32 lun)
+{
+ struct leapraid_scsi_cmd_desc *desc;
+ int rc = 0;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->hdl = hdl;
+ desc->lun = lun;
+ desc->data_length = LEAPRAID_LOGSENSE_DATA_LENGTH;
+ desc->dir = DMA_FROM_DEVICE;
+ desc->cdb_length = LEAPRAID_LOGSENSE_CDB_LENGTH;
+ desc->cdb[0] = LOG_SENSE;
+ desc->cdb[2] = LEAPRAID_LOGSENSE_CDB_CODE;
+ desc->cdb[8] = desc->data_length;
+ desc->raid_member = false;
+ desc->time_out = LEAPRAID_LOGSENSE_TIMEOUT;
+
+ desc->data_buffer = kzalloc(desc->data_length, GFP_KERNEL);
+ if (!desc->data_buffer) {
+ kfree(desc);
+ return -ENOMEM;
+ }
+
+ rc = leapraid_dispatch_scsi_io(adapter, desc);
+ if (!rc) {
+ if (((char *)desc->data_buffer)[8] ==
+ LEAPRAID_LOGSENSE_SMART_CODE)
+ leapraid_smart_fault_detect(adapter, hdl);
+ }
+
+ kfree(desc->data_buffer);
+ kfree(desc);
+
+ return rc;
+}
+
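+/*
+ * Decide whether a device qualifies for a SMART LOG SENSE poll: it must
+ * be a SMART-capable SAS device on a known card port, not already have
+ * its fault LED lit (unless a reset just cleared it), and not be a RAID
+ * member, a volume, or currently blocked.
+ */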
+static bool leapraid_smart_poll_check(struct leapraid_adapter *adapter,
+ struct leapraid_sdev_priv *sdev_priv,
+ u32 reset_flg)
+{
+ struct leapraid_sas_dev *sas_dev = NULL;
+
+ if (!sdev_priv || !sdev_priv->starget_priv->card_port)
+ goto out;
+
+ sas_dev = leapraid_get_sas_dev_by_addr(adapter,
+ sdev_priv->starget_priv->sas_address,
+ sdev_priv->starget_priv->card_port);
+ if (!sas_dev || !sas_dev->support_smart)
+ goto out;
+
+ if (reset_flg)
+ sas_dev->led_on = false;
+ else if (sas_dev->led_on)
+ goto out;
+
+ if ((sdev_priv->starget_priv->flg & LEAPRAID_TGT_FLG_RAID_MEMBER) ||
+ (sdev_priv->starget_priv->flg & LEAPRAID_TGT_FLG_VOLUME) ||
+ sdev_priv->block)
+ goto out;
+
+ leapraid_sdev_put(sas_dev);
+ return true;
+
+out:
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+ return false;
+}
+
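+/*
+ * Periodic SMART poller: issues a LOG SENSE to every eligible device and
+ * re-arms itself.  Note that reset_cnt is function-static, so the
+ * reset-detection snapshot is shared by all adapters running this work.
+ */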
+static void leapraid_sata_smart_poll_work(struct work_struct *work)
+{
+ struct leapraid_adapter *adapter =
+ container_of(work, struct leapraid_adapter,
+ smart_poll_desc.smart_poll_work.work);
+ struct scsi_device *sdev;
+ struct leapraid_sdev_priv *sdev_priv;
+ static u32 reset_cnt;
+ bool reset_flg = false;
+
+ if (leapraid_check_adapter_is_op(adapter))
+ goto out;
+
+ reset_flg = (reset_cnt < adapter->reset_desc.reset_cnt);
+ reset_cnt = adapter->reset_desc.reset_cnt;
+
+ __shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (leapraid_smart_poll_check(adapter, sdev_priv, reset_flg))
+ leapraid_dispatch_logsense(adapter,
+ sdev_priv->starget_priv->hdl,
+ sdev_priv->lun);
+ }
+
+out:
+ if (adapter->smart_poll_desc.smart_poll_wq)
+ queue_delayed_work(adapter->smart_poll_desc.smart_poll_wq,
+ &adapter->smart_poll_desc.smart_poll_work,
+ msecs_to_jiffies(LEAPRAID_SMART_POLLING_INTERVAL));
+}
+
+void leapraid_smart_polling_start(struct leapraid_adapter *adapter)
+{
+ if (adapter->smart_poll_desc.smart_poll_wq || !smart_poll)
+ return;
+
+ INIT_DELAYED_WORK(&adapter->smart_poll_desc.smart_poll_work,
+ leapraid_sata_smart_poll_work);
+
+ snprintf(adapter->smart_poll_desc.smart_poll_wq_name,
+ sizeof(adapter->smart_poll_desc.smart_poll_wq_name),
+ "poll_%s%u_smart_poll",
+ LEAPRAID_DRIVER_NAME,
+ adapter->adapter_attr.id);
+ adapter->smart_poll_desc.smart_poll_wq =
+ create_singlethread_workqueue(
+ adapter->smart_poll_desc.smart_poll_wq_name);
+ if (!adapter->smart_poll_desc.smart_poll_wq)
+ return;
+ queue_delayed_work(adapter->smart_poll_desc.smart_poll_wq,
+ &adapter->smart_poll_desc.smart_poll_work,
+ msecs_to_jiffies(LEAPRAID_SMART_POLLING_INTERVAL));
+}
+
+void leapraid_smart_polling_stop(struct leapraid_adapter *adapter)
+{
+ struct workqueue_struct *wq;
+
+ if (!adapter->smart_poll_desc.smart_poll_wq)
+ return;
+
+ wq = adapter->smart_poll_desc.smart_poll_wq;
+ adapter->smart_poll_desc.smart_poll_wq = NULL;
+
+ if (wq) {
+ if (!cancel_delayed_work_sync(&adapter->smart_poll_desc.smart_poll_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+ }
+}
+
+static void leapraid_fw_work(struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt);
+
+static void leapraid_fw_evt_free(struct kref *r)
+{
+ struct leapraid_fw_evt_work *fw_evt;
+
+ fw_evt = container_of(r, struct leapraid_fw_evt_work, refcnt);
+
+ kfree(fw_evt->evt_data);
+ kfree(fw_evt);
+}
+
+static void leapraid_fw_evt_get(struct leapraid_fw_evt_work *fw_evt)
+{
+ kref_get(&fw_evt->refcnt);
+}
+
+static void leapraid_fw_evt_put(struct leapraid_fw_evt_work *fw_work)
+{
+ kref_put(&fw_work->refcnt, leapraid_fw_evt_free);
+}
+
+static struct leapraid_fw_evt_work *leapraid_alloc_fw_evt_work(void)
+{
+ struct leapraid_fw_evt_work *fw_evt =
+ kzalloc(sizeof(*fw_evt), GFP_ATOMIC);
+ if (!fw_evt)
+ return NULL;
+
+ kref_init(&fw_evt->refcnt);
+ return fw_evt;
+}
+
+static void leapraid_run_fw_evt_work(struct work_struct *work)
+{
+ struct leapraid_fw_evt_work *fw_evt =
+ container_of(work, struct leapraid_fw_evt_work, work);
+
+ leapraid_fw_work(fw_evt->adapter, fw_evt);
+}
+
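+/*
+ * Queue a firmware event: one reference is taken for the event list and
+ * a second for the queued work, so the event stays alive until both the
+ * list removal and the work completion drop their references.  Events
+ * are dropped when the event thread has not been created.
+ */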
+static void leapraid_fw_evt_add(struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt)
+{
+ unsigned long flags;
+
+ if (!adapter->fw_evt_s.fw_evt_thread)
+ return;
+
+ spin_lock_irqsave(&adapter->fw_evt_s.fw_evt_lock, flags);
+ leapraid_fw_evt_get(fw_evt);
+ INIT_LIST_HEAD(&fw_evt->list);
+ list_add_tail(&fw_evt->list, &adapter->fw_evt_s.fw_evt_list);
+ INIT_WORK(&fw_evt->work, leapraid_run_fw_evt_work);
+ leapraid_fw_evt_get(fw_evt);
+ queue_work(adapter->fw_evt_s.fw_evt_thread, &fw_evt->work);
+ spin_unlock_irqrestore(&adapter->fw_evt_s.fw_evt_lock, flags);
+}
+
+static void leapraid_del_fw_evt_from_list(struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->fw_evt_s.fw_evt_lock, flags);
+ if (!list_empty(&fw_evt->list)) {
+ list_del_init(&fw_evt->list);
+ leapraid_fw_evt_put(fw_evt);
+ }
+ spin_unlock_irqrestore(&adapter->fw_evt_s.fw_evt_lock, flags);
+}
+
+static struct leapraid_fw_evt_work *leapraid_next_fw_evt(
+ struct leapraid_adapter *adapter)
+{
+ struct leapraid_fw_evt_work *fw_evt = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->fw_evt_s.fw_evt_lock, flags);
+ if (!list_empty(&adapter->fw_evt_s.fw_evt_list)) {
+ fw_evt = list_first_entry(&adapter->fw_evt_s.fw_evt_list,
+ struct leapraid_fw_evt_work, list);
+ list_del_init(&fw_evt->list);
+ leapraid_fw_evt_put(fw_evt);
+ }
+ spin_unlock_irqrestore(&adapter->fw_evt_s.fw_evt_lock, flags);
+ return fw_evt;
+}
+
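+/*
+ * Drain the firmware event machinery: flag cleanup in progress, let the
+ * in-flight event be ignored during host recovery, then cancel every
+ * queued event and drop its work reference.  The in-flight event is only
+ * waited on when it is a dead-device removal.
+ */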
+void leapraid_clean_active_fw_evt(struct leapraid_adapter *adapter)
+{
+ struct leapraid_fw_evt_work *fw_evt;
+ bool rc = false;
+
+ if ((list_empty(&adapter->fw_evt_s.fw_evt_list) &&
+ !adapter->fw_evt_s.cur_evt) || !adapter->fw_evt_s.fw_evt_thread)
+ return;
+
+ adapter->fw_evt_s.fw_evt_cleanup = 1;
+ if (adapter->access_ctrl.shost_recovering &&
+ adapter->fw_evt_s.cur_evt)
+ adapter->fw_evt_s.cur_evt->ignore = 1;
+
+ while ((fw_evt = leapraid_next_fw_evt(adapter)) ||
+ (fw_evt = adapter->fw_evt_s.cur_evt)) {
+ if (fw_evt == adapter->fw_evt_s.cur_evt &&
+ adapter->fw_evt_s.cur_evt->evt_type !=
+ LEAPRAID_EVT_REMOVE_DEAD_DEV) {
+ adapter->fw_evt_s.cur_evt = NULL;
+ continue;
+ }
+
+ rc = cancel_work_sync(&fw_evt->work);
+
+ if (rc)
+ leapraid_fw_evt_put(fw_evt);
+ }
+ adapter->fw_evt_s.fw_evt_cleanup = 0;
+}
+
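+/*
+ * Unblock a device for IO.  If the direct unblock is rejected with
+ * -EINVAL, bounce the device through a block/unblock cycle so the SCSI
+ * midlayer state machine ends up in SDEV_RUNNING.
+ */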
+static void leapraid_internal_dev_ublk(struct scsi_device *sdev,
+ struct leapraid_sdev_priv *sdev_priv)
+{
+ int rc = 0;
+
+ sdev_printk(KERN_WARNING, sdev,
+		    "hdl 0x%04x: internal ublkg dev\n",
+ sdev_priv->starget_priv->hdl);
+ sdev_priv->block = false;
+ rc = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
+ if (rc == -EINVAL) {
+ sdev_printk(KERN_WARNING, sdev,
+ "hdl 0x%04x: unblkg failed, rc=%d\n",
+ sdev_priv->starget_priv->hdl, rc);
+ sdev_priv->block = true;
+ rc = scsi_internal_device_block_nowait(sdev);
+ if (rc)
+ sdev_printk(KERN_WARNING, sdev,
+ "hdl 0x%04x: blkg failed: earlier unblkg err, rc=%d\n",
+ sdev_priv->starget_priv->hdl, rc);
+
+ sdev_priv->block = false;
+ rc = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
+ if (rc)
+ sdev_printk(KERN_WARNING, sdev,
+ "hdl 0x%04x: ublkg failed again, rc=%d\n",
+ sdev_priv->starget_priv->hdl, rc);
+ }
+}
+
+static void leapraid_internal_ublk_io_dev_to_running(struct scsi_device *sdev)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+
+ sdev_priv = sdev->hostdata;
+ sdev_priv->block = false;
+ scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
+ sdev_printk(KERN_WARNING, sdev, "%s: ublk hdl 0x%04x\n",
+ __func__, sdev_priv->starget_priv->hdl);
+}
+
+static void leapraid_ublk_io_dev_to_running(
+ struct leapraid_adapter *adapter, u64 sas_addr,
+ struct leapraid_card_port *card_port)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv)
+ continue;
+
+ if (sdev_priv->starget_priv->sas_address != sas_addr ||
+ sdev_priv->starget_priv->card_port != card_port)
+ continue;
+
+ if (sdev_priv->block)
+ leapraid_internal_ublk_io_dev_to_running(sdev);
+ }
+}
+
+static void leapraid_ublk_io_dev(struct leapraid_adapter *adapter,
+ u64 sas_addr,
+ struct leapraid_card_port *card_port)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv || !sdev_priv->starget_priv)
+ continue;
+
+ if (sdev_priv->starget_priv->sas_address != sas_addr)
+ continue;
+
+ if (sdev_priv->starget_priv->card_port != card_port)
+ continue;
+
+ if (sdev_priv->block)
+ leapraid_internal_dev_ublk(sdev, sdev_priv);
+
+ scsi_device_set_state(sdev, SDEV_OFFLINE);
+ }
+}
+
+static void leapraid_ublk_io_all_dev(struct leapraid_adapter *adapter)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct leapraid_starget_priv *stgt_priv;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+
+ if (!sdev_priv)
+ continue;
+
+ stgt_priv = sdev_priv->starget_priv;
+ if (!stgt_priv || stgt_priv->deleted)
+ continue;
+
+ if (!sdev_priv->block)
+ continue;
+
+		sdev_printk(KERN_WARNING, sdev, "hdl 0x%04x: ublkg...\n",
+			    sdev_priv->starget_priv->hdl);
+		leapraid_internal_dev_ublk(sdev, sdev_priv);
+ }
+}
+
+static void __maybe_unused leapraid_internal_dev_blk(
+ struct scsi_device *sdev,
+ struct leapraid_sdev_priv *sdev_priv)
+{
+ int rc = 0;
+
+ sdev_printk(KERN_INFO, sdev, "internal blkg hdl 0x%04x\n",
+ sdev_priv->starget_priv->hdl);
+ sdev_priv->block = true;
+ rc = scsi_internal_device_block_nowait(sdev);
+ if (rc == -EINVAL)
+ sdev_printk(KERN_WARNING, sdev,
+ "hdl 0x%04x: blkg failed, rc=%d\n",
+			sdev_priv->starget_priv->hdl, rc);
+}
+
+static void __maybe_unused leapraid_blkio_dev(struct leapraid_adapter *adapter,
+ u16 hdl)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct leapraid_sas_dev *sas_dev;
+ struct scsi_device *sdev;
+
+ sas_dev = leapraid_get_sas_dev_by_hdl(adapter, hdl);
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv)
+ continue;
+
+ if (sdev_priv->starget_priv->hdl != hdl)
+ continue;
+
+ if (sdev_priv->block)
+ continue;
+
+ if (sas_dev && sas_dev->pend_sas_rphy_add)
+ continue;
+
+ if (sdev_priv->sep) {
+ sdev_printk(KERN_INFO, sdev,
+ "sep hdl 0x%04x skip blkg\n",
+ sdev_priv->starget_priv->hdl);
+ continue;
+ }
+
+ leapraid_internal_dev_blk(sdev, sdev_priv);
+ }
+
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+}
+
+static void leapraid_imm_blkio_to_end_dev(struct leapraid_adapter *adapter,
+ struct leapraid_sas_port *sas_port)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct leapraid_sas_dev *sas_dev;
+ struct scsi_device *sdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(
+ adapter,
+ sas_port->remote_identify.sas_address,
+ sas_port->card_port);
+
+ if (sas_dev) {
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv)
+ continue;
+
+ if (sdev_priv->starget_priv->hdl != sas_dev->hdl)
+ continue;
+
+ if (sdev_priv->block)
+ continue;
+
+			if (sas_dev->pend_sas_rphy_add)
+ continue;
+
+ if (sdev_priv->sep) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s skip dev blk for sep hdl 0x%04x\n",
+ __func__,
+ sdev_priv->starget_priv->hdl);
+ continue;
+ }
+
+ leapraid_internal_dev_blk(sdev, sdev_priv);
+ }
+
+ leapraid_sdev_put(sas_dev);
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+}
+
+static void leapraid_imm_blkio_set_end_dev_blk_hdls(
+ struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node_exp)
+{
+ struct leapraid_sas_port *sas_port;
+
+ list_for_each_entry(sas_port,
+ &topo_node_exp->sas_port_list, port_list) {
+ if (sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE) {
+ leapraid_imm_blkio_to_end_dev(adapter, sas_port);
+ }
+ }
+}
+
+static void leapraid_imm_blkio_to_kids_attchd_to_ex(
+ struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node_exp);
+
+static void leapraid_imm_blkio_to_sib_exp(
+ struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node_exp)
+{
+ struct leapraid_topo_node *topo_node_exp_sib;
+ struct leapraid_sas_port *sas_port;
+
+ list_for_each_entry(sas_port,
+ &topo_node_exp->sas_port_list, port_list) {
+ if (sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE) {
+ topo_node_exp_sib =
+ leapraid_exp_find_by_sas_address(
+ adapter,
+ sas_port->remote_identify.sas_address,
+ sas_port->card_port);
+ leapraid_imm_blkio_to_kids_attchd_to_ex(
+ adapter,
+ topo_node_exp_sib);
+ }
+ }
+}
+
+static void leapraid_imm_blkio_to_kids_attchd_to_ex(
+ struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node_exp)
+{
+ if (!topo_node_exp)
+ return;
+
+ leapraid_imm_blkio_set_end_dev_blk_hdls(adapter, topo_node_exp);
+
+ leapraid_imm_blkio_to_sib_exp(adapter, topo_node_exp);
+}
+
+static void leapraid_report_sdev_directly(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev)
+{
+ struct leapraid_sas_port *sas_port;
+
+ sas_port = leapraid_transport_port_add(adapter,
+ sas_dev->hdl,
+ sas_dev->parent_sas_addr,
+ sas_dev->card_port);
+ if (!sas_port) {
+ leapraid_sas_dev_remove(adapter, sas_dev);
+ return;
+ }
+
+ if (!sas_dev->starget) {
+ if (!adapter->scan_dev_desc.driver_loading) {
+ leapraid_transport_port_remove(adapter,
+ sas_dev->sas_addr,
+ sas_dev->parent_sas_addr,
+ sas_dev->card_port);
+ leapraid_sas_dev_remove(adapter, sas_dev);
+ }
+ return;
+ }
+
+ clear_bit(sas_dev->hdl,
+ (unsigned long *)adapter->dev_topo.pending_dev_add);
+}
+
+static struct leapraid_sas_dev *leapraid_init_sas_dev(
+ struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev_p0 *sas_dev_pg0,
+ struct leapraid_card_port *card_port, u16 hdl,
+ u64 parent_sas_addr, u64 sas_addr, u32 dev_info)
+{
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_enc_node *enc_dev;
+
+ sas_dev = kzalloc(sizeof(*sas_dev), GFP_KERNEL);
+ if (!sas_dev)
+ return NULL;
+
+ kref_init(&sas_dev->refcnt);
+ sas_dev->hdl = hdl;
+ sas_dev->dev_info = dev_info;
+ sas_dev->sas_addr = sas_addr;
+ sas_dev->card_port = card_port;
+ sas_dev->parent_sas_addr = parent_sas_addr;
+ sas_dev->phy = sas_dev_pg0->phy_num;
+ sas_dev->enc_hdl = le16_to_cpu(sas_dev_pg0->enc_hdl);
+ sas_dev->dev_name = le64_to_cpu(sas_dev_pg0->dev_name);
+ sas_dev->port_type = sas_dev_pg0->max_port_connections;
+ sas_dev->slot = sas_dev->enc_hdl ? le16_to_cpu(sas_dev_pg0->slot) : 0;
+ sas_dev->support_smart = (le16_to_cpu(sas_dev_pg0->flg) &
+ LEAPRAID_SAS_DEV_P0_FLG_SATA_SMART);
+ if (le16_to_cpu(sas_dev_pg0->flg) &
+ LEAPRAID_SAS_DEV_P0_FLG_ENC_LEVEL_VALID) {
+ sas_dev->enc_level = sas_dev_pg0->enc_level;
+ memcpy(sas_dev->connector_name, sas_dev_pg0->connector_name, 4);
+ sas_dev->connector_name[4] = '\0';
+ } else {
+ sas_dev->enc_level = 0;
+ sas_dev->connector_name[0] = '\0';
+ }
+ if (le16_to_cpu(sas_dev_pg0->enc_hdl)) {
+ enc_dev = leapraid_enc_find_by_hdl(adapter,
+ le16_to_cpu(sas_dev_pg0->enc_hdl));
+ sas_dev->enc_lid = enc_dev ?
+ le64_to_cpu(enc_dev->pg0.enc_lid) : 0;
+ }
+ dev_info(&adapter->pdev->dev,
+		 "add dev: hdl=0x%04x, sas addr=0x%016llx, port_type=0x%x\n",
+ hdl, sas_dev->sas_addr, sas_dev->port_type);
+
+ return sas_dev;
+}
+
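+/*
+ * Discover one end device by handle: read its SAS_DEVICE page 0, resolve
+ * its card port and parent SAS address, and either park it on the init
+ * list while the initial device scan is running, or add it to the live
+ * list and report it to the transport layer immediately.
+ */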
+static void leapraid_add_dev(struct leapraid_adapter *adapter, u16 hdl)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_dev_p0 sas_dev_pg0;
+ struct leapraid_card_port *card_port;
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ u64 parent_sas_addr;
+ u32 dev_info;
+ u64 sas_addr;
+ u8 port_id;
+
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = hdl;
+ if ((leapraid_op_config_page(adapter, &sas_dev_pg0,
+ cfgp1, cfgp2, GET_SAS_DEVICE_PG0)))
+ return;
+
+ dev_info = le32_to_cpu(sas_dev_pg0.dev_info);
+ if (!(leapraid_is_end_dev(dev_info)))
+ return;
+
+ set_bit(hdl, (unsigned long *)adapter->dev_topo.pending_dev_add);
+ sas_addr = le64_to_cpu(sas_dev_pg0.sas_address);
+ if (!(le16_to_cpu(sas_dev_pg0.flg) &
+ LEAPRAID_SAS_DEV_P0_FLG_DEV_PRESENT))
+ return;
+
+ port_id = sas_dev_pg0.physical_port;
+ card_port = leapraid_get_port_by_id(adapter, port_id, false);
+ if (!card_port)
+ return;
+
+ sas_dev = leapraid_get_sas_dev_by_addr(adapter, sas_addr, card_port);
+ if (sas_dev) {
+ clear_bit(hdl,
+ (unsigned long *)adapter->dev_topo.pending_dev_add);
+ leapraid_sdev_put(sas_dev);
+ return;
+ }
+
+ if (leapraid_get_sas_address(adapter,
+ le16_to_cpu(sas_dev_pg0.parent_dev_hdl),
+ &parent_sas_addr))
+ return;
+
+ sas_dev = leapraid_init_sas_dev(adapter, &sas_dev_pg0, card_port,
+ hdl, parent_sas_addr, sas_addr,
+ dev_info);
+ if (!sas_dev)
+ return;
+ if (adapter->scan_dev_desc.wait_scan_dev_done) {
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ leapraid_sdev_get(sas_dev);
+ list_add_tail(&sas_dev->list,
+ &adapter->dev_topo.sas_dev_init_list);
+ leapraid_check_boot_dev(adapter, sas_dev, 0);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ } else {
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ leapraid_sdev_get(sas_dev);
+ list_add_tail(&sas_dev->list, &adapter->dev_topo.sas_dev_list);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ leapraid_report_sdev_directly(adapter, sas_dev);
+ }
+}
+
+static void leapraid_remove_device(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev)
+{
+ struct leapraid_starget_priv *starget_priv;
+
+ if (sas_dev->led_on) {
+ leapraid_set_led(adapter, sas_dev, false);
+ sas_dev->led_on = false;
+ }
+
+ if (sas_dev->starget && sas_dev->starget->hostdata) {
+ starget_priv = sas_dev->starget->hostdata;
+ starget_priv->deleted = true;
+ leapraid_ublk_io_dev(adapter,
+ sas_dev->sas_addr, sas_dev->card_port);
+ starget_priv->hdl = LEAPRAID_INVALID_DEV_HANDLE;
+ }
+
+ leapraid_transport_port_remove(adapter,
+ sas_dev->sas_addr,
+ sas_dev->parent_sas_addr,
+ sas_dev->card_port);
+
+ dev_info(&adapter->pdev->dev,
+ "remove dev: hdl=0x%04x, sas addr=0x%016llx\n",
+ sas_dev->hdl, (unsigned long long)sas_dev->sas_addr);
+}
+
+static struct leapraid_vphy *leapraid_alloc_vphy(struct leapraid_adapter *adapter,
+ u8 port_id, u8 phy_num)
+{
+ struct leapraid_card_port *port;
+ struct leapraid_vphy *vphy;
+
+ port = leapraid_get_port_by_id(adapter, port_id, false);
+ if (!port)
+ return NULL;
+
+ vphy = leapraid_get_vphy_by_phy(port, phy_num);
+ if (vphy)
+ return vphy;
+
+ vphy = kzalloc(sizeof(*vphy), GFP_KERNEL);
+ if (!vphy)
+ return NULL;
+
+ if (!port->vphys_mask)
+ INIT_LIST_HEAD(&port->vphys_list);
+
+ port->vphys_mask |= BIT(phy_num);
+ vphy->phy_mask |= BIT(phy_num);
+ list_add_tail(&vphy->list, &port->vphys_list);
+ return vphy;
+}
+
+static int leapraid_add_port_to_card_port_list(struct leapraid_adapter *adapter,
+ u8 port_id, bool refresh)
+{
+ struct leapraid_card_port *card_port;
+
+ card_port = leapraid_get_port_by_id(adapter, port_id, false);
+ if (card_port)
+ return 0;
+
+ card_port = kzalloc(sizeof(*card_port), GFP_KERNEL);
+ if (!card_port)
+ return -ENOMEM;
+
+ card_port->port_id = port_id;
+ dev_info(&adapter->pdev->dev,
+ "port: %d is added to card_port list\n",
+ card_port->port_id);
+
+ if (refresh)
+ if (adapter->access_ctrl.shost_recovering)
+ card_port->flg = LEAPRAID_CARD_PORT_FLG_NEW;
+ list_add_tail(&card_port->list, &adapter->dev_topo.card_port_list);
+ return 0;
+}
+
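+/*
+ * Populate (@refresh == false) or re-validate (@refresh == true) the
+ * controller's own phys from SAS IO-unit page 0: allocate the phy array
+ * on first add, create or update the transport phys and ports, and cache
+ * the controller handle, SAS address and enclosure id.
+ */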
+static void leapraid_sas_host_add(struct leapraid_adapter *adapter,
+ bool refresh)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_phy_p0 phy_pg0;
+ struct leapraid_sas_dev_p0 sas_dev_pg0;
+ struct leapraid_enc_p0 enc_pg0;
+ struct leapraid_sas_io_unit_p0 *sas_iou_pg0;
+ u16 sas_iou_pg0_sz;
+ u16 attached_hdl;
+ u8 phys_num;
+ u8 port_id;
+ u8 link_rate;
+ int i;
+
+ if (!refresh) {
+ if (leapraid_get_adapter_phys(adapter, &phys_num) || !phys_num)
+ return;
+
+ adapter->dev_topo.card.card_phy =
+ kcalloc(phys_num,
+ sizeof(struct leapraid_card_phy), GFP_KERNEL);
+ if (!adapter->dev_topo.card.card_phy)
+ return;
+
+ adapter->dev_topo.card.phys_num = phys_num;
+ }
+
+ sas_iou_pg0_sz = offsetof(struct leapraid_sas_io_unit_p0, phy_info) +
+ (adapter->dev_topo.card.phys_num *
+ sizeof(struct leapraid_sas_io_unit0_phy_info));
+ sas_iou_pg0 = kzalloc(sas_iou_pg0_sz, GFP_KERNEL);
+ if (!sas_iou_pg0)
+ goto out;
+
+ if (leapraid_get_sas_io_unit_page0(adapter,
+ sas_iou_pg0,
+ sas_iou_pg0_sz))
+ goto out;
+
+ adapter->dev_topo.card.parent_dev = &adapter->shost->shost_gendev;
+ adapter->dev_topo.card.hdl =
+ le16_to_cpu(sas_iou_pg0->phy_info[0].controller_dev_hdl);
+ for (i = 0; i < adapter->dev_topo.card.phys_num; i++) {
+ if (!refresh) { /* add */
+ cfgp1.phy_number = i;
+ if (leapraid_op_config_page(adapter, &phy_pg0, cfgp1,
+ cfgp2, GET_PHY_PG0))
+ goto out;
+
+ port_id = sas_iou_pg0->phy_info[i].port;
+ if (leapraid_add_port_to_card_port_list(adapter,
+ port_id,
+ false))
+ goto out;
+
+ if ((le32_to_cpu(phy_pg0.phy_info) &
+ LEAPRAID_SAS_PHYINFO_VPHY) &&
+ (phy_pg0.neg_link_rate >> 4) >=
+ LEAPRAID_SAS_NEG_LINK_RATE_1_5) {
+ if (!leapraid_alloc_vphy(adapter, port_id, i))
+ goto out;
+ adapter->dev_topo.card.card_phy[i].vphy = true;
+ }
+
+ adapter->dev_topo.card.card_phy[i].hdl =
+ adapter->dev_topo.card.hdl;
+ adapter->dev_topo.card.card_phy[i].phy_id = i;
+ adapter->dev_topo.card.card_phy[i].card_port =
+ leapraid_get_port_by_id(adapter,
+ port_id,
+ false);
+ leapraid_transport_add_card_phy(
+ adapter,
+ &adapter->dev_topo.card.card_phy[i],
+ &phy_pg0, adapter->dev_topo.card.parent_dev);
+ } else { /* refresh */
+ link_rate = sas_iou_pg0->phy_info[i].neg_link_rate >> 4;
+ port_id = sas_iou_pg0->phy_info[i].port;
+ if (leapraid_add_port_to_card_port_list(adapter,
+ port_id,
+ true))
+ goto out;
+
+ if (le32_to_cpu(sas_iou_pg0->phy_info[i]
+ .controller_phy_dev_info) &
+ LEAPRAID_DEVTYP_SEP &&
+ link_rate >= LEAPRAID_SAS_NEG_LINK_RATE_1_5) {
+ cfgp1.phy_number = i;
+ if ((leapraid_op_config_page(adapter, &phy_pg0,
+ cfgp1, cfgp2,
+ GET_PHY_PG0)))
+ continue;
+
+ if ((le32_to_cpu(phy_pg0.phy_info) &
+ LEAPRAID_SAS_PHYINFO_VPHY)) {
+ if (!leapraid_alloc_vphy(adapter,
+ port_id,
+ i))
+ goto out;
+ adapter->dev_topo.card.card_phy[i].vphy = true;
+ }
+ }
+
+ adapter->dev_topo.card.card_phy[i].hdl =
+ adapter->dev_topo.card.hdl;
+ attached_hdl =
+ le16_to_cpu(sas_iou_pg0->phy_info[i].attached_dev_hdl);
+ if (attached_hdl && link_rate < LEAPRAID_SAS_NEG_LINK_RATE_1_5)
+ link_rate = LEAPRAID_SAS_NEG_LINK_RATE_1_5;
+
+ adapter->dev_topo.card.card_phy[i].card_port =
+ leapraid_get_port_by_id(adapter,
+ port_id,
+ false);
+ if (!adapter->dev_topo.card.card_phy[i].phy) {
+ cfgp1.phy_number = i;
+ if ((leapraid_op_config_page(adapter, &phy_pg0,
+ cfgp1, cfgp2,
+ GET_PHY_PG0)))
+ continue;
+
+ adapter->dev_topo.card.card_phy[i].phy_id = i;
+ leapraid_transport_add_card_phy(adapter,
+ &adapter->dev_topo.card.card_phy[i],
+ &phy_pg0,
+ adapter->dev_topo.card.parent_dev);
+ continue;
+ }
+
+ leapraid_transport_update_links(adapter,
+ adapter->dev_topo.card.sas_address,
+ attached_hdl, i, link_rate,
+ adapter->dev_topo.card.card_phy[i].card_port);
+ }
+ }
+
+ if (!refresh) {
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = adapter->dev_topo.card.hdl;
+ if ((leapraid_op_config_page(adapter, &sas_dev_pg0, cfgp1,
+ cfgp2, GET_SAS_DEVICE_PG0)))
+ goto out;
+
+ adapter->dev_topo.card.enc_hdl =
+ le16_to_cpu(sas_dev_pg0.enc_hdl);
+ adapter->dev_topo.card.sas_address =
+ le64_to_cpu(sas_dev_pg0.sas_address);
+ dev_info(&adapter->pdev->dev,
+ "add host: devhdl=0x%04x, sas addr=0x%016llx, phynums=%d\n",
+ adapter->dev_topo.card.hdl,
+ (unsigned long long)adapter->dev_topo.card.sas_address,
+ adapter->dev_topo.card.phys_num);
+
+ if (adapter->dev_topo.card.enc_hdl) {
+ cfgp1.form = LEAPRAID_SAS_ENC_CFG_PGAD_HDL;
+ cfgp2.handle = adapter->dev_topo.card.enc_hdl;
+ if (!(leapraid_op_config_page(adapter, &enc_pg0,
+ cfgp1, cfgp2,
+ GET_SAS_ENCLOSURE_PG0)))
+ adapter->dev_topo.card.enc_lid =
+ le64_to_cpu(enc_pg0.enc_lid);
+ }
+ }
+out:
+ kfree(sas_iou_pg0);
+}
+
+static int leapraid_internal_exp_add(struct leapraid_adapter *adapter,
+ struct leapraid_exp_p0 *exp_pg0,
+ union cfg_param_1 *cfgp1,
+ union cfg_param_2 *cfgp2,
+ u16 hdl)
+{
+ struct leapraid_topo_node *topo_node_exp;
+ struct leapraid_sas_port *sas_port = NULL;
+ struct leapraid_enc_node *enc_dev;
+ struct leapraid_exp_p1 exp_pg1;
+ int rc = 0;
+ unsigned long flags;
+ u8 port_id;
+ u16 parent_handle;
+ u64 sas_addr_parent = 0;
+ int i;
+
+ port_id = exp_pg0->physical_port;
+ parent_handle = le16_to_cpu(exp_pg0->parent_dev_hdl);
+
+ if (leapraid_get_sas_address(adapter, parent_handle, &sas_addr_parent))
+ return -1;
+
+ topo_node_exp = kzalloc(sizeof(*topo_node_exp), GFP_KERNEL);
+ if (!topo_node_exp)
+ return -1;
+
+ topo_node_exp->hdl = hdl;
+ topo_node_exp->phys_num = exp_pg0->phy_num;
+ topo_node_exp->sas_address_parent = sas_addr_parent;
+ topo_node_exp->sas_address = le64_to_cpu(exp_pg0->sas_address);
+ topo_node_exp->card_port =
+ leapraid_get_port_by_id(adapter, port_id, false);
+ if (!topo_node_exp->card_port) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ dev_info(&adapter->pdev->dev,
+ "add exp: sas addr=0x%016llx, hdl=0x%04x, phdl=0x%04x, phys=%d\n",
+ (unsigned long long)topo_node_exp->sas_address,
+ hdl, parent_handle,
+ topo_node_exp->phys_num);
+ if (!topo_node_exp->phys_num) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ topo_node_exp->card_phy =
+ kcalloc(topo_node_exp->phys_num,
+ sizeof(struct leapraid_card_phy), GFP_KERNEL);
+ if (!topo_node_exp->card_phy) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ INIT_LIST_HEAD(&topo_node_exp->sas_port_list);
+ sas_port = leapraid_transport_port_add(adapter, hdl, sas_addr_parent,
+ topo_node_exp->card_port);
+ if (!sas_port) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ topo_node_exp->parent_dev = &sas_port->rphy->dev;
+ topo_node_exp->rphy = sas_port->rphy;
+ for (i = 0; i < topo_node_exp->phys_num; i++) {
+ cfgp1->phy_number = i;
+ cfgp2->handle = hdl;
+ if ((leapraid_op_config_page(adapter, &exp_pg1, *cfgp1, *cfgp2,
+ GET_SAS_EXPANDER_PG1))) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ topo_node_exp->card_phy[i].hdl = hdl;
+ topo_node_exp->card_phy[i].phy_id = i;
+ topo_node_exp->card_phy[i].card_port =
+ leapraid_get_port_by_id(adapter, port_id, false);
+ if ((leapraid_transport_add_exp_phy(adapter,
+ &topo_node_exp->card_phy[i],
+ &exp_pg1,
+ topo_node_exp->parent_dev))) {
+ rc = -1;
+ goto out_fail;
+ }
+ }
+
+ if (topo_node_exp->enc_hdl) {
+ enc_dev = leapraid_enc_find_by_hdl(adapter,
+ topo_node_exp->enc_hdl);
+ if (enc_dev)
+ topo_node_exp->enc_lid =
+ le64_to_cpu(enc_dev->pg0.enc_lid);
+ }
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ list_add_tail(&topo_node_exp->list, &adapter->dev_topo.exp_list);
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+ return 0;
+
+out_fail:
+ if (sas_port)
+ leapraid_transport_port_remove(adapter,
+ topo_node_exp->sas_address,
+ sas_addr_parent,
+ topo_node_exp->card_port);
+ kfree(topo_node_exp);
+ return rc;
+}
+
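+/*
+ * Add an expander by handle.  If its parent is another expander that is
+ * not known yet, recurse on the parent first so the topology is built
+ * top-down; an already known expander is a no-op.
+ */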
+static int leapraid_exp_add(struct leapraid_adapter *adapter, u16 hdl)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_topo_node *topo_node_exp;
+ struct leapraid_exp_p0 exp_pg0;
+ u16 parent_handle;
+ u64 sas_addr, sas_addr_parent = 0;
+ unsigned long flags;
+ u8 port_id;
+ int rc = 0;
+
+ if (!hdl)
+ return -EPERM;
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering)
+ return -EPERM;
+
+ cfgp1.form = LEAPRAID_SAS_EXP_CFD_PGAD_HDL;
+ cfgp2.handle = hdl;
+ if ((leapraid_op_config_page(adapter, &exp_pg0, cfgp1, cfgp2,
+ GET_SAS_EXPANDER_PG0)))
+ return -EPERM;
+
+ parent_handle = le16_to_cpu(exp_pg0.parent_dev_hdl);
+ if (leapraid_get_sas_address(adapter, parent_handle, &sas_addr_parent))
+ return -EPERM;
+
+ port_id = exp_pg0.physical_port;
+ if (sas_addr_parent != adapter->dev_topo.card.sas_address) {
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ topo_node_exp =
+ leapraid_exp_find_by_sas_address(adapter,
+ sas_addr_parent,
+ leapraid_get_port_by_id(adapter, port_id, false));
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+ if (!topo_node_exp) {
+ rc = leapraid_exp_add(adapter, parent_handle);
+ if (rc != 0)
+ return rc;
+ }
+ }
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ sas_addr = le64_to_cpu(exp_pg0.sas_address);
+ topo_node_exp =
+ leapraid_exp_find_by_sas_address(adapter, sas_addr,
+ leapraid_get_port_by_id(adapter, port_id, false));
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ if (topo_node_exp)
+ return 0;
+
+ return leapraid_internal_exp_add(adapter, &exp_pg0, &cfgp1,
+ &cfgp2, hdl);
+}
+
+static void leapraid_exp_node_rm(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node_exp)
+{
+ struct leapraid_sas_port *sas_port, *sas_port_next;
+ unsigned long flags;
+ int port_id;
+
+ list_for_each_entry_safe(sas_port, sas_port_next,
+ &topo_node_exp->sas_port_list,
+ port_list) {
+ if (adapter->access_ctrl.shost_recovering)
+ return;
+
+ switch (sas_port->remote_identify.device_type) {
+ case SAS_END_DEVICE:
+ leapraid_sas_dev_remove_by_sas_address(
+ adapter,
+ sas_port->remote_identify.sas_address,
+ sas_port->card_port);
+ break;
+ case SAS_EDGE_EXPANDER_DEVICE:
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ leapraid_exp_rm(
+ adapter,
+ sas_port->remote_identify.sas_address,
+ sas_port->card_port);
+ break;
+ default:
+ break;
+ }
+ }
+
+ port_id = topo_node_exp->card_port->port_id;
+ leapraid_transport_port_remove(adapter, topo_node_exp->sas_address,
+ topo_node_exp->sas_address_parent,
+ topo_node_exp->card_port);
+ dev_info(&adapter->pdev->dev,
+ "removing exp: port=%d, sas addr=0x%016llx, hdl=0x%04x\n",
+ port_id, (unsigned long long)topo_node_exp->sas_address,
+ topo_node_exp->hdl);
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ list_del(&topo_node_exp->list);
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+ kfree(topo_node_exp->card_phy);
+ kfree(topo_node_exp);
+}
+
+void leapraid_exp_rm(struct leapraid_adapter *adapter, u64 sas_addr,
+ struct leapraid_card_port *port)
+{
+ struct leapraid_topo_node *topo_node_exp;
+ unsigned long flags;
+
+ if (adapter->access_ctrl.shost_recovering)
+ return;
+
+ if (!port)
+ return;
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ topo_node_exp = leapraid_exp_find_by_sas_address(adapter,
+ sas_addr,
+ port);
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ if (topo_node_exp)
+ leapraid_exp_node_rm(adapter, topo_node_exp);
+}
+
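+/*
+ * Re-validate an end device after a phy change: refresh its handle and
+ * enclosure data if the firmware moved the handle, and unblock its SCSI
+ * devices when the device is still present.
+ */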
+static void leapraid_check_device(struct leapraid_adapter *adapter,
+ u64 parent_sas_address, u16 handle,
+ u8 phy_number, u8 link_rate)
+{
+ struct leapraid_sas_dev_p0 sas_device_pg0;
+ struct leapraid_sas_dev *sas_dev = NULL;
+ struct leapraid_enc_node *enclosure_dev = NULL;
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ unsigned long flags;
+ u64 sas_address;
+ struct scsi_target *starget;
+ struct leapraid_starget_priv *sas_target_priv_data;
+ u32 device_info;
+ struct leapraid_card_port *port;
+
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = handle;
+ if ((leapraid_op_config_page(adapter, &sas_device_pg0, cfgp1, cfgp2,
+ GET_SAS_DEVICE_PG0)))
+ return;
+
+ if (phy_number != sas_device_pg0.phy_num)
+ return;
+
+ device_info = le32_to_cpu(sas_device_pg0.dev_info);
+ if (!(leapraid_is_end_dev(device_info)))
+ return;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_address = le64_to_cpu(sas_device_pg0.sas_address);
+ port = leapraid_get_port_by_id(adapter, sas_device_pg0.physical_port,
+ false);
+ if (!port)
+ goto out_unlock;
+
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(adapter, sas_address,
+ port);
+ if (!sas_dev)
+ goto out_unlock;
+
+ if (unlikely(sas_dev->hdl != handle)) {
+ starget = sas_dev->starget;
+ sas_target_priv_data = starget->hostdata;
+ starget_printk(KERN_INFO, starget,
+ "hdl changed from 0x%04x to 0x%04x!\n",
+ sas_dev->hdl, handle);
+ sas_target_priv_data->hdl = handle;
+ sas_dev->hdl = handle;
+ if (le16_to_cpu(sas_device_pg0.flg) &
+ LEAPRAID_SAS_DEV_P0_FLG_ENC_LEVEL_VALID) {
+ sas_dev->enc_level =
+ sas_device_pg0.enc_level;
+ memcpy(sas_dev->connector_name,
+ sas_device_pg0.connector_name, 4);
+ sas_dev->connector_name[4] = '\0';
+ } else {
+ sas_dev->enc_level = 0;
+ sas_dev->connector_name[0] = '\0';
+ }
+ sas_dev->enc_hdl =
+ le16_to_cpu(sas_device_pg0.enc_hdl);
+ enclosure_dev =
+ leapraid_enc_find_by_hdl(adapter, sas_dev->enc_hdl);
+ if (enclosure_dev) {
+ sas_dev->enc_lid =
+ le64_to_cpu(enclosure_dev->pg0.enc_lid);
+ }
+ }
+
+ if (!(le16_to_cpu(sas_device_pg0.flg) &
+ LEAPRAID_SAS_DEV_P0_FLG_DEV_PRESENT))
+ goto out_unlock;
+
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ leapraid_ublk_io_dev_to_running(adapter, sas_address, port);
+ goto out;
+
+out_unlock:
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+out:
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+}
+
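+/*
+ * leapraid_internal_sas_topo_chg_evt - walk one topology change list.
+ * Per phy entry: refresh the link state on a phy change, add newly
+ * attached end devices and remove targets that stopped responding; the
+ * expander itself is removed when its status is "not responding".
+ */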
+static int leapraid_internal_sas_topo_chg_evt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_card_port *card_port,
+ struct leapraid_topo_node *topo_node_exp,
+ struct leapraid_fw_evt_work *fw_evt,
+ u64 sas_addr, u8 max_phys)
+{
+ struct leapraid_evt_data_sas_topo_change_list *evt_data;
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ u8 phy_number;
+ u8 link_rate, prev_link_rate;
+ u16 reason_code;
+ u16 hdl;
+ int i;
+
+ evt_data = fw_evt->evt_data;
+ for (i = 0; i < evt_data->entry_num; i++) {
+ if (fw_evt->ignore)
+ return 0;
+
+ if (adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering)
+ return 0;
+
+ phy_number = evt_data->start_phy_num + i;
+ if (phy_number >= max_phys)
+ continue;
+
+ reason_code = evt_data->phy[i].phy_status &
+ LEAPRAID_EVT_SAS_TOPO_RC_MASK;
+
+ hdl = le16_to_cpu(evt_data->phy[i].attached_dev_hdl);
+ if (!hdl)
+ continue;
+
+ link_rate = evt_data->phy[i].link_rate >> 4;
+ prev_link_rate = evt_data->phy[i].link_rate & 0xF;
+ switch (reason_code) {
+ case LEAPRAID_EVT_SAS_TOPO_RC_PHY_CHANGED:
+ if (adapter->access_ctrl.shost_recovering)
+ break;
+
+ if (link_rate == prev_link_rate)
+ break;
+
+ leapraid_transport_update_links(adapter, sas_addr,
+ hdl, phy_number,
+ link_rate, card_port);
+ if (link_rate < LEAPRAID_SAS_NEG_LINK_RATE_1_5)
+ break;
+
+ leapraid_check_device(adapter, sas_addr, hdl,
+ phy_number, link_rate);
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock,
+ flags);
+ sas_dev =
+ leapraid_hold_lock_get_sas_dev_by_hdl(
+ adapter, hdl);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock,
+ flags);
+ if (sas_dev) {
+ leapraid_sdev_put(sas_dev);
+ break;
+ }
+ if (!test_bit(hdl, (unsigned long *)adapter->dev_topo.pending_dev_add))
+ break;
+
+ evt_data->phy[i].phy_status &=
+ LEAPRAID_EVT_SAS_TOPO_RC_CLEAR_MASK;
+ evt_data->phy[i].phy_status |=
+ LEAPRAID_EVT_SAS_TOPO_RC_TARG_ADDED;
+ fallthrough;
+
+ case LEAPRAID_EVT_SAS_TOPO_RC_TARG_ADDED:
+ if (adapter->access_ctrl.shost_recovering)
+ break;
+ leapraid_transport_update_links(adapter, sas_addr,
+ hdl, phy_number,
+ link_rate, card_port);
+ if (link_rate < LEAPRAID_SAS_NEG_LINK_RATE_1_5)
+ break;
+ leapraid_add_dev(adapter, hdl);
+ break;
+ case LEAPRAID_EVT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
+ leapraid_sas_dev_remove_by_hdl(adapter, hdl);
+ break;
+ }
+ }
+
+ if (evt_data->exp_status == LEAPRAID_EVT_SAS_TOPO_ES_NOT_RESPONDING &&
+ topo_node_exp)
+ leapraid_exp_rm(adapter, sas_addr, card_port);
+
+ return 0;
+}
+
+static int leapraid_sas_topo_chg_evt(struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt)
+{
+ struct leapraid_topo_node *topo_node_exp;
+ struct leapraid_card_port *card_port;
+ struct leapraid_evt_data_sas_topo_change_list *evt_data;
+ u16 phdl;
+ u8 max_phys;
+ u64 sas_addr;
+ unsigned long flags;
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering)
+ return 0;
+
+ evt_data = fw_evt->evt_data;
+ leapraid_sas_host_add(adapter, adapter->dev_topo.card.phys_num);
+
+ if (fw_evt->ignore)
+ return 0;
+
+ phdl = le16_to_cpu(evt_data->exp_dev_hdl);
+ card_port = leapraid_get_port_by_id(adapter,
+ evt_data->physical_port,
+ false);
+ if (evt_data->exp_status == LEAPRAID_EVT_SAS_TOPO_ES_ADDED)
+ if (leapraid_exp_add(adapter, phdl) != 0)
+ return 0;
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ topo_node_exp = leapraid_exp_find_by_hdl(adapter, phdl);
+ if (topo_node_exp) {
+ sas_addr = topo_node_exp->sas_address;
+ max_phys = topo_node_exp->phys_num;
+ card_port = topo_node_exp->card_port;
+ } else if (phdl < adapter->dev_topo.card.phys_num) {
+ sas_addr = adapter->dev_topo.card.sas_address;
+ max_phys = adapter->dev_topo.card.phys_num;
+ } else {
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock,
+ flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ return leapraid_internal_sas_topo_chg_evt(adapter, card_port,
+ topo_node_exp, fw_evt,
+ sas_addr, max_phys);
+}
+
+static void leapraid_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
+{
+ sdev->no_uld_attach = no_uld_attach ? 1 : 0;
+	sdev_printk(KERN_INFO, sdev,
+		    "raid component %s upper layers\n",
+		    sdev->no_uld_attach ? "hidden from" : "exposed to");
+ WARN_ON(scsi_device_reprobe(sdev));
+}
+
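+/*
+ * leapraid_sas_pd_add - handle a hidden RAID physical disk becoming
+ * visible: mark the handle in pd_hdls, refresh the parent link and
+ * register the device unless it is already known.
+ */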
+static void leapraid_sas_pd_add(struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_ir_change *evt_data)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_dev_p0 sas_dev_p0;
+ struct leapraid_sas_dev *sas_dev;
+ u64 sas_address;
+ u16 parent_hdl;
+ u16 hdl;
+
+ hdl = le16_to_cpu(evt_data->phys_disk_dev_hdl);
+ set_bit(hdl, (unsigned long *)adapter->dev_topo.pd_hdls);
+ sas_dev = leapraid_get_sas_dev_by_hdl(adapter, hdl);
+ if (sas_dev) {
+ leapraid_sdev_put(sas_dev);
+ dev_warn(&adapter->pdev->dev,
+ "dev handle 0x%x already exists\n", hdl);
+ return;
+ }
+
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = hdl;
+ if ((leapraid_op_config_page(adapter, &sas_dev_p0, cfgp1, cfgp2,
+ GET_SAS_DEVICE_PG0))) {
+ dev_warn(&adapter->pdev->dev, "failed to read dev page0\n");
+ return;
+ }
+
+ parent_hdl = le16_to_cpu(sas_dev_p0.parent_dev_hdl);
+ if (!leapraid_get_sas_address(adapter, parent_hdl, &sas_address))
+ leapraid_transport_update_links(adapter, sas_address, hdl,
+ sas_dev_p0.phy_num,
+ LEAPRAID_SAS_NEG_LINK_RATE_1_5,
+ leapraid_get_port_by_id(adapter,
+ sas_dev_p0.physical_port,
+ false));
+ leapraid_add_dev(adapter, hdl);
+}
+
+static void leapraid_sas_pd_delete(struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_ir_change *evt_data)
+{
+ u16 hdl;
+
+ hdl = le16_to_cpu(evt_data->phys_disk_dev_hdl);
+ leapraid_sas_dev_remove_by_hdl(adapter, hdl);
+}
+
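+/*
+ * leapraid_sas_pd_hide - hide a physical disk that joined a RAID volume.
+ * Records the owning volume handle/WWID, flags the target as a RAID
+ * member and reprobes its LUNs with no_uld_attach set.
+ */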
+static void leapraid_sas_pd_hide(struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_ir_change *evt_data)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct scsi_target *starget = NULL;
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ u64 volume_wwid = 0;
+ u16 volume_hdl = 0;
+ u16 hdl;
+
+ hdl = le16_to_cpu(evt_data->phys_disk_dev_hdl);
+ leapraid_cfg_get_volume_hdl(adapter, hdl, &volume_hdl);
+ if (volume_hdl)
+ leapraid_cfg_get_volume_wwid(adapter,
+ volume_hdl,
+ &volume_wwid);
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ if (!sas_dev) {
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return;
+ }
+
+ set_bit(hdl, (unsigned long *)adapter->dev_topo.pd_hdls);
+	if (sas_dev->starget && sas_dev->starget->hostdata) {
+		starget = sas_dev->starget;
+		starget_priv = starget->hostdata;
+		starget_priv->flg |= LEAPRAID_TGT_FLG_RAID_MEMBER;
+		sas_dev->volume_hdl = volume_hdl;
+		sas_dev->volume_wwid = volume_wwid;
+	}
+	leapraid_sdev_put(sas_dev);
+	spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ if (starget) {
+ dev_info(&adapter->pdev->dev, "hide sas_dev, hdl=0x%x\n", hdl);
+ starget_for_each_device(starget,
+ (void *)1, leapraid_reprobe_lun);
+ }
+}
+
+static void leapraid_sas_pd_expose(
+ struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_ir_change *evt_data)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct scsi_target *starget = NULL;
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ u16 hdl;
+
+ hdl = le16_to_cpu(evt_data->phys_disk_dev_hdl);
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ if (!sas_dev) {
+ dev_warn(&adapter->pdev->dev,
+ "%s:%d: sas_dev not found, hdl=0x%x\n",
+ __func__, __LINE__, hdl);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return;
+ }
+
+ sas_dev->volume_hdl = 0;
+ sas_dev->volume_wwid = 0;
+ clear_bit(hdl, (unsigned long *)adapter->dev_topo.pd_hdls);
+	if (sas_dev->starget && sas_dev->starget->hostdata) {
+		starget = sas_dev->starget;
+		starget_priv = starget->hostdata;
+		starget_priv->flg &= ~LEAPRAID_TGT_FLG_RAID_MEMBER;
+		sas_dev->led_on = false;
+	}
+	leapraid_sdev_put(sas_dev);
+	spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (starget) {
+ dev_info(&adapter->pdev->dev,
+ "expose sas_dev, hdl=0x%x\n", hdl);
+ starget_for_each_device(starget, NULL, leapraid_reprobe_lun);
+ }
+}
+
+static void leapraid_sas_volume_add(struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_ir_change *evt_data)
+{
+ struct leapraid_raid_volume *raid_volume;
+ unsigned long flags;
+ u64 wwid;
+ u16 hdl;
+
+ hdl = le16_to_cpu(evt_data->vol_dev_hdl);
+
+ if (leapraid_cfg_get_volume_wwid(adapter, hdl, &wwid)) {
+ dev_warn(&adapter->pdev->dev, "failed to read volume page1\n");
+ return;
+ }
+
+ if (!wwid) {
+		dev_warn(&adapter->pdev->dev, "invalid WWID (handle=0x%x)\n",
+ hdl);
+ return;
+ }
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_wwid(adapter, wwid);
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+
+ if (raid_volume) {
+ dev_warn(&adapter->pdev->dev,
+ "volume handle 0x%x already exists\n", hdl);
+ return;
+ }
+
+ raid_volume = kzalloc(sizeof(*raid_volume), GFP_KERNEL);
+ if (!raid_volume)
+ return;
+
+ raid_volume->id = adapter->dev_topo.sas_id++;
+ raid_volume->channel = RAID_CHANNEL;
+ raid_volume->hdl = hdl;
+ raid_volume->wwid = wwid;
+ leapraid_raid_volume_add(adapter, raid_volume);
+ if (!adapter->scan_dev_desc.wait_scan_dev_done) {
+ if (scsi_add_device(adapter->shost, RAID_CHANNEL,
+ raid_volume->id, 0))
+ leapraid_raid_volume_remove(adapter, raid_volume);
+ dev_info(&adapter->pdev->dev,
+ "add raid volume: hdl=0x%x, wwid=0x%llx\n", hdl, wwid);
+ } else {
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ leapraid_check_boot_dev(adapter, raid_volume, RAID_CHANNEL);
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock,
+ flags);
+ }
+}
+
+static void leapraid_sas_volume_delete(struct leapraid_adapter *adapter,
+ u16 hdl)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_raid_volume *raid_volume;
+ struct scsi_target *starget = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_hdl(adapter, hdl);
+ if (!raid_volume) {
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock,
+ flags);
+ dev_warn(&adapter->pdev->dev,
+ "%s:%d: volume handle 0x%x not found\n",
+ __func__, __LINE__, hdl);
+ return;
+ }
+
+ if (raid_volume->starget) {
+ starget = raid_volume->starget;
+ starget_priv = starget->hostdata;
+ starget_priv->deleted = true;
+ }
+
+ dev_info(&adapter->pdev->dev,
+ "delete raid volume: hdl=0x%x, wwid=0x%llx\n",
+ raid_volume->hdl, raid_volume->wwid);
+ list_del(&raid_volume->list);
+ kfree(raid_volume);
+
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+
+ if (starget)
+ scsi_remove_target(&starget->dev);
+}
+
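+/* Dispatch an integrated-RAID change event to the matching handler. */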
+static void leapraid_sas_ir_chg_evt(struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt)
+{
+ struct leapraid_evt_data_ir_change *evt_data;
+
+ evt_data = fw_evt->evt_data;
+
+ switch (evt_data->reason_code) {
+ case LEAPRAID_EVT_IR_RC_VOLUME_ADD:
+ leapraid_sas_volume_add(adapter, evt_data);
+ break;
+ case LEAPRAID_EVT_IR_RC_VOLUME_DELETE:
+ leapraid_sas_volume_delete(adapter,
+ le16_to_cpu(evt_data->vol_dev_hdl));
+ break;
+ case LEAPRAID_EVT_IR_RC_PD_HIDDEN_TO_ADD:
+ leapraid_sas_pd_add(adapter, evt_data);
+ break;
+ case LEAPRAID_EVT_IR_RC_PD_UNHIDDEN_TO_DELETE:
+ leapraid_sas_pd_delete(adapter, evt_data);
+ break;
+ case LEAPRAID_EVT_IR_RC_PD_CREATED_TO_HIDE:
+ leapraid_sas_pd_hide(adapter, evt_data);
+ break;
+ case LEAPRAID_EVT_IR_RC_PD_DELETED_TO_EXPOSE:
+ leapraid_sas_pd_expose(adapter, evt_data);
+ break;
+ default:
+ break;
+ }
+}
+
+static void leapraid_sas_enc_dev_stat_add_node(
+ struct leapraid_adapter *adapter, u16 hdl)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_enc_node *enc_node = NULL;
+ int rc;
+
+ enc_node = kzalloc(sizeof(*enc_node), GFP_KERNEL);
+ if (!enc_node)
+ return;
+
+ cfgp1.form = LEAPRAID_SAS_ENC_CFG_PGAD_HDL;
+ cfgp2.handle = hdl;
+ rc = leapraid_op_config_page(adapter, &enc_node->pg0, cfgp1, cfgp2,
+ GET_SAS_ENCLOSURE_PG0);
+ if (rc) {
+ kfree(enc_node);
+ return;
+ }
+ list_add_tail(&enc_node->list, &adapter->dev_topo.enc_list);
+}
+
+static void leapraid_sas_enc_dev_stat_del_node(
+ struct leapraid_enc_node *enc_node)
+{
+ if (!enc_node)
+ return;
+
+ list_del(&enc_node->list);
+ kfree(enc_node);
+}
+
+static void leapraid_sas_enc_dev_stat_chg_evt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt)
+{
+ struct leapraid_enc_node *enc_node = NULL;
+ struct leapraid_evt_data_sas_enc_dev_status_change *evt_data;
+ u16 enc_hdl;
+
+ if (adapter->access_ctrl.shost_recovering)
+ return;
+
+ evt_data = fw_evt->evt_data;
+ enc_hdl = le16_to_cpu(evt_data->enc_hdl);
+ if (enc_hdl)
+ enc_node = leapraid_enc_find_by_hdl(adapter, enc_hdl);
+ switch (evt_data->reason_code) {
+ case LEAPRAID_EVT_SAS_ENCL_RC_ADDED:
+ if (!enc_node)
+ leapraid_sas_enc_dev_stat_add_node(adapter, enc_hdl);
+ break;
+ case LEAPRAID_EVT_SAS_ENCL_RC_NOT_RESPONDING:
+ leapraid_sas_enc_dev_stat_del_node(enc_node);
+ break;
+ default:
+ break;
+ }
+}
+
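+/*
+ * Post-reset cleanup: devices that answered the rescan have resp set and
+ * survive; everything else is treated as stale and removed.
+ */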
+static void leapraid_remove_unresp_sas_end_dev(
+ struct leapraid_adapter *adapter)
+{
+ struct leapraid_sas_dev *sas_dev, *sas_dev_next;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ list_for_each_entry_safe(sas_dev, sas_dev_next,
+ &adapter->dev_topo.sas_dev_init_list, list) {
+ list_del_init(&sas_dev->list);
+ leapraid_sdev_put(sas_dev);
+ }
+ list_for_each_entry_safe(sas_dev, sas_dev_next,
+ &adapter->dev_topo.sas_dev_list, list) {
+ if (!sas_dev->resp)
+ list_move_tail(&sas_dev->list, &head);
+ else
+ sas_dev->resp = false;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ list_for_each_entry_safe(sas_dev, sas_dev_next, &head, list) {
+ leapraid_remove_device(adapter, sas_dev);
+ list_del_init(&sas_dev->list);
+ leapraid_sdev_put(sas_dev);
+ }
+
+ dev_info(&adapter->pdev->dev,
+		 "unresponsive sas end devices removed\n");
+}
+
+static void leapraid_remove_unresp_raid_volumes(
+ struct leapraid_adapter *adapter)
+{
+ struct leapraid_raid_volume *raid_volume, *raid_volume_next;
+
+ list_for_each_entry_safe(raid_volume, raid_volume_next,
+ &adapter->dev_topo.raid_volume_list, list) {
+ if (!raid_volume->resp)
+ leapraid_sas_volume_delete(adapter, raid_volume->hdl);
+ else
+ raid_volume->resp = false;
+ }
+ dev_info(&adapter->pdev->dev,
+		 "unresponsive raid volumes removed\n");
+}
+
+static void leapraid_remove_unresp_sas_exp(struct leapraid_adapter *adapter)
+{
+ struct leapraid_topo_node *topo_node_exp, *topo_node_exp_next;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ list_for_each_entry_safe(topo_node_exp, topo_node_exp_next,
+ &adapter->dev_topo.exp_list, list) {
+ if (!topo_node_exp->resp)
+ list_move_tail(&topo_node_exp->list, &head);
+ else
+ topo_node_exp->resp = false;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ list_for_each_entry_safe(topo_node_exp, topo_node_exp_next,
+ &head, list)
+ leapraid_exp_node_rm(adapter, topo_node_exp);
+
+ dev_info(&adapter->pdev->dev,
+		 "unresponsive sas expanders removed\n");
+}
+
+static void leapraid_remove_unresp_dev(struct leapraid_adapter *adapter)
+{
+ leapraid_remove_unresp_sas_end_dev(adapter);
+ if (adapter->adapter_attr.raid_support)
+ leapraid_remove_unresp_raid_volumes(adapter);
+ leapraid_remove_unresp_sas_exp(adapter);
+ leapraid_ublk_io_all_dev(adapter);
+}
+
+static void leapraid_del_dirty_vphy(struct leapraid_adapter *adapter)
+{
+ struct leapraid_card_port *card_port, *card_port_next;
+ struct leapraid_vphy *vphy, *vphy_next;
+
+ list_for_each_entry_safe(card_port, card_port_next,
+ &adapter->dev_topo.card_port_list, list) {
+ if (!card_port->vphys_mask)
+ continue;
+
+ list_for_each_entry_safe(vphy, vphy_next,
+ &card_port->vphys_list, list) {
+ if (!(vphy->flg & LEAPRAID_VPHY_FLG_DIRTY))
+ continue;
+
+ card_port->vphys_mask &= ~vphy->phy_mask;
+ list_del(&vphy->list);
+ kfree(vphy);
+ }
+
+ if (!card_port->vphys_mask && !card_port->sas_address)
+ card_port->flg |= LEAPRAID_CARD_PORT_FLG_DIRTY;
+ }
+}
+
+static void leapraid_del_dirty_card_port(struct leapraid_adapter *adapter)
+{
+ struct leapraid_card_port *card_port, *card_port_next;
+
+ list_for_each_entry_safe(card_port, card_port_next,
+ &adapter->dev_topo.card_port_list, list) {
+ if (!(card_port->flg & LEAPRAID_CARD_PORT_FLG_DIRTY) ||
+ card_port->flg & LEAPRAID_CARD_PORT_FLG_NEW)
+ continue;
+
+ list_del(&card_port->list);
+ kfree(card_port);
+ }
+}
+
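+/*
+ * Reapply per-transport queue depth limits: SSP targets get the wide or
+ * narrow port limit, SATA devices the SATA limit; others are left as-is.
+ */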
+static void leapraid_update_dev_qdepth(struct leapraid_adapter *adapter)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct leapraid_sas_dev *sas_dev;
+ struct scsi_device *sdev;
+ u16 qdepth;
+
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv || !sdev_priv->starget_priv)
+ continue;
+ sas_dev = sdev_priv->starget_priv->sas_dev;
+ if (sas_dev && sas_dev->dev_info & LEAPRAID_DEVTYP_SSP_TGT)
+ qdepth = (sas_dev->port_type > 1) ?
+ adapter->adapter_attr.wideport_max_queue_depth :
+ adapter->adapter_attr.narrowport_max_queue_depth;
+ else if (sas_dev && sas_dev->dev_info &
+ LEAPRAID_DEVTYP_SATA_DEV)
+ qdepth = adapter->adapter_attr.sata_max_queue_depth;
+ else
+ continue;
+
+ leapraid_adjust_sdev_queue_depth(sdev, qdepth);
+ }
+}
+
+static void leapraid_update_exp_links(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node_exp,
+ u16 hdl)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_exp_p1 exp_p1;
+ int i;
+
+ cfgp2.handle = hdl;
+ for (i = 0; i < topo_node_exp->phys_num; i++) {
+ cfgp1.phy_number = i;
+ if ((leapraid_op_config_page(adapter, &exp_p1, cfgp1, cfgp2,
+ GET_SAS_EXPANDER_PG1)))
+ return;
+
+ leapraid_transport_update_links(adapter,
+ topo_node_exp->sas_address,
+ le16_to_cpu(exp_p1.attached_dev_hdl),
+ i,
+ exp_p1.neg_link_rate >> 4,
+ topo_node_exp->card_port);
+ }
+}
+
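+/*
+ * Post-reset expander rescan: iterate expander page 0 via the GET_NEXT
+ * loop, refresh links for expanders we already track and add any
+ * expander that appeared while the adapter was down.
+ */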
+static void leapraid_scan_exp_after_reset(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_topo_node *topo_node_exp;
+ struct leapraid_exp_p0 exp_p0;
+ unsigned long flags;
+ u16 hdl;
+ u8 port_id;
+
+ dev_info(&adapter->pdev->dev, "begin scanning expanders\n");
+
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (hdl = 0xFFFF, cfgp2.handle = hdl;
+ !leapraid_op_config_page(adapter, &exp_p0, cfgp1, cfgp2,
+ GET_SAS_EXPANDER_PG0);
+ cfgp2.handle = hdl) {
+ hdl = le16_to_cpu(exp_p0.dev_hdl);
+ port_id = exp_p0.physical_port;
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ topo_node_exp =
+ leapraid_exp_find_by_sas_address(adapter,
+ le64_to_cpu(exp_p0.sas_address),
+ leapraid_get_port_by_id(adapter,
+ port_id,
+ false));
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock,
+ flags);
+
+ if (topo_node_exp) {
+ leapraid_update_exp_links(adapter, topo_node_exp, hdl);
+ } else {
+ leapraid_exp_add(adapter, hdl);
+
+ dev_info(&adapter->pdev->dev,
+ "add exp: hdl=0x%04x, sas addr=0x%016llx\n",
+ hdl,
+ (unsigned long long)le64_to_cpu(
+ exp_p0.sas_address));
+ }
+ }
+
+ dev_info(&adapter->pdev->dev, "expanders scan complete\n");
+}
+
+static void leapraid_scan_phy_disks_after_reset(
+ struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ union cfg_param_1 cfgp1_extra = {0};
+ union cfg_param_2 cfgp2_extra = {0};
+ struct leapraid_sas_dev_p0 sas_dev_p0;
+ struct leapraid_raidpd_p0 raidpd_p0;
+ struct leapraid_sas_dev *sas_dev;
+ u8 phys_disk_num, port_id;
+ u16 hdl, parent_hdl;
+ u64 sas_addr;
+
+ dev_info(&adapter->pdev->dev, "begin scanning phys disk\n");
+
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (phys_disk_num = 0xFF, cfgp2.form_specific = phys_disk_num;
+ !leapraid_op_config_page(adapter, &raidpd_p0,
+ cfgp1, cfgp2, GET_PHY_DISK_PG0);
+ cfgp2.form_specific = phys_disk_num) {
+ phys_disk_num = raidpd_p0.phys_disk_num;
+ hdl = le16_to_cpu(raidpd_p0.dev_hdl);
+ sas_dev = leapraid_get_sas_dev_by_hdl(adapter, hdl);
+ if (sas_dev) {
+ leapraid_sdev_put(sas_dev);
+ continue;
+ }
+
+ cfgp1_extra.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2_extra.handle = hdl;
+		if (leapraid_op_config_page(adapter, &sas_dev_p0, cfgp1_extra,
+					    cfgp2_extra, GET_SAS_DEVICE_PG0))
+ continue;
+
+ parent_hdl = le16_to_cpu(sas_dev_p0.parent_dev_hdl);
+ if (!leapraid_get_sas_address(adapter,
+ parent_hdl,
+ &sas_addr)) {
+ port_id = sas_dev_p0.physical_port;
+ leapraid_transport_update_links(
+ adapter, sas_addr, hdl,
+ sas_dev_p0.phy_num,
+ LEAPRAID_SAS_NEG_LINK_RATE_1_5,
+ leapraid_get_port_by_id(
+ adapter, port_id, false));
+ set_bit(hdl,
+ (unsigned long *)adapter->dev_topo.pd_hdls);
+
+ leapraid_add_dev(adapter, hdl);
+
+ dev_info(&adapter->pdev->dev,
+ "add phys disk: hdl=0x%04x, sas addr=0x%016llx\n",
+ hdl,
+ (unsigned long long)le64_to_cpu(
+ sas_dev_p0.sas_address));
+ }
+ }
+
+ dev_info(&adapter->pdev->dev, "phys disk scan complete\n");
+}
+
+static void leapraid_scan_vol_after_reset(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ union cfg_param_1 cfgp1_extra = {0};
+ union cfg_param_2 cfgp2_extra = {0};
+ struct leapraid_evt_data_ir_change evt_data;
+	struct leapraid_raid_volume *raid_volume;
+ struct leapraid_raidvol_p1 *vol_p1;
+ struct leapraid_raidvol_p0 *vol_p0;
+ unsigned long flags;
+ u16 hdl;
+
+ vol_p0 = kzalloc(sizeof(*vol_p0), GFP_KERNEL);
+ if (!vol_p0)
+ return;
+
+ vol_p1 = kzalloc(sizeof(*vol_p1), GFP_KERNEL);
+ if (!vol_p1) {
+ kfree(vol_p0);
+ return;
+ }
+
+ dev_info(&adapter->pdev->dev, "begin scanning volumes\n");
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (hdl = 0xFFFF, cfgp2.handle = hdl;
+ !leapraid_op_config_page(adapter, vol_p1, cfgp1,
+ cfgp2, GET_RAID_VOLUME_PG1);
+ cfgp2.handle = hdl) {
+ hdl = le16_to_cpu(vol_p1->dev_hdl);
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_wwid(
+ adapter,
+ le64_to_cpu(vol_p1->wwid));
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock,
+ flags);
+ if (raid_volume)
+ continue;
+
+ cfgp1_extra.size = sizeof(struct leapraid_raidvol_p0);
+ cfgp2_extra.handle = hdl;
+ if (leapraid_op_config_page(adapter, vol_p0, cfgp1_extra,
+ cfgp2_extra, GET_RAID_VOLUME_PG0))
+ continue;
+
+ if (vol_p0->volume_state == LEAPRAID_VOL_STATE_OPTIMAL ||
+ vol_p0->volume_state == LEAPRAID_VOL_STATE_ONLINE ||
+ vol_p0->volume_state == LEAPRAID_VOL_STATE_DEGRADED) {
+ memset(&evt_data, 0,
+ sizeof(struct leapraid_evt_data_ir_change));
+ evt_data.reason_code = LEAPRAID_EVT_IR_RC_VOLUME_ADD;
+ evt_data.vol_dev_hdl = vol_p1->dev_hdl;
+ leapraid_sas_volume_add(adapter, &evt_data);
+ dev_info(&adapter->pdev->dev,
+ "add volume: hdl=0x%04x\n",
+				 hdl);
+ }
+ }
+
+ kfree(vol_p0);
+ kfree(vol_p1);
+
+ dev_info(&adapter->pdev->dev, "volumes scan complete\n");
+}
+
+static void leapraid_scan_sas_dev_after_reset(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_dev_p0 sas_dev_p0;
+ struct leapraid_sas_dev *sas_dev;
+ u16 hdl, parent_hdl;
+ u64 sas_address;
+ u8 port_id;
+
+ dev_info(&adapter->pdev->dev,
+ "begin scanning sas end devices\n");
+
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (hdl = 0xFFFF, cfgp2.handle = hdl;
+ !leapraid_op_config_page(adapter, &sas_dev_p0, cfgp1, cfgp2,
+ GET_SAS_DEVICE_PG0);
+ cfgp2.handle = hdl) {
+ hdl = le16_to_cpu(sas_dev_p0.dev_hdl);
+ if (!(leapraid_is_end_dev(le32_to_cpu(sas_dev_p0.dev_info))))
+ continue;
+
+ port_id = sas_dev_p0.physical_port;
+ sas_dev = leapraid_get_sas_dev_by_addr(
+ adapter,
+ le64_to_cpu(sas_dev_p0.sas_address),
+ leapraid_get_port_by_id(
+ adapter,
+ port_id,
+ false));
+ if (sas_dev) {
+ leapraid_sdev_put(sas_dev);
+ continue;
+ }
+
+ parent_hdl = le16_to_cpu(sas_dev_p0.parent_dev_hdl);
+ if (!leapraid_get_sas_address(adapter, parent_hdl,
+ &sas_address)) {
+ leapraid_transport_update_links(
+ adapter,
+ sas_address,
+ hdl,
+ sas_dev_p0.phy_num,
+ LEAPRAID_SAS_NEG_LINK_RATE_1_5,
+ leapraid_get_port_by_id(adapter,
+ port_id,
+ false));
+ leapraid_add_dev(adapter, hdl);
+ dev_info(&adapter->pdev->dev,
+ "add sas dev: hdl=0x%04x, sas addr=0x%016llx\n",
+ hdl,
+ (unsigned long long)le64_to_cpu(
+ sas_dev_p0.sas_address));
+ }
+ }
+
+ dev_info(&adapter->pdev->dev, "sas end devices scan complete\n");
+}
+
+static void leapraid_scan_all_dev_after_reset(struct leapraid_adapter *adapter)
+{
+ dev_info(&adapter->pdev->dev, "begin scanning devices\n");
+
+ leapraid_sas_host_add(adapter, adapter->dev_topo.card.phys_num);
+ leapraid_scan_exp_after_reset(adapter);
+ if (adapter->adapter_attr.raid_support) {
+ leapraid_scan_phy_disks_after_reset(adapter);
+ leapraid_scan_vol_after_reset(adapter);
+ }
+ leapraid_scan_sas_dev_after_reset(adapter);
+
+ dev_info(&adapter->pdev->dev, "devices scan complete\n");
+}
+
+static void leapraid_hardreset_async_logic(struct leapraid_adapter *adapter)
+{
+ leapraid_remove_unresp_dev(adapter);
+ leapraid_del_dirty_vphy(adapter);
+ leapraid_del_dirty_card_port(adapter);
+ leapraid_update_dev_qdepth(adapter);
+ leapraid_scan_all_dev_after_reset(adapter);
+
+ if (adapter->scan_dev_desc.driver_loading)
+ leapraid_scan_dev_done(adapter);
+}
+
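+/*
+ * leapraid_send_enc_cmd - issue a SCSI Enclosure Processor request and
+ * wait up to LEAPRAID_ENC_CMD_TIMEOUT seconds for completion; a hard
+ * reset is triggered when the unfinished command status calls for it.
+ */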
+static int leapraid_send_enc_cmd(struct leapraid_adapter *adapter,
+ struct leapraid_sep_rep *sep_rep,
+ struct leapraid_sep_req *sep_req)
+{
+ void *req;
+ bool reset_flg = false;
+ int rc = 0;
+
+ mutex_lock(&adapter->driver_cmds.enc_cmd.mutex);
+ rc = leapraid_check_adapter_is_op(adapter);
+ if (rc)
+ goto out;
+
+ adapter->driver_cmds.enc_cmd.status = LEAPRAID_CMD_PENDING;
+ req = leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.enc_cmd.inter_taskid);
+ memset(req, 0, LEAPRAID_REQUEST_SIZE);
+ memcpy(req, sep_req, sizeof(struct leapraid_sep_req));
+ init_completion(&adapter->driver_cmds.enc_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.enc_cmd.inter_taskid);
+ wait_for_completion_timeout(&adapter->driver_cmds.enc_cmd.done,
+ LEAPRAID_ENC_CMD_TIMEOUT * HZ);
+ if (!(adapter->driver_cmds.enc_cmd.status & LEAPRAID_CMD_DONE)) {
+ reset_flg =
+ leapraid_check_reset(
+ adapter->driver_cmds.enc_cmd.status);
+ rc = -EFAULT;
+ goto do_hard_reset;
+ }
+
+ if (adapter->driver_cmds.enc_cmd.status & LEAPRAID_CMD_REPLY_VALID)
+ memcpy(sep_rep, (void *)(&adapter->driver_cmds.enc_cmd.reply),
+ sizeof(struct leapraid_sep_rep));
+do_hard_reset:
+ if (reset_flg) {
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, FULL_RESET);
+ }
+
+ adapter->driver_cmds.enc_cmd.status = LEAPRAID_CMD_NOT_USED;
+out:
+ mutex_unlock(&adapter->driver_cmds.enc_cmd.mutex);
+ return rc;
+}
+
+static void leapraid_set_led(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev, bool on)
+{
+ struct leapraid_sep_rep sep_rep;
+ struct leapraid_sep_req sep_req;
+
+ if (!sas_dev)
+ return;
+
+ memset(&sep_req, 0, sizeof(struct leapraid_sep_req));
+ memset(&sep_rep, 0, sizeof(struct leapraid_sep_rep));
+ sep_req.func = LEAPRAID_FUNC_SCSI_ENC_PROCESSOR;
+ sep_req.act = LEAPRAID_SEP_REQ_ACT_WRITE_STATUS;
+ if (on) {
+ sep_req.slot_status =
+ cpu_to_le32(LEAPRAID_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
+ sep_req.dev_hdl = cpu_to_le16(sas_dev->hdl);
+ sep_req.flg = LEAPRAID_SEP_REQ_FLG_DEVHDL_ADDRESS;
+ if (leapraid_send_enc_cmd(adapter, &sep_rep, &sep_req)) {
+ leapraid_sdev_put(sas_dev);
+ return;
+ }
+
+ sas_dev->led_on = true;
+ if (sep_rep.adapter_status)
+ leapraid_sdev_put(sas_dev);
+ } else {
+ sep_req.slot_status = 0;
+ sep_req.slot = cpu_to_le16(sas_dev->slot);
+ sep_req.dev_hdl = 0;
+ sep_req.enc_hdl = cpu_to_le16(sas_dev->enc_hdl);
+ sep_req.flg = LEAPRAID_SEP_REQ_FLG_ENCLOSURE_SLOT_ADDRESS;
+ if ((leapraid_send_enc_cmd(adapter, &sep_rep, &sep_req))) {
+ leapraid_sdev_put(sas_dev);
+ return;
+ }
+
+ if (sep_rep.adapter_status) {
+ leapraid_sdev_put(sas_dev);
+ return;
+ }
+ }
+}
+
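+/*
+ * leapraid_fw_work - process one queued firmware event from the event
+ * worker: dequeue @fw_evt, dispatch it by type and drop the work item's
+ * reference when done.
+ */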
+static void leapraid_fw_work(struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt)
+{
+ struct leapraid_sas_dev *sas_dev;
+
+ adapter->fw_evt_s.cur_evt = fw_evt;
+ leapraid_del_fw_evt_from_list(adapter, fw_evt);
+ if (adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering) {
+ leapraid_fw_evt_put(fw_evt);
+ adapter->fw_evt_s.cur_evt = NULL;
+ return;
+ }
+ switch (fw_evt->evt_type) {
+ case LEAPRAID_EVT_SAS_DISCOVERY:
+ {
+ struct leapraid_evt_data_sas_disc *evt_data;
+
+ evt_data = fw_evt->evt_data;
+ if (evt_data->reason_code ==
+ LEAPRAID_EVT_SAS_DISC_RC_STARTED &&
+ !adapter->dev_topo.card.phys_num)
+ leapraid_sas_host_add(adapter, 0);
+ break;
+ }
+ case LEAPRAID_EVT_SAS_TOPO_CHANGE_LIST:
+ leapraid_sas_topo_chg_evt(adapter, fw_evt);
+ break;
+ case LEAPRAID_EVT_IR_CHANGE:
+ leapraid_sas_ir_chg_evt(adapter, fw_evt);
+ break;
+ case LEAPRAID_EVT_SAS_ENCL_DEV_STATUS_CHANGE:
+ leapraid_sas_enc_dev_stat_chg_evt(adapter, fw_evt);
+ break;
+ case LEAPRAID_EVT_REMOVE_DEAD_DEV:
+ while (scsi_host_in_recovery(adapter->shost) ||
+ adapter->access_ctrl.shost_recovering) {
+ if (adapter->access_ctrl.host_removing ||
+ adapter->fw_evt_s.fw_evt_cleanup)
+ goto out;
+
+ ssleep(1);
+ }
+ leapraid_hardreset_async_logic(adapter);
+ break;
+ case LEAPRAID_EVT_TURN_ON_PFA_LED:
+ sas_dev = leapraid_get_sas_dev_by_hdl(adapter,
+ fw_evt->dev_handle);
+ leapraid_set_led(adapter, sas_dev, true);
+ break;
+ case LEAPRAID_EVT_SCAN_DEV_DONE:
+ adapter->scan_dev_desc.scan_start = false;
+ break;
+ default:
+ break;
+ }
+out:
+ leapraid_fw_evt_put(fw_evt);
+ adapter->fw_evt_s.cur_evt = NULL;
+}
+
+static void leapraid_sas_dev_stat_chg_evt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_sas_dev_status_change *event_data)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_sas_dev *sas_dev = NULL;
+ u64 sas_address;
+ unsigned long flags;
+
+ switch (event_data->reason_code) {
+ case LEAPRAID_EVT_SAS_DEV_STAT_RC_INTERNAL_DEV_RESET:
+ case LEAPRAID_EVT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
+ break;
+ default:
+ return;
+ }
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+
+ sas_address = le64_to_cpu(event_data->sas_address);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(adapter,
+ sas_address,
+ leapraid_get_port_by_id(adapter,
+ event_data->physical_port,
+ false));
+
+ if (sas_dev && sas_dev->starget) {
+ starget_priv = sas_dev->starget->hostdata;
+ if (starget_priv) {
+ switch (event_data->reason_code) {
+ case LEAPRAID_EVT_SAS_DEV_STAT_RC_INTERNAL_DEV_RESET:
+ starget_priv->tm_busy = true;
+ break;
+ case LEAPRAID_EVT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
+ starget_priv->tm_busy = false;
+ break;
+ }
+ }
+ }
+
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+}
+
+static void leapraid_set_volume_delete_flag(struct leapraid_adapter *adapter,
+ u16 handle)
+{
+ struct leapraid_raid_volume *raid_volume;
+ struct leapraid_starget_priv *sas_target_priv_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_hdl(adapter, handle);
+ if (raid_volume && raid_volume->starget &&
+ raid_volume->starget->hostdata) {
+ sas_target_priv_data = raid_volume->starget->hostdata;
+ sas_target_priv_data->deleted = true;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+}
+
+static void leapraid_check_ir_change_evt(struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_ir_change *evt_data)
+{
+ u16 phys_disk_dev_hdl;
+
+ switch (evt_data->reason_code) {
+ case LEAPRAID_EVT_IR_RC_VOLUME_DELETE:
+ leapraid_set_volume_delete_flag(adapter,
+ le16_to_cpu(evt_data->vol_dev_hdl));
+ break;
+ case LEAPRAID_EVT_IR_RC_PD_UNHIDDEN_TO_DELETE:
+ phys_disk_dev_hdl =
+ le16_to_cpu(evt_data->phys_disk_dev_hdl);
+ clear_bit(phys_disk_dev_hdl,
+ (unsigned long *)adapter->dev_topo.pd_hdls);
+ leapraid_tgt_rst_send(adapter, phys_disk_dev_hdl);
+ break;
+ }
+}
+
+static void leapraid_topo_del_evts_process_exp_status(
+ struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_sas_topo_change_list *evt_data)
+{
+ struct leapraid_fw_evt_work *fw_evt = NULL;
+ struct leapraid_evt_data_sas_topo_change_list *loc_evt_data = NULL;
+ unsigned long flags;
+ u16 exp_hdl;
+
+ exp_hdl = le16_to_cpu(evt_data->exp_dev_hdl);
+
+ switch (evt_data->exp_status) {
+ case LEAPRAID_EVT_SAS_TOPO_ES_NOT_RESPONDING:
+ spin_lock_irqsave(&adapter->fw_evt_s.fw_evt_lock, flags);
+ list_for_each_entry(fw_evt,
+ &adapter->fw_evt_s.fw_evt_list, list) {
+ if (fw_evt->evt_type !=
+ LEAPRAID_EVT_SAS_TOPO_CHANGE_LIST ||
+ fw_evt->ignore)
+ continue;
+
+ loc_evt_data = fw_evt->evt_data;
+ if ((loc_evt_data->exp_status ==
+ LEAPRAID_EVT_SAS_TOPO_ES_ADDED ||
+ loc_evt_data->exp_status ==
+ LEAPRAID_EVT_SAS_TOPO_ES_RESPONDING) &&
+ le16_to_cpu(loc_evt_data->exp_dev_hdl) == exp_hdl)
+ fw_evt->ignore = 1;
+ }
+ spin_unlock_irqrestore(&adapter->fw_evt_s.fw_evt_lock, flags);
+ break;
+ default:
+ break;
+ }
+}
+
+static void leapraid_check_topo_del_evts(struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_sas_topo_change_list *evt_data)
+{
+ int reason_code;
+ u16 hdl;
+ int i;
+
+ for (i = 0; i < evt_data->entry_num; i++) {
+ hdl = le16_to_cpu(evt_data->phy[i].attached_dev_hdl);
+ if (!hdl)
+ continue;
+
+ reason_code = evt_data->phy[i].phy_status &
+ LEAPRAID_EVT_SAS_TOPO_RC_MASK;
+ if (reason_code ==
+ LEAPRAID_EVT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
+ leapraid_tgt_not_responding(adapter, hdl);
+ }
+ leapraid_topo_del_evts_process_exp_status(adapter, evt_data);
+}
+
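+/*
+ * First-stage event processing at interrupt time. Returns true when the
+ * event must not be queued to the firmware event worker.
+ */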
+static bool leapraid_async_process_evt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_evt_notify_rep *event_notify_rep)
+{
+ u16 evt = le16_to_cpu(event_notify_rep->evt);
+ bool exit_flag = false;
+
+ switch (evt) {
+ case LEAPRAID_EVT_SAS_DEV_STATUS_CHANGE:
+ leapraid_sas_dev_stat_chg_evt(adapter,
+ (struct leapraid_evt_data_sas_dev_status_change
+ *)event_notify_rep->evt_data);
+ break;
+ case LEAPRAID_EVT_IR_CHANGE:
+ leapraid_check_ir_change_evt(adapter,
+ (struct leapraid_evt_data_ir_change
+ *)event_notify_rep->evt_data);
+ break;
+ case LEAPRAID_EVT_SAS_TOPO_CHANGE_LIST:
+ leapraid_check_topo_del_evts(adapter,
+ (struct leapraid_evt_data_sas_topo_change_list
+ *)event_notify_rep->evt_data);
+ if (adapter->access_ctrl.shost_recovering) {
+ exit_flag = true;
+ return exit_flag;
+ }
+ break;
+ case LEAPRAID_EVT_SAS_DISCOVERY:
+ case LEAPRAID_EVT_SAS_ENCL_DEV_STATUS_CHANGE:
+ break;
+ default:
+ exit_flag = true;
+ return exit_flag;
+ }
+
+ return exit_flag;
+}
+
+static void leapraid_async_evt_cb_enqueue(
+ struct leapraid_adapter *adapter,
+ struct leapraid_evt_notify_rep *evt_notify_rep)
+{
+ struct leapraid_fw_evt_work *fw_evt;
+ u16 evt_sz;
+
+ fw_evt = leapraid_alloc_fw_evt_work();
+ if (!fw_evt)
+ return;
+
+ evt_sz = le16_to_cpu(evt_notify_rep->evt_data_len) * 4;
+ fw_evt->evt_data = kmemdup(evt_notify_rep->evt_data,
+ evt_sz, GFP_ATOMIC);
+ if (!fw_evt->evt_data) {
+ leapraid_fw_evt_put(fw_evt);
+ return;
+ }
+ fw_evt->adapter = adapter;
+ fw_evt->evt_type = le16_to_cpu(evt_notify_rep->evt);
+ leapraid_fw_evt_add(adapter, fw_evt);
+ leapraid_fw_evt_put(fw_evt);
+}
+
+static void leapraid_async_evt_cb(struct leapraid_adapter *adapter,
+ u8 msix_index, u32 rep_paddr)
+{
+ struct leapraid_evt_notify_rep *evt_notify_rep;
+
+ if (adapter->access_ctrl.pcie_recovering)
+ return;
+
+ evt_notify_rep = leapraid_get_reply_vaddr(adapter, rep_paddr);
+ if (unlikely(!evt_notify_rep))
+ return;
+
+ if (leapraid_async_process_evt(adapter, evt_notify_rep))
+ return;
+
+ leapraid_async_evt_cb_enqueue(adapter, evt_notify_rep);
+}
+
+static void leapraid_handle_async_event(struct leapraid_adapter *adapter,
+ u8 msix_index, u32 reply)
+{
+ struct leapraid_evt_notify_rep *leap_mpi_rep =
+ leapraid_get_reply_vaddr(adapter, reply);
+
+ if (!leap_mpi_rep)
+ return;
+
+ if (leap_mpi_rep->func != LEAPRAID_FUNC_EVENT_NOTIFY)
+ return;
+
+ leapraid_async_evt_cb(adapter, msix_index, reply);
+}
+
+void leapraid_async_turn_on_led(struct leapraid_adapter *adapter, u16 handle)
+{
+ struct leapraid_fw_evt_work *fw_event;
+
+ fw_event = leapraid_alloc_fw_evt_work();
+ if (!fw_event)
+ return;
+
+ fw_event->dev_handle = handle;
+ fw_event->adapter = adapter;
+ fw_event->evt_type = LEAPRAID_EVT_TURN_ON_PFA_LED;
+ leapraid_fw_evt_add(adapter, fw_event);
+ leapraid_fw_evt_put(fw_event);
+}
+
+static void leapraid_hardreset_barrier(struct leapraid_adapter *adapter)
+{
+ struct leapraid_fw_evt_work *fw_event;
+
+ fw_event = leapraid_alloc_fw_evt_work();
+ if (!fw_event)
+ return;
+
+ fw_event->adapter = adapter;
+ fw_event->evt_type = LEAPRAID_EVT_REMOVE_DEAD_DEV;
+ leapraid_fw_evt_add(adapter, fw_event);
+ leapraid_fw_evt_put(fw_event);
+}
+
+static void leapraid_scan_dev_complete(struct leapraid_adapter *adapter)
+{
+ struct leapraid_fw_evt_work *fw_evt;
+
+ fw_evt = leapraid_alloc_fw_evt_work();
+ if (!fw_evt)
+ return;
+
+ fw_evt->evt_type = LEAPRAID_EVT_SCAN_DEV_DONE;
+ fw_evt->adapter = adapter;
+ leapraid_fw_evt_add(adapter, fw_evt);
+ leapraid_fw_evt_put(fw_evt);
+}
+
+static u8 leapraid_driver_cmds_done(struct leapraid_adapter *adapter,
+ u16 taskid, u8 msix_index,
+ u32 rep_paddr, u8 cb_idx)
+{
+ struct leapraid_rep *leap_mpi_rep =
+ leapraid_get_reply_vaddr(adapter, rep_paddr);
+ struct leapraid_driver_cmd *sp_cmd, *_sp_cmd = NULL;
+
+ list_for_each_entry(sp_cmd, &adapter->driver_cmds.special_cmd_list,
+ list)
+ if (cb_idx == sp_cmd->cb_idx) {
+ _sp_cmd = sp_cmd;
+ break;
+ }
+
+ if (WARN_ON(!_sp_cmd))
+ return 1;
+ if (WARN_ON(_sp_cmd->status == LEAPRAID_CMD_NOT_USED))
+ return 1;
+ if (WARN_ON(taskid != _sp_cmd->hp_taskid &&
+ taskid != _sp_cmd->taskid &&
+ taskid != _sp_cmd->inter_taskid))
+ return 1;
+
+ _sp_cmd->status |= LEAPRAID_CMD_DONE;
+ if (leap_mpi_rep) {
+ memcpy((void *)(&_sp_cmd->reply), leap_mpi_rep,
+ leap_mpi_rep->msg_len * 4);
+ _sp_cmd->status |= LEAPRAID_CMD_REPLY_VALID;
+
+ if (_sp_cmd->cb_idx == LEAPRAID_SCAN_DEV_CB_IDX) {
+ u16 adapter_status;
+
+ _sp_cmd->status &= ~LEAPRAID_CMD_PENDING;
+ adapter_status =
+ le16_to_cpu(leap_mpi_rep->adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (adapter_status != LEAPRAID_ADAPTER_STATUS_SUCCESS)
+ adapter->scan_dev_desc.scan_dev_failed = true;
+
+ if (_sp_cmd->async_scan_dev) {
+ if (adapter_status ==
+ LEAPRAID_ADAPTER_STATUS_SUCCESS) {
+ leapraid_scan_dev_complete(adapter);
+ } else {
+ adapter->scan_dev_desc.scan_start_failed =
+ adapter_status;
+ }
+ return 1;
+ }
+
+ complete(&_sp_cmd->done);
+ return 1;
+ }
+
+ if (_sp_cmd->cb_idx == LEAPRAID_CTL_CB_IDX) {
+ struct leapraid_scsiio_rep *scsiio_reply;
+
+ if (leap_mpi_rep->function ==
+ LEAPRAID_FUNC_SCSIIO_REQ ||
+ leap_mpi_rep->function ==
+ LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH) {
+ scsiio_reply =
+ (struct leapraid_scsiio_rep *)leap_mpi_rep;
+ if (scsiio_reply->scsi_state &
+ LEAPRAID_SCSI_STATE_AUTOSENSE_VALID)
+ memcpy((void *)(&adapter->driver_cmds.ctl_cmd.sense),
+ leapraid_get_sense_buffer(adapter, taskid),
+ min_t(u32,
+ SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(scsiio_reply->sense_count)));
+ }
+ }
+ }
+
+ _sp_cmd->status &= ~LEAPRAID_CMD_PENDING;
+ complete(&_sp_cmd->done);
+
+ return 1;
+}
+
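+/*
+ * Complete one reply descriptor: fast-path success descriptors finish
+ * the SCSI I/O directly; address replies are bounds-checked, routed to
+ * the SCSI, driver-command or async event handlers, and the reply frame
+ * is handed back to the adapter's reply free queue.
+ */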
+static void leapraid_request_descript_handler(struct leapraid_adapter *adapter,
+ union leapraid_rep_desc_union *rpf,
+ u8 req_desc_type, u8 msix_idx)
+{
+ u32 rep;
+ u16 taskid;
+
+ rep = 0;
+ taskid = le16_to_cpu(rpf->dflt_rep.taskid);
+ switch (req_desc_type) {
+ case LEAPRAID_RPY_DESC_FLG_FP_SCSI_IO_SUCCESS:
+ case LEAPRAID_RPY_DESC_FLG_SCSI_IO_SUCCESS:
+ if (taskid <= adapter->shost->can_queue ||
+ taskid == adapter->driver_cmds.driver_scsiio_cmd.taskid) {
+ leapraid_scsiio_done(adapter, taskid, msix_idx, 0);
+ } else {
+ if (leapraid_driver_cmds_done(adapter, taskid,
+ msix_idx, 0,
+ leapraid_get_cb_idx(adapter,
+ taskid)))
+ leapraid_free_taskid(adapter, taskid);
+ }
+ break;
+ case LEAPRAID_RPY_DESC_FLG_ADDRESS_REPLY:
+ rep = le32_to_cpu(rpf->addr_rep.rep_frame_addr);
+ if (rep > ((u32)adapter->mem_desc.rep_msg_dma +
+ adapter->adapter_attr.rep_msg_qd * LEAPRAID_REPLY_SIEZ) ||
+ rep < ((u32)adapter->mem_desc.rep_msg_dma))
+ rep = 0;
+ if (taskid) {
+ if (taskid <= adapter->shost->can_queue ||
+ taskid == adapter->driver_cmds.driver_scsiio_cmd.taskid) {
+ leapraid_scsiio_done(adapter, taskid,
+ msix_idx, rep);
+ } else {
+ if (leapraid_driver_cmds_done(adapter, taskid,
+ msix_idx, rep,
+ leapraid_get_cb_idx(adapter,
+ taskid)))
+ leapraid_free_taskid(adapter, taskid);
+ }
+ } else {
+ leapraid_handle_async_event(adapter, msix_idx, rep);
+ }
+
+ if (rep) {
+ adapter->rep_msg_host_idx =
+ (adapter->rep_msg_host_idx ==
+ (adapter->adapter_attr.rep_msg_qd - 1)) ?
+ 0 : adapter->rep_msg_host_idx + 1;
+ adapter->mem_desc.rep_msg_addr[adapter->rep_msg_host_idx] =
+ cpu_to_le32(rep);
+ wmb(); /* Make sure that all write ops are in order */
+ writel(adapter->rep_msg_host_idx,
+ &adapter->iomem_base->rep_msg_host_idx);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
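+/*
+ * Drain a reply queue from hard-irq or poll context: consume descriptors
+ * until an unused entry is reached, then write the new host index back
+ * to the adapter. Returns the number of completed commands.
+ */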
+int leapraid_rep_queue_handler(struct leapraid_rq *rq)
+{
+ struct leapraid_adapter *adapter = rq->adapter;
+ union leapraid_rep_desc_union *rep_desc;
+ u8 req_desc_type;
+	int finish_cmds;
+ u8 msix_idx;
+
+ msix_idx = rq->msix_idx;
+ finish_cmds = 0;
+ if (!atomic_add_unless(&rq->busy, LEAPRAID_BUSY_LIMIT,
+ LEAPRAID_BUSY_LIMIT))
+ return finish_cmds;
+
+ rep_desc = &rq->rep_desc[rq->rep_post_host_idx];
+ req_desc_type = rep_desc->dflt_rep.rep_flg &
+ LEAPRAID_RPY_DESC_FLG_TYPE_MASK;
+ if (req_desc_type == LEAPRAID_RPY_DESC_FLG_UNUSED) {
+ atomic_dec(&rq->busy);
+ return finish_cmds;
+ }
+
+ for (;;) {
+ if (rep_desc->u.low == UINT_MAX ||
+ rep_desc->u.high == UINT_MAX)
+ break;
+
+ leapraid_request_descript_handler(adapter, rep_desc,
+ req_desc_type, msix_idx);
+		dev_dbg(&adapter->pdev->dev,
+			"LEAPRAID_SCSIIO: handled desc taskid %d, msix %d\n",
+			le16_to_cpu(rep_desc->dflt_rep.taskid), msix_idx);
+ rep_desc->words = cpu_to_le64(ULLONG_MAX);
+ rq->rep_post_host_idx =
+ (rq->rep_post_host_idx ==
+ (adapter->adapter_attr.rep_desc_qd -
+ LEAPRAID_BUSY_LIMIT)) ?
+ 0 : rq->rep_post_host_idx + 1;
+ req_desc_type =
+ rq->rep_desc[rq->rep_post_host_idx].dflt_rep.rep_flg &
+ LEAPRAID_RPY_DESC_FLG_TYPE_MASK;
+ finish_cmds++;
+ if (req_desc_type == LEAPRAID_RPY_DESC_FLG_UNUSED)
+ break;
+ rep_desc = rq->rep_desc + rq->rep_post_host_idx;
+ }
+
+ if (!finish_cmds) {
+ atomic_dec(&rq->busy);
+ return finish_cmds;
+ }
+
+ wmb(); /* Make sure that all write ops are in order */
+ writel(rq->rep_post_host_idx | ((msix_idx & LEAPRAID_MSIX_GROUP_MASK) <<
+ LEAPRAID_RPHI_MSIX_IDX_SHIFT),
+ &adapter->iomem_base->rep_post_reg_idx[msix_idx /
+ LEAPRAID_MSIX_GROUP_SIZE].idx);
+ atomic_dec(&rq->busy);
+ return finish_cmds;
+}
+
+static irqreturn_t leapraid_irq_handler(int irq, void *bus_id)
+{
+ struct leapraid_rq *rq = bus_id;
+ struct leapraid_adapter *adapter = rq->adapter;
+
+ dev_dbg(&adapter->pdev->dev,
+		"LEAPRAID_SCSIIO: received interrupt, irq %d msix %d\n",
+ irq, rq->msix_idx);
+
+ if (adapter->mask_int)
+ return IRQ_NONE;
+
+ return ((leapraid_rep_queue_handler(rq) > 0) ?
+ IRQ_HANDLED : IRQ_NONE);
+}
+
+void leapraid_sync_irqs(struct leapraid_adapter *adapter, bool poll)
+{
+ struct leapraid_int_rq *int_rq;
+ struct leapraid_blk_mq_poll_rq *blk_mq_poll_rq;
+ unsigned int i;
+
+ if (!adapter->notification_desc.msix_enable)
+ return;
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering)
+ return;
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qdex; i++) {
+ int_rq = &adapter->notification_desc.int_rqs[i];
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering)
+ return;
+
+ if (int_rq->rq.msix_idx == 0)
+ continue;
+
+ synchronize_irq(pci_irq_vector(adapter->pdev, int_rq->rq.msix_idx));
+ if (poll)
+ leapraid_rep_queue_handler(&int_rq->rq);
+ }
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qcnt; i++) {
+ blk_mq_poll_rq =
+ &adapter->notification_desc.blk_mq_poll_rqs[i];
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering)
+ return;
+
+ if (blk_mq_poll_rq->rq.msix_idx == 0)
+ continue;
+
+ leapraid_rep_queue_handler(&blk_mq_poll_rq->rq);
+ }
+}
+
+void leapraid_mq_polling_pause(struct leapraid_adapter *adapter)
+{
+ int iopoll_q_count =
+ adapter->adapter_attr.rq_cnt -
+ adapter->notification_desc.iopoll_qdex;
+ int qid;
+
+ for (qid = 0; qid < iopoll_q_count; qid++)
+ atomic_set(&adapter->notification_desc.blk_mq_poll_rqs[qid].pause, 1);
+
+ for (qid = 0; qid < iopoll_q_count; qid++) {
+ while (atomic_read(&adapter->notification_desc.blk_mq_poll_rqs[qid].busy)) {
+ cpu_relax();
+ udelay(LEAPRAID_IO_POLL_DELAY_US);
+ }
+ }
+}
+
+void leapraid_mq_polling_resume(struct leapraid_adapter *adapter)
+{
+ int iopoll_q_count =
+ adapter->adapter_attr.rq_cnt -
+ adapter->notification_desc.iopoll_qdex;
+ int qid;
+
+ for (qid = 0; qid < iopoll_q_count; qid++)
+ atomic_set(&adapter->notification_desc.blk_mq_poll_rqs[qid].pause, 0);
+}
+
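+/*
+ * Write the magic key sequence to the write-sequence register until the
+ * host diagnostic register reports write access, giving up after
+ * LEAPRAID_UNLOCK_RETRY_LIMIT attempts.
+ */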
+static int leapraid_unlock_host_diag(struct leapraid_adapter *adapter,
+ u32 *host_diag)
+{
+ const u32 unlock_seq[] = { 0x0, 0xF, 0x4, 0xB, 0x2, 0x7, 0xD };
+ const int max_retries = LEAPRAID_UNLOCK_RETRY_LIMIT;
+ int retry = 0;
+ unsigned int i;
+
+ *host_diag = 0;
+ while (retry++ <= max_retries) {
+ for (i = 0; i < ARRAY_SIZE(unlock_seq); i++)
+ writel(unlock_seq[i], &adapter->iomem_base->ws);
+
+ msleep(LEAPRAID_UNLOCK_SLEEP_MS);
+
+ *host_diag = leapraid_readl(&adapter->iomem_base->host_diag);
+ if (*host_diag & LEAPRAID_DIAG_WRITE_ENABLE)
+ return 0;
+ }
+
+	dev_err(&adapter->pdev->dev, "host diag unlock timed out!\n");
+ return -EFAULT;
+}
+
+static int leapraid_host_diag_reset(struct leapraid_adapter *adapter)
+{
+ u32 host_diag;
+ u32 cnt;
+
+ dev_info(&adapter->pdev->dev, "entering host diag reset!\n");
+ pci_cfg_access_lock(adapter->pdev);
+
+ mutex_lock(&adapter->reset_desc.host_diag_mutex);
+ if (leapraid_unlock_host_diag(adapter, &host_diag))
+ goto out;
+
+ writel(host_diag | LEAPRAID_DIAG_RESET,
+ &adapter->iomem_base->host_diag);
+
+ msleep(LEAPRAID_MSLEEP_NORMAL_MS);
+ for (cnt = 0; cnt < LEAPRAID_RESET_LOOP_COUNT_DEFAULT; cnt++) {
+ host_diag = leapraid_readl(&adapter->iomem_base->host_diag);
+ if (host_diag == LEAPRAID_INVALID_HOST_DIAG_VAL)
+ goto out;
+
+ if (!(host_diag & LEAPRAID_DIAG_RESET))
+ break;
+
+ msleep(LEAPRAID_RESET_POLL_INTERVAL_MS);
+ }
+
+ writel(host_diag & ~LEAPRAID_DIAG_HOLD_ADAPTER_RESET,
+ &adapter->iomem_base->host_diag);
+ writel(0x0, &adapter->iomem_base->ws);
+	mutex_unlock(&adapter->reset_desc.host_diag_mutex);
+	if (!leapraid_wait_adapter_ready(adapter))
+		goto out_unlocked;
+
+	pci_cfg_access_unlock(adapter->pdev);
+	dev_info(&adapter->pdev->dev, "host diag reset succeeded!\n");
+	return 0;
+out:
+	mutex_unlock(&adapter->reset_desc.host_diag_mutex);
+out_unlocked:
+	pci_cfg_access_unlock(adapter->pdev);
+	dev_info(&adapter->pdev->dev, "host diag reset failed!\n");
+	return -EFAULT;
+}
+
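+/* Return the index of the entry matching @port_id and @sas_addr, or -1. */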
+static int leapraid_find_matching_port(
+ struct leapraid_card_port *card_port_table,
+ u8 count, u8 port_id, u64 sas_addr)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (card_port_table[i].port_id == port_id &&
+ card_port_table[i].sas_address == sas_addr)
+ return i;
+ }
+ return -1;
+}
+
+static u8 leapraid_fill_card_port_table(
+ struct leapraid_adapter *adapter,
+ struct leapraid_sas_io_unit_p0 *sas_iounit_p0,
+ struct leapraid_card_port *new_card_port_table)
+{
+ u8 port_entry_num = 0, port_id;
+ u16 attached_hdl;
+ u64 attached_sas_addr;
+ int i, idx;
+
+ for (i = 0; i < adapter->dev_topo.card.phys_num; i++) {
+ if ((sas_iounit_p0->phy_info[i].neg_link_rate >> 4)
+ < LEAPRAID_SAS_NEG_LINK_RATE_1_5)
+ continue;
+
+ attached_hdl =
+ le16_to_cpu(sas_iounit_p0->phy_info[i].attached_dev_hdl);
+ if (leapraid_get_sas_address(adapter,
+ attached_hdl,
+ &attached_sas_addr) != 0)
+ continue;
+
+ port_id = sas_iounit_p0->phy_info[i].port;
+
+ idx = leapraid_find_matching_port(new_card_port_table,
+ port_entry_num,
+ port_id,
+ attached_sas_addr);
+ if (idx >= 0) {
+ new_card_port_table[idx].phy_mask |= BIT(i);
+ } else {
+ new_card_port_table[port_entry_num].port_id = port_id;
+ new_card_port_table[port_entry_num].phy_mask = BIT(i);
+ new_card_port_table[port_entry_num].sas_address =
+ attached_sas_addr;
+ port_entry_num++;
+ }
+ }
+
+ return port_entry_num;
+}
+
+static u8 leapraid_set_new_card_port_table_after_reset(
+ struct leapraid_adapter *adapter,
+ struct leapraid_card_port *new_card_port_table)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_io_unit_p0 *sas_iounit_p0 = NULL;
+ u8 port_entry_num = 0;
+ u16 sz;
+
+ sz = offsetof(struct leapraid_sas_io_unit_p0, phy_info) +
+ (adapter->dev_topo.card.phys_num *
+ sizeof(struct leapraid_sas_io_unit0_phy_info));
+ sas_iounit_p0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_p0)
+ return port_entry_num;
+
+ cfgp1.size = sz;
+ if ((leapraid_op_config_page(adapter, sas_iounit_p0, cfgp1, cfgp2,
+ GET_SAS_IOUNIT_PG0)) != 0)
+ goto out;
+
+ port_entry_num = leapraid_fill_card_port_table(adapter,
+ sas_iounit_p0,
+ new_card_port_table);
+out:
+ kfree(sas_iounit_p0);
+ return port_entry_num;
+}
+
+static void leapraid_update_existing_port(struct leapraid_adapter *adapter,
+ struct leapraid_card_port *new_table,
+ int entry_idx, int port_entry_num)
+{
+ struct leapraid_card_port *matched_card_port = NULL;
+ int matched_code;
+ int count = 0, lcount = 0;
+ u64 sas_addr;
+ int i;
+
+ matched_code = leapraid_check_card_port(adapter,
+ &new_table[entry_idx],
+ &matched_card_port,
+ &count);
+
+ if (!matched_card_port)
+ return;
+
+ if (matched_code == SAME_PORT_WITH_PARTIALLY_CHANGED_PHYS ||
+ matched_code == SAME_ADDR_WITH_PARTIALLY_CHANGED_PHYS) {
+ leapraid_add_or_del_phys_from_existing_port(adapter,
+ matched_card_port,
+ new_table,
+ entry_idx,
+ port_entry_num);
+ } else if (matched_code == SAME_ADDR_ONLY) {
+ sas_addr = new_table[entry_idx].sas_address;
+ for (i = 0; i < port_entry_num; i++) {
+ if (new_table[i].sas_address == sas_addr)
+ lcount++;
+ }
+ if (count > 1 || lcount > 1)
+ return;
+
+ leapraid_add_or_del_phys_from_existing_port(adapter,
+ matched_card_port,
+ new_table,
+ entry_idx,
+ port_entry_num);
+ }
+
+ if (matched_card_port->port_id != new_table[entry_idx].port_id)
+ matched_card_port->port_id = new_table[entry_idx].port_id;
+
+ matched_card_port->flg &= ~LEAPRAID_CARD_PORT_FLG_DIRTY;
+ matched_card_port->phy_mask = new_table[entry_idx].phy_mask;
+}
+
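+/*
+ * Rebuild the host port table after a reset: mark every known port
+ * dirty, read the fresh phy layout from SAS IO unit page 0 and reconcile
+ * each discovered port with the existing list.
+ */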
+static void leapraid_update_card_port_after_reset(
+ struct leapraid_adapter *adapter)
+{
+ struct leapraid_card_port *new_card_port_table;
+ struct leapraid_card_port *matched_card_port = NULL;
+ u8 port_entry_num = 0;
+ u8 nr_phys;
+ int i;
+
+ if (leapraid_get_adapter_phys(adapter, &nr_phys) || !nr_phys)
+ return;
+
+ adapter->dev_topo.card.phys_num = nr_phys;
+ new_card_port_table = kcalloc(adapter->dev_topo.card.phys_num,
+ sizeof(struct leapraid_card_port),
+ GFP_KERNEL);
+ if (!new_card_port_table)
+ return;
+
+ port_entry_num =
+ leapraid_set_new_card_port_table_after_reset(adapter,
+ new_card_port_table);
+	if (!port_entry_num) {
+		kfree(new_card_port_table);
+		return;
+	}
+
+ list_for_each_entry(matched_card_port,
+ &adapter->dev_topo.card_port_list, list) {
+ matched_card_port->flg |= LEAPRAID_CARD_PORT_FLG_DIRTY;
+ }
+
+ matched_card_port = NULL;
+	for (i = 0; i < port_entry_num; i++)
+		leapraid_update_existing_port(adapter,
+					      new_card_port_table,
+					      i, port_entry_num);
+	kfree(new_card_port_table);
+}
+
+static bool leapraid_is_valid_vphy(
+ struct leapraid_adapter *adapter,
+ struct leapraid_sas_io_unit_p0 *sas_io_unit_p0,
+ int phy_index)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_phy_p0 phy_p0;
+
+ if ((sas_io_unit_p0->phy_info[phy_index].neg_link_rate >> 4) <
+ LEAPRAID_SAS_NEG_LINK_RATE_1_5)
+ return false;
+
+ if (!(le32_to_cpu(sas_io_unit_p0->phy_info[phy_index].controller_phy_dev_info) &
+ LEAPRAID_DEVTYP_SEP))
+ return false;
+
+ cfgp1.phy_number = phy_index;
+ if (leapraid_op_config_page(adapter, &phy_p0, cfgp1, cfgp2,
+ GET_PHY_PG0))
+ return false;
+
+ if (!(le32_to_cpu(phy_p0.phy_info) & LEAPRAID_SAS_PHYINFO_VPHY))
+ return false;
+
+ return true;
+}
+
+static void leapraid_update_vphy_binding(struct leapraid_adapter *adapter,
+ struct leapraid_card_port *card_port,
+ struct leapraid_vphy *vphy,
+ int phy_index, u8 may_new_port_id,
+ u64 attached_sas_addr)
+{
+ struct leapraid_card_port *may_new_card_port;
+ struct leapraid_sas_dev *sas_dev;
+
+ may_new_card_port = leapraid_get_port_by_id(adapter,
+ may_new_port_id,
+ true);
+ if (!may_new_card_port) {
+ may_new_card_port = kzalloc(sizeof(*may_new_card_port),
+ GFP_KERNEL);
+ if (!may_new_card_port)
+ return;
+ may_new_card_port->port_id = may_new_port_id;
+ dev_err(&adapter->pdev->dev,
+ "%s: new card port %p added, port=%d\n",
+ __func__, may_new_card_port, may_new_port_id);
+ list_add_tail(&may_new_card_port->list,
+ &adapter->dev_topo.card_port_list);
+ }
+
+ if (card_port != may_new_card_port) {
+ if (!may_new_card_port->vphys_mask)
+ INIT_LIST_HEAD(&may_new_card_port->vphys_list);
+ may_new_card_port->vphys_mask |= BIT(phy_index);
+ card_port->vphys_mask &= ~BIT(phy_index);
+ list_move(&vphy->list, &may_new_card_port->vphys_list);
+
+		sas_dev = leapraid_get_sas_dev_by_addr(adapter,
+						       attached_sas_addr,
+						       card_port);
+		if (sas_dev) {
+			sas_dev->card_port = may_new_card_port;
+			leapraid_sdev_put(sas_dev);
+		}
+ }
+
+ if (may_new_card_port->flg & LEAPRAID_CARD_PORT_FLG_DIRTY) {
+ may_new_card_port->sas_address = 0;
+ may_new_card_port->phy_mask = 0;
+ may_new_card_port->flg &= ~LEAPRAID_CARD_PORT_FLG_DIRTY;
+ }
+ vphy->flg &= ~LEAPRAID_VPHY_FLG_DIRTY;
+}
+
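+/*
+ * Re-bind virtual phys after a reset: mark all vphys dirty, then match
+ * them against the refreshed SAS IO unit page 0 by attached SAS address
+ * and move each one to the card port the firmware now reports.
+ */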
+static void leapraid_update_vphys_after_reset(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_io_unit_p0 *sas_iounit_p0 = NULL;
+ struct leapraid_card_port *card_port, *card_port_next;
+ struct leapraid_vphy *vphy, *vphy_next;
+ u64 attached_sas_addr;
+ u16 sz;
+ u16 attached_hdl;
+ bool found = false;
+ u8 port_id;
+ int i;
+
+ list_for_each_entry_safe(card_port, card_port_next,
+ &adapter->dev_topo.card_port_list, list) {
+ if (!card_port->vphys_mask)
+ continue;
+
+ list_for_each_entry_safe(vphy, vphy_next,
+ &card_port->vphys_list, list) {
+ vphy->flg |= LEAPRAID_VPHY_FLG_DIRTY;
+ }
+ }
+
+ sz = offsetof(struct leapraid_sas_io_unit_p0, phy_info) +
+ (adapter->dev_topo.card.phys_num *
+ sizeof(struct leapraid_sas_io_unit0_phy_info));
+ sas_iounit_p0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_p0)
+ return;
+
+ cfgp1.size = sz;
+ if ((leapraid_op_config_page(adapter, sas_iounit_p0, cfgp1, cfgp2,
+ GET_SAS_IOUNIT_PG0)) != 0)
+ goto out;
+
+ for (i = 0; i < adapter->dev_topo.card.phys_num; i++) {
+ if (!leapraid_is_valid_vphy(adapter, sas_iounit_p0, i))
+ continue;
+
+ attached_hdl =
+ le16_to_cpu(sas_iounit_p0->phy_info[i].attached_dev_hdl);
+ if (leapraid_get_sas_address(adapter, attached_hdl,
+ &attached_sas_addr) != 0)
+ continue;
+
+ found = false;
+ card_port = NULL;
+ card_port_next = NULL;
+ list_for_each_entry_safe(card_port, card_port_next,
+ &adapter->dev_topo.card_port_list,
+ list) {
+ if (!card_port->vphys_mask)
+ continue;
+
+ list_for_each_entry_safe(vphy, vphy_next,
+ &card_port->vphys_list,
+ list) {
+ if (!(vphy->flg & LEAPRAID_VPHY_FLG_DIRTY))
+ continue;
+
+ if (vphy->sas_address != attached_sas_addr)
+ continue;
+
+ if (!(vphy->phy_mask & BIT(i)))
+ vphy->phy_mask = BIT(i);
+
+ port_id = sas_iounit_p0->phy_info[i].port;
+
+ leapraid_update_vphy_binding(adapter,
+ card_port,
+ vphy,
+ i,
+ port_id,
+ attached_sas_addr);
+
+ found = true;
+ break;
+ }
+ if (found)
+ break;
+ }
+ }
+out:
+ kfree(sas_iounit_p0);
+}
+
+static void leapraid_mark_all_dev_deleted(struct leapraid_adapter *adapter)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (sdev_priv && sdev_priv->starget_priv)
+ sdev_priv->starget_priv->deleted = true;
+ }
+}
+
+static void leapraid_free_enc_list(struct leapraid_adapter *adapter)
+{
+ struct leapraid_enc_node *enc_dev, *enc_dev_next;
+
+ list_for_each_entry_safe(enc_dev, enc_dev_next,
+ &adapter->dev_topo.enc_list,
+ list) {
+ list_del(&enc_dev->list);
+ kfree(enc_dev);
+ }
+}
+
+static void leapraid_rebuild_enc_list_after_reset(
+ struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_enc_node *enc_node;
+ u16 enc_hdl;
+ int rc;
+
+ leapraid_free_enc_list(adapter);
+
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (enc_hdl = 0xFFFF; ; enc_hdl = le16_to_cpu(enc_node->pg0.enc_hdl)) {
+ enc_node = kzalloc(sizeof(*enc_node),
+ GFP_KERNEL);
+ if (!enc_node)
+ return;
+
+ cfgp2.handle = enc_hdl;
+ rc = leapraid_op_config_page(adapter, &enc_node->pg0, cfgp1,
+ cfgp2, GET_SAS_ENCLOSURE_PG0);
+ if (rc) {
+ kfree(enc_node);
+ return;
+ }
+
+ list_add_tail(&enc_node->list, &adapter->dev_topo.enc_list);
+ }
+}
+
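+/*
+ * Match one SAS device page 0 entry against the driver's device list by
+ * (sas_address, slot, card_port).  A match is flagged as responding so
+ * it survives the post-reset cleanup, and its enclosure data and device
+ * handle are refreshed in case the firmware reassigned them.
+ */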
+static void leapraid_mark_resp_sas_dev(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev_p0 *sas_dev_p0)
+{
+ struct leapraid_starget_priv *starget_priv = NULL;
+ struct leapraid_enc_node *enc_node = NULL;
+ struct leapraid_card_port *card_port;
+ struct leapraid_sas_dev *sas_dev;
+ struct scsi_target *starget;
+ unsigned long flags;
+
+ card_port = leapraid_get_port_by_id(adapter, sas_dev_p0->physical_port,
+ false);
+ if (sas_dev_p0->enc_hdl) {
+ enc_node = leapraid_enc_find_by_hdl(adapter,
+ le16_to_cpu(
+ sas_dev_p0->enc_hdl));
+ if (!enc_node)
+ dev_info(&adapter->pdev->dev,
+ "enc hdl 0x%04x has no matched enc dev\n",
+ le16_to_cpu(sas_dev_p0->enc_hdl));
+ }
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_list, list) {
+ if (sas_dev->sas_addr == le64_to_cpu(sas_dev_p0->sas_address) &&
+ sas_dev->slot == le16_to_cpu(sas_dev_p0->slot) &&
+ sas_dev->card_port == card_port) {
+ sas_dev->resp = true;
+ starget = sas_dev->starget;
+ if (starget && starget->hostdata) {
+ starget_priv = starget->hostdata;
+ starget_priv->tm_busy = false;
+ starget_priv->deleted = false;
+ } else {
+ starget_priv = NULL;
+ }
+
+ if (starget) {
+ starget_printk(KERN_INFO, starget,
+ "dev: hdl=0x%04x, sas addr=0x%016llx, port_id=%d\n",
+ sas_dev->hdl,
+ (unsigned long long)sas_dev->sas_addr,
+ sas_dev->card_port->port_id);
+ if (sas_dev->enc_hdl != 0)
+ starget_printk(KERN_INFO, starget,
+ "enc info: enc_lid=0x%016llx, slot=%d\n",
+ (unsigned long long)sas_dev->enc_lid,
+ sas_dev->slot);
+ }
+
+ if (le16_to_cpu(sas_dev_p0->flg) &
+ LEAPRAID_SAS_DEV_P0_FLG_ENC_LEVEL_VALID) {
+ sas_dev->enc_level = sas_dev_p0->enc_level;
+ memcpy(sas_dev->connector_name,
+ sas_dev_p0->connector_name, 4);
+ sas_dev->connector_name[4] = '\0';
+ } else {
+ sas_dev->enc_level = 0;
+ sas_dev->connector_name[0] = '\0';
+ }
+
+ sas_dev->enc_hdl =
+ le16_to_cpu(sas_dev_p0->enc_hdl);
+ if (enc_node) {
+ sas_dev->enc_lid =
+ le64_to_cpu(enc_node->pg0.enc_lid);
+ }
+ if (sas_dev->hdl == le16_to_cpu(sas_dev_p0->dev_hdl))
+ goto out;
+
+ dev_info(&adapter->pdev->dev,
+ "hdl changed: 0x%04x -> 0x%04x\n",
+				 sas_dev->hdl, le16_to_cpu(sas_dev_p0->dev_hdl));
+ sas_dev->hdl = le16_to_cpu(sas_dev_p0->dev_hdl);
+ if (starget_priv)
+ starget_priv->hdl =
+ le16_to_cpu(sas_dev_p0->dev_hdl);
+ goto out;
+ }
+ }
+out:
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+}
+
+static void leapraid_search_resp_sas_dev(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_dev_p0 sas_dev_p0;
+ u32 device_info;
+
+ dev_info(&adapter->pdev->dev,
+ "begin searching for sas end devices\n");
+
+ if (list_empty(&adapter->dev_topo.sas_dev_list))
+ goto out;
+
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (cfgp2.handle = 0xFFFF;
+ !leapraid_op_config_page(adapter, &sas_dev_p0,
+ cfgp1, cfgp2, GET_SAS_DEVICE_PG0);
+ cfgp2.handle = le16_to_cpu(sas_dev_p0.dev_hdl)) {
+ device_info = le32_to_cpu(sas_dev_p0.dev_info);
+ if (!(leapraid_is_end_dev(device_info)))
+ continue;
+
+ leapraid_mark_resp_sas_dev(adapter, &sas_dev_p0);
+ }
+out:
+ dev_info(&adapter->pdev->dev,
+ "sas end devices searching complete\n");
+}
+
+static void leapraid_mark_resp_raid_volume(struct leapraid_adapter *adapter,
+ u64 wwid, u16 hdl)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_raid_volume *raid_volume;
+ struct scsi_target *starget;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ list_for_each_entry(raid_volume,
+ &adapter->dev_topo.raid_volume_list, list) {
+ if (raid_volume->wwid == wwid && raid_volume->starget) {
+ starget = raid_volume->starget;
+ if (starget && starget->hostdata) {
+ starget_priv = starget->hostdata;
+ starget_priv->deleted = false;
+ } else {
+ starget_priv = NULL;
+ }
+
+ raid_volume->resp = true;
+ spin_unlock_irqrestore(
+ &adapter->dev_topo.raid_volume_lock,
+ flags);
+
+ starget_printk(
+ KERN_INFO, raid_volume->starget,
+ "raid volume: hdl=0x%04x, wwid=0x%016llx\n",
+ hdl, (unsigned long long)raid_volume->wwid);
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock,
+ flags);
+ if (raid_volume->hdl == hdl) {
+ spin_unlock_irqrestore(
+ &adapter->dev_topo.raid_volume_lock,
+ flags);
+ return;
+ }
+
+ dev_info(&adapter->pdev->dev,
+ "hdl changed: 0x%04x -> 0x%04x\n",
+ raid_volume->hdl, hdl);
+
+ raid_volume->hdl = hdl;
+ if (starget_priv)
+ starget_priv->hdl = hdl;
+ spin_unlock_irqrestore(
+ &adapter->dev_topo.raid_volume_lock,
+ flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+}
+
+static void leapraid_search_resp_raid_volume(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_1 cfgp1_extra = {0};
+ union cfg_param_2 cfgp2 = {0};
+ union cfg_param_2 cfgp2_extra = {0};
+ struct leapraid_raidvol_p1 raidvol_p1;
+ struct leapraid_raidvol_p0 raidvol_p0;
+ struct leapraid_raidpd_p0 raidpd_p0;
+ u16 hdl;
+ u8 phys_disk_num;
+
+ if (!adapter->adapter_attr.raid_support)
+ return;
+
+ dev_info(&adapter->pdev->dev,
+ "begin searching for raid volumes\n");
+
+ if (list_empty(&adapter->dev_topo.raid_volume_list))
+ goto out;
+
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (hdl = 0xFFFF, cfgp2.handle = hdl;
+ !leapraid_op_config_page(adapter, &raidvol_p1, cfgp1, cfgp2,
+ GET_RAID_VOLUME_PG1);
+ cfgp2.handle = hdl) {
+ hdl = le16_to_cpu(raidvol_p1.dev_hdl);
+ cfgp1_extra.size = sizeof(struct leapraid_raidvol_p0);
+ cfgp2_extra.handle = hdl;
+ if (leapraid_op_config_page(adapter, &raidvol_p0, cfgp1_extra,
+ cfgp2_extra, GET_RAID_VOLUME_PG0))
+ continue;
+
+ if (raidvol_p0.volume_state == LEAPRAID_VOL_STATE_OPTIMAL ||
+ raidvol_p0.volume_state == LEAPRAID_VOL_STATE_ONLINE ||
+ raidvol_p0.volume_state == LEAPRAID_VOL_STATE_DEGRADED)
+ leapraid_mark_resp_raid_volume(
+ adapter,
+ le64_to_cpu(raidvol_p1.wwid),
+ hdl);
+ }
+
+ memset(adapter->dev_topo.pd_hdls, 0, adapter->dev_topo.pd_hdls_sz);
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (phys_disk_num = 0xFF, cfgp2.form_specific = phys_disk_num;
+ !leapraid_op_config_page(adapter, &raidpd_p0, cfgp1, cfgp2,
+ GET_PHY_DISK_PG0);
+ cfgp2.form_specific = phys_disk_num) {
+ phys_disk_num = raidpd_p0.phys_disk_num;
+ hdl = le16_to_cpu(raidpd_p0.dev_hdl);
+ set_bit(hdl, (unsigned long *)adapter->dev_topo.pd_hdls);
+ }
+out:
+ dev_info(&adapter->pdev->dev,
+ "raid volumes searching complete\n");
+}
+
+static void leapraid_mark_resp_exp(struct leapraid_adapter *adapter,
+ struct leapraid_exp_p0 *exp_pg0)
+{
+ struct leapraid_enc_node *enc_node = NULL;
+ struct leapraid_topo_node *topo_node_exp;
+ u16 enc_hdl = le16_to_cpu(exp_pg0->enc_hdl);
+ u64 sas_address = le64_to_cpu(exp_pg0->sas_address);
+ u16 hdl = le16_to_cpu(exp_pg0->dev_hdl);
+ u8 port_id = exp_pg0->physical_port;
+ struct leapraid_card_port *card_port = leapraid_get_port_by_id(adapter,
+ port_id,
+ false);
+ unsigned long flags;
+ int i;
+
+ if (enc_hdl)
+ enc_node = leapraid_enc_find_by_hdl(adapter, enc_hdl);
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ list_for_each_entry(topo_node_exp, &adapter->dev_topo.exp_list, list) {
+ if (topo_node_exp->sas_address != sas_address ||
+ topo_node_exp->card_port != card_port)
+ continue;
+
+ topo_node_exp->resp = true;
+ if (enc_node) {
+ topo_node_exp->enc_lid =
+ le64_to_cpu(enc_node->pg0.enc_lid);
+ topo_node_exp->enc_hdl = le16_to_cpu(exp_pg0->enc_hdl);
+ }
+ if (topo_node_exp->hdl == hdl)
+ goto out;
+
+ dev_info(&adapter->pdev->dev,
+ "hdl changed: 0x%04x -> 0x%04x\n",
+ topo_node_exp->hdl, hdl);
+ topo_node_exp->hdl = hdl;
+ for (i = 0; i < topo_node_exp->phys_num; i++)
+ topo_node_exp->card_phy[i].hdl = hdl;
+ goto out;
+ }
+out:
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+}
+
+static void leapraid_search_resp_exp(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_exp_p0 exp_p0;
+ u64 sas_address;
+ u16 hdl;
+ u8 port;
+
+ dev_info(&adapter->pdev->dev,
+ "begin searching for expanders\n");
+ if (list_empty(&adapter->dev_topo.exp_list))
+ goto out;
+
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (hdl = 0xFFFF, cfgp2.handle = hdl;
+ !leapraid_op_config_page(adapter, &exp_p0, cfgp1, cfgp2,
+ GET_SAS_EXPANDER_PG0);
+ cfgp2.handle = hdl) {
+ hdl = le16_to_cpu(exp_p0.dev_hdl);
+ sas_address = le64_to_cpu(exp_p0.sas_address);
+ port = exp_p0.physical_port;
+
+ dev_info(&adapter->pdev->dev,
+			 "exp detected: hdl=0x%04x, sas=0x%016llx, port=%u\n",
+ hdl, (unsigned long long)sas_address,
+ ((adapter->adapter_attr.enable_mp) ? (port) :
+ (LEAPRAID_DISABLE_MP_PORT_ID)));
+ leapraid_mark_resp_exp(adapter, &exp_p0);
+ }
+out:
+ dev_info(&adapter->pdev->dev,
+ "expander searching complete\n");
+}
+
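+/*
+ * Count SCSI commands still outstanding in the task trackers and give
+ * them up to 10 seconds to complete before the reset proceeds.  Bail
+ * out early if the PCI device is gone or the adapter has left the
+ * OPERATIONAL doorbell state.
+ */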
+void leapraid_wait_cmds_done(struct leapraid_adapter *adapter)
+{
+ struct leapraid_io_req_tracker *io_req_tracker;
+ unsigned long flags;
+ u16 i;
+
+ adapter->reset_desc.pending_io_cnt = 0;
+ if (!leapraid_pci_active(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s %s: pci error, device reset or unplugged!\n",
+ adapter->adapter_attr.name, __func__);
+ return;
+ }
+
+ if (leapraid_get_adapter_state(adapter) != LEAPRAID_DB_OPERATIONAL)
+ return;
+
+ spin_lock_irqsave(&adapter->dynamic_task_desc.task_lock, flags);
+ for (i = 1; i <= adapter->shost->can_queue; i++) {
+ io_req_tracker = leapraid_get_io_tracker_from_taskid(adapter,
+ i);
+ if (io_req_tracker && io_req_tracker->taskid != 0)
+ if (io_req_tracker->scmd)
+ adapter->reset_desc.pending_io_cnt++;
+ }
+ spin_unlock_irqrestore(&adapter->dynamic_task_desc.task_lock, flags);
+
+ if (!adapter->reset_desc.pending_io_cnt)
+ return;
+
+ wait_event_timeout(adapter->reset_desc.reset_wait_queue,
+ adapter->reset_desc.pending_io_cnt == 0, 10 * HZ);
+}
+
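+/*
+ * Serialized hard-reset entry point.  A second caller that loses the
+ * trylock simply waits for the in-flight recovery and returns its
+ * result.  The winner quiesces I/O, re-initializes the adapter and then
+ * rebuilds the topology by marking everything deleted and re-scanning
+ * for responding SAS devices, RAID volumes and expanders.
+ */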
+int leapraid_hard_reset_handler(struct leapraid_adapter *adapter,
+ enum reset_type type)
+{
+ unsigned long flags;
+ int rc;
+
+ if (!mutex_trylock(&adapter->reset_desc.adapter_reset_mutex)) {
+ do {
+ ssleep(1);
+ } while (adapter->access_ctrl.shost_recovering);
+ return adapter->reset_desc.adapter_reset_results;
+ }
+
+ if (!leapraid_pci_active(adapter)) {
+ if (leapraid_pci_removed(adapter)) {
+ dev_info(&adapter->pdev->dev,
+ "pci_dev removed, pausing polling and cleaning cmds\n");
+ leapraid_mq_polling_pause(adapter);
+ leapraid_clean_active_scsi_cmds(adapter);
+ leapraid_mq_polling_resume(adapter);
+ }
+ rc = 0;
+ goto exit_pci_unavailable;
+ }
+
+ dev_info(&adapter->pdev->dev, "starting hard reset\n");
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ adapter->access_ctrl.shost_recovering = true;
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+
+ leapraid_wait_cmds_done(adapter);
+ leapraid_mask_int(adapter);
+ leapraid_mq_polling_pause(adapter);
+ rc = leapraid_make_adapter_ready(adapter, type);
+ if (rc) {
+ dev_err(&adapter->pdev->dev,
+ "failed to make adapter ready, rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = leapraid_fw_log_init(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "firmware log init failed\n");
+ goto out;
+ }
+
+ leapraid_clean_active_cmds(adapter);
+ if (adapter->scan_dev_desc.driver_loading &&
+ adapter->scan_dev_desc.scan_dev_failed) {
+		dev_err(&adapter->pdev->dev,
+			"previous device scan failed while driver was loading\n");
+ adapter->access_ctrl.host_removing = true;
+ rc = -EFAULT;
+ goto out;
+ }
+
+ rc = leapraid_make_adapter_available(adapter);
+ if (!rc) {
+ dev_info(&adapter->pdev->dev,
+ "adapter is now available, rebuilding topology\n");
+ if (adapter->adapter_attr.enable_mp) {
+ leapraid_update_card_port_after_reset(adapter);
+ leapraid_update_vphys_after_reset(adapter);
+ }
+ leapraid_mark_all_dev_deleted(adapter);
+ leapraid_rebuild_enc_list_after_reset(adapter);
+ leapraid_search_resp_sas_dev(adapter);
+ leapraid_search_resp_raid_volume(adapter);
+ leapraid_search_resp_exp(adapter);
+ leapraid_hardreset_barrier(adapter);
+ }
+out:
+ dev_info(&adapter->pdev->dev, "hard reset %s\n",
+ ((rc == 0) ? "SUCCESS" : "FAILED"));
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ adapter->reset_desc.adapter_reset_results = rc;
+ adapter->access_ctrl.shost_recovering = false;
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+ adapter->reset_desc.reset_cnt++;
+ mutex_unlock(&adapter->reset_desc.adapter_reset_mutex);
+
+ if (rc)
+ leapraid_clean_active_scsi_cmds(adapter);
+	leapraid_mq_polling_resume(adapter);
+
+	return rc;
+
+exit_pci_unavailable:
+	dev_info(&adapter->pdev->dev, "pcie unavailable!\n");
+	return rc;
+}
+
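+/*
+ * Fetch the adapter feature page over the doorbell handshake and cache
+ * queue depths, capabilities and the firmware version.  Zero-reported
+ * limits fall back to driver defaults; firmware older than major
+ * version 2 and adapters without atomic request support are rejected.
+ */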
+static int leapraid_get_adapter_features(struct leapraid_adapter *adapter)
+{
+ struct leapraid_adapter_features_req leap_mpi_req;
+ struct leapraid_adapter_features_rep leap_mpi_rep;
+ u8 fw_major, fw_minor, fw_build, fw_release;
+ u32 db;
+ int r;
+
+ db = leapraid_readl(&adapter->iomem_base->db);
+ if (db & LEAPRAID_DB_USED ||
+ (db & LEAPRAID_DB_MASK) == LEAPRAID_DB_FAULT)
+ return -EFAULT;
+
+ if (((db & LEAPRAID_DB_MASK) != LEAPRAID_DB_READY) &&
+ ((db & LEAPRAID_DB_MASK) != LEAPRAID_DB_OPERATIONAL)) {
+ if (!leapraid_wait_adapter_ready(adapter))
+ return -EFAULT;
+ }
+
+ memset(&leap_mpi_req, 0, sizeof(struct leapraid_adapter_features_req));
+ memset(&leap_mpi_rep, 0, sizeof(struct leapraid_adapter_features_rep));
+ leap_mpi_req.func = LEAPRAID_FUNC_GET_ADAPTER_FEATURES;
+ r = leapraid_handshake_func(adapter,
+ sizeof(struct leapraid_adapter_features_req),
+ (u32 *)&leap_mpi_req,
+ sizeof(struct leapraid_adapter_features_rep),
+ (u16 *)&leap_mpi_rep);
+ if (r) {
+ dev_err(&adapter->pdev->dev,
+ "%s %s: handshake failed, r=%d\n",
+ adapter->adapter_attr.name, __func__, r);
+ return r;
+ }
+
+ memset(&adapter->adapter_attr.features, 0,
+ sizeof(struct leapraid_adapter_features));
+ adapter->adapter_attr.features.req_slot =
+ le16_to_cpu(leap_mpi_rep.req_slot);
+ adapter->adapter_attr.features.hp_slot =
+ le16_to_cpu(leap_mpi_rep.hp_slot);
+ adapter->adapter_attr.features.adapter_caps =
+ le32_to_cpu(leap_mpi_rep.adapter_caps);
+ adapter->adapter_attr.features.max_volumes =
+ leap_mpi_rep.max_volumes;
+ if (!adapter->adapter_attr.features.max_volumes)
+ adapter->adapter_attr.features.max_volumes =
+ LEAPRAID_MAX_VOLUMES_DEFAULT;
+ adapter->adapter_attr.features.max_dev_handle =
+ le16_to_cpu(leap_mpi_rep.max_dev_hdl);
+ if (!adapter->adapter_attr.features.max_dev_handle)
+ adapter->adapter_attr.features.max_dev_handle =
+ LEAPRAID_MAX_DEV_HANDLE_DEFAULT;
+ adapter->adapter_attr.features.min_dev_handle =
+ le16_to_cpu(leap_mpi_rep.min_dev_hdl);
+ if ((adapter->adapter_attr.features.adapter_caps &
+ LEAPRAID_ADAPTER_FEATURES_CAP_INTEGRATED_RAID))
+ adapter->adapter_attr.raid_support = true;
+ if (WARN_ON(!(adapter->adapter_attr.features.adapter_caps &
+ LEAPRAID_ADAPTER_FEATURES_CAP_ATOMIC_REQ)))
+ return -EFAULT;
+ adapter->adapter_attr.features.fw_version =
+ le32_to_cpu(leap_mpi_rep.fw_version);
+
+ fw_major = (adapter->adapter_attr.features.fw_version >> 24) & 0xFF;
+ fw_minor = (adapter->adapter_attr.features.fw_version >> 16) & 0xFF;
+ fw_build = (adapter->adapter_attr.features.fw_version >> 8) & 0xFF;
+ fw_release = adapter->adapter_attr.features.fw_version & 0xFF;
+
+ dev_info(&adapter->pdev->dev,
+ "Firmware version: %u.%u.%u.%u (0x%08x)\n",
+ fw_major, fw_minor, fw_build, fw_release,
+ adapter->adapter_attr.features.fw_version);
+
+ if (fw_major < 2) {
+ dev_err(&adapter->pdev->dev,
+ "Unsupported firmware major version, requires >= 2\n");
+ return -EFAULT;
+ }
+ adapter->shost->max_id = -1;
+
+ return 0;
+}
+
+static inline void leapraid_disable_pcie(struct leapraid_adapter *adapter)
+{
+ mutex_lock(&adapter->access_ctrl.pci_access_lock);
+ if (adapter->iomem_base) {
+ iounmap(adapter->iomem_base);
+ adapter->iomem_base = NULL;
+ }
+ if (pci_is_enabled(adapter->pdev)) {
+ pci_release_regions(adapter->pdev);
+ pci_disable_device(adapter->pdev);
+ }
+ mutex_unlock(&adapter->access_ctrl.pci_access_lock);
+}
+
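+/*
+ * Enable the PCI device, claim its regions, pick a 64-bit DMA mask when
+ * dma_addr_t allows it (32-bit otherwise) and map BAR0 for register
+ * access.  Cleanup on failure is left to leapraid_disable_pcie(),
+ * invoked from the caller's error path.
+ */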
+static int leapraid_enable_pcie(struct leapraid_adapter *adapter)
+{
+ u64 dma_mask;
+ int rc;
+
+ rc = pci_enable_device(adapter->pdev);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "failed to enable PCI device\n");
+ return rc;
+ }
+
+ rc = pci_request_regions(adapter->pdev, LEAPRAID_DRIVER_NAME);
+ if (rc) {
+ dev_err(&adapter->pdev->dev,
+ "failed to obtain PCI resources\n");
+ goto disable_pcie;
+ }
+
+ if (sizeof(dma_addr_t) > 4) {
+ dma_mask = DMA_BIT_MASK(64);
+ adapter->adapter_attr.use_32_dma_mask = false;
+ } else {
+ dma_mask = DMA_BIT_MASK(32);
+ adapter->adapter_attr.use_32_dma_mask = true;
+ }
+
+ rc = dma_set_mask_and_coherent(&adapter->pdev->dev, dma_mask);
+ if (rc) {
+		dev_err(&adapter->pdev->dev,
+			"failed to set DMA mask 0x%llx\n", dma_mask);
+ goto disable_pcie;
+ }
+ adapter->iomem_base = ioremap(pci_resource_start(adapter->pdev, 0),
+ sizeof(struct leapraid_reg_base));
+ if (!adapter->iomem_base) {
+ dev_err(&adapter->pdev->dev,
+ "failed to map memory for controller registers\n");
+ rc = -ENOMEM;
+ goto disable_pcie;
+ }
+
+ pci_set_master(adapter->pdev);
+
+ return 0;
+
+disable_pcie:
+ return rc;
+}
+
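+/*
+ * Fallback CPU-to-vector mapping: spread the online CPUs evenly across
+ * the interrupt reply queues in round-robin fashion, giving the first
+ * (nr_cpus % total_msix) queues one extra CPU each.
+ */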
+static void leapraid_cpus_on_irq(struct leapraid_adapter *adapter)
+{
+ struct leapraid_int_rq *int_rq;
+ unsigned int i, base_group, this_group;
+ unsigned int cpu, nr_cpus, total_msix, index = 0;
+
+ total_msix = adapter->notification_desc.iopoll_qdex;
+ nr_cpus = num_online_cpus();
+
+ if (!nr_cpus || !total_msix)
+ return;
+ base_group = nr_cpus / total_msix;
+
+ cpu = cpumask_first(cpu_online_mask);
+ for (index = 0; index < adapter->notification_desc.iopoll_qdex;
+ index++) {
+ int_rq = &adapter->notification_desc.int_rqs[index];
+
+ if (cpu >= nr_cpus)
+ break;
+
+ this_group = base_group +
+ (index < (nr_cpus % total_msix) ? 1 : 0);
+
+ for (i = 0 ; i < this_group ; i++) {
+ adapter->notification_desc.msix_cpu_map[cpu] =
+ int_rq->rq.msix_idx;
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+ }
+}
+
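+/*
+ * Build msix_cpu_map from the PCI-layer affinity masks.  If any vector
+ * lacks an affinity mask this falls through to the round-robin mapping
+ * above; note that the fallback also runs after the loop completes
+ * normally.
+ */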
+static void leapraid_map_msix_to_cpu(struct leapraid_adapter *adapter)
+{
+ struct leapraid_int_rq *int_rq;
+ const cpumask_t *affinity_mask;
+ u32 i;
+ u16 cpu;
+
+ if (!adapter->adapter_attr.rq_cnt)
+ return;
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qdex; i++) {
+ int_rq = &adapter->notification_desc.int_rqs[i];
+ affinity_mask = pci_irq_get_affinity(adapter->pdev,
+ int_rq->rq.msix_idx);
+ if (!affinity_mask)
+ goto out;
+
+ for_each_cpu_and(cpu, affinity_mask, cpu_online_mask) {
+ if (cpu >= adapter->notification_desc.msix_cpu_map_sz)
+ break;
+
+ adapter->notification_desc.msix_cpu_map[cpu] =
+ int_rq->rq.msix_idx;
+ }
+ }
+out:
+ leapraid_cpus_on_irq(adapter);
+}
+
+static void leapraid_configure_reply_queue_affinity(
+ struct leapraid_adapter *adapter)
+{
+ if (!adapter || !adapter->notification_desc.msix_enable)
+ return;
+
+ leapraid_map_msix_to_cpu(adapter);
+}
+
+static void leapraid_free_irq(struct leapraid_adapter *adapter)
+{
+ struct leapraid_int_rq *int_rq;
+ unsigned int i;
+
+ if (!adapter->notification_desc.int_rqs)
+ return;
+
+ for (i = 0; i < adapter->notification_desc.int_rqs_allocated; i++) {
+ int_rq = &adapter->notification_desc.int_rqs[i];
+ if (!int_rq)
+ continue;
+
+ irq_set_affinity_hint(pci_irq_vector(adapter->pdev,
+ int_rq->rq.msix_idx), NULL);
+ free_irq(pci_irq_vector(adapter->pdev, int_rq->rq.msix_idx),
+ &int_rq->rq);
+ }
+ adapter->notification_desc.int_rqs_allocated = 0;
+
+ if (!adapter->notification_desc.msix_enable)
+ return;
+
+ pci_free_irq_vectors(adapter->pdev);
+ adapter->notification_desc.msix_enable = false;
+
+ kfree(adapter->notification_desc.blk_mq_poll_rqs);
+ adapter->notification_desc.blk_mq_poll_rqs = NULL;
+
+ kfree(adapter->notification_desc.int_rqs);
+ adapter->notification_desc.int_rqs = NULL;
+
+ kfree(adapter->notification_desc.msix_cpu_map);
+ adapter->notification_desc.msix_cpu_map = NULL;
+}
+
+static inline int leapraid_msix_cnt(struct pci_dev *pdev)
+{
+ return pci_msix_vec_count(pdev);
+}
+
+static inline int leapraid_msi_cnt(struct pci_dev *pdev)
+{
+ return pci_msi_vec_count(pdev);
+}
+
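+/*
+ * Allocate MSI-X vectors with managed affinity (interrupt_mode 0 only;
+ * in MSI mode the vectors are allocated by the caller) and request one
+ * IRQ per interrupt reply queue, naming each after the driver, adapter
+ * id and vector index.
+ */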
+static int leapraid_setup_irqs(struct leapraid_adapter *adapter)
+{
+ unsigned int i;
+ int rc = 0;
+
+ if (interrupt_mode == 0) {
+ rc = pci_alloc_irq_vectors_affinity(
+ adapter->pdev,
+ adapter->notification_desc.iopoll_qdex,
+ adapter->notification_desc.iopoll_qdex,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, NULL);
+
+ if (rc < 0) {
+			dev_err(&adapter->pdev->dev,
+				"failed to allocate %d MSI/MSI-X vectors!\n",
+				adapter->notification_desc.iopoll_qdex);
+ return rc;
+ }
+ }
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qdex; i++) {
+ adapter->notification_desc.int_rqs[i].rq.adapter = adapter;
+ adapter->notification_desc.int_rqs[i].rq.msix_idx = i;
+ atomic_set(&adapter->notification_desc.int_rqs[i].rq.busy, 0);
+ if (interrupt_mode == 0)
+ snprintf(adapter->notification_desc.int_rqs[i].rq.name,
+ LEAPRAID_NAME_LENGTH, "%s%u-MSIx%u",
+ LEAPRAID_DRIVER_NAME,
+ adapter->adapter_attr.id, i);
+ else if (interrupt_mode == 1)
+ snprintf(adapter->notification_desc.int_rqs[i].rq.name,
+ LEAPRAID_NAME_LENGTH, "%s%u-MSI%u",
+ LEAPRAID_DRIVER_NAME,
+ adapter->adapter_attr.id, i);
+
+ rc = request_irq(pci_irq_vector(adapter->pdev, i),
+ leapraid_irq_handler,
+ IRQF_SHARED,
+ adapter->notification_desc.int_rqs[i].rq.name,
+ &adapter->notification_desc.int_rqs[i].rq);
+ if (rc) {
+ dev_err(&adapter->pdev->dev,
+ "MSI/MSIx: request_irq %s failed!\n",
+ adapter->notification_desc.int_rqs[i].rq.name);
+ return rc;
+ }
+ adapter->notification_desc.int_rqs_allocated++;
+ }
+
+ return 0;
+}
+
+static int leapraid_setup_legacy_int(struct leapraid_adapter *adapter)
+{
+ int rc;
+
+ adapter->notification_desc.int_rqs[0].rq.adapter = adapter;
+ adapter->notification_desc.int_rqs[0].rq.msix_idx = 0;
+ atomic_set(&adapter->notification_desc.int_rqs[0].rq.busy, 0);
+ snprintf(adapter->notification_desc.int_rqs[0].rq.name,
+ LEAPRAID_NAME_LENGTH, "%s%d-LegacyInt",
+ LEAPRAID_DRIVER_NAME, adapter->adapter_attr.id);
+
+ rc = pci_alloc_irq_vectors_affinity(
+ adapter->pdev,
+ adapter->notification_desc.iopoll_qdex,
+ adapter->notification_desc.iopoll_qdex,
+ PCI_IRQ_LEGACY | PCI_IRQ_AFFINITY,
+ NULL);
+ if (rc < 0) {
+		dev_err(&adapter->pdev->dev,
+			"legacy irq allocation failed!\n");
+ return rc;
+ }
+
+ rc = request_irq(pci_irq_vector(adapter->pdev, 0),
+ leapraid_irq_handler,
+ IRQF_SHARED,
+ adapter->notification_desc.int_rqs[0].rq.name,
+ &adapter->notification_desc.int_rqs[0].rq);
+ if (rc) {
+ irq_set_affinity_hint(pci_irq_vector(adapter->pdev, 0), NULL);
+ pci_free_irq_vectors(adapter->pdev);
+		dev_err(&adapter->pdev->dev,
+			"Legacy Int: request_irq %s failed!\n",
+			adapter->notification_desc.int_rqs[0].rq.name);
+ return -EBUSY;
+ }
+ adapter->notification_desc.int_rqs_allocated = 1;
+ return rc;
+}
+
+static int leapraid_set_legacy_int(struct leapraid_adapter *adapter)
+{
+ int rc;
+
+ adapter->notification_desc.msix_cpu_map_sz = num_online_cpus();
+ adapter->notification_desc.msix_cpu_map =
+ kzalloc(adapter->notification_desc.msix_cpu_map_sz,
+ GFP_KERNEL);
+ if (!adapter->notification_desc.msix_cpu_map)
+ return -ENOMEM;
+
+ adapter->adapter_attr.rq_cnt = 1;
+ adapter->notification_desc.iopoll_qdex =
+ adapter->adapter_attr.rq_cnt;
+ adapter->notification_desc.iopoll_qcnt = 0;
+ dev_info(&adapter->pdev->dev,
+ "Legacy Intr: req queue cnt=%d, intr=%d/poll=%d rep queues!\n",
+ adapter->adapter_attr.rq_cnt,
+ adapter->notification_desc.iopoll_qdex,
+ adapter->notification_desc.iopoll_qcnt);
+ adapter->notification_desc.int_rqs =
+ kcalloc(adapter->notification_desc.iopoll_qdex,
+ sizeof(struct leapraid_int_rq), GFP_KERNEL);
+ if (!adapter->notification_desc.int_rqs) {
+ dev_err(&adapter->pdev->dev,
+ "Legacy Intr: allocate %d intr rep queues failed!\n",
+ adapter->notification_desc.iopoll_qdex);
+ return -ENOMEM;
+ }
+
+ rc = leapraid_setup_legacy_int(adapter);
+
+ return rc;
+}
+
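+/*
+ * MSI-X setup: size the reply-queue count from the online CPUs, the
+ * vector budget and the max_msix_vectors/poll_queues knobs, carve off
+ * the requested number of blk-mq poll queues, then allocate the
+ * per-queue bookkeeping.  Any failure falls back to legacy interrupts.
+ */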
+static int leapraid_set_msix(struct leapraid_adapter *adapter)
+{
+ int iopoll_qcnt = 0;
+ unsigned int i;
+ int rc, msix_cnt;
+
+ if (msix_disable == 1)
+ goto legacy_int;
+
+ msix_cnt = leapraid_msix_cnt(adapter->pdev);
+ if (msix_cnt <= 0) {
+ dev_info(&adapter->pdev->dev, "msix unsupported!\n");
+ goto legacy_int;
+ }
+
+ if (reset_devices)
+ adapter->adapter_attr.rq_cnt = 1;
+ else
+ adapter->adapter_attr.rq_cnt = min_t(int,
+ num_online_cpus(),
+ msix_cnt);
+
+ if (max_msix_vectors > 0)
+ adapter->adapter_attr.rq_cnt = min_t(
+ int, max_msix_vectors, adapter->adapter_attr.rq_cnt);
+
+ if (adapter->adapter_attr.rq_cnt <= 1)
+ adapter->shost->host_tagset = 0;
+ if (adapter->shost->host_tagset) {
+ iopoll_qcnt = poll_queues;
+ if (iopoll_qcnt >= adapter->adapter_attr.rq_cnt)
+ iopoll_qcnt = 0;
+ }
+ if (iopoll_qcnt) {
+ adapter->notification_desc.blk_mq_poll_rqs =
+ kcalloc(iopoll_qcnt,
+ sizeof(struct leapraid_blk_mq_poll_rq),
+ GFP_KERNEL);
+ if (!adapter->notification_desc.blk_mq_poll_rqs)
+ return -ENOMEM;
+ adapter->adapter_attr.rq_cnt =
+ min(adapter->adapter_attr.rq_cnt + iopoll_qcnt,
+ msix_cnt);
+ }
+
+ adapter->notification_desc.iopoll_qdex =
+ adapter->adapter_attr.rq_cnt - iopoll_qcnt;
+
+ adapter->notification_desc.iopoll_qcnt = iopoll_qcnt;
+ dev_info(&adapter->pdev->dev,
+ "MSIx: req queue cnt=%d, intr=%d/poll=%d rep queues!\n",
+ adapter->adapter_attr.rq_cnt,
+ adapter->notification_desc.iopoll_qdex,
+ adapter->notification_desc.iopoll_qcnt);
+
+ adapter->notification_desc.int_rqs =
+ kcalloc(adapter->notification_desc.iopoll_qdex,
+ sizeof(struct leapraid_int_rq), GFP_KERNEL);
+ if (!adapter->notification_desc.int_rqs) {
+ dev_err(&adapter->pdev->dev,
+ "MSIx: allocate %d interrupt reply queues failed!\n",
+ adapter->notification_desc.iopoll_qdex);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qcnt; i++) {
+ adapter->notification_desc.blk_mq_poll_rqs[i].rq.adapter =
+ adapter;
+ adapter->notification_desc.blk_mq_poll_rqs[i].rq.msix_idx =
+ i + adapter->notification_desc.iopoll_qdex;
+ atomic_set(&adapter->notification_desc.blk_mq_poll_rqs[i].rq.busy, 0);
+ snprintf(adapter->notification_desc.blk_mq_poll_rqs[i].rq.name,
+ LEAPRAID_NAME_LENGTH,
+ "%s%u-MQ-Poll%u", LEAPRAID_DRIVER_NAME,
+ adapter->adapter_attr.id, i);
+ atomic_set(&adapter->notification_desc.blk_mq_poll_rqs[i].busy, 0);
+ atomic_set(&adapter->notification_desc.blk_mq_poll_rqs[i].pause, 0);
+ }
+
+ adapter->notification_desc.msix_cpu_map_sz =
+ num_online_cpus();
+ adapter->notification_desc.msix_cpu_map =
+ kzalloc(adapter->notification_desc.msix_cpu_map_sz,
+ GFP_KERNEL);
+ if (!adapter->notification_desc.msix_cpu_map)
+ return -ENOMEM;
+
+ adapter->notification_desc.msix_enable = true;
+ rc = leapraid_setup_irqs(adapter);
+ if (rc) {
+ leapraid_free_irq(adapter);
+ adapter->notification_desc.msix_enable = false;
+ goto legacy_int;
+ }
+
+ return 0;
+
+legacy_int:
+ rc = leapraid_set_legacy_int(adapter);
+
+ return rc;
+}
+
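+/*
+ * MSI variant of the queue setup above.  pci_alloc_irq_vectors_affinity()
+ * may grant fewer vectors than requested, in which case the reply-queue
+ * count is trimmed to what was actually allocated.
+ */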
+static int leapraid_set_msi(struct leapraid_adapter *adapter)
+{
+ int iopoll_qcnt = 0;
+ unsigned int i;
+ int rc, msi_cnt;
+
+ if (msix_disable == 1)
+ goto legacy_int1;
+
+ msi_cnt = leapraid_msi_cnt(adapter->pdev);
+ if (msi_cnt <= 0) {
+		dev_info(&adapter->pdev->dev, "msi unsupported!\n");
+ goto legacy_int1;
+ }
+
+ if (reset_devices)
+ adapter->adapter_attr.rq_cnt = 1;
+ else
+ adapter->adapter_attr.rq_cnt = min_t(int,
+ num_online_cpus(),
+ msi_cnt);
+
+ if (max_msix_vectors > 0)
+ adapter->adapter_attr.rq_cnt = min_t(
+ int, max_msix_vectors, adapter->adapter_attr.rq_cnt);
+
+ if (adapter->adapter_attr.rq_cnt <= 1)
+ adapter->shost->host_tagset = 0;
+ if (adapter->shost->host_tagset) {
+ iopoll_qcnt = poll_queues;
+ if (iopoll_qcnt >= adapter->adapter_attr.rq_cnt)
+ iopoll_qcnt = 0;
+ }
+
+ if (iopoll_qcnt) {
+ adapter->notification_desc.blk_mq_poll_rqs =
+ kcalloc(iopoll_qcnt,
+ sizeof(struct leapraid_blk_mq_poll_rq),
+ GFP_KERNEL);
+ if (!adapter->notification_desc.blk_mq_poll_rqs)
+ return -ENOMEM;
+
+ adapter->adapter_attr.rq_cnt =
+ min(adapter->adapter_attr.rq_cnt + iopoll_qcnt,
+ msi_cnt);
+ }
+
+ adapter->notification_desc.iopoll_qdex =
+ adapter->adapter_attr.rq_cnt - iopoll_qcnt;
+ rc = pci_alloc_irq_vectors_affinity(
+ adapter->pdev,
+ 1,
+ adapter->notification_desc.iopoll_qdex,
+ PCI_IRQ_MSI | PCI_IRQ_AFFINITY, NULL);
+ if (rc < 0) {
+		dev_err(&adapter->pdev->dev,
+			"failed to allocate %d MSI vectors!\n",
+			adapter->notification_desc.iopoll_qdex);
+ goto legacy_int1;
+ }
+ if (rc != adapter->notification_desc.iopoll_qdex) {
+ adapter->notification_desc.iopoll_qdex = rc;
+ adapter->adapter_attr.rq_cnt =
+ adapter->notification_desc.iopoll_qdex + iopoll_qcnt;
+ }
+ adapter->notification_desc.iopoll_qcnt = iopoll_qcnt;
+ dev_info(&adapter->pdev->dev,
+ "MSI: req queue cnt=%d, intr=%d/poll=%d rep queues!\n",
+ adapter->adapter_attr.rq_cnt,
+ adapter->notification_desc.iopoll_qdex,
+ adapter->notification_desc.iopoll_qcnt);
+
+ adapter->notification_desc.int_rqs =
+ kcalloc(adapter->notification_desc.iopoll_qdex,
+ sizeof(struct leapraid_int_rq),
+ GFP_KERNEL);
+ if (!adapter->notification_desc.int_rqs) {
+ dev_err(&adapter->pdev->dev,
+ "MSI: allocate %d interrupt reply queues failed!\n",
+ adapter->notification_desc.iopoll_qdex);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qcnt; i++) {
+ adapter->notification_desc.blk_mq_poll_rqs[i].rq.adapter =
+ adapter;
+ adapter->notification_desc.blk_mq_poll_rqs[i].rq.msix_idx =
+ i + adapter->notification_desc.iopoll_qdex;
+ atomic_set(
+ &adapter->notification_desc.blk_mq_poll_rqs[i].rq.busy,
+ 0);
+ snprintf(adapter->notification_desc.blk_mq_poll_rqs[i].rq.name,
+ LEAPRAID_NAME_LENGTH,
+ "%s%u-MQ-Poll%u", LEAPRAID_DRIVER_NAME,
+ adapter->adapter_attr.id, i);
+ atomic_set(
+ &adapter->notification_desc.blk_mq_poll_rqs[i].busy,
+ 0);
+ atomic_set(
+ &adapter->notification_desc.blk_mq_poll_rqs[i].pause,
+ 0);
+ }
+
+ adapter->notification_desc.msix_cpu_map_sz = num_online_cpus();
+ adapter->notification_desc.msix_cpu_map =
+ kzalloc(adapter->notification_desc.msix_cpu_map_sz,
+ GFP_KERNEL);
+ if (!adapter->notification_desc.msix_cpu_map)
+ return -ENOMEM;
+
+ adapter->notification_desc.msix_enable = true;
+ rc = leapraid_setup_irqs(adapter);
+ if (rc) {
+ leapraid_free_irq(adapter);
+ adapter->notification_desc.msix_enable = false;
+ goto legacy_int1;
+ }
+
+ return 0;
+
+legacy_int1:
+ rc = leapraid_set_legacy_int(adapter);
+
+ return rc;
+}
+
+static int leapraid_set_notification(struct leapraid_adapter *adapter)
+{
+ int rc = 0;
+
+ if (interrupt_mode == 0) {
+ rc = leapraid_set_msix(adapter);
+ if (rc)
+ pr_err("%s enable MSI-X irq failed!\n", __func__);
+ } else if (interrupt_mode == 1) {
+ rc = leapraid_set_msi(adapter);
+ if (rc)
+ pr_err("%s enable MSI irq failed!\n", __func__);
+ } else if (interrupt_mode == 2) {
+ rc = leapraid_set_legacy_int(adapter);
+ if (rc)
+ pr_err("%s enable legacy irq failed!\n", __func__);
+ }
+
+ return rc;
+}
+
+static void leapraid_disable_pcie_and_notification(
+ struct leapraid_adapter *adapter)
+{
+ leapraid_free_irq(adapter);
+ leapraid_disable_pcie(adapter);
+}
+
+int leapraid_set_pcie_and_notification(struct leapraid_adapter *adapter)
+{
+ int rc;
+
+ rc = leapraid_enable_pcie(adapter);
+ if (rc)
+ goto out_fail;
+
+ leapraid_mask_int(adapter);
+
+ rc = leapraid_set_notification(adapter);
+ if (rc)
+ goto out_fail;
+
+ pci_save_state(adapter->pdev);
+
+ return 0;
+
+out_fail:
+ leapraid_disable_pcie_and_notification(adapter);
+ return rc;
+}
+
+void leapraid_disable_controller(struct leapraid_adapter *adapter)
+{
+ if (!adapter->iomem_base)
+ return;
+
+ leapraid_mask_int(adapter);
+
+ adapter->access_ctrl.shost_recovering = true;
+ leapraid_make_adapter_ready(adapter, PART_RESET);
+ adapter->access_ctrl.shost_recovering = false;
+
+ leapraid_disable_pcie_and_notification(adapter);
+}
+
+static int leapraid_adapter_unit_reset(struct leapraid_adapter *adapter)
+{
+ int rc = 0;
+
+	dev_info(&adapter->pdev->dev, "issuing unit reset\n");
+ writel(LEAPRAID_FUNC_ADAPTER_UNIT_RESET << LEAPRAID_DB_FUNC_SHIFT,
+ &adapter->iomem_base->db);
+ if (leapraid_db_wait_ack_and_clear_int(adapter))
+ rc = -EFAULT;
+
+	if (!leapraid_wait_adapter_ready(adapter))
+		rc = -EFAULT;
+
+ dev_info(&adapter->pdev->dev, "unit reset: %s\n",
+ ((rc == 0) ? "SUCCESS" : "FAILED"));
+ return rc;
+}
+
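+/*
+ * Drive the adapter to the READY doorbell state: wait out an
+ * in-progress reset, try a lightweight unit reset from OPERATIONAL
+ * state, and fall back to a full host diagnostic reset if the doorbell
+ * shows USED/FAULT or a FULL_RESET was requested.
+ */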
+static int leapraid_make_adapter_ready(struct leapraid_adapter *adapter,
+ enum reset_type type)
+{
+ u32 db;
+ int rc;
+ int count;
+
+ if (!leapraid_pci_active(adapter))
+ return 0;
+
+ count = 0;
+ db = leapraid_readl(&adapter->iomem_base->db);
+ if ((db & LEAPRAID_DB_MASK) == LEAPRAID_DB_RESET) {
+ while ((db & LEAPRAID_DB_MASK) != LEAPRAID_DB_READY) {
+ if (count++ == LEAPRAID_DB_RETRY_COUNT_MAX) {
+ dev_err(&adapter->pdev->dev,
+					"timed out waiting for adapter ready\n");
+ return -EFAULT;
+ }
+ ssleep(1);
+ db = leapraid_readl(&adapter->iomem_base->db);
+ dev_info(&adapter->pdev->dev,
+				"waiting for adapter ready, count=%d, db=0x%x\n",
+ count, db);
+ }
+ }
+ if ((db & LEAPRAID_DB_MASK) == LEAPRAID_DB_READY)
+ return 0;
+
+ if (db & LEAPRAID_DB_USED)
+ goto full_reset;
+
+ if ((db & LEAPRAID_DB_MASK) == LEAPRAID_DB_FAULT)
+ goto full_reset;
+
+ if (type == FULL_RESET)
+ goto full_reset;
+
+ if ((db & LEAPRAID_DB_MASK) == LEAPRAID_DB_OPERATIONAL)
+ if (!(leapraid_adapter_unit_reset(adapter)))
+ return 0;
+
+full_reset:
+ rc = leapraid_host_diag_reset(adapter);
+ return rc;
+}
+
+static void leapraid_fw_log_exit(struct leapraid_adapter *adapter)
+{
+ if (!adapter->fw_log_desc.open_pcie_trace)
+ return;
+
+ if (adapter->fw_log_desc.fw_log_buffer) {
+ dma_free_coherent(&adapter->pdev->dev,
+ (LEAPRAID_SYS_LOG_BUF_SIZE +
+ LEAPRAID_SYS_LOG_BUF_RESERVE),
+ adapter->fw_log_desc.fw_log_buffer,
+ adapter->fw_log_desc.fw_log_buffer_dma);
+ adapter->fw_log_desc.fw_log_buffer = NULL;
+ }
+}
+
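+/*
+ * Hand the firmware a coherent DMA buffer for its trace log (reused if
+ * already allocated).  The 64-bit buffer address and size travel in the
+ * first three mailbox words of the LOGBUF_INIT handshake; a non-SUCCESS
+ * adapter status fails the init.
+ */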
+static int leapraid_fw_log_init(struct leapraid_adapter *adapter)
+{
+ struct leapraid_adapter_log_req adapter_log_req;
+ struct leapraid_adapter_log_rep adapter_log_rep;
+ u16 adapter_status;
+ u64 buf_addr;
+ u32 rc;
+
+ if (!adapter->fw_log_desc.open_pcie_trace)
+ return 0;
+
+ if (!adapter->fw_log_desc.fw_log_buffer) {
+ adapter->fw_log_desc.fw_log_buffer =
+ dma_alloc_coherent(
+ &adapter->pdev->dev,
+ (LEAPRAID_SYS_LOG_BUF_SIZE +
+ LEAPRAID_SYS_LOG_BUF_RESERVE),
+ &adapter->fw_log_desc.fw_log_buffer_dma,
+ GFP_KERNEL);
+ if (!adapter->fw_log_desc.fw_log_buffer) {
+ dev_err(&adapter->pdev->dev,
+ "%s: log buf alloc failed.\n",
+ __func__);
+ return -ENOMEM;
+ }
+ }
+
+ memset(&adapter_log_req, 0, sizeof(struct leapraid_adapter_log_req));
+ adapter_log_req.func = LEAPRAID_FUNC_LOGBUF_INIT;
+ buf_addr = adapter->fw_log_desc.fw_log_buffer_dma;
+
+ adapter_log_req.mbox.w[0] =
+ cpu_to_le32((u32)(buf_addr & 0xFFFFFFFF));
+ adapter_log_req.mbox.w[1] =
+ cpu_to_le32((u32)((buf_addr >> 32) & 0xFFFFFFFF));
+ adapter_log_req.mbox.w[2] =
+ cpu_to_le32(LEAPRAID_SYS_LOG_BUF_SIZE);
+ rc = leapraid_handshake_func(adapter,
+ sizeof(struct leapraid_adapter_log_req),
+ (u32 *)&adapter_log_req,
+ sizeof(struct leapraid_adapter_log_rep),
+ (u16 *)&adapter_log_rep);
+ if (rc != 0) {
+ dev_err(&adapter->pdev->dev, "%s: handshake failed, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ adapter_status = le16_to_cpu(adapter_log_rep.adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (adapter_status != LEAPRAID_ADAPTER_STATUS_SUCCESS) {
+ dev_err(&adapter->pdev->dev, "%s: failed!\n", __func__);
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
+static void leapraid_free_host_memory(struct leapraid_adapter *adapter)
+{
+ unsigned int i;
+
+ if (adapter->mem_desc.task_desc) {
+ dma_free_coherent(&adapter->pdev->dev,
+ adapter->adapter_attr.task_desc_dma_size,
+ adapter->mem_desc.task_desc,
+ adapter->mem_desc.task_desc_dma);
+ adapter->mem_desc.task_desc = NULL;
+ }
+
+ if (adapter->mem_desc.sense_data) {
+ dma_free_coherent(
+ &adapter->pdev->dev,
+ adapter->adapter_attr.io_qd * SCSI_SENSE_BUFFERSIZE,
+ adapter->mem_desc.sense_data,
+ adapter->mem_desc.sense_data_dma);
+ adapter->mem_desc.sense_data = NULL;
+ }
+
+ if (adapter->mem_desc.rep_msg) {
+ dma_free_coherent(
+ &adapter->pdev->dev,
+ adapter->adapter_attr.rep_msg_qd * LEAPRAID_REPLY_SIEZ,
+ adapter->mem_desc.rep_msg,
+ adapter->mem_desc.rep_msg_dma);
+ adapter->mem_desc.rep_msg = NULL;
+ }
+
+ if (adapter->mem_desc.rep_msg_addr) {
+ dma_free_coherent(&adapter->pdev->dev,
+ adapter->adapter_attr.rep_msg_qd *
+ LEAPRAID_REP_MSG_ADDR_SIZE,
+ adapter->mem_desc.rep_msg_addr,
+ adapter->mem_desc.rep_msg_addr_dma);
+ adapter->mem_desc.rep_msg_addr = NULL;
+ }
+
+ if (adapter->mem_desc.rep_desc_seg_maint) {
+ for (i = 0; i < adapter->adapter_attr.rep_desc_q_seg_cnt;
+ i++) {
+ if (adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg) {
+ dma_free_coherent(
+ &adapter->pdev->dev,
+ (adapter->adapter_attr.rep_desc_qd *
+ LEAPRAID_REP_DESC_ENTRY_SIZE) *
+ LEAPRAID_REP_DESC_CHUNK_SIZE,
+ adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg,
+ adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg_dma);
+ adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg = NULL;
+ }
+ }
+
+ if (adapter->mem_desc.rep_desc_q_arr) {
+ dma_free_coherent(
+ &adapter->pdev->dev,
+ adapter->adapter_attr.rq_cnt *
+ LEAPRAID_REP_RQ_CNT_SIZE,
+ adapter->mem_desc.rep_desc_q_arr,
+ adapter->mem_desc.rep_desc_q_arr_dma);
+ adapter->mem_desc.rep_desc_q_arr = NULL;
+ }
+
+ for (i = 0; i < adapter->adapter_attr.rep_desc_q_seg_cnt; i++)
+ kfree(adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_maint);
+ kfree(adapter->mem_desc.rep_desc_seg_maint);
+ }
+
+ kfree(adapter->mem_desc.taskid_to_uniq_tag);
+ adapter->mem_desc.taskid_to_uniq_tag = NULL;
+
+ dma_pool_destroy(adapter->mem_desc.sg_chain_pool);
+}
+
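+/* true if [start, start + size) does not cross a 4GB boundary */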
+static inline bool leapraid_is_in_same_4g_seg(dma_addr_t start, u32 size)
+{
+ return (upper_32_bits(start) == upper_32_bits(start + size - 1));
+}
+
+int leapraid_internal_init_cmd_priv(struct leapraid_adapter *adapter,
+ struct leapraid_io_req_tracker *io_tracker)
+{
+ io_tracker->chain =
+ dma_pool_alloc(adapter->mem_desc.sg_chain_pool,
+ GFP_KERNEL,
+ &io_tracker->chain_dma);
+
+ if (!io_tracker->chain)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int leapraid_internal_exit_cmd_priv(struct leapraid_adapter *adapter,
+ struct leapraid_io_req_tracker *io_tracker)
+{
+ if (io_tracker && io_tracker->chain)
+ dma_pool_free(adapter->mem_desc.sg_chain_pool,
+ io_tracker->chain,
+ io_tracker->chain_dma);
+
+ return 0;
+}
+
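+/*
+ * Derive the queue depths from the adapter features, then allocate the
+ * task descriptors, chain pool, sense buffers, reply messages and reply
+ * descriptor segments.  Buffers whose high 32 address bits are
+ * programmed only once (sense data, reply messages) must not cross a
+ * 4GB boundary; if one does, everything is freed and the allocation is
+ * retried under a 32-bit DMA mask (the -EAGAIN path).
+ */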
+static int leapraid_request_host_memory(struct leapraid_adapter *adapter)
+{
+ struct leapraid_adapter_features *facts =
+ &adapter->adapter_attr.features;
+ u16 rep_desc_q_cnt_allocated;
+ unsigned int i, j;
+ int rc;
+
+ /* sg table size */
+ adapter->shost->sg_tablesize = LEAPRAID_SG_DEPTH;
+ if (reset_devices)
+ adapter->shost->sg_tablesize =
+ LEAPRAID_KDUMP_MIN_PHYS_SEGMENTS;
+ /* high priority cmds queue depth */
+	adapter->dynamic_task_desc.hp_cmd_qd = LEAPRAID_FIXED_HP_CMDS;
+ /* internal cmds queue depth */
+ adapter->dynamic_task_desc.inter_cmd_qd = LEAPRAID_FIXED_INTER_CMDS;
+ /* adapter cmds total queue depth */
+ if (reset_devices)
+ adapter->adapter_attr.adapter_total_qd =
+ LEAPRAID_DEFAULT_CMD_QD_OFFSET +
+ adapter->dynamic_task_desc.inter_cmd_qd +
+ adapter->dynamic_task_desc.hp_cmd_qd;
+ else
+ adapter->adapter_attr.adapter_total_qd = facts->req_slot +
+ adapter->dynamic_task_desc.hp_cmd_qd;
+ /* reply message queue depth */
+ adapter->adapter_attr.rep_msg_qd =
+ adapter->adapter_attr.adapter_total_qd +
+ LEAPRAID_DEFAULT_CMD_QD_OFFSET;
+ /* reply descriptor queue depth */
+ adapter->adapter_attr.rep_desc_qd =
+ round_up(adapter->adapter_attr.adapter_total_qd +
+ adapter->adapter_attr.rep_msg_qd +
+ LEAPRAID_TASKID_OFFSET_CTRL_CMD,
+ LEAPRAID_REPLY_QD_ALIGNMENT);
+ /* scsi cmd io depth */
+ adapter->adapter_attr.io_qd =
+ adapter->adapter_attr.adapter_total_qd -
+ adapter->dynamic_task_desc.hp_cmd_qd -
+ adapter->dynamic_task_desc.inter_cmd_qd;
+ /* scsi host can queue */
+ adapter->shost->can_queue = adapter->adapter_attr.io_qd -
+ LEAPRAID_TASKID_OFFSET_SCSIIO_CMD;
+ adapter->driver_cmds.ctl_cmd.taskid = adapter->shost->can_queue +
+ LEAPRAID_TASKID_OFFSET_CTRL_CMD;
+ adapter->driver_cmds.driver_scsiio_cmd.taskid =
+ adapter->shost->can_queue +
+ LEAPRAID_TASKID_OFFSET_SCSIIO_CMD;
+
+ /* allocate task descriptor */
+try_again:
+ adapter->adapter_attr.task_desc_dma_size =
+ (adapter->adapter_attr.adapter_total_qd +
+ LEAPRAID_TASKID_OFFSET_CTRL_CMD) *
+ LEAPRAID_REQUEST_SIZE;
+ adapter->mem_desc.task_desc =
+ dma_alloc_coherent(&adapter->pdev->dev,
+ adapter->adapter_attr.task_desc_dma_size,
+ &adapter->mem_desc.task_desc_dma,
+ GFP_KERNEL);
+ if (!adapter->mem_desc.task_desc) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate task descriptor DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ /* allocate chain message pool */
+ adapter->mem_desc.sg_chain_pool_size =
+ LEAPRAID_DEFAULT_CHAINS_PER_IO * LEAPRAID_CHAIN_SEG_SIZE;
+ adapter->mem_desc.sg_chain_pool =
+ dma_pool_create("leapraid chain pool",
+ &adapter->pdev->dev,
+ adapter->mem_desc.sg_chain_pool_size, 16, 0);
+ if (!adapter->mem_desc.sg_chain_pool) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate chain message DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* allocate io tracker to ref scsi io */
+
+ adapter->mem_desc.taskid_to_uniq_tag =
+ kcalloc(adapter->shost->can_queue, sizeof(u16), GFP_KERNEL);
+ if (!adapter->mem_desc.taskid_to_uniq_tag) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ adapter->dynamic_task_desc.hp_taskid =
+ adapter->adapter_attr.io_qd +
+ LEAPRAID_HP_TASKID_OFFSET_CTL_CMD;
+ /* allocate static hp taskid */
+ adapter->driver_cmds.ctl_cmd.hp_taskid =
+ adapter->dynamic_task_desc.hp_taskid;
+ adapter->driver_cmds.tm_cmd.hp_taskid =
+ adapter->dynamic_task_desc.hp_taskid +
+ LEAPRAID_HP_TASKID_OFFSET_TM_CMD;
+
+ adapter->dynamic_task_desc.inter_taskid =
+ adapter->dynamic_task_desc.hp_taskid +
+ adapter->dynamic_task_desc.hp_cmd_qd;
+ adapter->driver_cmds.scan_dev_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid;
+ adapter->driver_cmds.cfg_op_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid +
+ LEAPRAID_TASKID_OFFSET_CFG_OP_CMD;
+ adapter->driver_cmds.transport_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid +
+ LEAPRAID_TASKID_OFFSET_TRANSPORT_CMD;
+ adapter->driver_cmds.timestamp_sync_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid +
+ LEAPRAID_TASKID_OFFSET_TIMESTAMP_SYNC_CMD;
+ adapter->driver_cmds.raid_action_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid +
+ LEAPRAID_TASKID_OFFSET_RAID_ACTION_CMD;
+ adapter->driver_cmds.enc_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid +
+ LEAPRAID_TASKID_OFFSET_ENC_CMD;
+ adapter->driver_cmds.notify_event_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid +
+ LEAPRAID_TASKID_OFFSET_NOTIFY_EVENT_CMD;
+ dev_info(&adapter->pdev->dev, "queue depth:\n");
+ dev_info(&adapter->pdev->dev, " host->can_queue: %d\n",
+ adapter->shost->can_queue);
+ dev_info(&adapter->pdev->dev, " io_qd: %d\n",
+ adapter->adapter_attr.io_qd);
+ dev_info(&adapter->pdev->dev, " hpr_cmd_qd: %d\n",
+ adapter->dynamic_task_desc.hp_cmd_qd);
+ dev_info(&adapter->pdev->dev, " inter_cmd_qd: %d\n",
+ adapter->dynamic_task_desc.inter_cmd_qd);
+ dev_info(&adapter->pdev->dev, " adapter_total_qd: %d\n",
+ adapter->adapter_attr.adapter_total_qd);
+
+ dev_info(&adapter->pdev->dev, "taskid range:\n");
+ dev_info(&adapter->pdev->dev,
+ " adapter->dynamic_task_desc.hp_taskid: %d\n",
+ adapter->dynamic_task_desc.hp_taskid);
+ dev_info(&adapter->pdev->dev,
+ " adapter->dynamic_task_desc.inter_taskid: %d\n",
+ adapter->dynamic_task_desc.inter_taskid);
+
+ /*
+ * allocate sense dma, driver maintain
+ * need in same 4GB segment
+ */
+ adapter->mem_desc.sense_data =
+ dma_alloc_coherent(
+ &adapter->pdev->dev,
+ adapter->adapter_attr.io_qd * SCSI_SENSE_BUFFERSIZE,
+ &adapter->mem_desc.sense_data_dma, GFP_KERNEL);
+ if (!adapter->mem_desc.sense_data) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate sense data DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (!leapraid_is_in_same_4g_seg(adapter->mem_desc.sense_data_dma,
+ adapter->adapter_attr.io_qd *
+ SCSI_SENSE_BUFFERSIZE)) {
+		dev_warn(&adapter->pdev->dev,
+			 "retrying with 32-bit DMA: sense data buffer crosses a 4GB boundary!\n");
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ /* reply frame, need in same 4GB segment */
+ adapter->mem_desc.rep_msg =
+ dma_alloc_coherent(&adapter->pdev->dev,
+ adapter->adapter_attr.rep_msg_qd *
+ LEAPRAID_REPLY_SIEZ,
+ &adapter->mem_desc.rep_msg_dma,
+ GFP_KERNEL);
+ if (!adapter->mem_desc.rep_msg) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate reply message DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (!leapraid_is_in_same_4g_seg(adapter->mem_desc.rep_msg_dma,
+ adapter->adapter_attr.rep_msg_qd *
+ LEAPRAID_REPLY_SIEZ)) {
+		dev_warn(&adapter->pdev->dev,
+			 "retrying with 32-bit DMA: reply message buffer crosses a 4GB boundary!\n");
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ /* address of reply frame */
+ adapter->mem_desc.rep_msg_addr =
+ dma_alloc_coherent(&adapter->pdev->dev,
+ adapter->adapter_attr.rep_msg_qd *
+ LEAPRAID_REP_MSG_ADDR_SIZE,
+ &adapter->mem_desc.rep_msg_addr_dma,
+ GFP_KERNEL);
+ if (!adapter->mem_desc.rep_msg_addr) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate reply message address DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ adapter->adapter_attr.rep_desc_q_seg_cnt =
+ DIV_ROUND_UP(adapter->adapter_attr.rq_cnt,
+ LEAPRAID_REP_DESC_CHUNK_SIZE);
+ adapter->mem_desc.rep_desc_seg_maint =
+ kcalloc(adapter->adapter_attr.rep_desc_q_seg_cnt,
+ sizeof(struct leapraid_rep_desc_seg_maint),
+ GFP_KERNEL);
+ if (!adapter->mem_desc.rep_desc_seg_maint) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rep_desc_q_cnt_allocated = 0;
+ for (i = 0; i < adapter->adapter_attr.rep_desc_q_seg_cnt; i++) {
+ adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_maint =
+ kcalloc(LEAPRAID_REP_DESC_CHUNK_SIZE,
+ sizeof(struct leapraid_rep_desc_maint),
+ GFP_KERNEL);
+ if (!adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_maint) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg =
+ dma_alloc_coherent(
+ &adapter->pdev->dev,
+ (adapter->adapter_attr.rep_desc_qd *
+ LEAPRAID_REP_DESC_ENTRY_SIZE) *
+ LEAPRAID_REP_DESC_CHUNK_SIZE,
+ &adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg_dma,
+ GFP_KERNEL);
+ if (!adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate reply descriptor segment DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (j = 0; j < LEAPRAID_REP_DESC_CHUNK_SIZE; j++) {
+ if (rep_desc_q_cnt_allocated >=
+ adapter->adapter_attr.rq_cnt)
+ break;
+ adapter->mem_desc
+ .rep_desc_seg_maint[i]
+ .rep_desc_maint[j]
+ .rep_desc =
+ (void *)((u8 *)(
+ adapter->mem_desc
+ .rep_desc_seg_maint[i]
+ .rep_desc_seg) +
+ j *
+ (adapter->adapter_attr.rep_desc_qd *
+ LEAPRAID_REP_DESC_ENTRY_SIZE));
+ adapter->mem_desc
+ .rep_desc_seg_maint[i]
+ .rep_desc_maint[j]
+ .rep_desc_dma =
+ adapter->mem_desc
+ .rep_desc_seg_maint[i]
+ .rep_desc_seg_dma +
+ j *
+ (adapter->adapter_attr.rep_desc_qd *
+ LEAPRAID_REP_DESC_ENTRY_SIZE);
+ rep_desc_q_cnt_allocated++;
+ }
+ }
+
+ if (!reset_devices) {
+ adapter->mem_desc.rep_desc_q_arr =
+ dma_alloc_coherent(
+ &adapter->pdev->dev,
+ adapter->adapter_attr.rq_cnt *
+ LEAPRAID_REP_RQ_CNT_SIZE,
+ &adapter->mem_desc.rep_desc_q_arr_dma,
+ GFP_KERNEL);
+ if (!adapter->mem_desc.rep_desc_q_arr) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate reply descriptor queue array DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ }
+
+ return 0;
+out:
+ if (rc == -EAGAIN) {
+ leapraid_free_host_memory(adapter);
+ adapter->adapter_attr.use_32_dma_mask = true;
+ rc = dma_set_mask_and_coherent(&adapter->pdev->dev,
+ DMA_BIT_MASK(32));
+ if (rc) {
+			dev_err(&adapter->pdev->dev,
+				"failed to set 32-bit DMA mask\n");
+ return rc;
+ }
+ goto try_again;
+ }
+ return rc;
+}
+
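+/*
+ * Allocate one bit per possible device handle for the physical-disk,
+ * blocking, pending-add and removing bitmaps, rounding the byte count
+ * up so max_dev_handle bits always fit.
+ */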
+static int leapraid_alloc_dev_topo_bitmaps(struct leapraid_adapter *adapter)
+{
+ adapter->dev_topo.pd_hdls_sz =
+ adapter->adapter_attr.features.max_dev_handle /
+ LEAPRAID_BITS_PER_BYTE;
+ if (adapter->adapter_attr.features.max_dev_handle %
+ LEAPRAID_BITS_PER_BYTE)
+ adapter->dev_topo.pd_hdls_sz++;
+ adapter->dev_topo.pd_hdls =
+ kzalloc(adapter->dev_topo.pd_hdls_sz, GFP_KERNEL);
+ if (!adapter->dev_topo.pd_hdls)
+ return -ENOMEM;
+
+ adapter->dev_topo.blocking_hdls =
+ kzalloc(adapter->dev_topo.pd_hdls_sz, GFP_KERNEL);
+ if (!adapter->dev_topo.blocking_hdls)
+ return -ENOMEM;
+
+ adapter->dev_topo.pending_dev_add_sz =
+ adapter->adapter_attr.features.max_dev_handle /
+ LEAPRAID_BITS_PER_BYTE;
+ if (adapter->adapter_attr.features.max_dev_handle %
+ LEAPRAID_BITS_PER_BYTE)
+ adapter->dev_topo.pending_dev_add_sz++;
+ adapter->dev_topo.pending_dev_add =
+ kzalloc(adapter->dev_topo.pending_dev_add_sz, GFP_KERNEL);
+ if (!adapter->dev_topo.pending_dev_add)
+ return -ENOMEM;
+
+ adapter->dev_topo.dev_removing_sz =
+ adapter->dev_topo.pending_dev_add_sz;
+ adapter->dev_topo.dev_removing =
+ kzalloc(adapter->dev_topo.dev_removing_sz, GFP_KERNEL);
+ if (!adapter->dev_topo.dev_removing)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void leapraid_free_dev_topo_bitmaps(struct leapraid_adapter *adapter)
+{
+ kfree(adapter->dev_topo.pd_hdls);
+ kfree(adapter->dev_topo.blocking_hdls);
+ kfree(adapter->dev_topo.pending_dev_add);
+ kfree(adapter->dev_topo.dev_removing);
+}
+
+static int leapraid_init_driver_cmds(struct leapraid_adapter *adapter)
+{
+ u32 buffer_size = 0;
+ void *buffer;
+
+ INIT_LIST_HEAD(&adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.scan_dev_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.scan_dev_cmd.cb_idx = LEAPRAID_SCAN_DEV_CB_IDX;
+ list_add_tail(&adapter->driver_cmds.scan_dev_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.cfg_op_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.cfg_op_cmd.cb_idx = LEAPRAID_CONFIG_CB_IDX;
+ mutex_init(&adapter->driver_cmds.cfg_op_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.cfg_op_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.transport_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.transport_cmd.cb_idx = LEAPRAID_TRANSPORT_CB_IDX;
+ mutex_init(&adapter->driver_cmds.transport_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.transport_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.timestamp_sync_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.timestamp_sync_cmd.cb_idx =
+ LEAPRAID_TIMESTAMP_SYNC_CB_IDX;
+ mutex_init(&adapter->driver_cmds.timestamp_sync_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.timestamp_sync_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.raid_action_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.raid_action_cmd.cb_idx =
+ LEAPRAID_RAID_ACTION_CB_IDX;
+ mutex_init(&adapter->driver_cmds.raid_action_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.raid_action_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.driver_scsiio_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.driver_scsiio_cmd.cb_idx =
+ LEAPRAID_DRIVER_SCSIIO_CB_IDX;
+ mutex_init(&adapter->driver_cmds.driver_scsiio_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.driver_scsiio_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ buffer_size = sizeof(struct scsi_cmnd) +
+ sizeof(struct leapraid_io_req_tracker) +
+ SCSI_SENSE_BUFFERSIZE +
+ sizeof(struct scatterlist);
+ buffer = kzalloc(buffer_size, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ adapter->driver_cmds.internal_scmd = buffer;
+ buffer = (void *)((u8 *)buffer +
+ sizeof(struct scsi_cmnd) +
+ sizeof(struct leapraid_io_req_tracker));
+ adapter->driver_cmds.internal_scmd->sense_buffer =
+ (unsigned char *)buffer;
+ buffer = (void *)((u8 *)buffer + SCSI_SENSE_BUFFERSIZE);
+ adapter->driver_cmds.internal_scmd->sdb.table.sgl =
+ (struct scatterlist *)buffer;
+ buffer = (void *)((u8 *)buffer + sizeof(struct scatterlist));
+
+ adapter->driver_cmds.enc_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.enc_cmd.cb_idx = LEAPRAID_ENC_CB_IDX;
+ mutex_init(&adapter->driver_cmds.enc_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.enc_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.notify_event_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.notify_event_cmd.cb_idx =
+ LEAPRAID_NOTIFY_EVENT_CB_IDX;
+ mutex_init(&adapter->driver_cmds.notify_event_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.notify_event_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.ctl_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.ctl_cmd.cb_idx = LEAPRAID_CTL_CB_IDX;
+ mutex_init(&adapter->driver_cmds.ctl_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.ctl_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.tm_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.tm_cmd.cb_idx = LEAPRAID_TM_CB_IDX;
+ mutex_init(&adapter->driver_cmds.tm_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.tm_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ return 0;
+}
+
+static void leapraid_unmask_evts(struct leapraid_adapter *adapter, u16 evt)
+{
+ if (evt >= LEAPRAID_MAX_EVENT_NUM)
+ return;
+
+ clear_bit(evt, (unsigned long *)adapter->fw_evt_s.leapraid_evt_masks);
+}
+
+static void leapraid_init_event_mask(struct leapraid_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < LEAPRAID_EVT_MASK_COUNT; i++)
+ adapter->fw_evt_s.leapraid_evt_masks[i] = -1;
+ leapraid_unmask_evts(adapter, LEAPRAID_EVT_SAS_DISCOVERY);
+ leapraid_unmask_evts(adapter, LEAPRAID_EVT_SAS_TOPO_CHANGE_LIST);
+ leapraid_unmask_evts(adapter, LEAPRAID_EVT_SAS_ENCL_DEV_STATUS_CHANGE);
+ leapraid_unmask_evts(adapter, LEAPRAID_EVT_SAS_DEV_STATUS_CHANGE);
+ leapraid_unmask_evts(adapter, LEAPRAID_EVT_IR_CHANGE);
+}
+
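+/*
+ * Fill the ADAPTER_INIT request: message/driver versions, queue depths,
+ * the DMA bases allocated above and a millisecond timestamp.  With RDPQ
+ * array mode (the !reset_devices case) the firmware gets an array of
+ * per-queue reply descriptor base addresses; otherwise the single
+ * queue's base is passed directly.
+ */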
+static void leapraid_prepare_adp_init_req(
+ struct leapraid_adapter *adapter,
+ struct leapraid_adapter_init_req *init_req)
+{
+ ktime_t cur_time;
+ int i;
+ u32 reply_post_free_ary_sz;
+
+ memset(init_req, 0, sizeof(struct leapraid_adapter_init_req));
+ init_req->func = LEAPRAID_FUNC_ADAPTER_INIT;
+ init_req->who_init = LEAPRAID_WHOINIT_LINUX_DRIVER;
+ init_req->msg_ver = cpu_to_le16(0x0100);
+ init_req->header_ver = cpu_to_le16(0x0000);
+
+ init_req->driver_ver = cpu_to_le32((LEAPRAID_MAJOR_VERSION << 24) |
+ (LEAPRAID_MINOR_VERSION << 16) |
+ (LEAPRAID_BUILD_VERSION << 8) |
+ LEAPRAID_RELEASE_VERSION);
+ if (adapter->notification_desc.msix_enable)
+ init_req->host_msix_vectors = adapter->adapter_attr.rq_cnt;
+
+ init_req->req_frame_size =
+ cpu_to_le16(LEAPRAID_REQUEST_SIZE / LEAPRAID_DWORDS_BYTE_SIZE);
+ init_req->rep_desc_qd =
+ cpu_to_le16(adapter->adapter_attr.rep_desc_qd);
+ init_req->rep_msg_qd =
+ cpu_to_le16(adapter->adapter_attr.rep_msg_qd);
+ init_req->sense_buffer_add_high =
+ cpu_to_le32((u64)adapter->mem_desc.sense_data_dma >> 32);
+ init_req->rep_msg_dma_high =
+ cpu_to_le32((u64)adapter->mem_desc.rep_msg_dma >> 32);
+ init_req->task_desc_base_addr =
+ cpu_to_le64((u64)adapter->mem_desc.task_desc_dma);
+ init_req->rep_msg_addr_dma =
+ cpu_to_le64((u64)adapter->mem_desc.rep_msg_addr_dma);
+ if (!reset_devices) {
+ reply_post_free_ary_sz =
+ adapter->adapter_attr.rq_cnt * LEAPRAID_REP_RQ_CNT_SIZE;
+ memset(adapter->mem_desc.rep_desc_q_arr, 0,
+ reply_post_free_ary_sz);
+
+ for (i = 0; i < adapter->adapter_attr.rq_cnt; i++) {
+ adapter->mem_desc
+ .rep_desc_q_arr[i]
+ .rep_desc_base_addr =
+ cpu_to_le64 (
+ (u64)adapter->mem_desc
+ .rep_desc_seg_maint[i /
+ LEAPRAID_REP_DESC_CHUNK_SIZE]
+ .rep_desc_maint[i %
+ LEAPRAID_REP_DESC_CHUNK_SIZE]
+ .rep_desc_dma);
+ }
+
+ init_req->msg_flg =
+ LEAPRAID_ADAPTER_INIT_MSGFLG_RDPQ_ARRAY_MODE;
+ init_req->rep_desc_q_arr_addr =
+ cpu_to_le64((u64)adapter->mem_desc.rep_desc_q_arr_dma);
+ } else {
+ init_req->rep_desc_q_arr_addr =
+ cpu_to_le64((u64)adapter->mem_desc
+ .rep_desc_seg_maint[0]
+ .rep_desc_maint[0]
+ .rep_desc_dma);
+ }
+ cur_time = ktime_get_real();
+ init_req->time_stamp = cpu_to_le64(ktime_to_ms(cur_time));
+}
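+/*
+ * Worked example for the driver_ver packing above: with the version
+ * macros from leapraid_func.h (major 2, minor 0, build 0, release 5),
+ *
+ *	(2 << 24) | (0 << 16) | (0 << 8) | 5 == 0x02000005
+ *
+ * which encodes driver version "2.00.00.05" in a single dword.
+ */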
+
+static int leapraid_send_adapter_init(struct leapraid_adapter *adapter)
+{
+ struct leapraid_adapter_init_req init_req;
+ struct leapraid_adapter_init_rep init_rep;
+ u16 adapter_status;
+ int rc = 0;
+
+ leapraid_prepare_adp_init_req(adapter, &init_req);
+
+ rc = leapraid_handshake_func(adapter,
+ sizeof(struct leapraid_adapter_init_req),
+ (u32 *)&init_req,
+ sizeof(struct leapraid_adapter_init_rep),
+ (u16 *)&init_rep);
+ if (rc != 0) {
+ dev_err(&adapter->pdev->dev, "%s: handshake failed, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ adapter_status =
+ le16_to_cpu(init_rep.adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (adapter_status != LEAPRAID_ADAPTER_STATUS_SUCCESS) {
+ dev_err(&adapter->pdev->dev, "%s: failed\n", __func__);
+ rc = -EIO;
+ }
+
+ adapter->timestamp_sync_cnt = 0;
+ return rc;
+}
+
+static int leapraid_cfg_pages(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_io_unit_page1 *sas_io_unit_page1 = NULL;
+ struct leapraid_bios_page3 bios_page3;
+ struct leapraid_bios_page2 bios_page2;
+ int rc = 0;
+ int sz;
+
+ rc = leapraid_op_config_page(adapter, &bios_page3, cfgp1,
+ cfgp2, GET_BIOS_PG3);
+ if (rc)
+ return rc;
+
+ rc = leapraid_op_config_page(adapter, &bios_page2, cfgp1,
+ cfgp2, GET_BIOS_PG2);
+ if (rc)
+ return rc;
+
+ adapter->adapter_attr.bios_version =
+ le32_to_cpu(bios_page3.bios_version);
+ adapter->adapter_attr.wideport_max_queue_depth =
+ LEAPRAID_SAS_QUEUE_DEPTH;
+ adapter->adapter_attr.narrowport_max_queue_depth =
+ LEAPRAID_SAS_QUEUE_DEPTH;
+ adapter->adapter_attr.sata_max_queue_depth =
+ LEAPRAID_SATA_QUEUE_DEPTH;
+
+ adapter->boot_devs.requested_boot_dev.form =
+ bios_page2.requested_boot_dev_form;
+ memcpy((void *)adapter->boot_devs.requested_boot_dev.pg_dev,
+ (void *)&bios_page2.requested_boot_dev,
+ LEAPRAID_BOOT_DEV_SIZE);
+ adapter->boot_devs.requested_alt_boot_dev.form =
+ bios_page2.requested_alt_boot_dev_form;
+ memcpy((void *)adapter->boot_devs.requested_alt_boot_dev.pg_dev,
+ (void *)&bios_page2.requested_alt_boot_dev,
+ LEAPRAID_BOOT_DEV_SIZE);
+ adapter->boot_devs.current_boot_dev.form =
+ bios_page2.current_boot_dev_form;
+ memcpy((void *)adapter->boot_devs.current_boot_dev.pg_dev,
+ (void *)&bios_page2.current_boot_dev,
+ LEAPRAID_BOOT_DEV_SIZE);
+
+ sz = offsetof(struct leapraid_sas_io_unit_page1, phy_info);
+ sas_io_unit_page1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_page1) {
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ cfgp1.size = sz;
+
+ rc = leapraid_op_config_page(adapter, sas_io_unit_page1, cfgp1,
+ cfgp2, GET_SAS_IOUNIT_PG1);
+ if (rc)
+ goto out;
+
+ if (le16_to_cpu(sas_io_unit_page1->wideport_max_queue_depth))
+ adapter->adapter_attr.wideport_max_queue_depth =
+ le16_to_cpu(
+ sas_io_unit_page1->wideport_max_queue_depth);
+
+ if (le16_to_cpu(sas_io_unit_page1->narrowport_max_queue_depth))
+ adapter->adapter_attr.narrowport_max_queue_depth =
+ le16_to_cpu(
+ sas_io_unit_page1->narrowport_max_queue_depth);
+
+ if (sas_io_unit_page1->sata_max_queue_depth)
+ adapter->adapter_attr.sata_max_queue_depth =
+ sas_io_unit_page1->sata_max_queue_depth;
+
+out:
+ kfree(sas_io_unit_page1);
+ dev_info(&adapter->pdev->dev,
+ "max wp qd=%d, max np qd=%d, max sata qd=%d\n",
+ adapter->adapter_attr.wideport_max_queue_depth,
+ adapter->adapter_attr.narrowport_max_queue_depth,
+ adapter->adapter_attr.sata_max_queue_depth);
+ return rc;
+}
+
+static int leapraid_evt_notify(struct leapraid_adapter *adapter)
+{
+ struct leapraid_evt_notify_req *evt_notify_req;
+ int rc = 0;
+ int i;
+
+ mutex_lock(&adapter->driver_cmds.notify_event_cmd.mutex);
+ adapter->driver_cmds.notify_event_cmd.status = LEAPRAID_CMD_PENDING;
+ evt_notify_req =
+ leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.notify_event_cmd.inter_taskid);
+ memset(evt_notify_req, 0, sizeof(struct leapraid_evt_notify_req));
+ evt_notify_req->func = LEAPRAID_FUNC_EVENT_NOTIFY;
+ for (i = 0; i < LEAPRAID_EVT_MASK_COUNT; i++)
+ evt_notify_req->evt_masks[i] =
+ cpu_to_le32(adapter->fw_evt_s.leapraid_evt_masks[i]);
+ init_completion(&adapter->driver_cmds.notify_event_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.notify_event_cmd.inter_taskid);
+ wait_for_completion_timeout(
+ &adapter->driver_cmds.notify_event_cmd.done,
+ LEAPRAID_NOTIFY_EVENT_CMD_TIMEOUT * HZ);
+	if (!(adapter->driver_cmds.notify_event_cmd.status &
+	      LEAPRAID_CMD_DONE) &&
+	    (adapter->driver_cmds.notify_event_cmd.status &
+	     LEAPRAID_CMD_RESET))
+		rc = -EFAULT;
+ adapter->driver_cmds.notify_event_cmd.status = LEAPRAID_CMD_NOT_USED;
+ mutex_unlock(&adapter->driver_cmds.notify_event_cmd.mutex);
+
+ return rc;
+}
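+/*
+ * The internal driver commands above and below share one shape; a
+ * condensed sketch, with locking and error paths elided and the
+ * function code standing in for whichever request is being built:
+ *
+ *	cmd->status = LEAPRAID_CMD_PENDING;
+ *	req = leapraid_get_task_desc(adapter, cmd->inter_taskid);
+ *	memset(req, 0, sizeof(*req));
+ *	req->func = LEAPRAID_FUNC_EVENT_NOTIFY;
+ *	init_completion(&cmd->done);
+ *	leapraid_fire_task(adapter, cmd->inter_taskid);
+ *	wait_for_completion_timeout(&cmd->done, timeout * HZ);
+ *	if (!(cmd->status & LEAPRAID_CMD_DONE))
+ *		rc = (cmd->status & LEAPRAID_CMD_RESET) ? -EFAULT : -ETIME;
+ */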
+
+int leapraid_scan_dev(struct leapraid_adapter *adapter, bool async_scan_dev)
+{
+ struct leapraid_scan_dev_req *scan_dev_req;
+ struct leapraid_scan_dev_rep *scan_dev_rep;
+ u16 adapter_status;
+ int rc = 0;
+
+ dev_info(&adapter->pdev->dev,
+ "send device scan, async_scan_dev=%d!\n", async_scan_dev);
+
+ adapter->driver_cmds.scan_dev_cmd.status = LEAPRAID_CMD_PENDING;
+ adapter->driver_cmds.scan_dev_cmd.async_scan_dev = async_scan_dev;
+ scan_dev_req = leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.scan_dev_cmd.inter_taskid);
+ memset(scan_dev_req, 0, sizeof(struct leapraid_scan_dev_req));
+ scan_dev_req->func = LEAPRAID_FUNC_SCAN_DEV;
+
+ if (async_scan_dev) {
+ adapter->scan_dev_desc.first_scan_dev_fired = true;
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.scan_dev_cmd.inter_taskid);
+ return 0;
+ }
+
+ init_completion(&adapter->driver_cmds.scan_dev_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.scan_dev_cmd.inter_taskid);
+ wait_for_completion_timeout(&adapter->driver_cmds.scan_dev_cmd.done,
+ LEAPRAID_SCAN_DEV_CMD_TIMEOUT * HZ);
+ if (!(adapter->driver_cmds.scan_dev_cmd.status & LEAPRAID_CMD_DONE)) {
+ dev_err(&adapter->pdev->dev, "device scan timeout!\n");
+ if (adapter->driver_cmds.scan_dev_cmd.status &
+ LEAPRAID_CMD_RESET)
+ rc = -EFAULT;
+ else
+ rc = -ETIME;
+ goto out;
+ }
+
+ scan_dev_rep = (void *)(&adapter->driver_cmds.scan_dev_cmd.reply);
+ adapter_status =
+ le16_to_cpu(scan_dev_rep->adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (adapter_status != LEAPRAID_ADAPTER_STATUS_SUCCESS) {
+ dev_err(&adapter->pdev->dev, "device scan failure!\n");
+ rc = -EFAULT;
+ goto out;
+ }
+
+out:
+ adapter->driver_cmds.scan_dev_cmd.status = LEAPRAID_CMD_NOT_USED;
+ dev_info(&adapter->pdev->dev,
+ "device scan %s\n", ((rc == 0) ? "SUCCESS" : "FAILED"));
+ return rc;
+}
+
+static void leapraid_init_task_tracker(struct leapraid_adapter *adapter)
+{
+ unsigned long flags;
+
+	/*
+	 * Nothing is populated under the lock yet; the empty
+	 * acquire/release pair only acts as a barrier against concurrent
+	 * task_lock holders.
+	 */
+	spin_lock_irqsave(&adapter->dynamic_task_desc.task_lock, flags);
+	spin_unlock_irqrestore(&adapter->dynamic_task_desc.task_lock, flags);
+}
+
+static void leapraid_init_rep_msg_addr(struct leapraid_adapter *adapter)
+{
+ u32 reply_address;
+ unsigned int i;
+
+ for (i = 0, reply_address = (u32)adapter->mem_desc.rep_msg_dma;
+ i < adapter->adapter_attr.rep_msg_qd;
+ i++, reply_address += LEAPRAID_REPLY_SIEZ) {
+ adapter->mem_desc.rep_msg_addr[i] = cpu_to_le32(reply_address);
+ }
+}
+
+static void init_rep_desc(struct leapraid_rq *rq, int index,
+ union leapraid_rep_desc_union *reply_post_free_contig)
+{
+ struct leapraid_adapter *adapter = rq->adapter;
+ unsigned int i;
+
+	if (!reset_devices) {
+		rq->rep_desc = adapter->mem_desc
+			.rep_desc_seg_maint[index / LEAPRAID_REP_DESC_CHUNK_SIZE]
+			.rep_desc_maint[index % LEAPRAID_REP_DESC_CHUNK_SIZE]
+			.rep_desc;
+	} else {
+		rq->rep_desc = reply_post_free_contig;
+	}
+
+ rq->rep_post_host_idx = 0;
+ for (i = 0; i < adapter->adapter_attr.rep_desc_qd; i++)
+ rq->rep_desc[i].words = cpu_to_le64(ULLONG_MAX);
+}
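+/*
+ * Reply descriptor queues are kept in segments of
+ * LEAPRAID_REP_DESC_CHUNK_SIZE (16), so queue index 35, for example,
+ * resolves to segment 35 / 16 == 2, entry 35 % 16 == 3. Filling each
+ * slot with ULLONG_MAX marks it unused, presumably so the reply handler
+ * can recognize slots the firmware has not written yet.
+ */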
+
+static void leapraid_init_rep_desc(struct leapraid_adapter *adapter)
+{
+ union leapraid_rep_desc_union *reply_post_free_contig;
+ struct leapraid_int_rq *int_rq;
+ struct leapraid_blk_mq_poll_rq *blk_mq_poll_rq;
+ unsigned int i;
+ int index;
+
+ index = 0;
+ reply_post_free_contig = adapter->mem_desc
+ .rep_desc_seg_maint[0]
+ .rep_desc_maint[0]
+ .rep_desc;
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qdex; i++) {
+ int_rq = &adapter->notification_desc.int_rqs[i];
+ init_rep_desc(&int_rq->rq, index, reply_post_free_contig);
+ if (!reset_devices)
+ index++;
+ else
+ reply_post_free_contig +=
+ adapter->adapter_attr.rep_desc_qd;
+ }
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qcnt; i++) {
+ blk_mq_poll_rq = &adapter->notification_desc.blk_mq_poll_rqs[i];
+ init_rep_desc(&blk_mq_poll_rq->rq,
+ index, reply_post_free_contig);
+ if (!reset_devices)
+ index++;
+ else
+ reply_post_free_contig +=
+ adapter->adapter_attr.rep_desc_qd;
+ }
+}
+
+static void leapraid_init_bar_idx_regs(struct leapraid_adapter *adapter)
+{
+ struct leapraid_int_rq *int_rq;
+ struct leapraid_blk_mq_poll_rq *blk_mq_poll_rq;
+ unsigned int i, j;
+
+ adapter->rep_msg_host_idx = adapter->adapter_attr.rep_msg_qd - 1;
+ writel(adapter->rep_msg_host_idx,
+ &adapter->iomem_base->rep_msg_host_idx);
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qdex; i++) {
+ int_rq = &adapter->notification_desc.int_rqs[i];
+ for (j = 0; j < REP_POST_HOST_IDX_REG_CNT; j++)
+ writel((int_rq->rq.msix_idx & 7) <<
+ LEAPRAID_RPHI_MSIX_IDX_SHIFT,
+ &adapter->iomem_base->rep_post_reg_idx[j].idx);
+ }
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qcnt; i++) {
+ blk_mq_poll_rq =
+ &adapter->notification_desc.blk_mq_poll_rqs[i];
+ for (j = 0; j < REP_POST_HOST_IDX_REG_CNT; j++)
+ writel((blk_mq_poll_rq->rq.msix_idx & 7) <<
+ LEAPRAID_RPHI_MSIX_IDX_SHIFT,
+ &adapter->iomem_base->rep_post_reg_idx[j].idx);
+ }
+}
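+/*
+ * The (msix_idx & 7) above is LEAPRAID_MSIX_GROUP_MASK spelled out:
+ * vectors fold into groups of LEAPRAID_MSIX_GROUP_SIZE (8), so e.g.
+ * msix_idx 11 programs reply-post index register group 11 & 7 == 3.
+ */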
+
+static int leapraid_make_adapter_available(struct leapraid_adapter *adapter)
+{
+ int rc = 0;
+
+ leapraid_init_task_tracker(adapter);
+ leapraid_init_rep_msg_addr(adapter);
+
+ if (adapter->scan_dev_desc.driver_loading)
+ leapraid_configure_reply_queue_affinity(adapter);
+
+ leapraid_init_rep_desc(adapter);
+ rc = leapraid_send_adapter_init(adapter);
+ if (rc)
+ return rc;
+
+ leapraid_init_bar_idx_regs(adapter);
+ leapraid_unmask_int(adapter);
+ rc = leapraid_cfg_pages(adapter);
+ if (rc)
+ return rc;
+
+ rc = leapraid_evt_notify(adapter);
+ if (rc)
+ return rc;
+
+ if (!adapter->access_ctrl.shost_recovering) {
+ adapter->scan_dev_desc.wait_scan_dev_done = true;
+ return 0;
+ }
+
+	return leapraid_scan_dev(adapter, false);
+}
+
+int leapraid_ctrl_init(struct leapraid_adapter *adapter)
+{
+ u32 cap;
+ int rc = 0;
+
+ rc = leapraid_set_pcie_and_notification(adapter);
+ if (rc)
+ goto out_free_resources;
+
+ pci_set_drvdata(adapter->pdev, adapter->shost);
+
+ pcie_capability_read_dword(adapter->pdev, PCI_EXP_DEVCAP, &cap);
+
+ if (cap & PCI_EXP_DEVCAP_EXT_TAG) {
+ pcie_capability_set_word(adapter->pdev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_EXT_TAG);
+ }
+
+ rc = leapraid_make_adapter_ready(adapter, PART_RESET);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "make adapter ready failure\n");
+ goto out_free_resources;
+ }
+
+ rc = leapraid_get_adapter_features(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "get adapter feature failure\n");
+ goto out_free_resources;
+ }
+
+ rc = leapraid_fw_log_init(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "fw log init failure\n");
+ goto out_free_resources;
+ }
+
+ rc = leapraid_request_host_memory(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "request host memory failure\n");
+ goto out_free_resources;
+ }
+
+ init_waitqueue_head(&adapter->reset_desc.reset_wait_queue);
+
+ rc = leapraid_alloc_dev_topo_bitmaps(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "alloc topo bitmaps failure\n");
+ goto out_free_resources;
+ }
+
+ rc = leapraid_init_driver_cmds(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "init driver cmds failure\n");
+ goto out_free_resources;
+ }
+
+ leapraid_init_event_mask(adapter);
+
+ rc = leapraid_make_adapter_available(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev,
+ "make adapter available failure\n");
+ goto out_free_resources;
+ }
+ return 0;
+
+out_free_resources:
+ adapter->access_ctrl.host_removing = true;
+ leapraid_fw_log_exit(adapter);
+ leapraid_disable_controller(adapter);
+ leapraid_free_host_memory(adapter);
+ leapraid_free_dev_topo_bitmaps(adapter);
+ pci_set_drvdata(adapter->pdev, NULL);
+ return rc;
+}
+
+void leapraid_remove_ctrl(struct leapraid_adapter *adapter)
+{
+ leapraid_check_scheduled_fault_stop(adapter);
+ leapraid_fw_log_stop(adapter);
+ leapraid_fw_log_exit(adapter);
+ leapraid_disable_controller(adapter);
+ leapraid_free_host_memory(adapter);
+ leapraid_free_dev_topo_bitmaps(adapter);
+ leapraid_free_enc_list(adapter);
+ pci_set_drvdata(adapter->pdev, NULL);
+}
+
+void leapraid_free_internal_scsi_cmd(struct leapraid_adapter *adapter)
+{
+ mutex_lock(&adapter->driver_cmds.driver_scsiio_cmd.mutex);
+ kfree(adapter->driver_cmds.internal_scmd);
+ adapter->driver_cmds.internal_scmd = NULL;
+ mutex_unlock(&adapter->driver_cmds.driver_scsiio_cmd.mutex);
+}
diff --git a/drivers/scsi/leapraid/leapraid_func.h b/drivers/scsi/leapraid/leapraid_func.h
new file mode 100644
index 000000000000..2c51ef359b7e
--- /dev/null
+++ b/drivers/scsi/leapraid/leapraid_func.h
@@ -0,0 +1,1425 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 LeapIO Tech Inc.
+ *
+ * LeapRAID Storage and RAID Controller driver.
+ */
+
+#ifndef LEAPRAID_FUNC_H_INCLUDED
+#define LEAPRAID_FUNC_H_INCLUDED
+
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/poll.h>
+#include <linux/errno.h>
+#include <linux/ktime.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsicam.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_sas.h>
+
+#include "leapraid.h"
+
+#include <linux/blk-mq-pci.h>
+/* Request and reply buffer sizes */
+#define LEAPRAID_REQUEST_SIZE 128
+#define LEAPRAID_REPLY_SIEZ 128
+#define LEAPRAID_CHAIN_SEG_SIZE 128
+#define LEAPRAID_MAX_SGES_IN_CHAIN 7
+#define LEAPRAID_DEFAULT_CHAINS_PER_IO 19
+#define LEAPRAID_DEFAULT_DIX_CHAINS_PER_IO \
+ (2 * LEAPRAID_DEFAULT_CHAINS_PER_IO) /* TODO DIX */
+#define LEAPRAID_IEEE_SGE64_ENTRY_SIZE 16
+#define LEAPRAID_REP_DESC_CHUNK_SIZE 16
+#define LEAPRAID_REP_DESC_ENTRY_SIZE 8
+#define LEAPRAID_REP_MSG_ADDR_SIZE 4
+#define LEAPRAID_REP_RQ_CNT_SIZE 16
+
+#define LEAPRAID_SYS_LOG_BUF_SIZE 0x200000
+#define LEAPRAID_SYS_LOG_BUF_RESERVE 0x1000
+
+/* Driver version and name */
+#define LEAPRAID_DRIVER_NAME "LeapRaid"
+#define LEAPRAID_NAME_LENGTH 48
+#define LEAPRAID_AUTHOR "LeapIO Inc."
+#define LEAPRAID_DESCRIPTION "LeapRaid Driver"
+#define LEAPRAID_DRIVER_VERSION "2.00.00.05"
+#define LEAPRAID_MAJOR_VERSION 2
+#define LEAPRAID_MINOR_VERSION 00
+#define LEAPRAID_BUILD_VERSION 00
+#define LEAPRAID_RELEASE_VERSION 05
+
+/* Device ID */
+#define LEAPRAID_VENDOR_ID 0xD405
+#define LEAPRAID_DEVID_HBA 0x8200
+#define LEAPRAID_DEVID_RAID 0x8201
+
+#define LEAPRAID_PCI_VENDOR_ID_MASK 0xFFFF
+
+/* RAID virtual channel ID */
+#define RAID_CHANNEL 1
+
+/* Scatter/Gather (SG) segment limits */
+#define LEAPRAID_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE
+
+#define LEAPRAID_KDUMP_MIN_PHYS_SEGMENTS 32
+#define LEAPRAID_SG_DEPTH LEAPRAID_MAX_PHYS_SEGMENTS
+
+/* firmware / config page operations */
+#define LEAPRAID_SET_PARAMETER_SYNC_TIMESTAMP 0x81
+#define LEAPRAID_CFG_REQ_RETRY_TIMES 2
+
+/* Hardware access helpers */
+#define leapraid_readl(addr) readl(addr)
+#define leapraid_check_reset(status) \
+ (!((status) & LEAPRAID_CMD_RESET))
+
+/* Polling intervals */
+#define LEAPRAID_PCIE_LOG_POLLING_INTERVAL 1
+#define LEAPRAID_FAULT_POLLING_INTERVAL 1000
+#define LEAPRAID_TIMESTAMP_SYNC_INTERVAL 900
+#define LEAPRAID_SMART_POLLING_INTERVAL (300 * 1000)
+
+/* init mask */
+#define LEAPRAID_RESET_IRQ_MASK 0x40000000
+#define LEAPRAID_REPLY_INT_MASK 0x00000008
+#define LEAPRAID_TO_SYS_DB_MASK 0x00000001
+
+/* queue depth */
+#define LEAPRAID_SATA_QUEUE_DEPTH 32
+#define LEAPRAID_SAS_QUEUE_DEPTH 254
+#define LEAPRAID_RAID_QUEUE_DEPTH 128
+
+/* SCSI device and queue limits */
+#define LEAPRAID_MAX_SECTORS 8192
+#define LEAPRAID_DEF_MAX_SECTORS 32767
+#define LEAPRAID_MAX_CDB_LEN 32
+#define LEAPRAID_MAX_LUNS 16384
+#define LEAPRAID_CAN_QUEUE_MIN 1
+#define LEAPRAID_THIS_ID_NONE -1
+#define LEAPRAID_CMD_PER_LUN 128
+#define LEAPRAID_MAX_SEGMENT_SIZE 0xffffffff
+
+/* SCSI sense and ASC/ASCQ and disk geometry configuration */
+#define DESC_FORMAT_THRESHOLD 0x72
+#define SENSE_KEY_MASK 0x0F
+#define SCSI_SENSE_RESPONSE_CODE_MASK 0x7F
+#define ASC_FAILURE_PREDICTION_THRESHOLD_EXCEEDED 0x5D
+#define LEAPRAID_LARGE_DISK_THRESHOLD 0x200000UL /* in sectors, 1GB */
+#define LEAPRAID_LARGE_DISK_HEADS 255
+#define LEAPRAID_LARGE_DISK_SECTORS 63
+#define LEAPRAID_SMALL_DISK_HEADS 64
+#define LEAPRAID_SMALL_DISK_SECTORS 32
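+/*
+ * Worked example: 0x200000 sectors * 512 bytes == 1 GiB. A disk above
+ * that threshold presumably reports 255 heads and 63 sectors per track,
+ * giving cylinders = capacity / (255 * 63); a smaller one reports
+ * 64 heads and 32 sectors per track, i.e. cylinders = capacity / 2048.
+ */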
+
+/* SMP (Serial Management Protocol) */
+#define LEAPRAID_SMP_PT_FLAG_SGL_PTR 0x80
+#define LEAPRAID_SMP_FN_REPORT_PHY_ERR_LOG 0x91
+#define LEAPRAID_SMP_FRAME_HEADER_SIZE 4
+#define LEAPRAID_SCSI_HOST_SHIFT 16
+#define LEAPRAID_SCSI_DRIVER_SHIFT 24
+
+/* SCSI ASC/ASCQ definitions */
+#define LEAPRAID_SCSI_ASCQ_DEFAULT 0x00
+#define LEAPRAID_SCSI_ASC_POWER_ON_RESET 0x29
+#define LEAPRAID_SCSI_ASC_INVALID_CMD_CODE 0x20
+#define LEAPRAID_SCSI_ASCQ_POWER_ON_RESET 0x07
+
+/* ---- VPD Page 0x89 (ATA Information) ---- */
+#define LEAPRAID_VPD_PAGE_ATA_INFO 0x89
+#define LEAPRAID_VPD_PG89_MAX_LEN 255
+#define LEAPRAID_VPD_PG89_MIN_LEN 214
+
+/* Byte index for NCQ support flag in VPD Page 0x89 */
+#define LEAPRAID_VPD_PG89_NCQ_BYTE_IDX 213
+#define LEAPRAID_VPD_PG89_NCQ_BIT_SHIFT 4
+#define LEAPRAID_VPD_PG89_NCQ_BIT_MASK 0x1
+
+/* readiness polling: max retries, sleep µs between */
+#define LEAPRAID_ADAPTER_READY_MAX_RETRY 15000
+#define LEAPRAID_ADAPTER_READY_SLEEP_MIN_US 1000
+#define LEAPRAID_ADAPTER_READY_SLEEP_MAX_US 1100
+
+/* Doorbell wait parameters */
+#define LEAPRAID_DB_WAIT_MAX_RETRY 20000
+#define LEAPRAID_DB_WAIT_DELAY_US 500
+
+/* Basic data size definitions */
+#define LEAPRAID_DWORDS_BYTE_SIZE 4
+#define LEAPRAID_WORD_BYTE_SIZE 2
+
+/* SGL threshold and chain offset */
+#define LEAPRAID_SGL_INLINE_THRESHOLD 2
+#define LEAPRAID_CHAIN_OFFSET_DWORDS 7
+
+/* MSI-X group size and mask */
+#define LEAPRAID_MSIX_GROUP_SIZE 8
+#define LEAPRAID_MSIX_GROUP_MASK 7
+
+/* basic constants and limits */
+#define LEAPRAID_BUSY_LIMIT 1
+#define LEAPRAID_INDEX_FIRST 0
+#define LEAPRAID_BITS_PER_BYTE 8
+#define LEAPRAID_INVALID_HOST_DIAG_VAL 0xFFFFFFFF
+
+/* retry / sleep configuration */
+#define LEAPRAID_UNLOCK_RETRY_LIMIT 20
+#define LEAPRAID_UNLOCK_SLEEP_MS 100
+#define LEAPRAID_MSLEEP_SHORT_MS 50
+#define LEAPRAID_MSLEEP_NORMAL_MS 100
+#define LEAPRAID_MSLEEP_LONG_MS 256
+#define LEAPRAID_MSLEEP_EXTRA_LONG_MS 500
+#define LEAPRAID_IO_POLL_DELAY_US 500
+
+/* controller reset loop parameters */
+#define LEAPRAID_RESET_LOOP_COUNT_REF (300000 / 256)
+#define LEAPRAID_RESET_LOOP_COUNT_DEFAULT 10000
+#define LEAPRAID_RESET_POLL_INTERVAL_MS 500
+
+/* Device / Volume configuration */
+#define LEAPRAID_MAX_VOLUMES_DEFAULT 32
+#define LEAPRAID_MAX_DEV_HANDLE_DEFAULT 2048
+#define LEAPRAID_INVALID_DEV_HANDLE 0xFFFF
+
+/* cmd queue depth */
+#define LEAPRAID_COALESCING_DEPTH_MAX 256
+#define LEAPRAID_DEFAULT_CMD_QD_OFFSET 64
+#define LEAPRAID_REPLY_QD_ALIGNMENT 16
+/* task id offset */
+#define LEAPRAID_TASKID_OFFSET_CTRL_CMD 1
+#define LEAPRAID_TASKID_OFFSET_SCSIIO_CMD 2
+#define LEAPRAID_TASKID_OFFSET_CFG_OP_CMD 1
+#define LEAPRAID_TASKID_OFFSET_TRANSPORT_CMD 2
+#define LEAPRAID_TASKID_OFFSET_TIMESTAMP_SYNC_CMD 3
+#define LEAPRAID_TASKID_OFFSET_RAID_ACTION_CMD 4
+#define LEAPRAID_TASKID_OFFSET_ENC_CMD 5
+#define LEAPRAID_TASKID_OFFSET_NOTIFY_EVENT_CMD 6
+
+/* task id offset for high-priority */
+#define LEAPRAID_HP_TASKID_OFFSET_CTL_CMD 0
+#define LEAPRAID_HP_TASKID_OFFSET_TM_CMD 1
+
+/* Event / Boot configuration */
+#define LEAPRAID_EVT_MASK_COUNT 4
+#define LEAPRAID_BOOT_DEV_SIZE 24
+
+/* logsense command definitions */
+#define LEAPRAID_LOGSENSE_DATA_LENGTH 16
+#define LEAPRAID_LOGSENSE_CDB_LENGTH 10
+#define LEAPRAID_LOGSENSE_CDB_CODE 0x6F
+#define LEAPRAID_LOGSENSE_TIMEOUT 5
+#define LEAPRAID_LOGSENSE_SMART_CODE 0x5D
+
+/* cmd timeout */
+#define LEAPRAID_DRIVER_SCSIIO_CMD_TIMEOUT LEAPRAID_LOGSENSE_TIMEOUT
+#define LEAPRAID_CFG_OP_TIMEOUT 15
+#define LEAPRAID_CTL_CMD_TIMEOUT 10
+#define LEAPRAID_SCAN_DEV_CMD_TIMEOUT 300
+#define LEAPRAID_TIMESTAMP_SYNC_CMD_TIMEOUT 10
+#define LEAPRAID_RAID_ACTION_CMD_TIMEOUT 10
+#define LEAPRAID_ENC_CMD_TIMEOUT 10
+#define LEAPRAID_NOTIFY_EVENT_CMD_TIMEOUT 30
+#define LEAPRAID_TM_CMD_TIMEOUT 30
+#define LEAPRAID_TRANSPORT_CMD_TIMEOUT 10
+
+/**
+ * struct leapraid_adapter_features - Features and
+ * capabilities of a LeapRAID adapter
+ *
+ * @req_slot: Number of request slots supported by the adapter
+ * @hp_slot: Number of high-priority slots supported by the adapter
+ * @adapter_caps: Adapter capabilities
+ * @fw_version: Firmware version of the adapter
+ * @max_volumes: Maximum number of RAID volumes supported by the adapter
+ * @max_dev_handle: Maximum device handle supported by the adapter
+ * @min_dev_handle: Minimum device handle supported by the adapter
+ */
+struct leapraid_adapter_features {
+ u16 req_slot;
+ u16 hp_slot;
+ u32 adapter_caps;
+ u32 fw_version;
+ u8 max_volumes;
+ u16 max_dev_handle;
+ u16 min_dev_handle;
+};
+
+/**
+ * struct leapraid_adapter_attr - Adapter attributes and capabilities
+ *
+ * @id: Adapter identifier
+ * @raid_support: Indicates if RAID is supported
+ * @bios_version: Version of the adapter BIOS
+ * @enable_mp: Indicates if multipath (MP) support is enabled
+ * @wideport_max_queue_depth: Maximum queue depth for wide ports
+ * @narrowport_max_queue_depth: Maximum queue depth for narrow ports
+ * @sata_max_queue_depth: Maximum queue depth for SATA
+ * @features: Detailed features of the adapter
+ * @adapter_total_qd: Total queue depth available on the adapter
+ * @io_qd: Queue depth allocated for I/O operations
+ * @rep_msg_qd: Queue depth for reply messages
+ * @rep_desc_qd: Queue depth for reply descriptors
+ * @rep_desc_q_seg_cnt: Number of segments in a reply descriptor queue
+ * @rq_cnt: Number of request queues
+ * @task_desc_dma_size: Size of task descriptor DMA memory
+ * @use_32_dma_mask: Indicates if 32-bit DMA mask is used
+ * @name: Adapter name string
+ */
+struct leapraid_adapter_attr {
+ u8 id;
+ bool raid_support;
+ u32 bios_version;
+ bool enable_mp;
+ u32 wideport_max_queue_depth;
+ u32 narrowport_max_queue_depth;
+ u32 sata_max_queue_depth;
+ struct leapraid_adapter_features features;
+ u32 adapter_total_qd;
+ u32 io_qd;
+ u32 rep_msg_qd;
+ u32 rep_desc_qd;
+ u32 rep_desc_q_seg_cnt;
+ u16 rq_cnt;
+ u32 task_desc_dma_size;
+ bool use_32_dma_mask;
+ char name[LEAPRAID_NAME_LENGTH];
+};
+
+/**
+ * struct leapraid_io_req_tracker - Track a SCSI I/O request
+ * for the adapter
+ *
+ * @taskid: Unique task ID for this I/O request
+ * @scmd: Pointer to the associated SCSI command
+ * @chain_list: List of chain frames associated with this request
+ * @msix_io: MSI-X vector assigned to this I/O request
+ * @chain: Pointer to the chain memory for this request
+ * @chain_dma: DMA address of the chain memory
+ */
+struct leapraid_io_req_tracker {
+ u16 taskid;
+ struct scsi_cmnd *scmd;
+ struct list_head chain_list;
+ u16 msix_io;
+ void *chain;
+ dma_addr_t chain_dma;
+};
+
+/**
+ * struct leapraid_task_tracker - Tracks a task in the adapter
+ *
+ * @taskid: Unique task ID for this tracker
+ * @cb_idx: Callback index associated with this task
+ * @tracker_list: Linked list node to chain this tracker in lists
+ */
+struct leapraid_task_tracker {
+ u16 taskid;
+ u8 cb_idx;
+ struct list_head tracker_list;
+};
+
+/**
+ * struct leapraid_rep_desc_maint - Maintains reply descriptor
+ * memory
+ *
+ * @rep_desc: Pointer to the reply descriptor
+ * @rep_desc_dma: DMA address of the reply descriptor
+ */
+struct leapraid_rep_desc_maint {
+ union leapraid_rep_desc_union *rep_desc;
+ dma_addr_t rep_desc_dma;
+};
+
+/**
+ * struct leapraid_rep_desc_seg_maint - Maintains reply descriptor
+ * segment memory
+ *
+ * @rep_desc_seg: Pointer to the reply descriptor segment
+ * @rep_desc_seg_dma: DMA address of the reply descriptor segment
+ * @rep_desc_maint: Pointer to the main reply descriptor structure
+ */
+struct leapraid_rep_desc_seg_maint {
+ void *rep_desc_seg;
+ dma_addr_t rep_desc_seg_dma;
+ struct leapraid_rep_desc_maint *rep_desc_maint;
+};
+
+/**
+ * struct leapraid_mem_desc - Memory descriptor for LeapRaid adapter
+ *
+ * @task_desc: Pointer to task descriptor
+ * @task_desc_dma: DMA address of task descriptor
+ * @sg_chain_pool: DMA pool for SGL chain allocations
+ * @sg_chain_pool_size: Size of the sg_chain_pool
+ * @taskid_to_uniq_tag: Mapping from task ID to unique tag
+ * @sense_data: Buffer for SCSI sense data
+ * @sense_data_dma: DMA address of sense_data buffer
+ * @rep_msg: Buffer for reply message
+ * @rep_msg_dma: DMA address of reply message buffer
+ * @rep_msg_addr: Pointer to reply message address
+ * @rep_msg_addr_dma: DMA address of reply message address
+ * @rep_desc_seg_maint: Pointer to reply descriptor segment
+ * @rep_desc_q_arr: Pointer to reply descriptor queue array
+ * @rep_desc_q_arr_dma: DMA address of reply descriptor queue array
+ */
+struct leapraid_mem_desc {
+ void *task_desc;
+ dma_addr_t task_desc_dma;
+ struct dma_pool *sg_chain_pool;
+ u16 sg_chain_pool_size;
+ u16 *taskid_to_uniq_tag;
+ u8 *sense_data;
+ dma_addr_t sense_data_dma;
+ u8 *rep_msg;
+ dma_addr_t rep_msg_dma;
+ __le32 *rep_msg_addr;
+ dma_addr_t rep_msg_addr_dma;
+ struct leapraid_rep_desc_seg_maint *rep_desc_seg_maint;
+ struct leapraid_rep_desc_q_arr *rep_desc_q_arr;
+ dma_addr_t rep_desc_q_arr_dma;
+};
+
+#define LEAPRAID_FIXED_INTER_CMDS 7
+#define LEAPRAID_FIXED_HP_CMDS 2
+#define LEAPRAID_INTER_HP_CMDS_DIF \
+ (LEAPRAID_FIXED_INTER_CMDS - LEAPRAID_FIXED_HP_CMDS)
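+/*
+ * Arithmetic note: of the 7 fixed internal commands, 2 are
+ * high-priority, leaving LEAPRAID_INTER_HP_CMDS_DIF == 5 normal
+ * internal commands.
+ */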
+
+#define LEAPRAID_CMD_NOT_USED 0x8000
+#define LEAPRAID_CMD_DONE 0x0001
+#define LEAPRAID_CMD_PENDING 0x0002
+#define LEAPRAID_CMD_REPLY_VALID 0x0004
+#define LEAPRAID_CMD_RESET 0x0008
+
+/**
+ * enum LEAPRAID_CB_INDEX - Callback index for LeapRaid driver
+ *
+ * @LEAPRAID_SCAN_DEV_CB_IDX: Scan device callback index
+ * @LEAPRAID_CONFIG_CB_IDX: Configuration callback index
+ * @LEAPRAID_TRANSPORT_CB_IDX: Transport callback index
+ * @LEAPRAID_TIMESTAMP_SYNC_CB_IDX: Timestamp sync callback index
+ * @LEAPRAID_RAID_ACTION_CB_IDX: RAID action callback index
+ * @LEAPRAID_DRIVER_SCSIIO_CB_IDX: Driver SCSI I/O callback index
+ * @LEAPRAID_SAS_CTRL_CB_IDX: SAS controller callback index
+ * @LEAPRAID_ENC_CB_IDX: Enclosure callback index
+ * @LEAPRAID_NOTIFY_EVENT_CB_IDX: Notify event callback index
+ * @LEAPRAID_CTL_CB_IDX: Control callback index
+ * @LEAPRAID_TM_CB_IDX: Task management callback index
+ * @LEAPRAID_NUM_CB_IDXS: Array-size sentinel for the callback indexes
+ */
+enum LEAPRAID_CB_INDEX {
+ LEAPRAID_SCAN_DEV_CB_IDX = 0x1,
+ LEAPRAID_CONFIG_CB_IDX = 0x2,
+ LEAPRAID_TRANSPORT_CB_IDX = 0x3,
+ LEAPRAID_TIMESTAMP_SYNC_CB_IDX = 0x4,
+ LEAPRAID_RAID_ACTION_CB_IDX = 0x5,
+ LEAPRAID_DRIVER_SCSIIO_CB_IDX = 0x6,
+ LEAPRAID_SAS_CTRL_CB_IDX = 0x7,
+ LEAPRAID_ENC_CB_IDX = 0x8,
+ LEAPRAID_NOTIFY_EVENT_CB_IDX = 0x9,
+ LEAPRAID_CTL_CB_IDX = 0xA,
+ LEAPRAID_TM_CB_IDX = 0xB,
+ LEAPRAID_NUM_CB_IDXS
+};
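+/*
+ * Sketch of how cb_idx is typically consumed when a reply descriptor
+ * completes (dispatch-table name and shape hypothetical; the signature
+ * mirrors leapraid_scsiio_done() declared below):
+ *
+ *	typedef u8 (*leapraid_cb_t)(struct leapraid_adapter *adapter,
+ *				    u16 taskid, u8 msix_index, u32 rep);
+ *	static leapraid_cb_t leapraid_cbs[LEAPRAID_NUM_CB_IDXS];
+ *
+ *	leapraid_cbs[cb_idx](adapter, taskid, msix_index, rep);
+ */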
+
+struct leapraid_default_reply {
+ u8 pad[LEAPRAID_REPLY_SIEZ];
+};
+
+struct leapraid_sense_buffer {
+ u8 pad[SCSI_SENSE_BUFFERSIZE];
+};
+
+/**
+ * struct leapraid_driver_cmd - Driver command tracking structure
+ *
+ * @reply: Default reply structure returned by the adapter
+ * @done: Completion object used to signal command completion
+ * @status: Status code returned by the firmware
+ * @taskid: Unique task identifier for this command
+ * @hp_taskid: Task identifier for high-priority commands
+ * @inter_taskid: Task identifier for internal commands
+ * @cb_idx: Callback index used to identify completion context
+ * @async_scan_dev: True if this command is for asynchronous device scan
+ * @sense: Sense buffer holding error information from device
+ * @mutex: Mutex to protect access to this command structure
+ * @list: List node for linking driver commands into lists
+ */
+struct leapraid_driver_cmd {
+ struct leapraid_default_reply reply;
+ struct completion done;
+ u16 status;
+ u16 taskid;
+ u16 hp_taskid;
+ u16 inter_taskid;
+ u8 cb_idx;
+ bool async_scan_dev;
+ struct leapraid_sense_buffer sense;
+ struct mutex mutex;
+ struct list_head list;
+};
+
+/**
+ * struct leapraid_driver_cmds - Collection of driver command objects
+ *
+ * @special_cmd_list: List head for tracking special driver commands
+ * @scan_dev_cmd: Command used for asynchronous device scan operations
+ * @cfg_op_cmd: Command for configuration operations
+ * @transport_cmd: Command for transport-level operations
+ * @timestamp_sync_cmd: Command for synchronizing timestamp with firmware
+ * @raid_action_cmd: Command for RAID-related management or action requests
+ * @driver_scsiio_cmd: Command used for internal SCSI I/O processing
+ * @enc_cmd: Command for enclosure management operations
+ * @notify_event_cmd: Command for asynchronous event notification handling
+ * @ctl_cmd: Command for generic control or maintenance operations
+ * @tm_cmd: Task management command
+ * @internal_scmd: Pointer to internal SCSI command used by the driver
+ */
+struct leapraid_driver_cmds {
+ struct list_head special_cmd_list;
+ struct leapraid_driver_cmd scan_dev_cmd;
+ struct leapraid_driver_cmd cfg_op_cmd;
+ struct leapraid_driver_cmd transport_cmd;
+ struct leapraid_driver_cmd timestamp_sync_cmd;
+ struct leapraid_driver_cmd raid_action_cmd;
+ struct leapraid_driver_cmd driver_scsiio_cmd;
+ struct leapraid_driver_cmd enc_cmd;
+ struct leapraid_driver_cmd notify_event_cmd;
+ struct leapraid_driver_cmd ctl_cmd;
+ struct leapraid_driver_cmd tm_cmd;
+ struct scsi_cmnd *internal_scmd;
+};
+
+/**
+ * struct leapraid_dynamic_task_desc - Dynamic task descriptor
+ *
+ * @task_lock: Spinlock to protect concurrent access
+ * @hp_taskid: Current high-priority task ID
+ * @hp_cmd_qd: Fixed command queue depth for high-priority tasks
+ * @inter_taskid: Current internal task ID
+ * @inter_cmd_qd: Fixed command queue depth for internal tasks
+ */
+struct leapraid_dynamic_task_desc {
+ spinlock_t task_lock;
+ u16 hp_taskid;
+ u16 hp_cmd_qd;
+ u16 inter_taskid;
+ u16 inter_cmd_qd;
+};
+
+/**
+ * struct leapraid_fw_evt_work - Firmware event work structure
+ *
+ * @list: Linked list node for queuing the work
+ * @adapter: Pointer to the associated LeapRaid adapter
+ * @work: Work structure used by the kernel workqueue
+ * @refcnt: Reference counter for managing the lifetime of this work
+ * @evt_data: Pointer to firmware event data
+ * @dev_handle: Device handle associated with the event
+ * @evt_type: Type of firmware event
+ * @ignore: Flag indicating whether the event should be ignored
+ */
+struct leapraid_fw_evt_work {
+ struct list_head list;
+ struct leapraid_adapter *adapter;
+ struct work_struct work;
+ struct kref refcnt;
+ void *evt_data;
+ u16 dev_handle;
+ u16 evt_type;
+ u8 ignore;
+};
+
+/**
+ * struct leapraid_fw_evt_struct - Firmware event handling structure
+ *
+ * @fw_evt_name: Name of the firmware event
+ * @fw_evt_thread: Workqueue used for processing firmware events
+ * @fw_evt_lock: Spinlock protecting access to the firmware event list
+ * @fw_evt_list: Linked list of pending firmware events
+ * @cur_evt: Pointer to the currently processing firmware event
+ * @fw_evt_cleanup: Flag indicating whether cleanup of events is in progress
+ * @leapraid_evt_masks: Array of event masks for filtering firmware events
+ */
+struct leapraid_fw_evt_struct {
+ char fw_evt_name[48];
+ struct workqueue_struct *fw_evt_thread;
+ spinlock_t fw_evt_lock;
+ struct list_head fw_evt_list;
+ struct leapraid_fw_evt_work *cur_evt;
+ int fw_evt_cleanup;
+	u32 leapraid_evt_masks[LEAPRAID_EVT_MASK_COUNT];
+};
+
+/**
+ * struct leapraid_rq - Represents a LeapRaid request queue
+ *
+ * @adapter: Pointer to the associated LeapRaid adapter
+ * @msix_idx: MSI-X vector index used by this queue
+ * @rep_post_host_idx: Index of the last processed reply descriptor
+ * @rep_desc: Pointer to the reply descriptor associated with this queue
+ * @name: Name of the request queue
+ * @busy: Atomic counter indicating if the queue is busy
+ */
+struct leapraid_rq {
+ struct leapraid_adapter *adapter;
+ u8 msix_idx;
+ u32 rep_post_host_idx;
+ union leapraid_rep_desc_union *rep_desc;
+ char name[LEAPRAID_NAME_LENGTH];
+ atomic_t busy;
+};
+
+/**
+ * struct leapraid_int_rq - Internal request queue for a CPU
+ *
+ * @affinity_hint: CPU affinity mask for the queue
+ * @rq: Underlying LeapRaid request queue structure
+ */
+struct leapraid_int_rq {
+ cpumask_var_t affinity_hint;
+ struct leapraid_rq rq;
+};
+
+/**
+ * struct leapraid_blk_mq_poll_rq - Polling request for LeapRaid blk-mq
+ *
+ * @busy: Atomic flag indicating request is being processed
+ * @pause: Atomic flag to temporarily suspend polling
+ * @rq: The underlying LeapRaid request structure
+ */
+struct leapraid_blk_mq_poll_rq {
+ atomic_t busy;
+ atomic_t pause;
+ struct leapraid_rq rq;
+};
+
+/**
+ * struct leapraid_notification_desc - Notification
+ * descriptor for LeapRaid
+ *
+ * @iopoll_qdex: Index of the I/O polling queue
+ * @iopoll_qcnt: Count of I/O polling queues
+ * @msix_enable: Flag indicating MSI-X is enabled
+ * @msix_cpu_map: CPU map for MSI-X interrupts
+ * @msix_cpu_map_sz: Size of the MSI-X CPU map
+ * @int_rqs: Array of interrupt request queues
+ * @int_rqs_allocated: Count of allocated interrupt request queues
+ * @blk_mq_poll_rqs: Array of blk-mq polling requests
+ */
+struct leapraid_notification_desc {
+ u32 iopoll_qdex;
+ u32 iopoll_qcnt;
+ bool msix_enable;
+ u8 *msix_cpu_map;
+ u32 msix_cpu_map_sz;
+ struct leapraid_int_rq *int_rqs;
+ u32 int_rqs_allocated;
+ struct leapraid_blk_mq_poll_rq *blk_mq_poll_rqs;
+};
+
+/**
+ * struct leapraid_reset_desc - Reset descriptor for LeapRaid
+ *
+ * @fault_reset_wq: Workqueue for fault reset operations
+ * @fault_reset_work: Delayed work structure for fault reset
+ * @fault_reset_wq_name: Name of the fault reset workqueue
+ * @host_diag_mutex: Mutex for host diagnostic operations
+ * @adapter_reset_lock: Spinlock for adapter reset operations
+ * @adapter_reset_mutex: Mutex for adapter reset operations
+ * @adapter_link_resetting: Flag indicating if adapter link is resetting
+ * @adapter_reset_results: Results of the adapter reset operation
+ * @pending_io_cnt: Count of pending I/O operations
+ * @reset_wait_queue: Wait queue for reset operations
+ * @reset_cnt: Counter for reset operations
+ */
+struct leapraid_reset_desc {
+ struct workqueue_struct *fault_reset_wq;
+ struct delayed_work fault_reset_work;
+ char fault_reset_wq_name[48];
+ struct mutex host_diag_mutex;
+ spinlock_t adapter_reset_lock;
+ struct mutex adapter_reset_mutex;
+ bool adapter_link_resetting;
+ int adapter_reset_results;
+ int pending_io_cnt;
+ wait_queue_head_t reset_wait_queue;
+ u32 reset_cnt;
+};
+
+/**
+ * struct leapraid_scan_dev_desc - Scan device descriptor
+ * for LeapRaid
+ *
+ * @wait_scan_dev_done: Flag indicating if scan device operation is done
+ * @driver_loading: Flag indicating if driver is loading
+ * @first_scan_dev_fired: Flag indicating if first scan device operation fired
+ * @scan_dev_failed: Flag indicating if scan device operation failed
+ * @scan_start: Flag indicating if scan operation started
+ * @scan_start_failed: Count of failed scan start operations
+ */
+struct leapraid_scan_dev_desc {
+ bool wait_scan_dev_done;
+ bool driver_loading;
+ bool first_scan_dev_fired;
+ bool scan_dev_failed;
+ bool scan_start;
+ u16 scan_start_failed;
+};
+
+/**
+ * struct leapraid_access_ctrl - Access control structure for LeapRaid
+ *
+ * @pci_access_lock: Mutex for PCI access control
+ * @adapter_thermal_alert: Flag indicating if adapter thermal alert is active
+ * @shost_recovering: Flag indicating if host is recovering
+ * @host_removing: Flag indicating if host is being removed
+ * @pcie_recovering: Flag indicating if PCIe is recovering
+ */
+struct leapraid_access_ctrl {
+ struct mutex pci_access_lock;
+ bool adapter_thermal_alert;
+ bool shost_recovering;
+ bool host_removing;
+ bool pcie_recovering;
+};
+
+/**
+ * struct leapraid_fw_log_desc - Firmware log descriptor for LeapRaid
+ *
+ * @fw_log_buffer: Buffer for firmware log data
+ * @fw_log_buffer_dma: DMA address of the firmware log buffer
+ * @fw_log_wq_name: Name of the firmware log workqueue
+ * @fw_log_wq: Workqueue for firmware log operations
+ * @fw_log_work: Delayed work structure for firmware log
+ * @open_pcie_trace: Flag indicating if PCIe tracing is open
+ * @fw_log_init_flag: Flag indicating if firmware log is initialized
+ */
+struct leapraid_fw_log_desc {
+ u8 *fw_log_buffer;
+ dma_addr_t fw_log_buffer_dma;
+ char fw_log_wq_name[48];
+ struct workqueue_struct *fw_log_wq;
+ struct delayed_work fw_log_work;
+ int open_pcie_trace;
+ int fw_log_init_flag;
+};
+
+#define LEAPRAID_CARD_PORT_FLG_DIRTY 0x01
+#define LEAPRAID_CARD_PORT_FLG_NEW 0x02
+#define LEAPRAID_DISABLE_MP_PORT_ID 0xFF
+/**
+ * struct leapraid_card_port - Card port structure for LeapRaid
+ *
+ * @list: List head for card port
+ * @vphys_list: List head for virtual phy list
+ * @port_id: Port ID
+ * @sas_address: SAS address
+ * @phy_mask: Mask of phy
+ * @vphys_mask: Mask of virtual phy
+ * @flg: Flags for the port
+ */
+struct leapraid_card_port {
+ struct list_head list;
+ struct list_head vphys_list;
+ u8 port_id;
+ u64 sas_address;
+ u32 phy_mask;
+ u32 vphys_mask;
+ u8 flg;
+};
+
+/**
+ * struct leapraid_card_phy - Card phy structure for LeapRaid
+ *
+ * @port_siblings: List head for port siblings
+ * @card_port: Pointer to the card port
+ * @identify: SAS identify structure
+ * @remote_identify: Remote SAS identify structure
+ * @phy: SAS phy structure
+ * @phy_id: Phy ID
+ * @hdl: Handle for the port
+ * @attached_hdl: Handle for the attached port
+ * @phy_is_assigned: Flag indicating if phy is assigned
+ * @vphy: Flag indicating if virtual phy
+ */
+struct leapraid_card_phy {
+ struct list_head port_siblings;
+ struct leapraid_card_port *card_port;
+ struct sas_identify identify;
+ struct sas_identify remote_identify;
+ struct sas_phy *phy;
+ u8 phy_id;
+ u16 hdl;
+ u16 attached_hdl;
+ bool phy_is_assigned;
+ bool vphy;
+};
+
+/**
+ * struct leapraid_topo_node - SAS topology node for LeapRaid
+ *
+ * @list: List head for linking nodes
+ * @sas_port_list: List of SAS ports
+ * @card_port: Associated card port
+ * @card_phy: Associated card PHY
+ * @rphy: SAS remote PHY device
+ * @parent_dev: Parent device pointer
+ * @sas_address: SAS address of this node
+ * @sas_address_parent: Parent node's SAS address
+ * @phys_num: Number of physical links
+ * @hdl: Handle identifier
+ * @enc_hdl: Enclosure handle
+ * @enc_lid: Enclosure logical identifier
+ * @resp: Response status flag
+ */
+struct leapraid_topo_node {
+ struct list_head list;
+ struct list_head sas_port_list;
+ struct leapraid_card_port *card_port;
+ struct leapraid_card_phy *card_phy;
+ struct sas_rphy *rphy;
+ struct device *parent_dev;
+ u64 sas_address;
+ u64 sas_address_parent;
+ u8 phys_num;
+ u16 hdl;
+ u16 enc_hdl;
+ u64 enc_lid;
+ bool resp;
+};
+
+/**
+ * struct leapraid_dev_topo - LeapRaid device topology management structure
+ *
+ * @topo_node_lock: Spinlock for protecting topology node operations
+ * @sas_dev_lock: Spinlock for SAS device list access
+ * @raid_volume_lock: Spinlock for RAID volume list access
+ * @sas_id: SAS domain identifier
+ * @card: Main card topology node
+ * @exp_list: List of expander devices
+ * @enc_list: List of enclosure devices
+ * @sas_dev_list: List of SAS devices
+ * @sas_dev_init_list: List of SAS devices being initialized
+ * @raid_volume_list: List of RAID volumes
+ * @card_port_list: List of card ports
+ * @pd_hdls_sz: Size in bytes of the physical disk handle bitmap
+ * @pd_hdls: Bitmap of physical disk handles
+ * @blocking_hdls: Bitmap of device handles blocked from I/O
+ * @pending_dev_add_sz: Size in bytes of the pending-addition bitmap
+ * @pending_dev_add: Bitmap of devices pending addition
+ * @dev_removing_sz: Size in bytes of the device-removal bitmap
+ * @dev_removing: Bitmap of devices being removed
+ */
+struct leapraid_dev_topo {
+ spinlock_t topo_node_lock;
+ spinlock_t sas_dev_lock;
+ spinlock_t raid_volume_lock;
+ int sas_id;
+ struct leapraid_topo_node card;
+ struct list_head exp_list;
+ struct list_head enc_list;
+ struct list_head sas_dev_list;
+ struct list_head sas_dev_init_list;
+ struct list_head raid_volume_list;
+ struct list_head card_port_list;
+ u16 pd_hdls_sz;
+ void *pd_hdls;
+ void *blocking_hdls;
+ u16 pending_dev_add_sz;
+ void *pending_dev_add;
+ u16 dev_removing_sz;
+ void *dev_removing;
+};
+
+/**
+ * struct leapraid_boot_dev - Boot device structure for LeapRaid
+ *
+ * @dev: Device pointer
+ * @chnl: Channel number
+ * @form: Form factor
+ * @pg_dev: Config page device content
+ */
+struct leapraid_boot_dev {
+ void *dev;
+ u8 chnl;
+ u8 form;
+	u8 pg_dev[LEAPRAID_BOOT_DEV_SIZE];
+};
+
+/**
+ * struct leapraid_boot_devs - Boot device management structure
+ * @requested_boot_dev: Requested primary boot device
+ * @requested_alt_boot_dev: Requested alternate boot device
+ * @current_boot_dev: Currently active boot device
+ */
+struct leapraid_boot_devs {
+ struct leapraid_boot_dev requested_boot_dev;
+ struct leapraid_boot_dev requested_alt_boot_dev;
+ struct leapraid_boot_dev current_boot_dev;
+};
+
+/**
+ * struct leapraid_smart_poll_desc - SMART polling descriptor
+ * @smart_poll_wq: Workqueue for SMART polling tasks
+ * @smart_poll_work: Delayed work for SMART polling operations
+ * @smart_poll_wq_name: Workqueue name string
+ */
+struct leapraid_smart_poll_desc {
+ struct workqueue_struct *smart_poll_wq;
+ struct delayed_work smart_poll_work;
+ char smart_poll_wq_name[48];
+};
+
+/**
+ * struct leapraid_adapter - Main LeapRaid adapter structure
+ * @list: List head for adapter management
+ * @shost: SCSI host structure
+ * @pdev: PCI device structure
+ * @iomem_base: I/O memory mapped base address
+ * @rep_msg_host_idx: Host index for reply messages
+ * @mask_int: Interrupt masking flag
+ * @timestamp_sync_cnt: Timestamp synchronization counter
+ * @adapter_attr: Adapter attributes
+ * @mem_desc: Memory descriptor
+ * @driver_cmds: Driver commands
+ * @dynamic_task_desc: Dynamic task descriptor
+ * @fw_evt_s: Firmware event structure
+ * @notification_desc: Notification descriptor
+ * @reset_desc: Reset descriptor
+ * @scan_dev_desc: Device scan descriptor
+ * @access_ctrl: Access control
+ * @fw_log_desc: Firmware log descriptor
+ * @dev_topo: Device topology
+ * @boot_devs: Boot devices
+ * @smart_poll_desc: SMART polling descriptor
+ */
+struct leapraid_adapter {
+ struct list_head list;
+ struct Scsi_Host *shost;
+ struct pci_dev *pdev;
+ struct leapraid_reg_base __iomem *iomem_base;
+ u32 rep_msg_host_idx;
+ bool mask_int;
+ u32 timestamp_sync_cnt;
+
+ struct leapraid_adapter_attr adapter_attr;
+ struct leapraid_mem_desc mem_desc;
+ struct leapraid_driver_cmds driver_cmds;
+ struct leapraid_dynamic_task_desc dynamic_task_desc;
+ struct leapraid_fw_evt_struct fw_evt_s;
+ struct leapraid_notification_desc notification_desc;
+ struct leapraid_reset_desc reset_desc;
+ struct leapraid_scan_dev_desc scan_dev_desc;
+ struct leapraid_access_ctrl access_ctrl;
+ struct leapraid_fw_log_desc fw_log_desc;
+ struct leapraid_dev_topo dev_topo;
+ struct leapraid_boot_devs boot_devs;
+ struct leapraid_smart_poll_desc smart_poll_desc;
+};
+
+union cfg_param_1 {
+ u32 form;
+ u32 size;
+ u32 phy_number;
+};
+
+union cfg_param_2 {
+ u32 handle;
+ u32 form_specific;
+};
+
+enum config_page_action {
+ GET_BIOS_PG2,
+ GET_BIOS_PG3,
+ GET_SAS_DEVICE_PG0,
+ GET_SAS_IOUNIT_PG0,
+ GET_SAS_IOUNIT_PG1,
+ GET_SAS_EXPANDER_PG0,
+ GET_SAS_EXPANDER_PG1,
+ GET_SAS_ENCLOSURE_PG0,
+ GET_PHY_PG0,
+ GET_RAID_VOLUME_PG0,
+ GET_RAID_VOLUME_PG1,
+ GET_PHY_DISK_PG0,
+};
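+/*
+ * The cfg_param unions are interpreted per action: leapraid_cfg_pages()
+ * in leapraid_func.c, for instance, passes cfgp1.size for
+ * GET_SAS_IOUNIT_PG1 and leaves both unions zeroed for the BIOS pages.
+ */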
+
+/**
+ * struct leapraid_enc_node - Enclosure node structure
+ * @list: List head for enclosure management
+ * @pg0: Enclosure page 0 data
+ */
+struct leapraid_enc_node {
+ struct list_head list;
+ struct leapraid_enc_p0 pg0;
+};
+
+/**
+ * struct leapraid_raid_volume - RAID volume structure
+ * @list: List head for volume management
+ * @starget: SCSI target structure
+ * @sdev: SCSI device structure
+ * @id: Volume ID
+ * @channel: SCSI channel
+ * @wwid: World Wide Identifier
+ * @hdl: Volume handle
+ * @vol_type: Volume type
+ * @pd_num: Number of physical disks
+ * @resp: Response status
+ * @dev_info: Device information
+ */
+struct leapraid_raid_volume {
+ struct list_head list;
+ struct scsi_target *starget;
+ struct scsi_device *sdev;
+ unsigned int id;
+ unsigned int channel;
+ u64 wwid;
+ u16 hdl;
+ u8 vol_type;
+ u8 pd_num;
+ u8 resp;
+ u32 dev_info;
+};
+
+#define LEAPRAID_TGT_FLG_RAID_MEMBER 0x01
+#define LEAPRAID_TGT_FLG_VOLUME 0x02
+#define LEAPRAID_NO_ULD_ATTACH 1
+/**
+ * struct leapraid_starget_priv - SCSI target private data
+ * @starget: SCSI target structure
+ * @sas_address: SAS address
+ * @hdl: Device handle
+ * @num_luns: Number of LUNs
+ * @flg: Flags
+ * @deleted: Deletion flag
+ * @tm_busy: Task management busy flag
+ * @card_port: Associated card port
+ * @sas_dev: SAS device structure
+ */
+struct leapraid_starget_priv {
+ struct scsi_target *starget;
+ u64 sas_address;
+ u16 hdl;
+ int num_luns;
+ u32 flg;
+ bool deleted;
+ bool tm_busy;
+ struct leapraid_card_port *card_port;
+ struct leapraid_sas_dev *sas_dev;
+};
+
+#define LEAPRAID_DEVICE_FLG_INIT 0x01
+/**
+ * struct leapraid_sdev_priv - SCSI device private data
+ * @starget_priv: Associated target private data
+ * @lun: Logical Unit Number
+ * @flg: Flags
+ * @ncq: Indicates whether the device supports NCQ
+ * @block: Block flag
+ * @deleted: Deletion flag
+ * @sep: SEP flag
+ */
+struct leapraid_sdev_priv {
+ struct leapraid_starget_priv *starget_priv;
+ unsigned int lun;
+ u32 flg;
+ bool ncq;
+ bool block;
+ bool deleted;
+ bool sep;
+};
+
+/**
+ * struct leapraid_sas_dev - SAS device structure
+ * @list: List head for device management
+ * @starget: SCSI target structure
+ * @card_port: Associated card port
+ * @rphy: SAS remote PHY
+ * @refcnt: Reference count
+ * @id: Device ID
+ * @channel: SCSI channel
+ * @slot: Slot number
+ * @phy: PHY identifier
+ * @resp: Response status
+ * @led_on: LED state
+ * @sas_addr: SAS address
+ * @dev_name: Device name
+ * @hdl: Device handle
+ * @parent_sas_addr: Parent SAS address
+ * @enc_hdl: Enclosure handle
+ * @enc_lid: Enclosure logical ID
+ * @volume_hdl: Volume handle
+ * @volume_wwid: Volume WWID
+ * @dev_info: Device information
+ * @pend_sas_rphy_add: Pending SAS rphy addition flag
+ * @enc_level: Enclosure level
+ * @port_type: Port type
+ * @connector_name: Connector name
+ * @support_smart: SMART support flag
+ */
+struct leapraid_sas_dev {
+ struct list_head list;
+ struct scsi_target *starget;
+ struct leapraid_card_port *card_port;
+ struct sas_rphy *rphy;
+ struct kref refcnt;
+ unsigned int id;
+ unsigned int channel;
+ u16 slot;
+ u8 phy;
+ bool resp;
+ bool led_on;
+ u64 sas_addr;
+ u64 dev_name;
+ u16 hdl;
+ u64 parent_sas_addr;
+ u16 enc_hdl;
+ u64 enc_lid;
+ u16 volume_hdl;
+ u64 volume_wwid;
+ u32 dev_info;
+ u8 pend_sas_rphy_add;
+ u8 enc_level;
+ u8 port_type;
+ u8 connector_name[5];
+ bool support_smart;
+};
+
+static inline void leapraid_sdev_free(struct kref *ref)
+{
+ kfree(container_of(ref, struct leapraid_sas_dev, refcnt));
+}
+
+#define leapraid_sdev_get(sdev) kref_get(&(sdev)->refcnt)
+#define leapraid_sdev_put(sdev) kref_put(&(sdev)->refcnt, leapraid_sdev_free)
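+/*
+ * Usage sketch for the refcounting helpers, assuming the lookup helpers
+ * below return a referenced device (as the get/put pairing suggests):
+ *
+ *	sas_dev = leapraid_get_sas_dev_by_hdl(adapter, handle);
+ *	if (sas_dev) {
+ *		... use sas_dev ...
+ *		leapraid_sdev_put(sas_dev);
+ *	}
+ */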
+
+/**
+ * struct leapraid_sas_port - SAS port structure
+ * @port_list: List head for port management
+ * @phy_list: List of PHYs in this port
+ * @port: SAS port structure
+ * @card_port: Associated card port
+ * @remote_identify: Remote device identification
+ * @rphy: SAS remote PHY
+ * @phys_num: Number of PHYs in this port
+ */
+struct leapraid_sas_port {
+ struct list_head port_list;
+ struct list_head phy_list;
+ struct sas_port *port;
+ struct leapraid_card_port *card_port;
+ struct sas_identify remote_identify;
+ struct sas_rphy *rphy;
+ u8 phys_num;
+};
+
+#define LEAPRAID_VPHY_FLG_DIRTY 0x01
+/**
+ * struct leapraid_vphy - Virtual PHY structure
+ * @list: List head for PHY management
+ * @sas_address: SAS address
+ * @phy_mask: PHY mask
+ * @flg: Flags
+ */
+struct leapraid_vphy {
+ struct list_head list;
+ u64 sas_address;
+ u32 phy_mask;
+ u8 flg;
+};
+
+struct leapraid_tgt_rst_list {
+ struct list_head list;
+ u16 handle;
+ u16 state;
+};
+
+struct leapraid_sc_list {
+ struct list_head list;
+ u16 handle;
+};
+
+struct sense_info {
+ u8 sense_key;
+ u8 asc;
+ u8 ascq;
+};
+
+struct leapraid_fw_log_info {
+ u32 user_position;
+ u32 adapter_position;
+};
+
+/**
+ * enum reset_type - Reset type enumeration
+ * @FULL_RESET: Full hardware reset
+ * @PART_RESET: Partial reset
+ */
+enum reset_type {
+ FULL_RESET,
+ PART_RESET,
+};
+
+enum leapraid_card_port_checking_flg {
+ CARD_PORT_FURTHER_CHECKING_NEEDED = 0,
+ CARD_PORT_SKIP_CHECKING,
+};
+
+enum leapraid_port_checking_state {
+ NEW_CARD_PORT = 0,
+ SAME_PORT_WITH_NOTHING_CHANGED,
+ SAME_PORT_WITH_PARTIALLY_CHANGED_PHYS,
+ SAME_ADDR_WITH_PARTIALLY_CHANGED_PHYS,
+ SAME_ADDR_ONLY,
+};
+
+/**
+ * struct leapraid_card_port_feature - Card port feature
+ * @dirty_flg: Dirty flag indicator
+ * @same_addr: Same address flag
+ * @exact_phy: Exact PHY match flag
+ * @phy_overlap: PHY overlap bitmap
+ * @same_port: Same port flag
+ * @cur_chking_old_port: Current checking old port
+ * @expected_old_port: Expected old port
+ * @same_addr_port_count: Same address port count
+ * @checking_state: Port checking state
+ */
+struct leapraid_card_port_feature {
+ u8 dirty_flg;
+ bool same_addr;
+ bool exact_phy;
+ u32 phy_overlap;
+ bool same_port;
+ struct leapraid_card_port *cur_chking_old_port;
+ struct leapraid_card_port *expected_old_port;
+ int same_addr_port_count;
+ enum leapraid_port_checking_state checking_state;
+};
+
+#define SMP_REPORT_MANUFACTURER_INFORMATION_FRAME_TYPE 0x40
+#define SMP_REPORT_MANUFACTURER_INFORMATION_FUNC 0x01
+
+/* ref: SAS-2 (INCITS 457-2010) 10.4.3.5 */
+struct leapraid_rep_manu_request {
+ u8 smp_frame_type;
+ u8 function;
+ u8 allocated_response_length;
+ u8 request_length;
+};
+
+/* ref: SAS-2 (INCITS 457-2010) 10.4.3.5 */
+struct leapraid_rep_manu_reply {
+ u8 smp_frame_type;
+ u8 function;
+ u8 function_result;
+ u8 response_length;
+ u16 expander_change_count;
+ u8 r1[2];
+ u8 sas_format;
+ u8 r2[3];
+ u8 vendor_identification[SAS_EXPANDER_VENDOR_ID_LEN];
+ u8 product_identification[SAS_EXPANDER_PRODUCT_ID_LEN];
+ u8 product_revision_level[SAS_EXPANDER_PRODUCT_REV_LEN];
+ u8 component_vendor_identification[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u16 component_id;
+ u8 component_revision_level;
+ u8 r3;
+ u8 vendor_specific[8];
+};
+
+/**
+ * struct leapraid_scsi_cmd_desc - SCSI command descriptor
+ * @hdl: Device handle
+ * @lun: Logical Unit Number
+ * @raid_member: RAID member flag
+ * @dir: DMA data direction
+ * @data_length: Data transfer length
+ * @data_buffer: Data buffer pointer
+ * @cdb_length: CDB length
+ * @cdb: Command Descriptor Block
+ * @time_out: Timeout
+ */
+struct leapraid_scsi_cmd_desc {
+ u16 hdl;
+ u32 lun;
+ bool raid_member;
+ enum dma_data_direction dir;
+ u32 data_length;
+ void *data_buffer;
+ u8 cdb_length;
+ u8 cdb[32];
+ u8 time_out;
+};
+
+extern struct list_head leapraid_adapter_list;
+extern spinlock_t leapraid_adapter_lock;
+extern char driver_name[LEAPRAID_NAME_LENGTH];
+
+int leapraid_ctrl_init(struct leapraid_adapter *adapter);
+void leapraid_remove_ctrl(struct leapraid_adapter *adapter);
+void leapraid_check_scheduled_fault_start(struct leapraid_adapter *adapter);
+void leapraid_check_scheduled_fault_stop(struct leapraid_adapter *adapter);
+void leapraid_fw_log_start(struct leapraid_adapter *adapter);
+void leapraid_fw_log_stop(struct leapraid_adapter *adapter);
+int leapraid_set_pcie_and_notification(struct leapraid_adapter *adapter);
+void leapraid_disable_controller(struct leapraid_adapter *adapter);
+int leapraid_hard_reset_handler(struct leapraid_adapter *adapter,
+ enum reset_type type);
+void leapraid_mask_int(struct leapraid_adapter *adapter);
+void leapraid_unmask_int(struct leapraid_adapter *adapter);
+u32 leapraid_get_adapter_state(struct leapraid_adapter *adapter);
+bool leapraid_pci_removed(struct leapraid_adapter *adapter);
+int leapraid_check_adapter_is_op(struct leapraid_adapter *adapter);
+void *leapraid_get_task_desc(struct leapraid_adapter *adapter, u16 taskid);
+void *leapraid_get_sense_buffer(struct leapraid_adapter *adapter, u16 taskid);
+__le32 leapraid_get_sense_buffer_dma(struct leapraid_adapter *adapter,
+ u16 taskid);
+void *leapraid_get_reply_vaddr(struct leapraid_adapter *adapter,
+ u32 phys_addr);
+u16 leapraid_alloc_scsiio_taskid(struct leapraid_adapter *adapter,
+ struct scsi_cmnd *scmd);
+void leapraid_free_taskid(struct leapraid_adapter *adapter, u16 taskid);
+struct leapraid_io_req_tracker *leapraid_get_io_tracker_from_taskid(
+ struct leapraid_adapter *adapter, u16 taskid);
+struct leapraid_io_req_tracker *leapraid_get_scmd_priv(struct scsi_cmnd *scmd);
+struct scsi_cmnd *leapraid_get_scmd_from_taskid(
+ struct leapraid_adapter *adapter, u16 taskid);
+int leapraid_scan_dev(struct leapraid_adapter *adapter, bool async_scan_dev);
+void leapraid_scan_dev_done(struct leapraid_adapter *adapter);
+void leapraid_wait_cmds_done(struct leapraid_adapter *adapter);
+void leapraid_clean_active_scsi_cmds(struct leapraid_adapter *adapter);
+void leapraid_sync_irqs(struct leapraid_adapter *adapter, bool poll);
+int leapraid_rep_queue_handler(struct leapraid_rq *rq);
+int leapraid_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
+void leapraid_mq_polling_pause(struct leapraid_adapter *adapter);
+void leapraid_mq_polling_resume(struct leapraid_adapter *adapter);
+void leapraid_set_tm_flg(struct leapraid_adapter *adapter, u16 handle);
+void leapraid_clear_tm_flg(struct leapraid_adapter *adapter, u16 handle);
+void leapraid_async_turn_on_led(struct leapraid_adapter *adapter, u16 handle);
+int leapraid_issue_locked_tm(struct leapraid_adapter *adapter, u16 handle,
+ uint channel, uint id, uint lun, u8 type,
+ u16 taskid_task, u8 tr_method);
+int leapraid_issue_tm(struct leapraid_adapter *adapter, u16 handle,
+ uint channel, uint id, uint lun, u8 type,
+ u16 taskid_task, u8 tr_method);
+u8 leapraid_scsiio_done(struct leapraid_adapter *adapter, u16 taskid,
+ u8 msix_index, u32 rep);
+int leapraid_get_volume_cap(struct leapraid_adapter *adapter,
+ struct leapraid_raid_volume *raid_volume);
+int leapraid_internal_init_cmd_priv(struct leapraid_adapter *adapter,
+ struct leapraid_io_req_tracker *io_tracker);
+int leapraid_internal_exit_cmd_priv(struct leapraid_adapter *adapter,
+ struct leapraid_io_req_tracker *io_tracker);
+void leapraid_clean_active_fw_evt(struct leapraid_adapter *adapter);
+bool leapraid_scmd_find_by_lun(struct leapraid_adapter *adapter,
+ uint id, unsigned int lun, uint channel);
+bool leapraid_scmd_find_by_tgt(struct leapraid_adapter *adapter,
+ uint id, uint channel);
+struct leapraid_vphy *leapraid_get_vphy_by_phy(struct leapraid_card_port *port,
+ u32 phy);
+struct leapraid_raid_volume *leapraid_raid_volume_find_by_id(
+ struct leapraid_adapter *adapter, uint id, uint channel);
+struct leapraid_raid_volume *leapraid_raid_volume_find_by_hdl(
+ struct leapraid_adapter *adapter, u16 handle);
+struct leapraid_topo_node *leapraid_exp_find_by_sas_address(
+ struct leapraid_adapter *adapter, u64 sas_address,
+ struct leapraid_card_port *port);
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_by_addr_and_rphy(
+ struct leapraid_adapter *adapter,
+ u64 sas_address, struct sas_rphy *rphy);
+struct leapraid_sas_dev *leapraid_get_sas_dev_by_addr(
+ struct leapraid_adapter *adapter, u64 sas_address,
+ struct leapraid_card_port *port);
+struct leapraid_sas_dev *leapraid_get_sas_dev_by_hdl(
+ struct leapraid_adapter *adapter, u16 handle);
+struct leapraid_sas_dev *leapraid_get_sas_dev_from_tgt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_starget_priv *tgt_priv);
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_from_tgt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_starget_priv *tgt_priv);
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_by_hdl(
+ struct leapraid_adapter *adapter, u16 handle);
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_by_addr(
+ struct leapraid_adapter *adapter, u64 sas_address,
+ struct leapraid_card_port *port);
+struct leapraid_sas_dev *leapraid_get_next_sas_dev_from_init_list(
+ struct leapraid_adapter *adapter);
+void leapraid_sas_dev_remove_by_sas_address(
+ struct leapraid_adapter *adapter,
+ u64 sas_address, struct leapraid_card_port *port);
+void leapraid_sas_dev_remove(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev);
+void leapraid_raid_volume_remove(struct leapraid_adapter *adapter,
+ struct leapraid_raid_volume *raid_volume);
+void leapraid_exp_rm(struct leapraid_adapter *adapter,
+ u64 sas_address, struct leapraid_card_port *port);
+void leapraid_build_mpi_sg(struct leapraid_adapter *adapter,
+ void *sge, dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size);
+void leapraid_build_ieee_nodata_sg(struct leapraid_adapter *adapter,
+ void *sge);
+void leapraid_build_ieee_sg(struct leapraid_adapter *adapter,
+ void *psge, dma_addr_t h2c_dma_addr,
+ size_t h2c_size, dma_addr_t c2h_dma_addr,
+ size_t c2h_size);
+int leapraid_build_scmd_ieee_sg(struct leapraid_adapter *adapter,
+ struct scsi_cmnd *scmd, u16 taskid);
+void leapraid_fire_scsi_io(struct leapraid_adapter *adapter,
+ u16 taskid, u16 handle);
+void leapraid_fire_hpr_task(struct leapraid_adapter *adapter, u16 taskid,
+ u16 msix_task);
+void leapraid_fire_task(struct leapraid_adapter *adapter, u16 taskid);
+int leapraid_cfg_get_volume_hdl(struct leapraid_adapter *adapter,
+ u16 pd_handle, u16 *volume_handle);
+int leapraid_cfg_get_volume_wwid(struct leapraid_adapter *adapter,
+ u16 volume_handle, u64 *wwid);
+int leapraid_op_config_page(struct leapraid_adapter *adapter,
+ void *cfgp, union cfg_param_1 cfgp1,
+ union cfg_param_2 cfgp2,
+ enum config_page_action cfg_op);
+void leapraid_adjust_sdev_queue_depth(struct scsi_device *sdev, int qdepth);
+
+int leapraid_ctl_release(struct inode *inode, struct file *filep);
+void leapraid_ctl_init(void);
+void leapraid_ctl_exit(void);
+
+extern struct sas_function_template leapraid_transport_functions;
+extern struct scsi_transport_template *leapraid_transport_template;
+struct leapraid_sas_port *leapraid_transport_port_add(
+ struct leapraid_adapter *adapter, u16 handle, u64 sas_address,
+ struct leapraid_card_port *card_port);
+void leapraid_transport_port_remove(struct leapraid_adapter *adapter,
+ u64 sas_address, u64 sas_address_parent,
+ struct leapraid_card_port *card_port);
+void leapraid_transport_add_card_phy(struct leapraid_adapter *adapter,
+ struct leapraid_card_phy *card_phy,
+ struct leapraid_sas_phy_p0 *phy_pg0,
+ struct device *parent_dev);
+int leapraid_transport_add_exp_phy(struct leapraid_adapter *adapter,
+ struct leapraid_card_phy *card_phy,
+ struct leapraid_exp_p1 *exp_pg1,
+ struct device *parent_dev);
+void leapraid_transport_update_links(struct leapraid_adapter *adapter,
+ u64 sas_address, u16 handle,
+ u8 phy_number, u8 link_rate,
+ struct leapraid_card_port *card_port);
+void leapraid_transport_detach_phy_to_port(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node,
+ struct leapraid_card_phy *card_phy);
+void leapraid_transport_attach_phy_to_port(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *sas_node,
+ struct leapraid_card_phy *card_phy,
+ u64 sas_address,
+ struct leapraid_card_port *card_port);
+int leapraid_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmd);
+void leapraid_smart_polling_start(struct leapraid_adapter *adapter);
+void leapraid_smart_polling_stop(struct leapraid_adapter *adapter);
+void leapraid_smart_fault_detect(struct leapraid_adapter *adapter, u16 hdl);
+void leapraid_free_internal_scsi_cmd(struct leapraid_adapter *adapter);
+
+#endif /* LEAPRAID_FUNC_H_INCLUDED */
diff --git a/drivers/scsi/leapraid/leapraid_os.c b/drivers/scsi/leapraid/leapraid_os.c
new file mode 100644
index 000000000000..be0f7cbb6684
--- /dev/null
+++ b/drivers/scsi/leapraid/leapraid_os.c
@@ -0,0 +1,2365 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 LeapIO Tech Inc.
+ *
+ * LeapRAID Storage and RAID Controller driver.
+ */
+
+#include <linux/module.h>
+
+#include "leapraid_func.h"
+#include "leapraid.h"
+
+LIST_HEAD(leapraid_adapter_list);
+DEFINE_SPINLOCK(leapraid_adapter_lock);
+
+MODULE_AUTHOR(LEAPRAID_AUTHOR);
+MODULE_DESCRIPTION(LEAPRAID_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(LEAPRAID_DRIVER_VERSION);
+
+static int leapraid_ids;
+
+static int open_pcie_trace = 1;
+module_param(open_pcie_trace, int, 0644);
+MODULE_PARM_DESC(open_pcie_trace, "enable PCIe trace: default=1(enabled)/0(disabled)");
+
+static int enable_mp = 1;
+module_param(enable_mp, int, 0444);
+MODULE_PARM_DESC(enable_mp,
+	"enable multipath on target device. default=1(enable)/0(disable)");
+
+static inline void leapraid_get_sense_data(char *sense,
+ struct sense_info *data)
+{
+ bool desc_format = (sense[0] & SCSI_SENSE_RESPONSE_CODE_MASK) >=
+ DESC_FORMAT_THRESHOLD;
+
+ if (desc_format) {
+ data->sense_key = sense[1] & SENSE_KEY_MASK;
+ data->asc = sense[2];
+ data->ascq = sense[3];
+ } else {
+ data->sense_key = sense[2] & SENSE_KEY_MASK;
+ data->asc = sense[12];
+ data->ascq = sense[13];
+ }
+}
+
+static struct Scsi_Host *pdev_to_shost(struct pci_dev *pdev)
+{
+ return pci_get_drvdata(pdev);
+}
+
+static struct leapraid_adapter *pdev_to_adapter(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+
+ if (!shost)
+ return NULL;
+
+ return shost_priv(shost);
+}
+
+struct leapraid_io_req_tracker *leapraid_get_scmd_priv(struct scsi_cmnd *scmd)
+{
+ return scsi_cmd_priv(scmd);
+}
+
+void leapraid_set_tm_flg(struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct scsi_device *sdev;
+ bool skip = false;
+
+	/*
+	 * Don't break out of the loop: shost_for_each_device() holds a
+	 * reference on the current device, and breaking out without a
+	 * scsi_device_put() would leak it.
+	 */
+ shost_for_each_device(sdev, adapter->shost) {
+ if (skip)
+ continue;
+
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv)
+ continue;
+
+ if (sdev_priv->starget_priv->hdl == hdl) {
+ sdev_priv->starget_priv->tm_busy = true;
+ skip = true;
+ }
+ }
+}
+
+void leapraid_clear_tm_flg(struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct scsi_device *sdev;
+ bool skip = false;
+
+	/*
+	 * Don't break out of the loop: shost_for_each_device() holds a
+	 * reference on the current device, and breaking out without a
+	 * scsi_device_put() would leak it.
+	 */
+ shost_for_each_device(sdev, adapter->shost) {
+ if (skip)
+ continue;
+
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv)
+ continue;
+
+ if (sdev_priv->starget_priv->hdl == hdl) {
+ sdev_priv->starget_priv->tm_busy = false;
+ skip = true;
+ }
+ }
+}
+
+static int leapraid_tm_cmd_map_status(struct leapraid_adapter *adapter,
+ uint channel,
+ uint id,
+ uint lun,
+ u8 type,
+ u16 taskid_task)
+{
+ int rc = FAILED;
+
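+	/* the TM maps to SUCCESS once no matching command remains outstanding */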
+ if (taskid_task <= adapter->shost->can_queue) {
+ switch (type) {
+ case LEAPRAID_TM_TASKTYPE_ABRT_TASK_SET:
+ case LEAPRAID_TM_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (!leapraid_scmd_find_by_lun(adapter, id, lun,
+ channel))
+ rc = SUCCESS;
+ break;
+ case LEAPRAID_TM_TASKTYPE_TARGET_RESET:
+ if (!leapraid_scmd_find_by_tgt(adapter, id, channel))
+ rc = SUCCESS;
+ break;
+ default:
+ rc = SUCCESS;
+ }
+ }
+
+ if (taskid_task == adapter->driver_cmds.driver_scsiio_cmd.taskid) {
+ if ((adapter->driver_cmds.driver_scsiio_cmd.status &
+ LEAPRAID_CMD_DONE) ||
+ (adapter->driver_cmds.driver_scsiio_cmd.status &
+ LEAPRAID_CMD_NOT_USED))
+ rc = SUCCESS;
+ }
+
+ if (taskid_task == adapter->driver_cmds.ctl_cmd.hp_taskid) {
+ if ((adapter->driver_cmds.ctl_cmd.status &
+ LEAPRAID_CMD_DONE) ||
+ (adapter->driver_cmds.ctl_cmd.status &
+ LEAPRAID_CMD_NOT_USED))
+ rc = SUCCESS;
+ }
+
+ return rc;
+}
+
+static int leapraid_tm_post_processing(struct leapraid_adapter *adapter,
+ u16 hdl, uint channel, uint id,
+ uint lun, u8 type, u16 taskid_task)
+{
+ int rc;
+
+ rc = leapraid_tm_cmd_map_status(adapter, channel, id, lun,
+ type, taskid_task);
+ if (rc == SUCCESS)
+ return rc;
+
+ leapraid_mask_int(adapter);
+ leapraid_sync_irqs(adapter, true);
+ leapraid_unmask_int(adapter);
+
+ rc = leapraid_tm_cmd_map_status(adapter, channel, id, lun, type,
+ taskid_task);
+ return rc;
+}
+
+static void leapraid_build_tm_req(struct leapraid_scsi_tm_req *scsi_tm_req,
+ u16 hdl, uint lun, u8 type, u8 tr_method,
+ u16 target_taskid)
+{
+ memset(scsi_tm_req, 0, sizeof(*scsi_tm_req));
+ scsi_tm_req->func = LEAPRAID_FUNC_SCSI_TMF;
+ scsi_tm_req->dev_hdl = cpu_to_le16(hdl);
+ scsi_tm_req->task_type = type;
+ scsi_tm_req->msg_flg = tr_method;
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK ||
+ type == LEAPRAID_TM_TASKTYPE_QUERY_TASK)
+ scsi_tm_req->task_mid = cpu_to_le16(target_taskid);
+ int_to_scsilun(lun, (struct scsi_lun *)scsi_tm_req->lun);
+}
+
+int leapraid_issue_tm(struct leapraid_adapter *adapter, u16 hdl, uint channel,
+ uint id, uint lun, u8 type,
+ u16 target_taskid, u8 tr_method)
+{
+ struct leapraid_scsi_tm_req *scsi_tm_req;
+ struct leapraid_scsiio_req *scsiio_req;
+ struct leapraid_io_req_tracker *io_req_tracker = NULL;
+ u16 msix_task = 0;
+ bool issue_reset = false;
+ u32 db;
+ int rc;
+
+ lockdep_assert_held(&adapter->driver_cmds.tm_cmd.mutex);
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering) {
+ dev_info(&adapter->pdev->dev,
+ "%s %s: host is recovering, skip tm command!\n",
+ __func__, adapter->adapter_attr.name);
+ return FAILED;
+ }
+
+ db = leapraid_readl(&adapter->iomem_base->db);
+ if (db & LEAPRAID_DB_USED) {
+ dev_info(&adapter->pdev->dev,
+ "%s unexpected db status, issuing hard reset!\n",
+ adapter->adapter_attr.name);
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ rc = leapraid_hard_reset_handler(adapter, FULL_RESET);
+ return (!rc) ? SUCCESS : FAILED;
+ }
+
+ if ((db & LEAPRAID_DB_MASK) == LEAPRAID_DB_FAULT) {
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ rc = leapraid_hard_reset_handler(adapter, FULL_RESET);
+ return (!rc) ? SUCCESS : FAILED;
+ }
+
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK)
+ io_req_tracker = leapraid_get_io_tracker_from_taskid(adapter,
+ target_taskid);
+
+ adapter->driver_cmds.tm_cmd.status = LEAPRAID_CMD_PENDING;
+ scsi_tm_req =
+ leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.tm_cmd.hp_taskid);
+ leapraid_build_tm_req(scsi_tm_req, hdl, lun, type, tr_method,
+ target_taskid);
+ memset((void *)(&adapter->driver_cmds.tm_cmd.reply), 0,
+ sizeof(struct leapraid_scsi_tm_rep));
+ leapraid_set_tm_flg(adapter, hdl);
+ init_completion(&adapter->driver_cmds.tm_cmd.done);
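+	/* route ABORT_TASK to the reply queue that handled the aborted I/O */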
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK &&
+ io_req_tracker &&
+ io_req_tracker->msix_io < adapter->adapter_attr.rq_cnt)
+ msix_task = io_req_tracker->msix_io;
+ else
+ msix_task = 0;
+ leapraid_fire_hpr_task(adapter,
+ adapter->driver_cmds.tm_cmd.hp_taskid,
+ msix_task);
+ wait_for_completion_timeout(&adapter->driver_cmds.tm_cmd.done,
+ LEAPRAID_TM_CMD_TIMEOUT * HZ);
+ if (!(adapter->driver_cmds.tm_cmd.status & LEAPRAID_CMD_DONE)) {
+		issue_reset =
+			leapraid_check_reset(adapter->driver_cmds.tm_cmd.status);
+ if (issue_reset) {
+ dev_info(&adapter->pdev->dev,
+ "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ rc = leapraid_hard_reset_handler(adapter, FULL_RESET);
+ rc = (!rc) ? SUCCESS : FAILED;
+ goto out;
+ }
+ }
+
+ leapraid_sync_irqs(adapter, false);
+
+ switch (type) {
+ case LEAPRAID_TM_TASKTYPE_TARGET_RESET:
+ case LEAPRAID_TM_TASKTYPE_ABRT_TASK_SET:
+ case LEAPRAID_TM_TASKTYPE_LOGICAL_UNIT_RESET:
+ rc = leapraid_tm_post_processing(adapter, hdl, channel, id, lun,
+ type, target_taskid);
+ break;
+ case LEAPRAID_TM_TASKTYPE_ABORT_TASK:
+ rc = SUCCESS;
+ scsiio_req = leapraid_get_task_desc(adapter, target_taskid);
+ if (le16_to_cpu(scsiio_req->dev_hdl) != hdl)
+ break;
+ dev_err(&adapter->pdev->dev, "%s abort failed, hdl=0x%04x\n",
+ adapter->adapter_attr.name, hdl);
+ rc = FAILED;
+ break;
+ case LEAPRAID_TM_TASKTYPE_QUERY_TASK:
+ rc = SUCCESS;
+ break;
+ default:
+ rc = FAILED;
+ break;
+ }
+
+out:
+ leapraid_clear_tm_flg(adapter, hdl);
+ adapter->driver_cmds.tm_cmd.status = LEAPRAID_CMD_NOT_USED;
+ return rc;
+}
+
+int leapraid_issue_locked_tm(struct leapraid_adapter *adapter, u16 hdl,
+ uint channel, uint id, uint lun, u8 type,
+ u16 target_taskid, u8 tr_method)
+{
+ int rc;
+
+ mutex_lock(&adapter->driver_cmds.tm_cmd.mutex);
+ rc = leapraid_issue_tm(adapter, hdl, channel, id, lun, type,
+ target_taskid, tr_method);
+ mutex_unlock(&adapter->driver_cmds.tm_cmd.mutex);
+
+ return rc;
+}
+
+void leapraid_smart_fault_detect(struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_sas_dev *sas_dev;
+ struct scsi_target *starget;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ if (!sas_dev) {
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ goto out;
+ }
+
+ starget = sas_dev->starget;
+ starget_priv = starget->hostdata;
+ if ((starget_priv->flg & LEAPRAID_TGT_FLG_RAID_MEMBER) ||
+ (starget_priv->flg & LEAPRAID_TGT_FLG_VOLUME)) {
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ goto out;
+ }
+
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ leapraid_async_turn_on_led(adapter, hdl);
+out:
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+}
+
+static void leapraid_process_sense_data(struct leapraid_adapter *adapter,
+ struct leapraid_scsiio_rep *scsiio_rep,
+ struct scsi_cmnd *scmd, u16 taskid)
+{
+ struct sense_info data;
+ const void *sense_data;
+ u32 sz;
+
+ if (!(scsiio_rep->scsi_state & LEAPRAID_SCSI_STATE_AUTOSENSE_VALID))
+ return;
+
+ sense_data = leapraid_get_sense_buffer(adapter, taskid);
+ sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(scsiio_rep->sense_count));
+
+ memcpy(scmd->sense_buffer, sense_data, sz);
+ leapraid_get_sense_data(scmd->sense_buffer, &data);
+ if (data.asc == ASC_FAILURE_PREDICTION_THRESHOLD_EXCEEDED)
+ leapraid_smart_fault_detect(adapter,
+ le16_to_cpu(scsiio_rep->dev_hdl));
+}
+
+static void leapraid_handle_data_underrun(
+ struct leapraid_scsiio_rep *scsiio_rep,
+ struct scsi_cmnd *scmd, u32 xfer_cnt)
+{
+ u8 scsi_status = scsiio_rep->scsi_status;
+ u8 scsi_state = scsiio_rep->scsi_state;
+
+ scmd->result = (DID_OK << LEAPRAID_SCSI_HOST_SHIFT) | scsi_status;
+
+ if (scsi_state & LEAPRAID_SCSI_STATE_AUTOSENSE_VALID)
+ return;
+
+ if (xfer_cnt < scmd->underflow) {
+ if (scsi_status == SAM_STAT_BUSY)
+ scmd->result = SAM_STAT_BUSY;
+ else
+ scmd->result = DID_SOFT_ERROR <<
+ LEAPRAID_SCSI_HOST_SHIFT;
+ } else if (scsi_state & (LEAPRAID_SCSI_STATE_AUTOSENSE_FAILED |
+ LEAPRAID_SCSI_STATE_NO_SCSI_STATUS)) {
+ scmd->result = DID_SOFT_ERROR << LEAPRAID_SCSI_HOST_SHIFT;
+ } else if (scsi_state & LEAPRAID_SCSI_STATE_TERMINATED) {
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ } else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
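+		/* empty REPORT_LUNS: synthesize ILLEGAL REQUEST sense data */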
+ scsiio_rep->scsi_state = LEAPRAID_SCSI_STATE_AUTOSENSE_VALID;
+ scsiio_rep->scsi_status = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST,
+ LEAPRAID_SCSI_ASC_INVALID_CMD_CODE,
+ LEAPRAID_SCSI_ASCQ_DEFAULT);
+ }
+}
+
+static void leapraid_handle_success_status(
+ struct leapraid_scsiio_rep *scsiio_rep,
+ struct scsi_cmnd *scmd,
+ u32 response_code)
+{
+ u8 scsi_status = scsiio_rep->scsi_status;
+ u8 scsi_state = scsiio_rep->scsi_state;
+
+ scmd->result = (DID_OK << LEAPRAID_SCSI_HOST_SHIFT) | scsi_status;
+
+ if (response_code == LEAPRAID_TM_RSP_INVALID_FRAME ||
+ (scsi_state & (LEAPRAID_SCSI_STATE_AUTOSENSE_FAILED |
+ LEAPRAID_SCSI_STATE_NO_SCSI_STATUS)))
+ scmd->result = DID_SOFT_ERROR << LEAPRAID_SCSI_HOST_SHIFT;
+ else if (scsi_state & LEAPRAID_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+}
+
+static void leapraid_scsiio_done_dispatch(struct leapraid_adapter *adapter,
+ struct leapraid_scsiio_rep *scsiio_rep,
+ struct leapraid_sdev_priv *sdev_priv,
+ struct scsi_cmnd *scmd,
+ u16 taskid, u32 response_code)
+{
+ u8 scsi_status = scsiio_rep->scsi_status;
+ u8 scsi_state = scsiio_rep->scsi_state;
+ u16 adapter_status;
+ u32 xfer_cnt;
+ u32 sz;
+
+ adapter_status = le16_to_cpu(scsiio_rep->adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+
+ xfer_cnt = le32_to_cpu(scsiio_rep->transfer_count);
+ scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
+
+ if (adapter_status == LEAPRAID_ADAPTER_STATUS_SCSI_DATA_UNDERRUN &&
+ xfer_cnt == 0 &&
+ (scsi_status == LEAPRAID_SCSI_STATUS_BUSY ||
+ scsi_status == LEAPRAID_SCSI_STATUS_RESERVATION_CONFLICT ||
+ scsi_status == LEAPRAID_SCSI_STATUS_TASK_SET_FULL)) {
+ adapter_status = LEAPRAID_ADAPTER_STATUS_SUCCESS;
+ }
+
+ switch (adapter_status) {
+ case LEAPRAID_ADAPTER_STATUS_SCSI_DEVICE_NOT_THERE:
+ scmd->result = DID_NO_CONNECT << LEAPRAID_SCSI_HOST_SHIFT;
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_BUSY:
+ case LEAPRAID_ADAPTER_STATUS_INSUFFICIENT_RESOURCES:
+ scmd->result = SAM_STAT_BUSY;
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_SCSI_RESIDUAL_MISMATCH:
+ if (xfer_cnt == 0 || scmd->underflow > xfer_cnt)
+ scmd->result = DID_SOFT_ERROR <<
+ LEAPRAID_SCSI_HOST_SHIFT;
+ else
+ scmd->result = (DID_OK << LEAPRAID_SCSI_HOST_SHIFT) |
+ scsi_status;
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_SCSI_ADAPTER_TERMINATED:
+ if (sdev_priv->block) {
+ scmd->result = DID_TRANSPORT_DISRUPTED <<
+ LEAPRAID_SCSI_HOST_SHIFT;
+ return;
+ }
+
+ if (scmd->device->channel == RAID_CHANNEL &&
+ scsi_state == (LEAPRAID_SCSI_STATE_TERMINATED |
+ LEAPRAID_SCSI_STATE_NO_SCSI_STATUS)) {
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ break;
+ }
+
+ scmd->result = DID_SOFT_ERROR << LEAPRAID_SCSI_HOST_SHIFT;
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_SCSI_TASK_TERMINATED:
+ case LEAPRAID_ADAPTER_STATUS_SCSI_EXT_TERMINATED:
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_SCSI_DATA_UNDERRUN:
+ leapraid_handle_data_underrun(scsiio_rep, scmd, xfer_cnt);
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_SCSI_DATA_OVERRUN:
+ scsi_set_resid(scmd, 0);
+ leapraid_handle_success_status(scsiio_rep, scmd,
+ response_code);
+ break;
+ case LEAPRAID_ADAPTER_STATUS_SCSI_RECOVERED_ERROR:
+ case LEAPRAID_ADAPTER_STATUS_SUCCESS:
+ leapraid_handle_success_status(scsiio_rep, scmd,
+ response_code);
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_SCSI_PROTOCOL_ERROR:
+ case LEAPRAID_ADAPTER_STATUS_INTERNAL_ERROR:
+ case LEAPRAID_ADAPTER_STATUS_SCSI_IO_DATA_ERROR:
+ case LEAPRAID_ADAPTER_STATUS_SCSI_TASK_MGMT_FAILED:
+ default:
+ scmd->result = DID_SOFT_ERROR << LEAPRAID_SCSI_HOST_SHIFT;
+ break;
+ }
+
+ if (!scmd->result)
+ return;
+
+ scsi_print_command(scmd);
+	dev_warn(&adapter->pdev->dev,
+		 "scsiio warn: hdl=0x%x, adapter_status=0x%x, scsi_status=0x%x, scsi_state=0x%x\n",
+		 le16_to_cpu(scsiio_rep->dev_hdl), adapter_status,
+		 scsi_status, scsi_state);
+
+ if (scsi_state & LEAPRAID_SCSI_STATE_AUTOSENSE_VALID) {
+ struct scsi_sense_hdr sshdr;
+
+ sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(scsiio_rep->sense_count));
+ if (scsi_normalize_sense(scmd->sense_buffer, sz,
+ &sshdr)) {
+ dev_warn(&adapter->pdev->dev,
+ "sense: key=0x%x asc=0x%x ascq=0x%x\n",
+ sshdr.sense_key, sshdr.asc,
+ sshdr.ascq);
+ } else {
+ dev_warn(&adapter->pdev->dev,
+ "sense: invalid sense data\n");
+ }
+ }
+}
+
+u8 leapraid_scsiio_done(struct leapraid_adapter *adapter, u16 taskid,
+ u8 msix_index, u32 rep)
+{
+ struct leapraid_scsiio_rep *scsiio_rep = NULL;
+ struct leapraid_sdev_priv *sdev_priv = NULL;
+ struct scsi_cmnd *scmd = NULL;
+ u32 response_code = 0;
+
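+	/* the driver's internal scmd completes via driver_scsiio_cmd, not scsi_done() */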
+ if (likely(taskid != adapter->driver_cmds.driver_scsiio_cmd.taskid))
+ scmd = leapraid_get_scmd_from_taskid(adapter, taskid);
+ else
+ scmd = adapter->driver_cmds.internal_scmd;
+ if (!scmd)
+ return 1;
+
+ scsiio_rep = leapraid_get_reply_vaddr(adapter, rep);
+ if (!scsiio_rep) {
+ scmd->result = DID_OK << LEAPRAID_SCSI_HOST_SHIFT;
+ goto out;
+ }
+
+ sdev_priv = scmd->device->hostdata;
+ if (!sdev_priv ||
+ !sdev_priv->starget_priv ||
+ sdev_priv->starget_priv->deleted) {
+ scmd->result = DID_NO_CONNECT << LEAPRAID_SCSI_HOST_SHIFT;
+ goto out;
+ }
+
+ if (scsiio_rep->scsi_state & LEAPRAID_SCSI_STATE_RESPONSE_INFO_VALID)
+ response_code = le32_to_cpu(scsiio_rep->resp_info) & 0xFF;
+
+ leapraid_process_sense_data(adapter, scsiio_rep, scmd, taskid);
+ leapraid_scsiio_done_dispatch(adapter, scsiio_rep, sdev_priv, scmd,
+ taskid, response_code);
+
+out:
+ scsi_dma_unmap(scmd);
+ if (unlikely(taskid == adapter->driver_cmds.driver_scsiio_cmd.taskid)) {
+ adapter->driver_cmds.driver_scsiio_cmd.status =
+ LEAPRAID_CMD_DONE;
+ complete(&adapter->driver_cmds.driver_scsiio_cmd.done);
+ return 0;
+ }
+ leapraid_free_taskid(adapter, taskid);
+ scsi_done(scmd);
+ return 0;
+}
+
+static void leapraid_probe_raid(struct leapraid_adapter *adapter)
+{
+ struct leapraid_raid_volume *raid_volume, *raid_volume_next;
+ int rc;
+
+ list_for_each_entry_safe(raid_volume, raid_volume_next,
+ &adapter->dev_topo.raid_volume_list, list) {
+ if (raid_volume->starget)
+ continue;
+
+ rc = scsi_add_device(adapter->shost, RAID_CHANNEL,
+ raid_volume->id, 0);
+ if (rc)
+ leapraid_raid_volume_remove(adapter, raid_volume);
+ }
+}
+
+static void leapraid_sas_dev_make_active(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev)
+{
+ unsigned long flags;
+
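+	/* drop any init-list reference, then take one for the active list */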
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ if (!list_empty(&sas_dev->list)) {
+ list_del_init(&sas_dev->list);
+ leapraid_sdev_put(sas_dev);
+ }
+
+ leapraid_sdev_get(sas_dev);
+ list_add_tail(&sas_dev->list, &adapter->dev_topo.sas_dev_list);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+}
+
+static void leapraid_probe_sas(struct leapraid_adapter *adapter)
+{
+ struct leapraid_sas_dev *sas_dev;
+ bool added;
+
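+	/* drain the init list, registering each device with the SAS transport */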
+ for (;;) {
+ sas_dev = leapraid_get_next_sas_dev_from_init_list(adapter);
+ if (!sas_dev)
+ break;
+
+ added = leapraid_transport_port_add(adapter,
+ sas_dev->hdl,
+ sas_dev->parent_sas_addr,
+ sas_dev->card_port);
+
+ if (!added)
+ goto remove_dev;
+
+ if (!sas_dev->starget &&
+ !adapter->scan_dev_desc.driver_loading) {
+ leapraid_transport_port_remove(adapter,
+ sas_dev->sas_addr,
+ sas_dev->parent_sas_addr,
+ sas_dev->card_port);
+ goto remove_dev;
+ }
+
+ leapraid_sas_dev_make_active(adapter, sas_dev);
+ leapraid_sdev_put(sas_dev);
+ continue;
+
+remove_dev:
+ leapraid_sas_dev_remove(adapter, sas_dev);
+ leapraid_sdev_put(sas_dev);
+ }
+}
+
+static bool leapraid_get_boot_dev(struct leapraid_boot_dev *boot_dev,
+ void **pdev, u32 *pchnl)
+{
+ if (boot_dev->dev) {
+ *pdev = boot_dev->dev;
+ *pchnl = boot_dev->chnl;
+ return true;
+ }
+ return false;
+}
+
+static void leapraid_probe_boot_dev(struct leapraid_adapter *adapter)
+{
+ void *dev = NULL;
+ u32 chnl;
+
+ if (leapraid_get_boot_dev(&adapter->boot_devs.requested_boot_dev, &dev,
+ &chnl))
+ goto boot_dev_found;
+
+ if (leapraid_get_boot_dev(&adapter->boot_devs.requested_alt_boot_dev,
+ &dev, &chnl))
+ goto boot_dev_found;
+
+ if (leapraid_get_boot_dev(&adapter->boot_devs.current_boot_dev, &dev,
+ &chnl))
+ goto boot_dev_found;
+
+ return;
+
+boot_dev_found:
+ switch (chnl) {
+ case RAID_CHANNEL:
+ {
+ struct leapraid_raid_volume *raid_volume =
+ (struct leapraid_raid_volume *)dev;
+
+ if (raid_volume->starget)
+ return;
+
+ /* TODO eedp */
+
+ if (scsi_add_device(adapter->shost, RAID_CHANNEL,
+ raid_volume->id, 0))
+ leapraid_raid_volume_remove(adapter, raid_volume);
+ break;
+ }
+ default:
+ {
+ struct leapraid_sas_dev *sas_dev =
+ (struct leapraid_sas_dev *)dev;
+ struct leapraid_sas_port *sas_port;
+ unsigned long flags;
+
+ if (sas_dev->starget)
+ return;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ list_move_tail(&sas_dev->list,
+ &adapter->dev_topo.sas_dev_list);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (!sas_dev->card_port)
+ return;
+
+ sas_port = leapraid_transport_port_add(adapter, sas_dev->hdl,
+ sas_dev->parent_sas_addr,
+ sas_dev->card_port);
+ if (!sas_port)
+ leapraid_sas_dev_remove(adapter, sas_dev);
+ break;
+ }
+ }
+}
+
+static void leapraid_probe_devices(struct leapraid_adapter *adapter)
+{
+ leapraid_probe_boot_dev(adapter);
+
+	if (adapter->adapter_attr.raid_support)
+		leapraid_probe_raid(adapter);
+	leapraid_probe_sas(adapter);
+}
+
+void leapraid_scan_dev_done(struct leapraid_adapter *adapter)
+{
+ if (adapter->scan_dev_desc.wait_scan_dev_done) {
+ adapter->scan_dev_desc.wait_scan_dev_done = false;
+ leapraid_probe_devices(adapter);
+ }
+
+ leapraid_check_scheduled_fault_start(adapter);
+ leapraid_fw_log_start(adapter);
+ adapter->scan_dev_desc.driver_loading = false;
+ leapraid_smart_polling_start(adapter);
+}
+
+static void leapraid_ir_shutdown(struct leapraid_adapter *adapter)
+{
+ struct leapraid_raid_act_req *raid_act_req;
+ struct leapraid_raid_act_rep *raid_act_rep;
+ struct leapraid_driver_cmd *raid_action_cmd;
+
+ if (!adapter || !adapter->adapter_attr.raid_support)
+ return;
+
+ if (list_empty(&adapter->dev_topo.raid_volume_list))
+ return;
+
+ if (leapraid_pci_removed(adapter))
+ return;
+
+ raid_action_cmd = &adapter->driver_cmds.raid_action_cmd;
+
+ mutex_lock(&raid_action_cmd->mutex);
+ raid_action_cmd->status = LEAPRAID_CMD_PENDING;
+
+ raid_act_req = leapraid_get_task_desc(adapter,
+ raid_action_cmd->inter_taskid);
+ memset(raid_act_req, 0, sizeof(struct leapraid_raid_act_req));
+ raid_act_req->func = LEAPRAID_FUNC_RAID_ACTION;
+ raid_act_req->act = LEAPRAID_RAID_ACT_SYSTEM_SHUTDOWN_INITIATED;
+
+ dev_info(&adapter->pdev->dev, "ir shutdown start\n");
+ init_completion(&raid_action_cmd->done);
+ leapraid_fire_task(adapter, raid_action_cmd->inter_taskid);
+ wait_for_completion_timeout(&raid_action_cmd->done,
+ LEAPRAID_RAID_ACTION_CMD_TIMEOUT * HZ);
+
+ if (!(raid_action_cmd->status & LEAPRAID_CMD_DONE)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: timeout waiting for ir shutdown\n", __func__);
+ goto out;
+ }
+
+ if (raid_action_cmd->status & LEAPRAID_CMD_REPLY_VALID) {
+ raid_act_rep = (void *)(&raid_action_cmd->reply);
+ dev_info(&adapter->pdev->dev,
+ "ir shutdown done, adapter status=0x%04x\n",
+ le16_to_cpu(raid_act_rep->adapter_status));
+ }
+
+out:
+ raid_action_cmd->status = LEAPRAID_CMD_NOT_USED;
+ mutex_unlock(&raid_action_cmd->mutex);
+}
+
+static const struct pci_device_id leapraid_pci_table[] = {
+ { PCI_DEVICE(LEAPRAID_VENDOR_ID, LEAPRAID_DEVID_HBA) },
+ { PCI_DEVICE(LEAPRAID_VENDOR_ID, LEAPRAID_DEVID_RAID) },
+ { 0, }
+};
+
+static inline bool leapraid_is_scmd_permitted(struct leapraid_adapter *adapter,
+ struct scsi_cmnd *scmd)
+{
+ u8 opcode;
+
+ if (adapter->access_ctrl.pcie_recovering ||
+ adapter->access_ctrl.adapter_thermal_alert)
+ return false;
+
+ if (adapter->access_ctrl.host_removing) {
+ if (leapraid_pci_removed(adapter))
+ return false;
+
+ opcode = scmd->cmnd[0];
+		return opcode == SYNCHRONIZE_CACHE || opcode == START_STOP;
+ }
+ return true;
+}
+
+static bool leapraid_should_queuecommand(struct leapraid_adapter *adapter,
+ struct leapraid_sdev_priv *sdev_priv,
+ struct scsi_cmnd *scmd, int *rc)
+{
+ struct leapraid_starget_priv *starget_priv;
+
+ if (!sdev_priv || !sdev_priv->starget_priv)
+ goto no_connect;
+
+ if (!leapraid_is_scmd_permitted(adapter, scmd))
+ goto no_connect;
+
+ starget_priv = sdev_priv->starget_priv;
+ if (starget_priv->hdl == LEAPRAID_INVALID_DEV_HANDLE)
+ goto no_connect;
+
+ if (sdev_priv->block &&
+ scmd->device->host->shost_state == SHOST_RECOVERY &&
+ scmd->cmnd[0] == TEST_UNIT_READY) {
+ scsi_build_sense(scmd, 0, UNIT_ATTENTION,
+ LEAPRAID_SCSI_ASC_POWER_ON_RESET,
+ LEAPRAID_SCSI_ASCQ_POWER_ON_RESET);
+ goto done_out;
+ }
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->reset_desc.adapter_link_resetting) {
+ *rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ } else if (starget_priv->deleted || sdev_priv->deleted) {
+ goto no_connect;
+ } else if (starget_priv->tm_busy || sdev_priv->block) {
+ *rc = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto out;
+ }
+
+ return true;
+
+no_connect:
+ scmd->result = DID_NO_CONNECT << LEAPRAID_SCSI_HOST_SHIFT;
+done_out:
+ if (likely(scmd != adapter->driver_cmds.internal_scmd))
+ scsi_done(scmd);
+out:
+ return false;
+}
+
+static u32 build_scsiio_req_control(struct scsi_cmnd *scmd,
+ struct leapraid_sdev_priv *sdev_priv)
+{
+ u32 control;
+
+ switch (scmd->sc_data_direction) {
+ case DMA_FROM_DEVICE:
+ control = LEAPRAID_SCSIIO_CTRL_READ;
+ break;
+ case DMA_TO_DEVICE:
+ control = LEAPRAID_SCSIIO_CTRL_WRITE;
+ break;
+ default:
+ control = LEAPRAID_SCSIIO_CTRL_NODATATRANSFER;
+ break;
+ }
+
+ control |= LEAPRAID_SCSIIO_CTRL_SIMPLEQ;
+
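+	/* grant command priority to RT-class requests on NCQ-capable devices */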
+ if (sdev_priv->ncq &&
+ (IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd))) ==
+ IOPRIO_CLASS_RT))
+ control |= LEAPRAID_SCSIIO_CTRL_CMDPRI;
+ if (scmd->cmd_len == 32)
+ control |= 4 << LEAPRAID_SCSIIO_CTRL_ADDCDBLEN_SHIFT;
+
+ return control;
+}
+
+int leapraid_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
+ struct leapraid_adapter *adapter = shost_priv(scmd->device->host);
+ struct leapraid_sdev_priv *sdev_priv = scmd->device->hostdata;
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_scsiio_req *scsiio_req;
+ u32 control;
+ u16 taskid;
+ u16 hdl;
+ int rc = 0;
+
+ if (!leapraid_should_queuecommand(adapter, sdev_priv, scmd, &rc))
+ goto out;
+
+ starget_priv = sdev_priv->starget_priv;
+ hdl = starget_priv->hdl;
+ control = build_scsiio_req_control(scmd, sdev_priv);
+
+ if (unlikely(scmd == adapter->driver_cmds.internal_scmd))
+ taskid = adapter->driver_cmds.driver_scsiio_cmd.taskid;
+ else
+ taskid = leapraid_alloc_scsiio_taskid(adapter, scmd);
+ scsiio_req = leapraid_get_task_desc(adapter, taskid);
+
+	if (sdev_priv->starget_priv->flg & LEAPRAID_TGT_FLG_RAID_MEMBER)
+		scsiio_req->func = LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH;
+	else
+		scsiio_req->func = LEAPRAID_FUNC_SCSIIO_REQ;
+
+ scsiio_req->dev_hdl = cpu_to_le16(hdl);
+ scsiio_req->data_len = cpu_to_le32(scsi_bufflen(scmd));
+ scsiio_req->ctrl = cpu_to_le32(control);
+ scsiio_req->io_flg = cpu_to_le16(scmd->cmd_len);
+ scsiio_req->msg_flg = 0;
+ scsiio_req->sense_buffer_len = SCSI_SENSE_BUFFERSIZE;
+ scsiio_req->sense_buffer_low_add =
+ leapraid_get_sense_buffer_dma(adapter, taskid);
+ scsiio_req->sgl_offset0 =
+ offsetof(struct leapraid_scsiio_req, sgl) /
+ LEAPRAID_DWORDS_BYTE_SIZE;
+ int_to_scsilun(sdev_priv->lun, (struct scsi_lun *)scsiio_req->lun);
+ memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
+ if (scsiio_req->data_len) {
+ if (leapraid_build_scmd_ieee_sg(adapter, scmd, taskid)) {
+ leapraid_free_taskid(adapter, taskid);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+ } else {
+ leapraid_build_ieee_nodata_sg(adapter, &scsiio_req->sgl);
+ }
+
+ if (likely(scsiio_req->func == LEAPRAID_FUNC_SCSIIO_REQ)) {
+ leapraid_fire_scsi_io(adapter, taskid,
+ le16_to_cpu(scsiio_req->dev_hdl));
+ } else {
+ leapraid_fire_task(adapter, taskid);
+ }
+ dev_dbg(&adapter->pdev->dev,
+ "LEAPRAID_SCSIIO: Send Descriptor taskid %d, req type 0x%x\n",
+ taskid, scsiio_req->func);
+out:
+ return rc;
+}
+
+static int leapraid_init_cmd_priv(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ struct leapraid_io_req_tracker *io_tracker;
+
+ io_tracker = leapraid_get_scmd_priv(scmd);
+ leapraid_internal_init_cmd_priv(adapter, io_tracker);
+
+ return 0;
+}
+
+static int leapraid_exit_cmd_priv(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ struct leapraid_io_req_tracker *io_tracker;
+
+ io_tracker = leapraid_get_scmd_priv(scmd);
+ leapraid_internal_exit_cmd_priv(adapter, io_tracker);
+
+ return 0;
+}
+
+static int leapraid_error_handler(struct scsi_cmnd *scmd,
+ const char *str, u8 type)
+{
+ struct leapraid_adapter *adapter = shost_priv(scmd->device->host);
+ struct scsi_target *starget = scmd->device->sdev_target;
+ struct leapraid_starget_priv *starget_priv = starget->hostdata;
+ struct leapraid_io_req_tracker *io_req_tracker = NULL;
+ struct leapraid_sdev_priv *sdev_priv;
+ struct leapraid_sas_dev *sas_dev = NULL;
+ u16 hdl;
+ int rc;
+
+ dev_info(&adapter->pdev->dev,
+ "EH enter: type=%s, scmd=0x%p, req tag=%d\n", str, scmd,
+ scsi_cmd_to_rq(scmd)->tag);
+ scsi_print_command(scmd);
+
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK) {
+ io_req_tracker = leapraid_get_scmd_priv(scmd);
+ dev_info(&adapter->pdev->dev,
+ "EH ABORT: scmd=0x%p, pending=%u ms, tout=%u ms, req tag=%d\n",
+ scmd,
+ jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
+ (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000,
+ scsi_cmd_to_rq(scmd)->tag);
+ }
+
+ if (leapraid_pci_removed(adapter) ||
+ adapter->access_ctrl.host_removing) {
+ dev_err(&adapter->pdev->dev,
+ "EH %s failed: %s scmd=0x%p\n", str,
+ (adapter->access_ctrl.host_removing ?
+ "shost removing!" : "pci_dev removed!"), scmd);
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK)
+ if (io_req_tracker && io_req_tracker->taskid)
+ leapraid_free_taskid(adapter,
+ io_req_tracker->taskid);
+ scmd->result = DID_NO_CONNECT << LEAPRAID_SCSI_HOST_SHIFT;
+#ifdef FAST_IO_FAIL
+ rc = FAST_IO_FAIL;
+#else
+ rc = FAILED;
+#endif
+ goto out;
+ }
+
+ sdev_priv = scmd->device->hostdata;
+ if (!sdev_priv || !sdev_priv->starget_priv) {
+ dev_warn(&adapter->pdev->dev,
+ "EH %s: sdev or starget gone, scmd=0x%p\n",
+ str, scmd);
+ scmd->result = DID_NO_CONNECT << LEAPRAID_SCSI_HOST_SHIFT;
+ scsi_done(scmd);
+ rc = SUCCESS;
+ goto out;
+ }
+
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK) {
+ if (!io_req_tracker) {
+ dev_warn(&adapter->pdev->dev,
+ "EH ABORT: no io tracker, scmd 0x%p\n", scmd);
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ rc = SUCCESS;
+ goto out;
+ }
+
+ if (sdev_priv->starget_priv->flg &
+ LEAPRAID_TGT_FLG_RAID_MEMBER ||
+ sdev_priv->starget_priv->flg & LEAPRAID_TGT_FLG_VOLUME) {
+ dev_err(&adapter->pdev->dev,
+ "EH ABORT: skip RAID/VOLUME target, scmd=0x%p\n",
+ scmd);
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ rc = FAILED;
+ goto out;
+ }
+
+ hdl = sdev_priv->starget_priv->hdl;
+ } else {
+ hdl = 0;
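+		/* for device/target resets on a RAID member, address the owning volume */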
+ if (sdev_priv->starget_priv->flg &
+ LEAPRAID_TGT_FLG_RAID_MEMBER) {
+ sas_dev = leapraid_get_sas_dev_from_tgt(adapter,
+ starget_priv);
+ if (sas_dev)
+ hdl = sas_dev->volume_hdl;
+ } else {
+ hdl = sdev_priv->starget_priv->hdl;
+ }
+
+ if (!hdl) {
+ dev_err(&adapter->pdev->dev,
+ "EH %s failed: target handle is 0, scmd=0x%p\n",
+ str, scmd);
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ rc = FAILED;
+ goto out;
+ }
+ }
+
+ dev_info(&adapter->pdev->dev,
+ "EH issue TM: type=%s, scmd=0x%p, hdl=0x%x\n",
+ str, scmd, hdl);
+
+ rc = leapraid_issue_locked_tm(adapter, hdl, scmd->device->channel,
+ scmd->device->id,
+ (type == LEAPRAID_TM_TASKTYPE_TARGET_RESET ?
+ 0 : scmd->device->lun),
+ type,
+ (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK ?
+ io_req_tracker->taskid : 0),
+ LEAPRAID_TM_MSGFLAGS_LINK_RESET);
+
+out:
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK) {
+ dev_info(&adapter->pdev->dev,
+ "EH ABORT result: %s, scmd=0x%p\n",
+ ((rc == SUCCESS) ? "success" : "failed"), scmd);
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "EH %s result: %s, scmd=0x%p\n",
+ str, ((rc == SUCCESS) ? "success" : "failed"), scmd);
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+ }
+ return rc;
+}
+
+static int leapraid_eh_abort_handler(struct scsi_cmnd *scmd)
+{
+ return leapraid_error_handler(scmd, "ABORT TASK",
+ LEAPRAID_TM_TASKTYPE_ABORT_TASK);
+}
+
+static int leapraid_eh_device_reset_handler(struct scsi_cmnd *scmd)
+{
+ return leapraid_error_handler(scmd, "UNIT RESET",
+ LEAPRAID_TM_TASKTYPE_LOGICAL_UNIT_RESET);
+}
+
+static int leapraid_eh_target_reset_handler(struct scsi_cmnd *scmd)
+{
+ return leapraid_error_handler(scmd, "TARGET RESET",
+ LEAPRAID_TM_TASKTYPE_TARGET_RESET);
+}
+
+static int leapraid_eh_host_reset_handler(struct scsi_cmnd *scmd)
+{
+ struct leapraid_adapter *adapter = shost_priv(scmd->device->host);
+ int rc;
+
+ dev_info(&adapter->pdev->dev,
+ "EH HOST RESET enter: scmd=%p, req tag=%d\n",
+ scmd,
+ scsi_cmd_to_rq(scmd)->tag);
+ scsi_print_command(scmd);
+
+ if (adapter->scan_dev_desc.driver_loading ||
+ adapter->access_ctrl.host_removing) {
+ dev_err(&adapter->pdev->dev,
+ "EH HOST RESET failed: %s scmd=0x%p\n",
+ (adapter->access_ctrl.host_removing ?
+ "shost removing!" : "driver loading!"), scmd);
+ rc = FAILED;
+ goto out;
+ }
+
+ dev_info(&adapter->pdev->dev, "%s:%d issuing hard reset\n",
+ __func__, __LINE__);
+ if (leapraid_hard_reset_handler(adapter, FULL_RESET) < 0)
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+
+out:
+ dev_info(&adapter->pdev->dev, "EH HOST RESET result: %s, scmd=0x%p\n",
+ ((rc == SUCCESS) ? "success" : "failed"), scmd);
+ return rc;
+}
+
+static int leapraid_slave_alloc(struct scsi_device *sdev)
+{
+ struct leapraid_raid_volume *raid_volume;
+ struct leapraid_starget_priv *stgt_priv;
+ struct leapraid_sdev_priv *sdev_priv;
+ struct leapraid_adapter *adapter;
+ struct leapraid_sas_dev *sas_dev;
+ struct scsi_target *tgt;
+ struct Scsi_Host *shost;
+ unsigned long flags;
+
+ sdev_priv = kzalloc(sizeof(*sdev_priv), GFP_KERNEL);
+ if (!sdev_priv)
+ return -ENOMEM;
+
+ sdev_priv->lun = sdev->lun;
+ sdev_priv->flg = LEAPRAID_DEVICE_FLG_INIT;
+ tgt = scsi_target(sdev);
+ stgt_priv = tgt->hostdata;
+ stgt_priv->num_luns++;
+ sdev_priv->starget_priv = stgt_priv;
+ sdev->hostdata = sdev_priv;
+	if (stgt_priv->flg & LEAPRAID_TGT_FLG_RAID_MEMBER)
+ sdev->no_uld_attach = LEAPRAID_NO_ULD_ATTACH;
+
+ shost = dev_to_shost(&tgt->dev);
+ adapter = shost_priv(shost);
+ if (tgt->channel == RAID_CHANNEL) {
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_id(adapter,
+ tgt->id,
+ tgt->channel);
+ if (raid_volume)
+ raid_volume->sdev = sdev;
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock,
+ flags);
+ }
+
+ if (!(stgt_priv->flg & LEAPRAID_TGT_FLG_VOLUME)) {
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(adapter,
+ stgt_priv->sas_address,
+ stgt_priv->card_port);
+ if (sas_dev && !sas_dev->starget) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: assign starget to sas_dev\n", __func__);
+ sas_dev->starget = tgt;
+ }
+
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ }
+ return 0;
+}
+
+static int leapraid_slave_cfg_volume(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ struct leapraid_raid_volume *raid_volume;
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_sdev_priv *sdev_priv;
+ unsigned long flags;
+ int qd;
+ u16 hdl;
+
+ sdev_priv = sdev->hostdata;
+ starget_priv = sdev_priv->starget_priv;
+ hdl = starget_priv->hdl;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_hdl(adapter, hdl);
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+ if (!raid_volume) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: raid_volume not found, hdl=0x%x\n",
+ __func__, hdl);
+ return 1;
+ }
+
+ if (leapraid_get_volume_cap(adapter, raid_volume)) {
+ sdev_printk(KERN_ERR, sdev,
+ "%s: failed to get volume cap, hdl=0x%x\n",
+ __func__, hdl);
+ return 1;
+ }
+
+ qd = (raid_volume->dev_info & LEAPRAID_DEVTYP_SSP_TGT) ?
+ LEAPRAID_SAS_QUEUE_DEPTH : LEAPRAID_SATA_QUEUE_DEPTH;
+ if (raid_volume->vol_type != LEAPRAID_VOL_TYPE_RAID0)
+ qd = LEAPRAID_RAID_QUEUE_DEPTH;
+
+ sdev_printk(KERN_INFO, sdev,
+ "raid volume: hdl=0x%04x, wwid=0x%016llx\n",
+ raid_volume->hdl, (unsigned long long)raid_volume->wwid);
+
+ if (shost->max_sectors > LEAPRAID_MAX_SECTORS)
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ LEAPRAID_MAX_SECTORS);
+
+ leapraid_adjust_sdev_queue_depth(sdev, qd);
+ return 0;
+}
+
+static int leapraid_slave_configure_extra(struct scsi_device *sdev,
+ struct leapraid_sas_dev **psas_dev,
+ u16 vol_hdl, u64 volume_wwid,
+ bool *is_target_ssp, int *qd)
+{
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_sdev_priv *sdev_priv;
+ struct Scsi_Host *shost = sdev->host;
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ unsigned long flags;
+
+ sdev_priv = sdev->hostdata;
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ *is_target_ssp = false;
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(adapter,
+ sdev_priv->starget_priv->sas_address,
+ sdev_priv->starget_priv->card_port);
+ if (!sas_dev) {
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sas_dev not found, sas=0x%llx\n",
+ __func__, sdev_priv->starget_priv->sas_address);
+ return 1;
+ }
+
+ *psas_dev = sas_dev;
+ sas_dev->volume_hdl = vol_hdl;
+ sas_dev->volume_wwid = volume_wwid;
+ if (sas_dev->dev_info & LEAPRAID_DEVTYP_SSP_TGT) {
+ *qd = (sas_dev->port_type > 1) ?
+ adapter->adapter_attr.wideport_max_queue_depth :
+ adapter->adapter_attr.narrowport_max_queue_depth;
+ *is_target_ssp = true;
+ if (sas_dev->dev_info & LEAPRAID_DEVTYP_SEP)
+ sdev_priv->sep = true;
+ } else {
+ *qd = adapter->adapter_attr.sata_max_queue_depth;
+ }
+
+ sdev_printk(KERN_INFO, sdev,
+ "sdev: dev name=0x%016llx, sas addr=0x%016llx\n",
+ (unsigned long long)sas_dev->dev_name,
+ (unsigned long long)sas_dev->sas_addr);
+ leapraid_sdev_put(sas_dev);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return 0;
+}
+
+static int leapraid_slave_configure(struct scsi_device *sdev)
+{
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_sdev_priv *sdev_priv;
+ struct Scsi_Host *shost = sdev->host;
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_adapter *adapter;
+ u16 hdl, vol_hdl = 0;
+ bool is_target_ssp = false;
+ u64 volume_wwid = 0;
+ int qd = 1;
+
+ adapter = shost_priv(shost);
+ sdev_priv = sdev->hostdata;
+ sdev_priv->flg &= ~LEAPRAID_DEVICE_FLG_INIT;
+ starget_priv = sdev_priv->starget_priv;
+ hdl = starget_priv->hdl;
+ if (starget_priv->flg & LEAPRAID_TGT_FLG_VOLUME)
+ return leapraid_slave_cfg_volume(sdev);
+
+ if (starget_priv->flg & LEAPRAID_TGT_FLG_RAID_MEMBER) {
+ if (leapraid_cfg_get_volume_hdl(adapter, hdl, &vol_hdl)) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: get volume hdl failed, hdl=0x%x\n",
+ __func__, hdl);
+ return 1;
+ }
+
+ if (vol_hdl && leapraid_cfg_get_volume_wwid(adapter, vol_hdl,
+ &volume_wwid)) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: get wwid failed, volume_hdl=0x%x\n",
+ __func__, vol_hdl);
+ return 1;
+ }
+ }
+
+ if (leapraid_slave_configure_extra(sdev, &sas_dev, vol_hdl,
+ volume_wwid, &is_target_ssp, &qd)) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: slave_configure_extra failed\n", __func__);
+ return 1;
+ }
+
+ leapraid_adjust_sdev_queue_depth(sdev, qd);
+ if (is_target_ssp)
+ sas_read_port_mode_page(sdev);
+
+ return 0;
+}
+
+static void leapraid_slave_destroy(struct scsi_device *sdev)
+{
+ struct leapraid_adapter *adapter;
+ struct Scsi_Host *shost;
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_starget_priv *starget_priv;
+ struct scsi_target *stgt;
+ unsigned long flags;
+
+ if (!sdev->hostdata)
+ return;
+
+ stgt = scsi_target(sdev);
+ starget_priv = stgt->hostdata;
+ starget_priv->num_luns--;
+ shost = dev_to_shost(&stgt->dev);
+ adapter = shost_priv(shost);
+ if (!(starget_priv->flg & LEAPRAID_TGT_FLG_VOLUME)) {
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_from_tgt(adapter,
+ starget_priv);
+ if (sas_dev && !starget_priv->num_luns)
+ sas_dev->starget = NULL;
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ }
+
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+static int leapraid_target_alloc_raid(struct scsi_target *tgt)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_raid_volume *raid_volume;
+ struct Scsi_Host *shost = dev_to_shost(&tgt->dev);
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ unsigned long flags;
+
+ starget_priv = (struct leapraid_starget_priv *)tgt->hostdata;
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_id(adapter, tgt->id,
+ tgt->channel);
+ if (raid_volume) {
+ starget_priv->hdl = raid_volume->hdl;
+ starget_priv->sas_address = raid_volume->wwid;
+ starget_priv->flg |= LEAPRAID_TGT_FLG_VOLUME;
+ raid_volume->starget = tgt;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+ return 0;
+}
+
+static int leapraid_target_alloc_sas(struct scsi_target *tgt)
+{
+ struct sas_rphy *rphy;
+ struct Scsi_Host *shost;
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_adapter *adapter;
+ struct leapraid_starget_priv *starget_priv;
+ unsigned long flags;
+
+ shost = dev_to_shost(&tgt->dev);
+ adapter = shost_priv(shost);
+ starget_priv = (struct leapraid_starget_priv *)tgt->hostdata;
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ rphy = dev_to_rphy(tgt->dev.parent);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr_and_rphy(adapter,
+ rphy->identify.sas_address,
+ rphy);
+ if (sas_dev) {
+ starget_priv->sas_dev = sas_dev;
+ starget_priv->card_port = sas_dev->card_port;
+ starget_priv->sas_address = sas_dev->sas_addr;
+ starget_priv->hdl = sas_dev->hdl;
+ sas_dev->channel = tgt->channel;
+ sas_dev->id = tgt->id;
+ sas_dev->starget = tgt;
+ if (test_bit(sas_dev->hdl,
+ (unsigned long *)adapter->dev_topo.pd_hdls))
+ starget_priv->flg |= LEAPRAID_TGT_FLG_RAID_MEMBER;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ return 0;
+}
+
+static int leapraid_target_alloc(struct scsi_target *tgt)
+{
+ struct leapraid_starget_priv *starget_priv;
+
+ starget_priv = kzalloc(sizeof(*starget_priv), GFP_KERNEL);
+ if (!starget_priv)
+ return -ENOMEM;
+
+ tgt->hostdata = starget_priv;
+ starget_priv->starget = tgt;
+ starget_priv->hdl = LEAPRAID_INVALID_DEV_HANDLE;
+ if (tgt->channel == RAID_CHANNEL)
+ return leapraid_target_alloc_raid(tgt);
+
+ return leapraid_target_alloc_sas(tgt);
+}
+
+static void leapraid_target_destroy_raid(struct scsi_target *tgt)
+{
+ struct leapraid_raid_volume *raid_volume;
+ struct Scsi_Host *shost = dev_to_shost(&tgt->dev);
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_id(adapter, tgt->id,
+ tgt->channel);
+ if (raid_volume) {
+ raid_volume->starget = NULL;
+ raid_volume->sdev = NULL;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+}
+
+static void leapraid_target_destroy_sas(struct scsi_target *tgt)
+{
+ struct leapraid_adapter *adapter;
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_starget_priv *starget_priv;
+ struct Scsi_Host *shost;
+ unsigned long flags;
+
+ shost = dev_to_shost(&tgt->dev);
+ adapter = shost_priv(shost);
+ starget_priv = tgt->hostdata;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_from_tgt(adapter,
+ starget_priv);
+ if (sas_dev &&
+ sas_dev->starget == tgt &&
+ sas_dev->id == tgt->id &&
+ sas_dev->channel == tgt->channel)
+ sas_dev->starget = NULL;
+
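+	/* put twice: once for this lookup and once for the reference cached in starget_priv */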
+ if (sas_dev) {
+ starget_priv->sas_dev = NULL;
+ leapraid_sdev_put(sas_dev);
+ leapraid_sdev_put(sas_dev);
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+}
+
+static void leapraid_target_destroy(struct scsi_target *tgt)
+{
+ struct leapraid_starget_priv *starget_priv;
+
+ starget_priv = tgt->hostdata;
+ if (!starget_priv)
+ return;
+
+ if (tgt->channel == RAID_CHANNEL) {
+ leapraid_target_destroy_raid(tgt);
+ goto out;
+ }
+
+ leapraid_target_destroy_sas(tgt);
+
+out:
+ kfree(starget_priv);
+ tgt->hostdata = NULL;
+}
+
+static bool leapraid_scan_check_status(struct leapraid_adapter *adapter,
+ bool *need_hard_reset)
+{
+ u32 adapter_state;
+
+ if (adapter->scan_dev_desc.scan_start) {
+ adapter_state = leapraid_get_adapter_state(adapter);
+ if (adapter_state == LEAPRAID_DB_FAULT) {
+ *need_hard_reset = true;
+ return true;
+ }
+ return false;
+ }
+
+ if (adapter->driver_cmds.scan_dev_cmd.status & LEAPRAID_CMD_RESET) {
+ dev_err(&adapter->pdev->dev,
+ "device scan: aborted due to reset\n");
+ adapter->driver_cmds.scan_dev_cmd.status =
+ LEAPRAID_CMD_NOT_USED;
+ adapter->scan_dev_desc.driver_loading = false;
+ return true;
+ }
+
+ if (adapter->scan_dev_desc.scan_start_failed) {
+ dev_err(&adapter->pdev->dev,
+ "device scan: failed with adapter_status=0x%08x\n",
+ adapter->scan_dev_desc.scan_start_failed);
+ adapter->scan_dev_desc.driver_loading = false;
+ adapter->scan_dev_desc.wait_scan_dev_done = false;
+ adapter->access_ctrl.host_removing = true;
+ return true;
+ }
+
+ dev_info(&adapter->pdev->dev, "device scan: SUCCESS\n");
+ adapter->driver_cmds.scan_dev_cmd.status = LEAPRAID_CMD_NOT_USED;
+ leapraid_scan_dev_done(adapter);
+ return true;
+}
+
+static int leapraid_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ bool need_hard_reset = false;
+
+ if (time >= (LEAPRAID_SCAN_DEV_CMD_TIMEOUT * HZ)) {
+ adapter->driver_cmds.scan_dev_cmd.status =
+ LEAPRAID_CMD_NOT_USED;
+ dev_err(&adapter->pdev->dev,
+ "device scan: failed with timeout 300s\n");
+ adapter->scan_dev_desc.driver_loading = false;
+ return 1;
+ }
+
+ if (!leapraid_scan_check_status(adapter, &need_hard_reset))
+ return 0;
+
+ if (need_hard_reset) {
+ adapter->driver_cmds.scan_dev_cmd.status =
+ LEAPRAID_CMD_NOT_USED;
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ if (leapraid_hard_reset_handler(adapter, PART_RESET))
+ adapter->scan_dev_desc.driver_loading = false;
+ }
+
+ return 1;
+}
+
+static void leapraid_scan_start(struct Scsi_Host *shost)
+{
+ struct leapraid_adapter *adapter = shost_priv(shost);
+
+ adapter->scan_dev_desc.scan_start = true;
+ leapraid_scan_dev(adapter, true);
+}
+
+static int leapraid_calc_max_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ struct Scsi_Host *shost;
+ int max_depth;
+
+ shost = sdev->host;
+ max_depth = shost->can_queue;
+
+ if (!sdev->tagged_supported)
+ max_depth = 1;
+
+ if (qdepth > max_depth)
+ qdepth = max_depth;
+
+ return qdepth;
+}
+
+static int leapraid_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ qdepth = leapraid_calc_max_queue_depth(sdev, qdepth);
+ scsi_change_queue_depth(sdev, qdepth);
+ return sdev->queue_depth;
+}
+
+void leapraid_adjust_sdev_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ leapraid_change_queue_depth(sdev, qdepth);
+}
+
+static void leapraid_map_queues(struct Scsi_Host *shost)
+{
+ struct leapraid_adapter *adapter;
+ struct blk_mq_queue_map *queue_map;
+ int msix_queue_count;
+ int poll_queue_count;
+ int queue_offset;
+ int map_index;
+
+	adapter = shost_priv(shost);
+ if (shost->nr_hw_queues == 1)
+ goto out;
+
+ msix_queue_count = adapter->notification_desc.iopoll_qdex;
+ poll_queue_count = adapter->adapter_attr.rq_cnt - msix_queue_count;
+
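+	/* interrupt-driven queues are mapped first; polled queues take the rest */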
+ queue_offset = 0;
+ for (map_index = 0; map_index < shost->nr_maps; map_index++) {
+ queue_map = &shost->tag_set.map[map_index];
+ queue_map->nr_queues = 0;
+
+ switch (map_index) {
+ case HCTX_TYPE_DEFAULT:
+ queue_map->nr_queues = msix_queue_count;
+ queue_map->queue_offset = queue_offset;
+ BUG_ON(!queue_map->nr_queues);
+ blk_mq_pci_map_queues(queue_map, adapter->pdev, 0);
+ break;
+ case HCTX_TYPE_POLL:
+ queue_map->nr_queues = poll_queue_count;
+ queue_map->queue_offset = queue_offset;
+ blk_mq_map_queues(queue_map);
+ break;
+ default:
+ queue_map->queue_offset = queue_offset;
+ blk_mq_pci_map_queues(queue_map, adapter->pdev, 0);
+ break;
+ }
+ queue_offset += queue_map->nr_queues;
+ }
+
+out:
+ return;
+}
+
+int leapraid_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+	struct leapraid_adapter *adapter = shost_priv(shost);
+ struct leapraid_blk_mq_poll_rq *blk_mq_poll_rq;
+ int num_entries;
+ int qid = queue_num - adapter->notification_desc.iopoll_qdex;
+
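+	/* skip if polling is paused or another context already holds this queue */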
+	blk_mq_poll_rq = &adapter->notification_desc.blk_mq_poll_rqs[qid];
+	if (atomic_read(&blk_mq_poll_rq->pause) ||
+	    !atomic_add_unless(&blk_mq_poll_rq->busy, 1, 1))
+		return 0;
+
+	num_entries = leapraid_rep_queue_handler(&blk_mq_poll_rq->rq);
+	atomic_dec(&blk_mq_poll_rq->busy);
+ return num_entries;
+}
+
+static int leapraid_bios_param(struct scsi_device *sdev,
+ struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ int heads = 0;
+ int sectors = 0;
+ sector_t cylinders;
+
+ if (scsi_partsize(bdev, capacity, geom))
+ return 0;
+
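+	/* no partition table available: derive a synthetic CHS geometry from capacity */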
+ if ((ulong)capacity >= LEAPRAID_LARGE_DISK_THRESHOLD) {
+ heads = LEAPRAID_LARGE_DISK_HEADS;
+ sectors = LEAPRAID_LARGE_DISK_SECTORS;
+ } else {
+ heads = LEAPRAID_SMALL_DISK_HEADS;
+ sectors = LEAPRAID_SMALL_DISK_SECTORS;
+ }
+
+ cylinders = capacity;
+ sector_div(cylinders, heads * sectors);
+
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+ return 0;
+}
+
+static ssize_t fw_queue_depth_show(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct leapraid_adapter *adapter = shost_priv(shost);
+
+ return sysfs_emit(buf, "%02d\n",
+ adapter->adapter_attr.features.req_slot);
+}
+
+static ssize_t host_sas_address_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct leapraid_adapter *adapter = shost_priv(shost);
+
+ return sysfs_emit(buf, "0x%016llx\n",
+ (unsigned long long)adapter->dev_topo.card.sas_address);
+}
+
+static DEVICE_ATTR_RO(fw_queue_depth);
+static DEVICE_ATTR_RO(host_sas_address);
+
+static struct attribute *leapraid_shost_attrs[] = {
+ &dev_attr_fw_queue_depth.attr,
+ &dev_attr_host_sas_address.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(leapraid_shost);
+
+static ssize_t sas_address_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct leapraid_sdev_priv *sas_device_priv_data = sdev->hostdata;
+
+ return sysfs_emit(buf, "0x%016llx\n",
+ (unsigned long long)sas_device_priv_data->starget_priv->sas_address);
+}
+
+static ssize_t sas_device_handle_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct leapraid_sdev_priv *sas_device_priv_data = sdev->hostdata;
+
+ return sysfs_emit(buf, "0x%04x\n",
+ sas_device_priv_data->starget_priv->hdl);
+}
+
+static ssize_t sas_ncq_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct leapraid_sdev_priv *sas_device_priv_data = sdev->hostdata;
+
+ return sysfs_emit(buf, "%d\n", sas_device_priv_data->ncq);
+}
+
+static ssize_t sas_ncq_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct leapraid_sdev_priv *sas_device_priv_data = sdev->hostdata;
+ struct scsi_vpd *vpd_pg89;
+ int ncq_op = 0;
+ bool ncq_supported = false;
+
+ if (kstrtoint(buf, 0, &ncq_op))
+ goto out;
+
+ rcu_read_lock();
+ vpd_pg89 = rcu_dereference(sdev->vpd_pg89);
+ if (!vpd_pg89 || vpd_pg89->len < LEAPRAID_VPD_PG89_MIN_LEN) {
+ rcu_read_unlock();
+ goto out;
+ }
+
+ ncq_supported = (vpd_pg89->data[LEAPRAID_VPD_PG89_NCQ_BYTE_IDX] >>
+ LEAPRAID_VPD_PG89_NCQ_BIT_SHIFT) &
+ LEAPRAID_VPD_PG89_NCQ_BIT_MASK;
+ rcu_read_unlock();
+ if (ncq_supported)
+ sas_device_priv_data->ncq = ncq_op;
+ return strlen(buf);
+out:
+ return -EINVAL;
+}
+
+static DEVICE_ATTR_RO(sas_address);
+static DEVICE_ATTR_RO(sas_device_handle);
+
+static DEVICE_ATTR_RW(sas_ncq);
+
+static struct attribute *leapraid_sdev_attrs[] = {
+ &dev_attr_sas_address.attr,
+ &dev_attr_sas_device_handle.attr,
+ &dev_attr_sas_ncq.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(leapraid_sdev);
+
+static struct scsi_host_template leapraid_driver_template = {
+ .module = THIS_MODULE,
+ .name = "LEAPIO RAID Host",
+ .proc_name = LEAPRAID_DRIVER_NAME,
+ .queuecommand = leapraid_queuecommand,
+ .cmd_size = sizeof(struct leapraid_io_req_tracker),
+ .init_cmd_priv = leapraid_init_cmd_priv,
+ .exit_cmd_priv = leapraid_exit_cmd_priv,
+ .eh_abort_handler = leapraid_eh_abort_handler,
+ .eh_device_reset_handler = leapraid_eh_device_reset_handler,
+ .eh_target_reset_handler = leapraid_eh_target_reset_handler,
+ .eh_host_reset_handler = leapraid_eh_host_reset_handler,
+ .slave_alloc = leapraid_slave_alloc,
+ .slave_destroy = leapraid_slave_destroy,
+ .slave_configure = leapraid_slave_configure,
+ .target_alloc = leapraid_target_alloc,
+ .target_destroy = leapraid_target_destroy,
+ .scan_finished = leapraid_scan_finished,
+ .scan_start = leapraid_scan_start,
+ .change_queue_depth = leapraid_change_queue_depth,
+ .map_queues = leapraid_map_queues,
+ .mq_poll = leapraid_blk_mq_poll,
+ .bios_param = leapraid_bios_param,
+ .can_queue = LEAPRAID_CAN_QUEUE_MIN,
+ .this_id = LEAPRAID_THIS_ID_NONE,
+ .sg_tablesize = LEAPRAID_SG_DEPTH,
+ .max_sectors = LEAPRAID_DEF_MAX_SECTORS,
+ .max_segment_size = LEAPRAID_MAX_SEGMENT_SIZE,
+ .cmd_per_lun = LEAPRAID_CMD_PER_LUN,
+ .shost_groups = leapraid_shost_groups,
+ .sdev_groups = leapraid_sdev_groups,
+ .track_queue_depth = 1,
+};
+
+static void leapraid_lock_init(struct leapraid_adapter *adapter)
+{
+ mutex_init(&adapter->reset_desc.adapter_reset_mutex);
+ mutex_init(&adapter->reset_desc.host_diag_mutex);
+ mutex_init(&adapter->access_ctrl.pci_access_lock);
+
+ spin_lock_init(&adapter->reset_desc.adapter_reset_lock);
+ spin_lock_init(&adapter->dynamic_task_desc.task_lock);
+ spin_lock_init(&adapter->dev_topo.sas_dev_lock);
+ spin_lock_init(&adapter->dev_topo.topo_node_lock);
+ spin_lock_init(&adapter->fw_evt_s.fw_evt_lock);
+ spin_lock_init(&adapter->dev_topo.raid_volume_lock);
+}
+
+static void leapraid_list_init(struct leapraid_adapter *adapter)
+{
+ INIT_LIST_HEAD(&adapter->dev_topo.sas_dev_list);
+ INIT_LIST_HEAD(&adapter->dev_topo.card_port_list);
+ INIT_LIST_HEAD(&adapter->dev_topo.sas_dev_init_list);
+ INIT_LIST_HEAD(&adapter->dev_topo.exp_list);
+ INIT_LIST_HEAD(&adapter->dev_topo.enc_list);
+ INIT_LIST_HEAD(&adapter->fw_evt_s.fw_evt_list);
+ INIT_LIST_HEAD(&adapter->dev_topo.raid_volume_list);
+ INIT_LIST_HEAD(&adapter->dev_topo.card.sas_port_list);
+}
+
+static int leapraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct leapraid_adapter *adapter = NULL;
+ struct Scsi_Host *shost = NULL;
+ int iopoll_q_count = 0;
+ int rc;
+
+ shost = scsi_host_alloc(&leapraid_driver_template,
+ sizeof(struct leapraid_adapter));
+ if (!shost)
+ return -ENODEV;
+
+	adapter = shost_priv(shost);
+	memset(adapter, 0, sizeof(struct leapraid_adapter));
+	adapter->adapter_attr.id = leapraid_ids++;
+	adapter->adapter_attr.enable_mp = enable_mp;
+
+	INIT_LIST_HEAD(&adapter->list);
+ spin_lock(&leapraid_adapter_lock);
+ list_add_tail(&adapter->list, &leapraid_adapter_list);
+ spin_unlock(&leapraid_adapter_lock);
+
+ adapter->shost = shost;
+ adapter->pdev = pdev;
+ adapter->fw_log_desc.open_pcie_trace = open_pcie_trace;
+ leapraid_lock_init(adapter);
+ leapraid_list_init(adapter);
+ sprintf(adapter->adapter_attr.name, "%s%d",
+ LEAPRAID_DRIVER_NAME, adapter->adapter_attr.id);
+
+ shost->max_cmd_len = LEAPRAID_MAX_CDB_LEN;
+ shost->max_lun = LEAPRAID_MAX_LUNS;
+ shost->transportt = leapraid_transport_template;
+ shost->unique_id = adapter->adapter_attr.id;
+
+ snprintf(adapter->fw_evt_s.fw_evt_name,
+ sizeof(adapter->fw_evt_s.fw_evt_name),
+ "fw_event_%s%d", LEAPRAID_DRIVER_NAME,
+ adapter->adapter_attr.id);
+ adapter->fw_evt_s.fw_evt_thread =
+ alloc_ordered_workqueue(adapter->fw_evt_s.fw_evt_name, 0);
+ if (!adapter->fw_evt_s.fw_evt_thread) {
+ rc = -ENODEV;
+ goto evt_wq_fail;
+ }
+
+ shost->host_tagset = 1;
+ adapter->scan_dev_desc.driver_loading = true;
+ if ((leapraid_ctrl_init(adapter))) {
+ rc = -ENODEV;
+ goto ctrl_init_fail;
+ }
+
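+	/*
+	 * With host_tagset the tag space is shared across all hardware
+	 * queues; expose the three blk-mq queue maps (default/read/poll)
+	 * only when dedicated poll queues are present.
+	 */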
+ shost->nr_hw_queues = 1;
+ if (shost->host_tagset) {
+ shost->nr_hw_queues = adapter->adapter_attr.rq_cnt;
+ iopoll_q_count = adapter->adapter_attr.rq_cnt -
+ adapter->notification_desc.iopoll_qdex;
+ shost->nr_maps = iopoll_q_count ? 3 : 1;
+ dev_info(&adapter->pdev->dev,
+ "max scsi io cmds %d shared with nr_hw_queues=%d\n",
+ shost->can_queue, shost->nr_hw_queues);
+ }
+
+	rc = scsi_add_host(shost, &pdev->dev);
+	if (rc)
+		goto scsi_add_shost_fail;
+
+ scsi_scan_host(shost);
+ return 0;
+
+scsi_add_shost_fail:
+ leapraid_remove_ctrl(adapter);
+ctrl_init_fail:
+ destroy_workqueue(adapter->fw_evt_s.fw_evt_thread);
+evt_wq_fail:
+ spin_lock(&leapraid_adapter_lock);
+ list_del(&adapter->list);
+ spin_unlock(&leapraid_adapter_lock);
+ scsi_host_put(shost);
+ return rc;
+}
+
+static void leapraid_cleanup_lists(struct leapraid_adapter *adapter)
+{
+ struct leapraid_raid_volume *raid_volume, *next_raid_volume;
+ struct leapraid_starget_priv *starget_priv_data;
+ struct leapraid_sas_port *leapraid_port, *next_port;
+ struct leapraid_card_port *port, *port_next;
+ struct leapraid_vphy *vphy, *vphy_next;
+
+ list_for_each_entry_safe(raid_volume, next_raid_volume,
+ &adapter->dev_topo.raid_volume_list, list) {
+ if (raid_volume->starget) {
+ starget_priv_data = raid_volume->starget->hostdata;
+ starget_priv_data->deleted = true;
+ scsi_remove_target(&raid_volume->starget->dev);
+ }
+ pr_info("removing hdl=0x%04x, wwid=0x%016llx\n",
+ raid_volume->hdl,
+ (unsigned long long)raid_volume->wwid);
+ leapraid_raid_volume_remove(adapter, raid_volume);
+ }
+
+ list_for_each_entry_safe(leapraid_port, next_port,
+ &adapter->dev_topo.card.sas_port_list,
+ port_list) {
+ if (leapraid_port->remote_identify.device_type ==
+ SAS_END_DEVICE)
+ leapraid_sas_dev_remove_by_sas_address(adapter,
+ leapraid_port->remote_identify.sas_address,
+ leapraid_port->card_port);
+ else if (leapraid_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ leapraid_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ leapraid_exp_rm(adapter,
+ leapraid_port->remote_identify.sas_address,
+ leapraid_port->card_port);
+ }
+
+ list_for_each_entry_safe(port, port_next,
+ &adapter->dev_topo.card_port_list, list) {
+ if (port->vphys_mask)
+ list_for_each_entry_safe(vphy, vphy_next,
+ &port->vphys_list, list) {
+ list_del(&vphy->list);
+ kfree(vphy);
+ }
+ list_del(&port->list);
+ kfree(port);
+ }
+
+ if (adapter->dev_topo.card.phys_num) {
+ kfree(adapter->dev_topo.card.card_phy);
+ adapter->dev_topo.card.card_phy = NULL;
+ adapter->dev_topo.card.phys_num = 0;
+ }
+}
+
+static void leapraid_remove(struct pci_dev *pdev)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev, "unable to remove!\n");
+ return;
+ }
+
+ while (adapter->scan_dev_desc.driver_loading)
+ ssleep(1);
+
+ while (adapter->access_ctrl.shost_recovering)
+ ssleep(1);
+
+ adapter->access_ctrl.host_removing = true;
+
+ leapraid_wait_cmds_done(adapter);
+
+ leapraid_smart_polling_stop(adapter);
+ leapraid_free_internal_scsi_cmd(adapter);
+
+ if (leapraid_pci_removed(adapter)) {
+ leapraid_mq_polling_pause(adapter);
+ leapraid_clean_active_scsi_cmds(adapter);
+ }
+ leapraid_clean_active_fw_evt(adapter);
+
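+	/*
+	 * Detach the event workqueue pointer under its lock so no new work
+	 * can be queued, then destroy the workqueue outside the lock.
+	 */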
+ spin_lock_irqsave(&adapter->fw_evt_s.fw_evt_lock, flags);
+ wq = adapter->fw_evt_s.fw_evt_thread;
+ adapter->fw_evt_s.fw_evt_thread = NULL;
+ spin_unlock_irqrestore(&adapter->fw_evt_s.fw_evt_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ leapraid_ir_shutdown(adapter);
+ sas_remove_host(shost);
+ leapraid_cleanup_lists(adapter);
+ leapraid_remove_ctrl(adapter);
+ spin_lock(&leapraid_adapter_lock);
+ list_del(&adapter->list);
+ spin_unlock(&leapraid_adapter_lock);
+ scsi_host_put(shost);
+}
+
+static void leapraid_shutdown(struct pci_dev *pdev)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev, "unable to shutdown!\n");
+ return;
+ }
+
+ adapter->access_ctrl.host_removing = true;
+ leapraid_wait_cmds_done(adapter);
+ leapraid_clean_active_fw_evt(adapter);
+
+ spin_lock_irqsave(&adapter->fw_evt_s.fw_evt_lock, flags);
+ wq = adapter->fw_evt_s.fw_evt_thread;
+ adapter->fw_evt_s.fw_evt_thread = NULL;
+ spin_unlock_irqrestore(&adapter->fw_evt_s.fw_evt_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ leapraid_ir_shutdown(adapter);
+ leapraid_disable_controller(adapter);
+}
+
+static pci_ers_result_t leapraid_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev, "failed to error detected for device\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pr_err("%s: pci error detected, state=%d\n",
+ adapter->adapter_attr.name, state);
+
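+	/*
+	 * io_frozen: MMIO is disabled, so quiesce the host and controller
+	 * and request a slot reset; perm_failure: the device is lost, so
+	 * fail all outstanding commands and disconnect.
+	 */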
+ switch (state) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ adapter->access_ctrl.pcie_recovering = true;
+ scsi_block_requests(adapter->shost);
+ leapraid_smart_polling_stop(adapter);
+ leapraid_check_scheduled_fault_stop(adapter);
+ leapraid_fw_log_stop(adapter);
+ leapraid_disable_controller(adapter);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ adapter->access_ctrl.pcie_recovering = true;
+ leapraid_smart_polling_stop(adapter);
+ leapraid_check_scheduled_fault_stop(adapter);
+ leapraid_fw_log_stop(adapter);
+ leapraid_mq_polling_pause(adapter);
+ leapraid_clean_active_scsi_cmds(adapter);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t leapraid_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev,
+ "failed to enable mmio for device\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ dev_info(&pdev->dev, "%s: pci error mmio enabled\n",
+ adapter->adapter_attr.name);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t leapraid_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+ int rc;
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev,
+ "failed to slot reset for device\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ dev_err(&pdev->dev, "%s pci error slot reset\n",
+ adapter->adapter_attr.name);
+
+ adapter->access_ctrl.pcie_recovering = false;
+ adapter->pdev = pdev;
+ pci_restore_state(pdev);
+ if (leapraid_set_pcie_and_notification(adapter))
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ dev_info(&pdev->dev, "%s: hard reset triggered by pci slot reset\n",
+ adapter->adapter_attr.name);
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ rc = leapraid_hard_reset_handler(adapter, FULL_RESET);
+ dev_info(&pdev->dev, "%s hard reset: %s\n",
+ adapter->adapter_attr.name, (rc == 0) ? "success" : "failed");
+
+ return (rc == 0) ? PCI_ERS_RESULT_RECOVERED :
+ PCI_ERS_RESULT_DISCONNECT;
+}
+
+static void leapraid_pci_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev, "failed to resume\n");
+ return;
+ }
+
+	dev_info(&pdev->dev, "PCI error resume\n");
+ pci_aer_clear_nonfatal_status(pdev);
+ leapraid_check_scheduled_fault_start(adapter);
+ leapraid_fw_log_start(adapter);
+ scsi_unblock_requests(adapter->shost);
+ leapraid_smart_polling_start(adapter);
+}
+
+MODULE_DEVICE_TABLE(pci, leapraid_pci_table);
+static struct pci_error_handlers leapraid_err_handler = {
+ .error_detected = leapraid_pci_error_detected,
+ .mmio_enabled = leapraid_pci_mmio_enabled,
+ .slot_reset = leapraid_pci_slot_reset,
+ .resume = leapraid_pci_resume,
+};
+
+#ifdef CONFIG_PM
+static int leapraid_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+ pci_power_t device_state;
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev,
+ "suspend failed, invalid host or adapter\n");
+ return -ENXIO;
+ }
+
+ leapraid_smart_polling_stop(adapter);
+ leapraid_check_scheduled_fault_stop(adapter);
+ leapraid_fw_log_stop(adapter);
+ scsi_block_requests(shost);
+ device_state = pci_choose_state(pdev, state);
+ leapraid_ir_shutdown(adapter);
+
+	dev_info(&pdev->dev, "entering PCI power state D%d (slot=%s)\n",
+		 device_state, pci_name(pdev));
+
+ pci_save_state(pdev);
+ leapraid_disable_controller(adapter);
+ pci_set_power_state(pdev, device_state);
+ return 0;
+}
+
+static int leapraid_resume(struct pci_dev *pdev)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+ pci_power_t device_state = pdev->current_state;
+ int rc;
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev,
+ "resume failed, invalid host or adapter\n");
+ return -ENXIO;
+ }
+
+ dev_info(&pdev->dev,
+ "resuming device %s, previous state D%d\n",
+ pci_name(pdev), device_state);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ adapter->pdev = pdev;
+ rc = leapraid_set_pcie_and_notification(adapter);
+ if (rc)
+ return rc;
+
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, PART_RESET);
+ scsi_unblock_requests(shost);
+ leapraid_check_scheduled_fault_start(adapter);
+ leapraid_fw_log_start(adapter);
+ leapraid_smart_polling_start(adapter);
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct pci_driver leapraid_driver = {
+ .name = LEAPRAID_DRIVER_NAME,
+ .id_table = leapraid_pci_table,
+ .probe = leapraid_probe,
+ .remove = leapraid_remove,
+ .shutdown = leapraid_shutdown,
+ .err_handler = &leapraid_err_handler,
+#ifdef CONFIG_PM
+ .suspend = leapraid_suspend,
+ .resume = leapraid_resume,
+#endif /* CONFIG_PM */
+};
+
+static int __init leapraid_init(void)
+{
+ int error;
+
+ pr_info("%s version %s loaded\n", LEAPRAID_DRIVER_NAME,
+ LEAPRAID_DRIVER_VERSION);
+
+ leapraid_transport_template =
+ sas_attach_transport(&leapraid_transport_functions);
+ if (!leapraid_transport_template)
+ return -ENODEV;
+
+ leapraid_ids = 0;
+
+ leapraid_ctl_init();
+
+ error = pci_register_driver(&leapraid_driver);
+ if (error)
+ sas_release_transport(leapraid_transport_template);
+
+ return error;
+}
+
+static void __exit leapraid_exit(void)
+{
+ pr_info("leapraid version %s unloading\n",
+ LEAPRAID_DRIVER_VERSION);
+
+ leapraid_ctl_exit();
+ pci_unregister_driver(&leapraid_driver);
+ sas_release_transport(leapraid_transport_template);
+}
+
+module_init(leapraid_init);
+module_exit(leapraid_exit);
diff --git a/drivers/scsi/leapraid/leapraid_transport.c b/drivers/scsi/leapraid/leapraid_transport.c
new file mode 100644
index 000000000000..d224449732a3
--- /dev/null
+++ b/drivers/scsi/leapraid/leapraid_transport.c
@@ -0,0 +1,1256 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 LeapIO Tech Inc.
+ *
+ * LeapRAID Storage and RAID Controller driver.
+ */
+
+#include <scsi/scsi_host.h>
+
+#include "leapraid_func.h"
+
+static struct leapraid_topo_node *leapraid_transport_topo_node_by_sas_addr(
+ struct leapraid_adapter *adapter,
+ u64 sas_addr,
+ struct leapraid_card_port *card_port)
+{
+ if (adapter->dev_topo.card.sas_address == sas_addr)
+ return &adapter->dev_topo.card;
+ else
+ return leapraid_exp_find_by_sas_address(adapter,
+ sas_addr,
+ card_port);
+}
+
+static u8 leapraid_get_port_id_by_expander(struct leapraid_adapter *adapter,
+ struct sas_rphy *rphy)
+{
+ struct leapraid_topo_node *topo_node_exp;
+ unsigned long flags;
+ u8 port_id = 0xFF;
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ list_for_each_entry(topo_node_exp, &adapter->dev_topo.exp_list, list) {
+ if (topo_node_exp->rphy == rphy) {
+ port_id = topo_node_exp->card_port->port_id;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ return port_id;
+}
+
+static u8 leapraid_get_port_id_by_end_dev(struct leapraid_adapter *adapter,
+ struct sas_rphy *rphy)
+{
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ u8 port_id = 0xFF;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr_and_rphy(adapter,
+ rphy->identify.sas_address,
+ rphy);
+ if (sas_dev) {
+ port_id = sas_dev->card_port->port_id;
+ leapraid_sdev_put(sas_dev);
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ return port_id;
+}
+
+static u8 leapraid_transport_get_port_id_by_rphy(
+ struct leapraid_adapter *adapter,
+ struct sas_rphy *rphy)
+{
+ if (!rphy)
+ return 0xFF;
+
+ switch (rphy->identify.device_type) {
+ case SAS_EDGE_EXPANDER_DEVICE:
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ return leapraid_get_port_id_by_expander(adapter, rphy);
+ case SAS_END_DEVICE:
+ return leapraid_get_port_id_by_end_dev(adapter, rphy);
+ default:
+ return 0xFF;
+ }
+}
+
+static enum sas_linkrate leapraid_transport_convert_phy_link_rate(u8 link_rate)
+{
+ unsigned int i;
+
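+	/*
+	 * Map firmware negotiated-link-rate codes onto the generic
+	 * sas_linkrate values used by the SAS transport class.
+	 */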
+ #define SAS_RATE_12G SAS_LINK_RATE_12_0_GBPS
+
+ const struct linkrate_map {
+ u8 in;
+ enum sas_linkrate out;
+ } linkrate_table[] = {
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_1_5,
+ SAS_LINK_RATE_1_5_GBPS
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_3_0,
+ SAS_LINK_RATE_3_0_GBPS
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_6_0,
+ SAS_LINK_RATE_6_0_GBPS
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_12_0,
+ SAS_RATE_12G
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_PHY_DISABLED,
+ SAS_PHY_DISABLED
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED,
+ SAS_LINK_RATE_FAILED
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_PORT_SELECTOR,
+ SAS_SATA_PORT_SELECTOR
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_SMP_RESETTING,
+ SAS_LINK_RATE_UNKNOWN
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE,
+ SAS_LINK_RATE_UNKNOWN
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE,
+ SAS_LINK_RATE_UNKNOWN
+ },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(linkrate_table); i++) {
+ if (linkrate_table[i].in == link_rate)
+ return linkrate_table[i].out;
+ }
+
+ return SAS_LINK_RATE_UNKNOWN;
+}
+
+static void leapraid_set_identify_protocol_flags(u32 dev_info,
+ struct sas_identify *identify)
+{
+ unsigned int i;
+
+ const struct protocol_mapping {
+ u32 mask;
+ u32 *target;
+ u32 protocol;
+ } mappings[] = {
+ {
+ LEAPRAID_DEVTYP_SSP_INIT,
+ &identify->initiator_port_protocols,
+ SAS_PROTOCOL_SSP
+ },
+ {
+ LEAPRAID_DEVTYP_STP_INIT,
+ &identify->initiator_port_protocols,
+ SAS_PROTOCOL_STP
+ },
+ {
+ LEAPRAID_DEVTYP_SMP_INIT,
+ &identify->initiator_port_protocols,
+ SAS_PROTOCOL_SMP
+ },
+ {
+ LEAPRAID_DEVTYP_SATA_HOST,
+ &identify->initiator_port_protocols,
+ SAS_PROTOCOL_SATA
+ },
+ {
+ LEAPRAID_DEVTYP_SSP_TGT,
+ &identify->target_port_protocols,
+ SAS_PROTOCOL_SSP
+ },
+ {
+ LEAPRAID_DEVTYP_STP_TGT,
+ &identify->target_port_protocols,
+ SAS_PROTOCOL_STP
+ },
+ {
+ LEAPRAID_DEVTYP_SMP_TGT,
+ &identify->target_port_protocols,
+ SAS_PROTOCOL_SMP
+ },
+ {
+ LEAPRAID_DEVTYP_SATA_DEV,
+ &identify->target_port_protocols,
+ SAS_PROTOCOL_SATA
+ },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(mappings); i++)
+ if ((dev_info & mappings[i].mask) && mappings[i].target)
+ *mappings[i].target |= mappings[i].protocol;
+}
+
+static int leapraid_transport_set_identify(struct leapraid_adapter *adapter,
+ u16 hdl,
+ struct sas_identify *identify)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_dev_p0 sas_dev_pg0;
+ u32 dev_info;
+
+ if ((adapter->access_ctrl.shost_recovering &&
+ !adapter->scan_dev_desc.driver_loading) ||
+ adapter->access_ctrl.pcie_recovering)
+ return -EFAULT;
+
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = hdl;
+ if ((leapraid_op_config_page(adapter, &sas_dev_pg0, cfgp1,
+ cfgp2, GET_SAS_DEVICE_PG0)))
+ return -ENXIO;
+
+ memset(identify, 0, sizeof(struct sas_identify));
+ dev_info = le32_to_cpu(sas_dev_pg0.dev_info);
+ identify->sas_address = le64_to_cpu(sas_dev_pg0.sas_address);
+ identify->phy_identifier = sas_dev_pg0.phy_num;
+
+ switch (dev_info & LEAPRAID_DEVTYP_MASK_DEV_TYPE) {
+ case LEAPRAID_DEVTYP_NO_DEV:
+ identify->device_type = SAS_PHY_UNUSED;
+ break;
+ case LEAPRAID_DEVTYP_END_DEV:
+ identify->device_type = SAS_END_DEVICE;
+ break;
+ case LEAPRAID_DEVTYP_EDGE_EXPANDER:
+ identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
+ break;
+ case LEAPRAID_DEVTYP_FANOUT_EXPANDER:
+ identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
+ break;
+ }
+
+ leapraid_set_identify_protocol_flags(dev_info, identify);
+
+ return 0;
+}
+
+static void leapraid_transport_exp_set_edev(struct leapraid_adapter *adapter,
+ void *data_out,
+ struct sas_expander_device *edev)
+{
+ struct leapraid_smp_passthrough_rep *smp_passthrough_rep;
+ struct leapraid_rep_manu_reply *rep_manu_reply;
+ u8 *component_id;
+ ssize_t __maybe_unused ret;
+
+ smp_passthrough_rep =
+ (void *)(&adapter->driver_cmds.transport_cmd.reply);
+ if (le16_to_cpu(smp_passthrough_rep->resp_data_len) !=
+ sizeof(struct leapraid_rep_manu_reply))
+ return;
+
+ rep_manu_reply = data_out + sizeof(struct leapraid_rep_manu_request);
+ ret = strscpy(edev->vendor_id, rep_manu_reply->vendor_identification,
+ SAS_EXPANDER_VENDOR_ID_LEN);
+ ret = strscpy(edev->product_id, rep_manu_reply->product_identification,
+ SAS_EXPANDER_PRODUCT_ID_LEN);
+ ret = strscpy(edev->product_rev,
+ rep_manu_reply->product_revision_level,
+ SAS_EXPANDER_PRODUCT_REV_LEN);
+ edev->level = rep_manu_reply->sas_format & 1;
+ if (edev->level) {
+ ret = strscpy(edev->component_vendor_id,
+ rep_manu_reply->component_vendor_identification,
+ SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
+
+ component_id = (u8 *)&rep_manu_reply->component_id;
+ edev->component_id = component_id[0] << 8 | component_id[1];
+ edev->component_revision_id =
+ rep_manu_reply->component_revision_level;
+ }
+}
+
+static int leapraid_transport_exp_report_manu(struct leapraid_adapter *adapter,
+ u64 sas_address,
+ struct sas_expander_device *edev,
+ u8 port_id)
+{
+ struct leapraid_smp_passthrough_req *smp_passthrough_req;
+ struct leapraid_rep_manu_request *rep_manu_request;
+ dma_addr_t h2c_dma_addr;
+ dma_addr_t c2h_dma_addr;
+ bool issue_reset = false;
+ void *data_out = NULL;
+ size_t c2h_size;
+ size_t h2c_size;
+ void *psge;
+ int rc = 0;
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering) {
+ return -EFAULT;
+ }
+
+ mutex_lock(&adapter->driver_cmds.transport_cmd.mutex);
+ adapter->driver_cmds.transport_cmd.status = LEAPRAID_CMD_PENDING;
+ rc = leapraid_check_adapter_is_op(adapter);
+ if (rc)
+ goto out;
+
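+	/*
+	 * The SMP request and reply share one coherent allocation; the
+	 * reply region starts immediately after the request.
+	 */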
+ h2c_size = sizeof(struct leapraid_rep_manu_request);
+ c2h_size = sizeof(struct leapraid_rep_manu_reply);
+ data_out = dma_alloc_coherent(&adapter->pdev->dev,
+ h2c_size + c2h_size,
+ &h2c_dma_addr,
+ GFP_ATOMIC);
+ if (!data_out) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rep_manu_request = data_out;
+ rep_manu_request->smp_frame_type =
+ SMP_REPORT_MANUFACTURER_INFORMATION_FRAME_TYPE;
+ rep_manu_request->function = SMP_REPORT_MANUFACTURER_INFORMATION_FUNC;
+ rep_manu_request->allocated_response_length = 0;
+ rep_manu_request->request_length = 0;
+
+ smp_passthrough_req =
+ leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.transport_cmd.inter_taskid);
+ memset(smp_passthrough_req, 0,
+ sizeof(struct leapraid_smp_passthrough_req));
+ smp_passthrough_req->func = LEAPRAID_FUNC_SMP_PASSTHROUGH;
+ smp_passthrough_req->physical_port = port_id;
+ smp_passthrough_req->sas_address = cpu_to_le64(sas_address);
+ smp_passthrough_req->req_data_len = cpu_to_le16(h2c_size);
+ psge = &smp_passthrough_req->sgl;
+ c2h_dma_addr = h2c_dma_addr + sizeof(struct leapraid_rep_manu_request);
+ leapraid_build_ieee_sg(adapter, psge, h2c_dma_addr, h2c_size,
+ c2h_dma_addr, c2h_size);
+
+ init_completion(&adapter->driver_cmds.transport_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.transport_cmd.inter_taskid);
+ wait_for_completion_timeout(&adapter->driver_cmds.transport_cmd.done,
+ LEAPRAID_TRANSPORT_CMD_TIMEOUT * HZ);
+ if (!(adapter->driver_cmds.transport_cmd.status & LEAPRAID_CMD_DONE)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: smp passthrough to exp timeout\n",
+ __func__);
+ if (!(adapter->driver_cmds.transport_cmd.status &
+ LEAPRAID_CMD_RESET))
+ issue_reset = true;
+
+ goto hard_reset;
+ }
+
+ if (adapter->driver_cmds.transport_cmd.status &
+ LEAPRAID_CMD_REPLY_VALID)
+ leapraid_transport_exp_set_edev(adapter, data_out, edev);
+
+hard_reset:
+ if (issue_reset) {
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, FULL_RESET);
+ }
+out:
+ adapter->driver_cmds.transport_cmd.status = LEAPRAID_CMD_NOT_USED;
+ if (data_out)
+ dma_free_coherent(&adapter->pdev->dev, h2c_size + c2h_size,
+ data_out, h2c_dma_addr);
+
+ mutex_unlock(&adapter->driver_cmds.transport_cmd.mutex);
+ return rc;
+}
+
+static void leapraid_transport_del_port(struct leapraid_adapter *adapter,
+ struct leapraid_sas_port *sas_port)
+{
+ dev_info(&sas_port->port->dev,
+ "remove port: sas addr=0x%016llx\n",
+ (unsigned long long)sas_port->remote_identify.sas_address);
+ switch (sas_port->remote_identify.device_type) {
+ case SAS_END_DEVICE:
+ leapraid_sas_dev_remove_by_sas_address(adapter,
+ sas_port->remote_identify.sas_address,
+ sas_port->card_port);
+ break;
+ case SAS_EDGE_EXPANDER_DEVICE:
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ leapraid_exp_rm(adapter, sas_port->remote_identify.sas_address,
+ sas_port->card_port);
+ break;
+ default:
+ break;
+ }
+}
+
+static void leapraid_transport_del_phy(struct leapraid_adapter *adapter,
+ struct leapraid_sas_port *sas_port,
+ struct leapraid_card_phy *card_phy)
+{
+ dev_info(&card_phy->phy->dev,
+ "remove phy: sas addr=0x%016llx, phy=%d\n",
+ (unsigned long long)sas_port->remote_identify.sas_address,
+ card_phy->phy_id);
+ list_del(&card_phy->port_siblings);
+ sas_port->phys_num--;
+ sas_port_delete_phy(sas_port->port, card_phy->phy);
+ card_phy->phy_is_assigned = false;
+}
+
+static void leapraid_transport_add_phy(struct leapraid_adapter *adapter,
+ struct leapraid_sas_port *sas_port,
+ struct leapraid_card_phy *card_phy)
+{
+ dev_info(&card_phy->phy->dev,
+ "add phy: sas addr=0x%016llx, phy=%d\n",
+ (unsigned long long)sas_port->remote_identify.sas_address,
+ card_phy->phy_id);
+ list_add_tail(&card_phy->port_siblings, &sas_port->phy_list);
+ sas_port->phys_num++;
+ sas_port_add_phy(sas_port->port, card_phy->phy);
+ card_phy->phy_is_assigned = true;
+}
+
+void leapraid_transport_attach_phy_to_port(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node,
+ struct leapraid_card_phy *card_phy,
+ u64 sas_address,
+ struct leapraid_card_port *card_port)
+{
+ struct leapraid_sas_port *sas_port;
+ struct leapraid_card_phy *card_phy_srch;
+
+ if (card_phy->phy_is_assigned)
+ return;
+
+ if (!card_port)
+ return;
+
+ list_for_each_entry(sas_port, &topo_node->sas_port_list, port_list) {
+ if (sas_port->remote_identify.sas_address != sas_address)
+ continue;
+
+ if (sas_port->card_port != card_port)
+ continue;
+
+ list_for_each_entry(card_phy_srch, &sas_port->phy_list,
+ port_siblings) {
+ if (card_phy_srch == card_phy)
+ return;
+ }
+ leapraid_transport_add_phy(adapter, sas_port, card_phy);
+ return;
+ }
+}
+
+void leapraid_transport_detach_phy_to_port(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node,
+ struct leapraid_card_phy *target_card_phy)
+{
+ struct leapraid_sas_port *sas_port, *sas_port_next;
+ struct leapraid_card_phy *cur_card_phy;
+
+ if (!target_card_phy->phy_is_assigned)
+ return;
+
+ list_for_each_entry_safe(sas_port, sas_port_next,
+ &topo_node->sas_port_list, port_list) {
+ list_for_each_entry(cur_card_phy, &sas_port->phy_list,
+ port_siblings) {
+ if (cur_card_phy != target_card_phy)
+ continue;
+
+ if (sas_port->phys_num == 1 &&
+ !adapter->access_ctrl.shost_recovering)
+ leapraid_transport_del_port(adapter, sas_port);
+ else
+ leapraid_transport_del_phy(adapter, sas_port,
+ target_card_phy);
+ return;
+ }
+ }
+}
+
+static void leapraid_detach_phy_from_old_port(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node,
+ u64 sas_address,
+ struct leapraid_card_port *card_port)
+{
+ int i;
+
+ for (i = 0; i < topo_node->phys_num; i++) {
+ if (topo_node->card_phy[i].remote_identify.sas_address !=
+ sas_address ||
+ topo_node->card_phy[i].card_port != card_port)
+ continue;
+ if (topo_node->card_phy[i].phy_is_assigned)
+ leapraid_transport_detach_phy_to_port(adapter,
+ topo_node,
+ &topo_node->card_phy[i]);
+ }
+}
+
+static struct leapraid_sas_port *leapraid_prepare_sas_port(
+ struct leapraid_adapter *adapter,
+ u16 handle, u64 sas_address,
+ struct leapraid_card_port *card_port,
+ struct leapraid_topo_node **out_topo_node)
+{
+ struct leapraid_topo_node *topo_node;
+ struct leapraid_sas_port *sas_port;
+ unsigned long flags;
+
+ sas_port = kzalloc(sizeof(*sas_port), GFP_KERNEL);
+ if (!sas_port)
+ return NULL;
+
+ INIT_LIST_HEAD(&sas_port->port_list);
+ INIT_LIST_HEAD(&sas_port->phy_list);
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ topo_node = leapraid_transport_topo_node_by_sas_addr(adapter,
+ sas_address,
+ card_port);
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ if (!topo_node) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to find parent node for sas addr 0x%016llx!\n",
+ __func__, sas_address);
+ kfree(sas_port);
+ return NULL;
+ }
+
+ if (leapraid_transport_set_identify(adapter, handle,
+ &sas_port->remote_identify)) {
+ kfree(sas_port);
+ return NULL;
+ }
+
+ if (sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
+ kfree(sas_port);
+ return NULL;
+ }
+
+ sas_port->card_port = card_port;
+ *out_topo_node = topo_node;
+
+ return sas_port;
+}
+
+static int leapraid_bind_phys_and_vphy(struct leapraid_adapter *adapter,
+ struct leapraid_sas_port *sas_port,
+ struct leapraid_topo_node *topo_node,
+ struct leapraid_card_port *card_port,
+ struct leapraid_vphy **out_vphy)
+{
+ struct leapraid_vphy *vphy = NULL;
+ int i;
+
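+	/*
+	 * Gather every parent phy routed to this remote SAS address on the
+	 * given card port; for direct-attached phys, resolve the virtual
+	 * phy (vphy) when one is configured.
+	 */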
+ for (i = 0; i < topo_node->phys_num; i++) {
+ if (topo_node->card_phy[i].remote_identify.sas_address !=
+ sas_port->remote_identify.sas_address ||
+ topo_node->card_phy[i].card_port != card_port)
+ continue;
+
+ list_add_tail(&topo_node->card_phy[i].port_siblings,
+ &sas_port->phy_list);
+ sas_port->phys_num++;
+
+ if (topo_node->hdl <= adapter->dev_topo.card.phys_num) {
+ if (!topo_node->card_phy[i].vphy) {
+ card_port->phy_mask |= BIT(i);
+ continue;
+ }
+
+ vphy = leapraid_get_vphy_by_phy(card_port, i);
+ if (!vphy)
+ return -1;
+ }
+ }
+
+ *out_vphy = vphy;
+ return sas_port->phys_num ? 0 : -1;
+}
+
+static struct sas_rphy *leapraid_create_and_register_rphy(
+ struct leapraid_adapter *adapter,
+ struct leapraid_sas_port *sas_port,
+ struct leapraid_topo_node *topo_node,
+ struct leapraid_card_port *card_port,
+ struct leapraid_vphy *vphy)
+{
+ struct leapraid_sas_dev *sas_dev = NULL;
+ struct leapraid_card_phy *card_phy;
+ struct sas_port *port;
+ struct sas_rphy *rphy;
+
+ if (!topo_node->parent_dev)
+ return NULL;
+
+ port = sas_port_alloc_num(topo_node->parent_dev);
+ if (sas_port_add(port))
+ return NULL;
+
+ list_for_each_entry(card_phy, &sas_port->phy_list, port_siblings) {
+ sas_port_add_phy(port, card_phy->phy);
+ card_phy->phy_is_assigned = true;
+ card_phy->card_port = card_port;
+ }
+
+ if (sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ sas_dev = leapraid_get_sas_dev_by_addr(adapter,
+ sas_port->remote_identify.sas_address,
+ card_port);
+ if (!sas_dev)
+ return NULL;
+ sas_dev->pend_sas_rphy_add = 1;
+ rphy = sas_end_device_alloc(port);
+ sas_dev->rphy = rphy;
+
+ if (topo_node->hdl <= adapter->dev_topo.card.phys_num) {
+ if (!vphy)
+ card_port->sas_address = sas_dev->sas_addr;
+ else
+ vphy->sas_address = sas_dev->sas_addr;
+ }
+
+ } else {
+ rphy = sas_expander_alloc(port,
+ sas_port->remote_identify.device_type);
+ if (topo_node->hdl <= adapter->dev_topo.card.phys_num)
+ card_port->sas_address =
+ sas_port->remote_identify.sas_address;
+ }
+
+	if (!rphy) {
+		if (sas_dev)
+			leapraid_sdev_put(sas_dev);
+		return NULL;
+	}
+
+	rphy->identify = sas_port->remote_identify;
+
+ if (sas_rphy_add(rphy))
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to add rphy\n", __func__);
+
+ if (sas_dev) {
+ sas_dev->pend_sas_rphy_add = 0;
+ leapraid_sdev_put(sas_dev);
+ }
+
+ sas_port->port = port;
+ return rphy;
+}
+
+struct leapraid_sas_port *leapraid_transport_port_add(
+ struct leapraid_adapter *adapter,
+ u16 hdl, u64 sas_address,
+ struct leapraid_card_port *card_port)
+{
+ struct leapraid_card_phy *card_phy, *card_phy_next;
+ struct leapraid_topo_node *topo_node = NULL;
+ struct leapraid_sas_port *sas_port = NULL;
+ struct leapraid_vphy *vphy = NULL;
+ struct sas_rphy *rphy = NULL;
+ unsigned long flags;
+
+ if (!card_port)
+ return NULL;
+
+ sas_port = leapraid_prepare_sas_port(adapter, hdl, sas_address,
+ card_port, &topo_node);
+ if (!sas_port)
+ return NULL;
+
+ leapraid_detach_phy_from_old_port(adapter,
+ topo_node,
+ sas_port->remote_identify.sas_address,
+ card_port);
+
+ if (leapraid_bind_phys_and_vphy(adapter, sas_port, topo_node,
+ card_port, &vphy))
+ goto out_fail;
+
+ rphy = leapraid_create_and_register_rphy(adapter, sas_port, topo_node,
+ card_port, vphy);
+ if (!rphy)
+ goto out_fail;
+
+ dev_info(&rphy->dev,
+ "%s: added dev: hdl=0x%04x, sas addr=0x%016llx\n",
+ __func__, hdl,
+ (unsigned long long)sas_port->remote_identify.sas_address);
+
+ sas_port->rphy = rphy;
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ list_add_tail(&sas_port->port_list, &topo_node->sas_port_list);
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+	if (sas_port->remote_identify.device_type ==
+	    SAS_EDGE_EXPANDER_DEVICE ||
+	    sas_port->remote_identify.device_type ==
+	    SAS_FANOUT_EXPANDER_DEVICE)
+ leapraid_transport_exp_report_manu(adapter,
+ sas_port->remote_identify.sas_address,
+ rphy_to_expander_device(rphy),
+ card_port->port_id);
+
+ return sas_port;
+
+out_fail:
+ list_for_each_entry_safe(card_phy, card_phy_next,
+ &sas_port->phy_list, port_siblings)
+ list_del(&card_phy->port_siblings);
+ kfree(sas_port);
+ return NULL;
+}
+
+static struct leapraid_sas_port *leapraid_find_and_remove_sas_port(
+ struct leapraid_topo_node *topo_node,
+ u64 sas_address,
+ struct leapraid_card_port *remove_card_port,
+ bool *found)
+{
+ struct leapraid_sas_port *sas_port, *sas_port_next;
+
+ list_for_each_entry_safe(sas_port, sas_port_next,
+ &topo_node->sas_port_list, port_list) {
+ if (sas_port->remote_identify.sas_address != sas_address)
+ continue;
+
+ if (sas_port->card_port != remove_card_port)
+ continue;
+
+ *found = true;
+ list_del(&sas_port->port_list);
+ return sas_port;
+ }
+ return NULL;
+}
+
+static void leapraid_cleanup_card_port_and_vphys(
+ struct leapraid_adapter *adapter,
+ u64 sas_address,
+ struct leapraid_card_port *remove_card_port)
+{
+ struct leapraid_card_port *card_port, *card_port_next;
+ struct leapraid_vphy *vphy, *vphy_next;
+
+ if (remove_card_port->vphys_mask) {
+ list_for_each_entry_safe(vphy, vphy_next,
+ &remove_card_port->vphys_list, list) {
+ if (vphy->sas_address != sas_address)
+ continue;
+
+ dev_info(&adapter->pdev->dev,
+ "%s: remove vphy: %p from port: %p, port_id=%d\n",
+ __func__, vphy, remove_card_port,
+ remove_card_port->port_id);
+
+ remove_card_port->vphys_mask &= ~vphy->phy_mask;
+ list_del(&vphy->list);
+ kfree(vphy);
+ }
+
+ if (!remove_card_port->vphys_mask &&
+ !remove_card_port->sas_address) {
+ dev_info(&adapter->pdev->dev,
+ "%s: remove empty hba_port: %p, port_id=%d\n",
+ __func__,
+ remove_card_port,
+ remove_card_port->port_id);
+ list_del(&remove_card_port->list);
+ kfree(remove_card_port);
+ remove_card_port = NULL;
+ }
+ }
+
+ list_for_each_entry_safe(card_port, card_port_next,
+ &adapter->dev_topo.card_port_list, list) {
+ if (card_port != remove_card_port)
+ continue;
+
+ if (card_port->sas_address != sas_address)
+ continue;
+
+ if (!remove_card_port->vphys_mask) {
+ dev_info(&adapter->pdev->dev,
+ "%s: remove hba_port: %p, port_id=%d\n",
+ __func__, card_port, card_port->port_id);
+ list_del(&card_port->list);
+ kfree(card_port);
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: clear sas_address of hba_port: %p, port_id=%d\n",
+ __func__, card_port, card_port->port_id);
+ remove_card_port->sas_address = 0;
+ }
+ break;
+ }
+}
+
+static void leapraid_clear_topo_node_phys(struct leapraid_topo_node *topo_node,
+ u64 sas_address)
+{
+ int i;
+
+ for (i = 0; i < topo_node->phys_num; i++) {
+ if (topo_node->card_phy[i].remote_identify.sas_address ==
+ sas_address) {
+ memset(&topo_node->card_phy[i].remote_identify, 0,
+ sizeof(struct sas_identify));
+ topo_node->card_phy[i].vphy = false;
+ }
+ }
+}
+
+void leapraid_transport_port_remove(struct leapraid_adapter *adapter,
+ u64 sas_address, u64 sas_address_parent,
+ struct leapraid_card_port *remove_card_port)
+{
+ struct leapraid_card_phy *card_phy, *card_phy_next;
+ struct leapraid_sas_port *sas_port = NULL;
+ struct leapraid_topo_node *topo_node;
+ unsigned long flags;
+ bool found = false;
+
+ if (!remove_card_port)
+ return;
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+
+ topo_node = leapraid_transport_topo_node_by_sas_addr(adapter,
+ sas_address_parent,
+ remove_card_port);
+ if (!topo_node) {
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock,
+ flags);
+ return;
+ }
+
+ sas_port = leapraid_find_and_remove_sas_port(topo_node, sas_address,
+ remove_card_port, &found);
+
+ if (!found) {
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock,
+ flags);
+ return;
+ }
+
+ if (topo_node->hdl <= adapter->dev_topo.card.phys_num &&
+ adapter->adapter_attr.enable_mp)
+ leapraid_cleanup_card_port_and_vphys(adapter, sas_address,
+ remove_card_port);
+
+ leapraid_clear_topo_node_phys(topo_node, sas_address);
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ list_for_each_entry_safe(card_phy, card_phy_next,
+ &sas_port->phy_list, port_siblings) {
+ card_phy->phy_is_assigned = false;
+ if (!adapter->access_ctrl.host_removing)
+ sas_port_delete_phy(sas_port->port, card_phy->phy);
+
+ list_del(&card_phy->port_siblings);
+ }
+
+ if (!adapter->access_ctrl.host_removing)
+ sas_port_delete(sas_port->port);
+
+ dev_info(&adapter->pdev->dev,
+ "%s: removed sas_port for sas addr=0x%016llx\n",
+ __func__, (unsigned long long)sas_address);
+
+ kfree(sas_port);
+}
+
+static void leapraid_init_sas_or_exp_phy(struct leapraid_adapter *adapter,
+ struct leapraid_card_phy *card_phy,
+ struct sas_phy *phy,
+ struct leapraid_sas_phy_p0 *phy_pg0,
+ struct leapraid_exp_p1 *exp_pg1)
+{
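+	/*
+	 * Exactly one of phy_pg0 (host phy) or exp_pg1 (expander phy) must
+	 * be supplied; both pages carry the same link-rate fields.
+	 */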
+ if (exp_pg1 && phy_pg0)
+ return;
+
+ if (!exp_pg1 && !phy_pg0)
+ return;
+
+ phy->identify = card_phy->identify;
+ phy->identify.phy_identifier = card_phy->phy_id;
+ phy->negotiated_linkrate = phy_pg0 ?
+ leapraid_transport_convert_phy_link_rate(
+ phy_pg0->neg_link_rate &
+ LEAPRAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL) :
+ leapraid_transport_convert_phy_link_rate(
+ exp_pg1->neg_link_rate &
+ LEAPRAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ phy->minimum_linkrate_hw = phy_pg0 ?
+ leapraid_transport_convert_phy_link_rate(
+ phy_pg0->hw_link_rate &
+ LEAPRAID_SAS_HWRATE_MIN_RATE_MASK) :
+ leapraid_transport_convert_phy_link_rate(
+ exp_pg1->hw_link_rate &
+ LEAPRAID_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = phy_pg0 ?
+ leapraid_transport_convert_phy_link_rate(
+ phy_pg0->hw_link_rate >> 4) :
+ leapraid_transport_convert_phy_link_rate(
+ exp_pg1->hw_link_rate >> 4);
+ phy->minimum_linkrate = phy_pg0 ?
+ leapraid_transport_convert_phy_link_rate(
+ phy_pg0->p_link_rate &
+ LEAPRAID_SAS_PRATE_MIN_RATE_MASK) :
+ leapraid_transport_convert_phy_link_rate(
+ exp_pg1->p_link_rate &
+ LEAPRAID_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = phy_pg0 ?
+ leapraid_transport_convert_phy_link_rate(
+ phy_pg0->p_link_rate >> 4) :
+ leapraid_transport_convert_phy_link_rate(
+ exp_pg1->p_link_rate >> 4);
+ phy->hostdata = card_phy->card_port;
+}
+
+void leapraid_transport_add_card_phy(struct leapraid_adapter *adapter,
+ struct leapraid_card_phy *card_phy,
+ struct leapraid_sas_phy_p0 *phy_pg0,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+
+ INIT_LIST_HEAD(&card_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, card_phy->phy_id);
+ if (!phy) {
+ dev_err(&adapter->pdev->dev,
+ "%s sas_phy_alloc failed!\n", __func__);
+ return;
+ }
+
+ if ((leapraid_transport_set_identify(adapter, card_phy->hdl,
+ &card_phy->identify))) {
+ dev_err(&adapter->pdev->dev,
+ "%s set phy handle identify failed!\n", __func__);
+ sas_phy_free(phy);
+ return;
+ }
+
+ card_phy->attached_hdl = le16_to_cpu(phy_pg0->attached_dev_hdl);
+ if (card_phy->attached_hdl) {
+ if (leapraid_transport_set_identify(adapter,
+ card_phy->attached_hdl,
+ &card_phy->remote_identify)) {
+ dev_err(&adapter->pdev->dev,
+ "%s set phy attached handle identify failed!\n",
+ __func__);
+ sas_phy_free(phy);
+ return;
+ }
+ }
+
+ leapraid_init_sas_or_exp_phy(adapter, card_phy, phy, phy_pg0, NULL);
+
+ if ((sas_phy_add(phy))) {
+ sas_phy_free(phy);
+ return;
+ }
+
+ card_phy->phy = phy;
+}
+
+int leapraid_transport_add_exp_phy(struct leapraid_adapter *adapter,
+ struct leapraid_card_phy *card_phy,
+ struct leapraid_exp_p1 *exp_pg1,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+
+ INIT_LIST_HEAD(&card_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, card_phy->phy_id);
+ if (!phy) {
+ dev_err(&adapter->pdev->dev,
+ "%s sas_phy_alloc failed!\n", __func__);
+ return -EFAULT;
+ }
+
+ if ((leapraid_transport_set_identify(adapter, card_phy->hdl,
+ &card_phy->identify))) {
+ dev_err(&adapter->pdev->dev,
+ "%s set phy hdl identify failed!\n", __func__);
+ sas_phy_free(phy);
+ return -EFAULT;
+ }
+
+ card_phy->attached_hdl = le16_to_cpu(exp_pg1->attached_dev_hdl);
+ if (card_phy->attached_hdl) {
+ if (leapraid_transport_set_identify(adapter,
+ card_phy->attached_hdl,
+ &card_phy->remote_identify)) {
+ dev_err(&adapter->pdev->dev,
+ "%s set phy attached hdl identify failed!\n",
+ __func__);
+			sas_phy_free(phy);
+			return -EFAULT;
+		}
+ }
+
+ leapraid_init_sas_or_exp_phy(adapter, card_phy, phy, NULL, exp_pg1);
+
+ if ((sas_phy_add(phy))) {
+ sas_phy_free(phy);
+ return -EFAULT;
+ }
+
+ card_phy->phy = phy;
+ return 0;
+}
+
+void leapraid_transport_update_links(struct leapraid_adapter *adapter,
+ u64 sas_address, u16 hdl, u8 phy_index,
+ u8 link_rate, struct leapraid_card_port *target_card_port)
+{
+ struct leapraid_topo_node *topo_node;
+ struct leapraid_card_phy *card_phy;
+ struct leapraid_card_port *card_port = NULL;
+ unsigned long flags;
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering)
+ return;
+
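+	/*
+	 * Record the new attached-device handle for this phy, then attach
+	 * or detach the phy from its transport port to mirror the reported
+	 * link state.
+	 */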
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ topo_node = leapraid_transport_topo_node_by_sas_addr(adapter,
+ sas_address,
+ target_card_port);
+ if (!topo_node) {
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock,
+ flags);
+ return;
+ }
+
+ card_phy = &topo_node->card_phy[phy_index];
+ card_phy->attached_hdl = hdl;
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ if (hdl && link_rate >= LEAPRAID_SAS_NEG_LINK_RATE_1_5) {
+ leapraid_transport_set_identify(adapter, hdl,
+ &card_phy->remote_identify);
+ if (topo_node->hdl <= adapter->dev_topo.card.phys_num &&
+ adapter->adapter_attr.enable_mp) {
+ list_for_each_entry(card_port,
+ &adapter->dev_topo.card_port_list,
+ list) {
+ if (card_port->sas_address == sas_address &&
+ card_port == target_card_port)
+ card_port->phy_mask |=
+ BIT(card_phy->phy_id);
+ }
+ }
+ leapraid_transport_attach_phy_to_port(adapter, topo_node,
+ card_phy,
+ card_phy->remote_identify.sas_address,
+ target_card_port);
+ } else {
+ memset(&card_phy->remote_identify, 0,
+ sizeof(struct sas_identify));
+ }
+
+ if (card_phy->phy)
+ card_phy->phy->negotiated_linkrate =
+ leapraid_transport_convert_phy_link_rate(link_rate);
+}
+
+static int leapraid_dma_map_buffer(struct device *dev, struct bsg_buffer *buf,
+ dma_addr_t *dma_addr,
+ size_t *dma_len, void **p)
+{
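+	/*
+	 * Multi-segment payloads are bounced through a single coherent
+	 * buffer; single-segment payloads are DMA-mapped in place.
+	 */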
+ if (buf->sg_cnt > 1) {
+ *p = dma_alloc_coherent(dev, buf->payload_len, dma_addr,
+ GFP_KERNEL);
+ if (!*p)
+ return -ENOMEM;
+
+ *dma_len = buf->payload_len;
+ } else {
+ if (!dma_map_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL))
+ return -ENOMEM;
+
+ *dma_addr = sg_dma_address(buf->sg_list);
+ *dma_len = sg_dma_len(buf->sg_list);
+ *p = NULL;
+ }
+ return 0;
+}
+
+static void leapraid_dma_unmap_buffer(struct device *dev,
+ struct bsg_buffer *buf,
+ dma_addr_t dma_addr,
+ void *p)
+{
+ if (p)
+ dma_free_coherent(dev, buf->payload_len, p, dma_addr);
+ else
+ dma_unmap_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL);
+}
+
+static void leapraid_build_smp_task(struct leapraid_adapter *adapter,
+ struct sas_rphy *rphy,
+ dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size)
+{
+ struct leapraid_smp_passthrough_req *smp_passthrough_req;
+ void *psge;
+
+ smp_passthrough_req =
+ leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.transport_cmd.inter_taskid);
+ memset(smp_passthrough_req, 0, sizeof(*smp_passthrough_req));
+
+ smp_passthrough_req->func = LEAPRAID_FUNC_SMP_PASSTHROUGH;
+ smp_passthrough_req->physical_port =
+ leapraid_transport_get_port_id_by_rphy(adapter, rphy);
+ smp_passthrough_req->sas_address = (rphy) ?
+ cpu_to_le64(rphy->identify.sas_address) :
+ cpu_to_le64(adapter->dev_topo.card.sas_address);
+ smp_passthrough_req->req_data_len =
+ cpu_to_le16(h2c_size - LEAPRAID_SMP_FRAME_HEADER_SIZE);
+ psge = &smp_passthrough_req->sgl;
+ leapraid_build_ieee_sg(adapter, psge, h2c_dma_addr,
+ h2c_size - LEAPRAID_SMP_FRAME_HEADER_SIZE,
+ c2h_dma_addr,
+ c2h_size - LEAPRAID_SMP_FRAME_HEADER_SIZE);
+}
+
+static int leapraid_send_smp_req(struct leapraid_adapter *adapter)
+{
+ dev_info(&adapter->pdev->dev,
+ "%s: sending smp request\n", __func__);
+ init_completion(&adapter->driver_cmds.transport_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.transport_cmd.inter_taskid);
+ wait_for_completion_timeout(&adapter->driver_cmds.transport_cmd.done,
+ LEAPRAID_TRANSPORT_CMD_TIMEOUT * HZ);
+ if (!(adapter->driver_cmds.transport_cmd.status & LEAPRAID_CMD_DONE)) {
+ dev_err(&adapter->pdev->dev, "%s: timeout\n", __func__);
+ if (!(adapter->driver_cmds.transport_cmd.status &
+ LEAPRAID_CMD_RESET)) {
+ dev_info(&adapter->pdev->dev,
+ "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, FULL_RESET);
+ return -ETIMEDOUT;
+ }
+ }
+
+ dev_info(&adapter->pdev->dev, "%s: smp request complete\n", __func__);
+ if (!(adapter->driver_cmds.transport_cmd.status &
+ LEAPRAID_CMD_REPLY_VALID)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: smp request no reply\n", __func__);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static void leapraid_handle_smp_rep(struct leapraid_adapter *adapter,
+ struct bsg_job *job, void *addr_in,
+ unsigned int *reslen)
+{
+ struct leapraid_smp_passthrough_rep *smp_passthrough_rep;
+
+ smp_passthrough_rep =
+ (void *)(&adapter->driver_cmds.transport_cmd.reply);
+
+ dev_info(&adapter->pdev->dev, "%s: response data len=%d\n",
+ __func__, le16_to_cpu(smp_passthrough_rep->resp_data_len));
+
+ memcpy(job->reply, smp_passthrough_rep, sizeof(*smp_passthrough_rep));
+ job->reply_len = sizeof(*smp_passthrough_rep);
+ *reslen = le16_to_cpu(smp_passthrough_rep->resp_data_len);
+
+ if (addr_in)
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, addr_in,
+ job->reply_payload.payload_len);
+}
+
+static void leapraid_transport_smp_handler(struct bsg_job *job,
+ struct Scsi_Host *shost,
+ struct sas_rphy *rphy)
+{
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ dma_addr_t c2h_dma_addr;
+ dma_addr_t h2c_dma_addr;
+ void *addr_in = NULL;
+ void *addr_out = NULL;
+ size_t c2h_size;
+ size_t h2c_size;
+ int rc;
+ unsigned int reslen = 0;
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering) {
+ rc = -EFAULT;
+ goto done;
+ }
+
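+	/*
+	 * Serialize against other users of the shared internal transport
+	 * command slot; only one SMP passthrough can be in flight.
+	 */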
+ rc = mutex_lock_interruptible(&adapter->driver_cmds.transport_cmd.mutex);
+ if (rc)
+ goto done;
+
+ adapter->driver_cmds.transport_cmd.status = LEAPRAID_CMD_PENDING;
+ rc = leapraid_dma_map_buffer(&adapter->pdev->dev,
+ &job->request_payload,
+ &h2c_dma_addr, &h2c_size, &addr_out);
+ if (rc)
+ goto release_lock;
+
+ if (addr_out)
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, addr_out,
+ job->request_payload.payload_len);
+
+ rc = leapraid_dma_map_buffer(&adapter->pdev->dev, &job->reply_payload,
+ &c2h_dma_addr, &c2h_size, &addr_in);
+ if (rc)
+ goto free_req_buf;
+
+ rc = leapraid_check_adapter_is_op(adapter);
+ if (rc)
+ goto free_rep_buf;
+
+ leapraid_build_smp_task(adapter, rphy, h2c_dma_addr,
+ h2c_size, c2h_dma_addr, c2h_size);
+
+ rc = leapraid_send_smp_req(adapter);
+ if (rc)
+ goto free_rep_buf;
+
+ leapraid_handle_smp_rep(adapter, job, addr_in, &reslen);
+
+free_rep_buf:
+ leapraid_dma_unmap_buffer(&adapter->pdev->dev, &job->reply_payload,
+ c2h_dma_addr, addr_in);
+free_req_buf:
+ leapraid_dma_unmap_buffer(&adapter->pdev->dev, &job->request_payload,
+ h2c_dma_addr, addr_out);
+release_lock:
+ adapter->driver_cmds.transport_cmd.status = LEAPRAID_CMD_NOT_USED;
+ mutex_unlock(&adapter->driver_cmds.transport_cmd.mutex);
+done:
+ bsg_job_done(job, rc, reslen);
+}
+
+struct sas_function_template leapraid_transport_functions = {
+ .smp_handler = leapraid_transport_smp_handler,
+};
+
+struct scsi_transport_template *leapraid_transport_template;
--
2.25.1
LeapIO inclusion
category: feature
bugzilla: https://atomgit.com/openeuler/kernel/issues/8340
------------------------------------------
The LeapRAID driver provides support for LeapRAID PCIe RAID controllers,
enabling communication between the host operating system, firmware, and
hardware for efficient storage management.
The main source files are organized as follows:
leapraid_os.c:
Implements the scsi_host_template functions, PCIe device probing, and
initialization routines, integrating the driver with the Linux SCSI
subsystem.
leapraid_func.c:
Provides the core functional routines that handle low-level interactions
with the controller firmware and hardware, including interrupt handling,
topology management, reset sequence processing, and other related
operations.
leapraid_app.c:
Implements the ioctl interface, providing user-space tools access to device
management and diagnostic operations.
leapraid_transport.c:
Interacts with the Linux SCSI transport layer to add SAS phys and ports.
leapraid_func.h:
Declares common data structures, constants, and function prototypes shared
across the driver.
leapraid.h:
Provides global constants, register mappings, and interface definitions
that facilitate communication between the driver and the controller
firmware.
The leapraid_probe function is called when the driver detects a supported
LeapRAID PCIe device. It allocates and initializes the Scsi_Host structure,
configures hardware and firmware interfaces, and registers the host adapter
with the Linux SCSI mid-layer.
After registration, the driver invokes scsi_scan_host() to initiate device
discovery. The firmware then reports discovered logical and physical
devices to the host through interrupt-driven events and synchronizes their
operational states.
leapraid_adapter is the core data structure that encapsulates all resources
and runtime state information maintained during driver operation, described
as follows:
/**
* struct leapraid_adapter - Main LeapRAID adapter structure
* @list: List head for adapter management
* @shost: SCSI host structure
* @pdev: PCI device structure
* @iomem_base: I/O memory mapped base address
* @rep_msg_host_idx: Host index for reply messages
* @mask_int: Interrupt masking flag
* @timestamp_sync_cnt: Timestamp synchronization counter
* @adapter_attr: Adapter attributes
* @mem_desc: Memory descriptor
* @driver_cmds: Driver commands
* @dynamic_task_desc: Dynamic task descriptor
* @fw_evt_s: Firmware event structure
* @notification_desc: Notification descriptor
* @reset_desc: Reset descriptor
* @scan_dev_desc: Device scan descriptor
* @access_ctrl: Access control
* @fw_log_desc: Firmware log descriptor
* @dev_topo: Device topology
* @boot_devs: Boot devices
* @smart_poll_desc: SMART polling descriptor
*/
struct leapraid_adapter {
struct list_head list;
struct Scsi_Host *shost;
struct pci_dev *pdev;
struct leapraid_reg_base __iomem *iomem_base;
u32 rep_msg_host_idx;
bool mask_int;
u32 timestamp_sync_cnt;
struct leapraid_adapter_attr adapter_attr;
struct leapraid_mem_desc mem_desc;
struct leapraid_driver_cmds driver_cmds;
struct leapraid_dynamic_task_desc dynamic_task_desc;
struct leapraid_fw_evt_struct fw_evt_s;
struct leapraid_notification_desc notification_desc;
struct leapraid_reset_desc reset_desc;
struct leapraid_scan_dev_desc scan_dev_desc;
struct leapraid_access_ctrl access_ctrl;
struct leapraid_fw_log_desc fw_log_desc;
struct leapraid_dev_topo dev_topo;
struct leapraid_boot_devs boot_devs;
struct leapraid_smart_poll_desc smart_poll_desc;
};
Signed-off-by: haodongdong <doubled(a)leap-io.com>
---
arch/arm64/configs/openeuler_defconfig | 1 +
arch/x86/configs/openeuler_defconfig | 1 +
drivers/scsi/Kconfig | 1 +
drivers/scsi/Makefile | 1 +
drivers/scsi/leapraid/Kconfig | 14 +
drivers/scsi/leapraid/Makefile | 10 +
drivers/scsi/leapraid/leapraid.h | 2070 +++++
drivers/scsi/leapraid/leapraid_app.c | 675 ++
drivers/scsi/leapraid/leapraid_func.c | 8264 ++++++++++++++++++++
drivers/scsi/leapraid/leapraid_func.h | 1423 ++++
drivers/scsi/leapraid/leapraid_os.c | 2271 ++++++
drivers/scsi/leapraid/leapraid_transport.c | 1256 +++
12 files changed, 15987 insertions(+)
create mode 100644 drivers/scsi/leapraid/Kconfig
create mode 100644 drivers/scsi/leapraid/Makefile
create mode 100644 drivers/scsi/leapraid/leapraid.h
create mode 100644 drivers/scsi/leapraid/leapraid_app.c
create mode 100644 drivers/scsi/leapraid/leapraid_func.c
create mode 100644 drivers/scsi/leapraid/leapraid_func.h
create mode 100644 drivers/scsi/leapraid/leapraid_os.c
create mode 100644 drivers/scsi/leapraid/leapraid_transport.c
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index d190cc0cb030..4bac10933bda 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -2470,6 +2470,7 @@ CONFIG_SCSI_MPT2SAS_MAX_SGE=128
CONFIG_SCSI_MPT3SAS_MAX_SGE=128
CONFIG_SCSI_MPT2SAS=m
CONFIG_SCSI_PS3STOR=m
+CONFIG_SCSI_LEAPRAID=m
CONFIG_SCSI_SMARTPQI=m
CONFIG_SCSI_HISI_RAID=m
# CONFIG_SCSI_UFSHCD is not set
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index fdd8d59bad01..f6b9c56f8b65 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -2398,6 +2398,7 @@ CONFIG_SCSI_MPT2SAS_MAX_SGE=128
CONFIG_SCSI_MPT3SAS_MAX_SGE=128
CONFIG_SCSI_MPT2SAS=m
CONFIG_SCSI_PS3STOR=m
+CONFIG_SCSI_LEAPRAID=m
CONFIG_SCSI_SMARTPQI=m
CONFIG_SCSI_HISI_RAID=m
# CONFIG_SCSI_UFSHCD is not set
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index edec9aa0993e..45d695f62cd4 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -473,6 +473,7 @@ source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/sssraid/Kconfig"
source "drivers/scsi/mpt3sas/Kconfig"
source "drivers/scsi/linkdata/Kconfig"
+source "drivers/scsi/leapraid/Kconfig"
source "drivers/scsi/smartpqi/Kconfig"
source "drivers/scsi/hisi_raid/Kconfig"
source "drivers/scsi/ufs/Kconfig"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index b27758db0c02..18dd9b1cd70d 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -90,6 +90,7 @@ obj-$(CONFIG_SCSI_BFA_FC) += bfa/
obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
obj-$(CONFIG_SCSI_HPSA) += hpsa.o
+obj-$(CONFIG_SCSI_LEAPRAID) += leapraid/
obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi/
obj-$(CONFIG_SCSI_3SNIC_SSSRAID) += sssraid/
obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
diff --git a/drivers/scsi/leapraid/Kconfig b/drivers/scsi/leapraid/Kconfig
new file mode 100644
index 000000000000..b539183b24a7
--- /dev/null
+++ b/drivers/scsi/leapraid/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config SCSI_LEAPRAID
+ tristate "LeapIO RAID Adapter"
+ depends on PCI && SCSI
+ select SCSI_SAS_ATTRS
+ help
+ This driver supports LeapIO PCIe-based Storage
+ and RAID controllers.
+
+ <http://www.leap-io.com>
+
+ To compile this driver as a module, choose M here: the
+ resulting kernel module will be named leapraid.
diff --git a/drivers/scsi/leapraid/Makefile b/drivers/scsi/leapraid/Makefile
new file mode 100644
index 000000000000..bdafc036cd00
--- /dev/null
+++ b/drivers/scsi/leapraid/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the LEAPRAID drivers.
+#
+
+obj-$(CONFIG_SCSI_LEAPRAID) += leapraid.o
+leapraid-objs += leapraid_func.o \
+ leapraid_os.o \
+ leapraid_transport.o \
+ leapraid_app.o
diff --git a/drivers/scsi/leapraid/leapraid.h b/drivers/scsi/leapraid/leapraid.h
new file mode 100644
index 000000000000..842810d41542
--- /dev/null
+++ b/drivers/scsi/leapraid/leapraid.h
@@ -0,0 +1,2070 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 LeapIO Tech Inc.
+ *
+ * LeapRAID Storage and RAID Controller driver.
+ */
+#ifndef LEAPRAID_H
+#define LEAPRAID_H
+
+/* doorbell register definitions */
+#define LEAPRAID_DB_RESET 0x00000000
+#define LEAPRAID_DB_READY 0x10000000
+#define LEAPRAID_DB_OPERATIONAL 0x20000000
+#define LEAPRAID_DB_FAULT 0x40000000
+
+#define LEAPRAID_DB_MASK 0xF0000000
+
+#define LEAPRAID_DB_OVER_TEMPERATURE 0x2810
+
+#define LEAPRAID_DB_USED 0x08000000
+#define LEAPRAID_DB_DATA_MASK 0x0000FFFF
+#define LEAPRAID_DB_FUNC_SHIFT 24
+#define LEAPRAID_DB_ADD_DWORDS_SHIFT 16
+
+/* maximum number of retries waiting for doorbell to become ready */
+#define LEAPRAID_DB_RETRY_COUNT_MAX 10
+/* maximum number of retries waiting for doorbell to become operational */
+#define LEAPRAID_DB_WAIT_OPERATIONAL 10
+/* sleep interval (in seconds) between doorbell polls */
+#define LEAPRAID_DB_POLL_INTERVAL_S 1
+
+/* maximum number of retries waiting for host to end recovery */
+#define LEAPRAID_WAIT_SHOST_RECOVERY 30
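+
+/*
+ * Illustrative sketch (not part of this patch): how the doorbell state
+ * defines above would typically be consumed. The register pointer
+ * "db_reg" is hypothetical; the real wait helper lives elsewhere in
+ * the driver.
+ *
+ *	u32 state;
+ *	int retry = 0;
+ *
+ *	do {
+ *		state = readl(db_reg) & LEAPRAID_DB_MASK;
+ *		if (state == LEAPRAID_DB_READY)
+ *			return 0;
+ *		ssleep(LEAPRAID_DB_POLL_INTERVAL_S);
+ *	} while (++retry < LEAPRAID_DB_RETRY_COUNT_MAX);
+ *	return -ETIMEDOUT;
+ */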
+
+/* diagnostic register definitions */
+#define LEAPRAID_DIAG_WRITE_ENABLE 0x00000080
+#define LEAPRAID_DIAG_RESET 0x00000004
+#define LEAPRAID_DIAG_HOLD_ADAPTER_RESET 0x00000002
+
+/* interrupt status register definitions */
+#define LEAPRAID_HOST2ADAPTER_DB_STATUS 0x80000000
+#define LEAPRAID_ADAPTER2HOST_DB_STATUS 0x00000001
+
+/* number of debug log registers */
+#define LEAPRAID_DEBUGLOG_SZ_MAX 16
+
+/* reply post host register defines */
+#define REP_POST_HOST_IDX_REG_CNT 16
+#define LEAPRAID_RPHI_MSIX_IDX_SHIFT 24
+
+/* vphy flags */
+#define LEAPRAID_SAS_PHYINFO_VPHY 0x00001000
+
+/* "who init" value identifying the Linux driver */
+#define LEAPRAID_WHOINIT_LINUX_DRIVER 0x04
+
+/* rdpq array mode */
+#define LEAPRAID_ADAPTER_INIT_MSGFLG_RDPQ_ARRAY_MODE 0x01
+
+/* request description flags */
+#define LEAPRAID_REQ_DESC_FLG_SCSI_IO 0x00
+#define LEAPRAID_REQ_DESC_FLG_HPR 0x06
+#define LEAPRAID_REQ_DESC_FLG_DFLT_TYPE 0x08
+
+/* reply description flags */
+#define LEAPRAID_RPY_DESC_FLG_TYPE_MASK 0x0F
+#define LEAPRAID_RPY_DESC_FLG_SCSI_IO_SUCCESS 0x00
+#define LEAPRAID_RPY_DESC_FLG_ADDRESS_REPLY 0x01
+#define LEAPRAID_RPY_DESC_FLG_FP_SCSI_IO_SUCCESS 0x06
+#define LEAPRAID_RPY_DESC_FLG_UNUSED 0x0F
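+
+/*
+ * Illustrative sketch (not part of this patch): reply descriptors are
+ * dispatched on the type bits of the flag byte; "desc" is a hypothetical
+ * union leapraid_rep_desc_union pointer (defined later in this header).
+ *
+ *	switch (desc->dflt_rep.rep_flg & LEAPRAID_RPY_DESC_FLG_TYPE_MASK) {
+ *	case LEAPRAID_RPY_DESC_FLG_SCSI_IO_SUCCESS:
+ *		// fast-path completion, no reply frame attached
+ *		break;
+ *	case LEAPRAID_RPY_DESC_FLG_ADDRESS_REPLY:
+ *		// full reply frame at desc->addr_rep.rep_frame_addr
+ *		break;
+ *	}
+ */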
+
+/* MPI functions */
+#define LEAPRAID_FUNC_SCSIIO_REQ 0x00
+#define LEAPRAID_FUNC_SCSI_TMF 0x01
+#define LEAPRAID_FUNC_ADAPTER_INIT 0x02
+#define LEAPRAID_FUNC_GET_ADAPTER_FEATURES 0x03
+#define LEAPRAID_FUNC_CONFIG_OP 0x04
+#define LEAPRAID_FUNC_SCAN_DEV 0x06
+#define LEAPRAID_FUNC_EVENT_NOTIFY 0x07
+#define LEAPRAID_FUNC_FW_DOWNLOAD 0x09
+#define LEAPRAID_FUNC_FW_UPLOAD 0x12
+#define LEAPRAID_FUNC_RAID_ACTION 0x15
+#define LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH 0x16
+#define LEAPRAID_FUNC_SCSI_ENC_PROCESSOR 0x18
+#define LEAPRAID_FUNC_SMP_PASSTHROUGH 0x1A
+#define LEAPRAID_FUNC_SAS_IO_UNIT_CTRL 0x1B
+#define LEAPRAID_FUNC_SATA_PASSTHROUGH 0x1C
+#define LEAPRAID_FUNC_ADAPTER_UNIT_RESET 0x40
+#define LEAPRAID_FUNC_HANDSHAKE 0x42
+#define LEAPRAID_FUNC_LOGBUF_INIT 0x57
+
+/* adapter status values */
+#define LEAPRAID_ADAPTER_STATUS_MASK 0x7FFF
+#define LEAPRAID_ADAPTER_STATUS_SUCCESS 0x0000
+#define LEAPRAID_ADAPTER_STATUS_BUSY 0x0002
+#define LEAPRAID_ADAPTER_STATUS_INTERNAL_ERROR 0x0004
+#define LEAPRAID_ADAPTER_STATUS_INSUFFICIENT_RESOURCES 0x0006
+#define LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_ACTION 0x0020
+#define LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_TYPE 0x0021
+#define LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_PAGE 0x0022
+#define LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_DATA 0x0023
+#define LEAPRAID_ADAPTER_STATUS_CONFIG_NO_DEFAULTS 0x0024
+#define LEAPRAID_ADAPTER_STATUS_CONFIG_CANT_COMMIT 0x0025
+#define LEAPRAID_ADAPTER_STATUS_SCSI_RECOVERED_ERROR 0x0040
+#define LEAPRAID_ADAPTER_STATUS_SCSI_DEVICE_NOT_THERE 0x0043
+#define LEAPRAID_ADAPTER_STATUS_SCSI_DATA_OVERRUN 0x0044
+#define LEAPRAID_ADAPTER_STATUS_SCSI_DATA_UNDERRUN 0x0045
+#define LEAPRAID_ADAPTER_STATUS_SCSI_IO_DATA_ERROR 0x0046
+#define LEAPRAID_ADAPTER_STATUS_SCSI_PROTOCOL_ERROR 0x0047
+#define LEAPRAID_ADAPTER_STATUS_SCSI_TASK_TERMINATED 0x0048
+#define LEAPRAID_ADAPTER_STATUS_SCSI_RESIDUAL_MISMATCH 0x0049
+#define LEAPRAID_ADAPTER_STATUS_SCSI_TASK_MGMT_FAILED 0x004A
+#define LEAPRAID_ADAPTER_STATUS_SCSI_ADAPTER_TERMINATED 0x004B
+#define LEAPRAID_ADAPTER_STATUS_SCSI_EXT_TERMINATED 0x004C
+
+/* sge flags */
+#define LEAPRAID_SGE_FLG_LAST_ONE 0x80
+#define LEAPRAID_SGE_FLG_EOB 0x40
+#define LEAPRAID_SGE_FLG_EOL 0x01
+#define LEAPRAID_SGE_FLG_SHIFT 24
+#define LEAPRAID_SGE_FLG_SIMPLE_ONE 0x10
+#define LEAPRAID_SGE_FLG_SYSTEM_ADDR 0x00
+#define LEAPRAID_SGE_FLG_H2C 0x04
+#define LEAPRAID_SGE_FLG_32 0x00
+#define LEAPRAID_SGE_FLG_64 0x02
+
+#define LEAPRAID_IEEE_SGE_FLG_EOL 0x40
+#define LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE 0x00
+#define LEAPRAID_IEEE_SGE_FLG_CHAIN_ONE 0x80
+#define LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR 0x00
+
+#define LEAPRAID_SGE_OFFSET_SIZE 4
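+
+/*
+ * Illustrative sketch (not part of this patch, assuming the usual MPI
+ * layout implied by LEAPRAID_SGE_FLG_SHIFT): flags occupy the top byte
+ * of flg_and_len and the segment length the low bits, e.g. for a final
+ * 64-bit simple element:
+ *
+ *	u32 flg = LEAPRAID_SGE_FLG_SIMPLE_ONE | LEAPRAID_SGE_FLG_64 |
+ *		  LEAPRAID_SGE_FLG_LAST_ONE | LEAPRAID_SGE_FLG_EOB |
+ *		  LEAPRAID_SGE_FLG_EOL;
+ *	sge->flg_and_len = cpu_to_le32((flg << LEAPRAID_SGE_FLG_SHIFT) | len);
+ */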
+
+/* page and ext page type */
+#define LEAPRAID_CFG_PT_IO_UNIT 0x00
+#define LEAPRAID_CFG_PT_ADAPTER 0x01
+#define LEAPRAID_CFG_PT_BIOS 0x02
+#define LEAPRAID_CFG_PT_RAID_VOLUME 0x08
+#define LEAPRAID_CFG_PT_RAID_PHYSDISK 0x0A
+#define LEAPRAID_CFG_PT_EXTENDED 0x0F
+#define LEAPRAID_CFG_EXTPT_SAS_IO_UNIT 0x10
+#define LEAPRAID_CFG_EXTPT_SAS_EXP 0x11
+#define LEAPRAID_CFG_EXTPT_SAS_DEV 0x12
+#define LEAPRAID_CFG_EXTPT_SAS_PHY 0x13
+#define LEAPRAID_CFG_EXTPT_ENC 0x15
+#define LEAPRAID_CFG_EXTPT_RAID_CONFIG 0x16
+
+/* config page address */
+#define LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP 0x00000000
+#define LEAPRAID_SAS_ENC_CFG_PGAD_HDL 0x10000000
+#define LEAPRAID_SAS_DEV_CFG_PGAD_HDL 0x20000000
+#define LEAPRAID_SAS_EXP_CFG_PGAD_HDL_PHY_NUM 0x10000000
+#define LEAPRAID_SAS_EXP_CFD_PGAD_HDL 0x20000000
+#define LEAPRAID_SAS_EXP_CFG_PGAD_PHYNUM_SHIFT 16
+#define LEAPRAID_RAID_VOL_CFG_PGAD_HDL 0x10000000
+#define LEAPRAID_SAS_PHY_CFG_PGAD_PHY_NUMBER 0x00000000
+#define LEAPRAID_PHYSDISK_CFG_PGAD_PHYSDISKNUM 0x10000000
+
+/* config page operations */
+#define LEAPRAID_CFG_ACT_PAGE_HEADER 0x00
+#define LEAPRAID_CFG_ACT_PAGE_READ_CUR 0x01
+#define LEAPRAID_CFG_ACT_PAGE_WRITE_CUR 0x02
+
+/* bios pages */
+#define LEAPRAID_CFG_PAGE_NUM_BIOS2 0x2
+#define LEAPRAID_CFG_PAGE_NUM_BIOS3 0x3
+
+/* sas device pages */
+#define LEAPRAID_CFG_PAGE_NUM_DEV0 0x0
+
+/* sas device page 0 flags */
+#define LEAPRAID_SAS_DEV_P0_FLG_FP_CAP 0x2000
+#define LEAPRAID_SAS_DEV_P0_FLG_SATA_SMART 0x0040
+#define LEAPRAID_SAS_DEV_P0_FLG_ENC_LEVEL_VALID 0x0002
+#define LEAPRAID_SAS_DEV_P0_FLG_DEV_PRESENT 0x0001
+
+/* sas IO unit pages */
+#define LEAPRAID_CFG_PAGE_NUM_IOUNIT0 0x0
+#define LEAPRAID_CFG_PAGE_NUM_IOUNIT1 0x1
+
+/* sas expander pages */
+#define LEAPRAID_CFG_PAGE_NUM_EXP0 0x0
+#define LEAPRAID_CFG_PAGE_NUM_EXP1 0x1
+
+/* sas enclosure page */
+#define LEAPRAID_CFG_PAGE_NUM_ENC0 0x0
+
+/* sas phy page */
+#define LEAPRAID_CFG_PAGE_NUM_PHY0 0x0
+
+/* raid volume pages */
+#define LEAPRAID_CFG_PAGE_NUM_VOL0 0x0
+#define LEAPRAID_CFG_PAGE_NUM_VOL1 0x1
+
+/* physical disk page */
+#define LEAPRAID_CFG_PAGE_NUM_PD0 0x0
+
+/* adapter page */
+#define LEAPRAID_CFG_PAGE_NUM_ADAPTER1 0x1
+
+#define LEAPRAID_CFG_UNIT_SIZE 4
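+
+/*
+ * Illustrative sketch (not part of this patch): page lengths in the
+ * config page headers below are counted in LEAPRAID_CFG_UNIT_SIZE
+ * (4-byte) units, so a buffer size in bytes would be derived as:
+ *
+ *	bytes = hdr->page_len * LEAPRAID_CFG_UNIT_SIZE;
+ *	bytes = le16_to_cpu(ext_hdr->ext_page_len) * LEAPRAID_CFG_UNIT_SIZE;
+ */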
+
+/* raid volume type and state */
+#define LEAPRAID_VOL_STATE_MISSING 0x00
+#define LEAPRAID_VOL_STATE_FAILED 0x01
+#define LEAPRAID_VOL_STATE_INITIALIZING 0x02
+#define LEAPRAID_VOL_STATE_ONLINE 0x03
+#define LEAPRAID_VOL_STATE_DEGRADED 0x04
+#define LEAPRAID_VOL_STATE_OPTIMAL 0x05
+#define LEAPRAID_VOL_TYPE_RAID0 0x00
+#define LEAPRAID_VOL_TYPE_RAID1E 0x01
+#define LEAPRAID_VOL_TYPE_RAID1 0x02
+#define LEAPRAID_VOL_TYPE_RAID10 0x05
+#define LEAPRAID_VOL_TYPE_UNKNOWN 0xFF
+
+/* raid volume element flags */
+#define LEAPRAID_RAIDCFG_P0_EFLG_MASK_ELEMENT_TYPE 0x000F
+#define LEAPRAID_RAIDCFG_P0_EFLG_VOL_PHYS_DISK_ELEMENT 0x0001
+#define LEAPRAID_RAIDCFG_P0_EFLG_HOT_SPARE_ELEMENT 0x0002
+#define LEAPRAID_RAIDCFG_P0_EFLG_OCE_ELEMENT 0x0003
+
+/* raid action */
+#define LEAPRAID_RAID_ACT_SYSTEM_SHUTDOWN_INITIATED 0x20
+#define LEAPRAID_RAID_ACT_PHYSDISK_HIDDEN 0x24
+
+/* sas negotiated link rates */
+#define LEAPRAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL 0x0F
+#define LEAPRAID_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE 0x00
+#define LEAPRAID_SAS_NEG_LINK_RATE_PHY_DISABLED 0x01
+#define LEAPRAID_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED 0x02
+#define LEAPRAID_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE 0x03
+#define LEAPRAID_SAS_NEG_LINK_RATE_PORT_SELECTOR 0x04
+#define LEAPRAID_SAS_NEG_LINK_RATE_SMP_RESETTING 0x05
+
+#define LEAPRAID_SAS_NEG_LINK_RATE_1_5 0x08
+#define LEAPRAID_SAS_NEG_LINK_RATE_3_0 0x09
+#define LEAPRAID_SAS_NEG_LINK_RATE_6_0 0x0A
+#define LEAPRAID_SAS_NEG_LINK_RATE_12_0 0x0B
+
+#define LEAPRAID_SAS_PRATE_MIN_RATE_MASK 0x0F
+#define LEAPRAID_SAS_HWRATE_MIN_RATE_MASK 0x0F
+
+/* scsi IO control bits */
+#define LEAPRAID_SCSIIO_CTRL_ADDCDBLEN_SHIFT 26
+#define LEAPRAID_SCSIIO_CTRL_NODATATRANSFER 0x00000000
+#define LEAPRAID_SCSIIO_CTRL_WRITE 0x01000000
+#define LEAPRAID_SCSIIO_CTRL_READ 0x02000000
+#define LEAPRAID_SCSIIO_CTRL_BIDIRECTIONAL 0x03000000
+#define LEAPRAID_SCSIIO_CTRL_SIMPLEQ 0x00000000
+#define LEAPRAID_SCSIIO_CTRL_ORDEREDQ 0x00000200
+#define LEAPRAID_SCSIIO_CTRL_CMDPRI 0x00000800
+
+/* scsi state and status */
+#define LEAPRAID_SCSI_STATUS_BUSY 0x08
+#define LEAPRAID_SCSI_STATUS_RESERVATION_CONFLICT 0x18
+#define LEAPRAID_SCSI_STATUS_TASK_SET_FULL 0x28
+
+#define LEAPRAID_SCSI_STATE_RESPONSE_INFO_VALID 0x10
+#define LEAPRAID_SCSI_STATE_TERMINATED 0x08
+#define LEAPRAID_SCSI_STATE_NO_SCSI_STATUS 0x04
+#define LEAPRAID_SCSI_STATE_AUTOSENSE_FAILED 0x02
+#define LEAPRAID_SCSI_STATE_AUTOSENSE_VALID 0x01
+
+/* scsi task management defines */
+#define LEAPRAID_TM_TASKTYPE_ABORT_TASK 0x01
+#define LEAPRAID_TM_TASKTYPE_ABRT_TASK_SET 0x02
+#define LEAPRAID_TM_TASKTYPE_TARGET_RESET 0x03
+#define LEAPRAID_TM_TASKTYPE_LOGICAL_UNIT_RESET 0x05
+#define LEAPRAID_TM_TASKTYPE_CLEAR_TASK_SET 0x06
+#define LEAPRAID_TM_TASKTYPE_QUERY_TASK 0x07
+#define LEAPRAID_TM_TASKTYPE_CLEAR_ACA 0x08
+#define LEAPRAID_TM_TASKTYPE_QUERY_TASK_SET 0x09
+#define LEAPRAID_TM_TASKTYPE_QUERY_ASYNC_EVENT 0x0A
+
+#define LEAPRAID_TM_MSGFLAGS_LINK_RESET 0x00
+#define LEAPRAID_TM_RSP_INVALID_FRAME 0x02
+#define LEAPRAID_TM_RSP_TM_SUCCEEDED 0x08
+#define LEAPRAID_TM_RSP_IO_QUEUED_ON_ADAPTER 0x80
+
+/* scsi sep request defines */
+#define LEAPRAID_SEP_REQ_ACT_WRITE_STATUS 0x00
+#define LEAPRAID_SEP_REQ_FLG_DEVHDL_ADDRESS 0x00
+#define LEAPRAID_SEP_REQ_FLG_ENCLOSURE_SLOT_ADDRESS 0x01
+#define LEAPRAID_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT 0x00000040
+
+/* adapter capability flags */
+#define LEAPRAID_ADAPTER_FEATURES_CAP_ATOMIC_REQ 0x00080000
+#define LEAPRAID_ADAPTER_FEATURES_CAP_RDPQ_ARRAY_CAPABLE 0x00040000
+#define LEAPRAID_ADAPTER_FEATURES_CAP_EVENT_REPLAY 0x00002000
+#define LEAPRAID_ADAPTER_FEATURES_CAP_INTEGRATED_RAID 0x00001000
+
+/* event code definitions for the firmware */
+#define LEAPRAID_EVT_SAS_DEV_STATUS_CHANGE 0x000F
+#define LEAPRAID_EVT_SAS_DISCOVERY 0x0016
+#define LEAPRAID_EVT_SAS_TOPO_CHANGE_LIST 0x001C
+#define LEAPRAID_EVT_SAS_ENCL_DEV_STATUS_CHANGE 0x001D
+#define LEAPRAID_EVT_IR_CHANGE 0x0020
+#define LEAPRAID_EVT_TURN_ON_PFA_LED 0xFFFC
+#define LEAPRAID_EVT_SCAN_DEV_DONE 0xFFFD
+#define LEAPRAID_EVT_REMOVE_DEAD_DEV 0xFFFF
+#define LEAPRAID_MAX_EVENT_NUM 128
+
+#define LEAPRAID_EVT_SAS_DEV_STAT_RC_INTERNAL_DEV_RESET 0x08
+#define LEAPRAID_EVT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET 0x0E
+
+/* raid configuration change event */
+#define LEAPRAID_EVT_IR_RC_VOLUME_ADD 0x01
+#define LEAPRAID_EVT_IR_RC_VOLUME_DELETE 0x02
+#define LEAPRAID_EVT_IR_RC_PD_HIDDEN_TO_ADD 0x03
+#define LEAPRAID_EVT_IR_RC_PD_UNHIDDEN_TO_DELETE 0x04
+#define LEAPRAID_EVT_IR_RC_PD_CREATED_TO_HIDE 0x05
+#define LEAPRAID_EVT_IR_RC_PD_DELETED_TO_EXPOSE 0x06
+
+/* sas topology change event */
+#define LEAPRAID_EVT_SAS_TOPO_ES_NO_EXPANDER 0x00
+#define LEAPRAID_EVT_SAS_TOPO_ES_ADDED 0x01
+#define LEAPRAID_EVT_SAS_TOPO_ES_NOT_RESPONDING 0x02
+#define LEAPRAID_EVT_SAS_TOPO_ES_RESPONDING 0x03
+
+#define LEAPRAID_EVT_SAS_TOPO_RC_MASK 0x0F
+#define LEAPRAID_EVT_SAS_TOPO_RC_CLEAR_MASK 0xF0
+#define LEAPRAID_EVT_SAS_TOPO_RC_TARG_ADDED 0x01
+#define LEAPRAID_EVT_SAS_TOPO_RC_TARG_NOT_RESPONDING 0x02
+#define LEAPRAID_EVT_SAS_TOPO_RC_PHY_CHANGED 0x03
+
+/* sas discovery event defines */
+#define LEAPRAID_EVT_SAS_DISC_RC_STARTED 0x01
+#define LEAPRAID_EVT_SAS_DISC_RC_COMPLETED 0x02
+
+/* enclosure device status change event */
+#define LEAPRAID_EVT_SAS_ENCL_RC_ADDED 0x01
+#define LEAPRAID_EVT_SAS_ENCL_RC_NOT_RESPONDING 0x02
+
+/* device type and identifiers */
+#define LEAPRAID_DEVTYP_SEP 0x00004000
+#define LEAPRAID_DEVTYP_SSP_TGT 0x00000400
+#define LEAPRAID_DEVTYP_STP_TGT 0x00000200
+#define LEAPRAID_DEVTYP_SMP_TGT 0x00000100
+#define LEAPRAID_DEVTYP_SATA_DEV 0x00000080
+#define LEAPRAID_DEVTYP_SSP_INIT 0x00000040
+#define LEAPRAID_DEVTYP_STP_INIT 0x00000020
+#define LEAPRAID_DEVTYP_SMP_INIT 0x00000010
+#define LEAPRAID_DEVTYP_SATA_HOST 0x00000008
+
+#define LEAPRAID_DEVTYP_MASK_DEV_TYPE 0x00000007
+#define LEAPRAID_DEVTYP_NO_DEV 0x00000000
+#define LEAPRAID_DEVTYP_END_DEV 0x00000001
+#define LEAPRAID_DEVTYP_EDGE_EXPANDER 0x00000002
+#define LEAPRAID_DEVTYP_FANOUT_EXPANDER 0x00000003
+
+/* sas control operation */
+#define LEAPRAID_SAS_OP_PHY_LINK_RESET 0x06
+#define LEAPRAID_SAS_OP_PHY_HARD_RESET 0x07
+#define LEAPRAID_SAS_OP_SET_PARAMETER 0x0F
+
+/* boot device defines */
+#define LEAPRAID_BOOTDEV_FORM_MASK 0x0F
+#define LEAPRAID_BOOTDEV_FORM_NONE 0x00
+#define LEAPRAID_BOOTDEV_FORM_SAS_WWID 0x05
+#define LEAPRAID_BOOTDEV_FORM_ENC_SLOT 0x06
+#define LEAPRAID_BOOTDEV_FORM_DEV_NAME 0x07
+
+/**
+ * struct leapraid_reg_base - Register layout of the LeapRAID controller
+ *
+ * @db: Doorbell register used to signal commands or status to firmware
+ * @ws: Write sequence register for synchronizing doorbell operations
+ * @host_diag: Diagnostic register used for status or debug reporting
+ * @r1: Reserved
+ * @host_int_status: Interrupt status register reporting active interrupts
+ * @host_int_mask: Interrupt mask register enabling or disabling sources
+ * @r2: Reserved
+ * @rep_msg_host_idx: Reply message index for the next available reply slot
+ * @r3: Reserved
+ * @debug_log: DebugLog registers for firmware debug and diagnostic output
+ * @r4: Reserved
+ * @atomic_req_desc_post: Atomic register for single descriptor posting
+ * @adapter_log_buf_pos: Adapter log buffer write position
+ * @host_log_buf_pos: Host log buffer write position
+ * @r5: Reserved
+ * @rep_post_reg_idx: Array of reply post index registers, one per queue.
+ * The number of entries is defined by
+ * REP_POST_HOST_IDX_REG_CNT.
+ */
+struct leapraid_reg_base {
+ __le32 db;
+ __le32 ws;
+ __le32 host_diag;
+ __le32 r1[9];
+ __le32 host_int_status;
+ __le32 host_int_mask;
+ __le32 r2[4];
+ __le32 rep_msg_host_idx;
+ __le32 r3[13];
+ __le32 debug_log[LEAPRAID_DEBUGLOG_SZ_MAX];
+ __le32 r4[2];
+ __le32 atomic_req_desc_post;
+ __le32 adapter_log_buf_pos;
+ __le32 host_log_buf_pos;
+ __le32 r5[142];
+ struct leapraid_rep_post_reg_idx {
+ __le32 idx;
+ __le32 r1;
+ __le32 r2;
+ __le32 r3;
+ } rep_post_reg_idx[REP_POST_HOST_IDX_REG_CNT];
+} __packed;
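+
+/*
+ * Illustrative sketch (not part of this patch): after consuming reply
+ * descriptors on queue "q", the new host index is written back, tagged
+ * with the MSI-X index; "regs", "host_idx", "msix_idx" and "q" are
+ * hypothetical names.
+ *
+ *	writel(host_idx | (msix_idx << LEAPRAID_RPHI_MSIX_IDX_SHIFT),
+ *	       &regs->rep_post_reg_idx[q % REP_POST_HOST_IDX_REG_CNT].idx);
+ */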
+
+/**
+ * struct leapraid_atomic_req_desc - Atomic request descriptor
+ *
+ * @flg: Descriptor flag indicating the type of request (e.g. SCSI I/O)
+ * @msix_idx: MSI-X vector index used for interrupt routing
+ * @taskid: Unique task identifier associated with this request
+ */
+struct leapraid_atomic_req_desc {
+ u8 flg;
+ u8 msix_idx;
+ __le16 taskid;
+};
+
+/**
+ * union leapraid_rep_desc_union - Unified reply descriptor format
+ *
+ * @dflt_rep: Default reply descriptor containing basic completion info
+ * @dflt_rep.rep_flg: Reply flag indicating reply type or status
+ * @dflt_rep.msix_idx: MSI-X index for interrupt routing
+ * @dflt_rep.taskid: Task identifier matching the submitted request
+ * @r1: Reserved
+ *
+ * @addr_rep: Address reply descriptor used when firmware returns a
+ * memory address associated with the reply
+ * @addr_rep.rep_flg: Reply flag indicating reply type or status
+ * @addr_rep.msix_idx: MSI-X index for interrupt routing
+ * @addr_rep.taskid: Task identifier matching the submitted request
+ * @addr_rep.rep_frame_addr: Physical address of the reply frame
+ *
+ * @words: Raw 64-bit representation of the reply descriptor
+ * @u: Alternative access using 32-bit low/high words
+ * @u.low: Lower 32 bits of the descriptor
+ * @u.high: Upper 32 bits of the descriptor
+ */
+union leapraid_rep_desc_union {
+ struct leapraid_rep_desc {
+ u8 rep_flg;
+ u8 msix_idx;
+ __le16 taskid;
+ u8 r1[4];
+ } dflt_rep;
+ struct leapraid_add_rep_desc {
+ u8 rep_flg;
+ u8 msix_idx;
+ __le16 taskid;
+ __le32 rep_frame_addr;
+ } addr_rep;
+ __le64 words;
+ struct {
+ u32 low;
+ u32 high;
+ } u;
+} __packed __aligned(4);
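+
+/*
+ * Illustrative sketch (not part of this patch): a poll loop can detect
+ * an empty queue slot by its unused type bits before consuming it;
+ * "desc" is a hypothetical descriptor pointer.
+ *
+ *	if ((desc->dflt_rep.rep_flg & LEAPRAID_RPY_DESC_FLG_TYPE_MASK) ==
+ *	    LEAPRAID_RPY_DESC_FLG_UNUSED)
+ *		break;	// nothing new posted by the adapter
+ */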
+
+/**
+ * struct leapraid_req - Generic request header
+ *
+ * @func_dep1: Function-dependent parameter (low 16 bits)
+ * @r1: Reserved
+ * @func: Function code identifying the command type
+ * @r2: Reserved
+ */
+struct leapraid_req {
+ __le16 func_dep1;
+ u8 r1;
+ u8 func;
+ u8 r2[8];
+};
+
+/**
+ * struct leapraid_rep - Generic reply header
+ *
+ * @r1: Reserved
+ * @msg_len: Length of the reply message in bytes
+ * @function: Function code corresponding to the request
+ * @r2: Reserved
+ * @adapter_status: Status code reported by the adapter
+ * @r3: Reserved
+ */
+struct leapraid_rep {
+ u8 r1[2];
+ u8 msg_len;
+ u8 function;
+ u8 r2[10];
+ __le16 adapter_status;
+ u8 r3[4];
+};
+
+/**
+ * struct leapraid_sge_simple32 - 32-bit simple scatter-gather entry
+ *
+ * @flg_and_len: Combined field for flags and segment length
+ * @addr: 32-bit physical address of the data buffer
+ */
+struct leapraid_sge_simple32 {
+ __le32 flg_and_len;
+ __le32 addr;
+};
+
+/**
+ * struct leapraid_sge_simple64 - 64-bit simple scatter-gather entry
+ *
+ * @flg_and_len: Combined field for flags and segment length
+ * @addr: 64-bit physical address of the data buffer
+ */
+struct leapraid_sge_simple64 {
+ __le32 flg_and_len;
+ __le64 addr;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_sge_simple_union - Unified 32/64-bit SGE representation
+ *
+ * @flg_and_len: Combined field for flags and segment length
+ * @u.addr32: 32-bit address field
+ * @u.addr64: 64-bit address field
+ */
+struct leapraid_sge_simple_union {
+ __le32 flg_and_len;
+ union {
+ __le32 addr32;
+ __le64 addr64;
+ } u;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_sge_chain_union - Chained scatter-gather entry
+ *
+ * @len: Length of the chain descriptor
+ * @next_chain_offset: Offset to the next SGE chain
+ * @flg: Flags indicating chain or termination properties
+ * @u.addr32: 32-bit physical address
+ * @u.addr64: 64-bit physical address
+ */
+struct leapraid_sge_chain_union {
+ __le16 len;
+ u8 next_chain_offset;
+ u8 flg;
+ union {
+ __le32 addr32;
+ __le64 addr64;
+ } u;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_ieee_sge_simple32 - IEEE 32-bit simple SGE format
+ *
+ * @addr: 32-bit physical address of the data buffer
+ * @flg_and_len: Combined field for flags and data length
+ */
+struct leapraid_ieee_sge_simple32 {
+ __le32 addr;
+ __le32 flg_and_len;
+};
+
+/**
+ * struct leapraid_ieee_sge_simple64 - IEEE 64-bit simple SGE format
+ *
+ * @addr: 64-bit physical address of the data buffer
+ * @len: Length of the data segment
+ * @r1: Reserved
+ * @flg: Flags indicating transfer properties
+ */
+struct leapraid_ieee_sge_simple64 {
+ __le64 addr;
+ __le32 len;
+ u8 r1[3];
+ u8 flg;
+} __packed __aligned(4);
+
+/**
+ * union leapraid_ieee_sge_simple_union - Unified IEEE SGE format
+ *
+ * @simple32: IEEE 32-bit simple SGE entry
+ * @simple64: IEEE 64-bit simple SGE entry
+ */
+union leapraid_ieee_sge_simple_union {
+ struct leapraid_ieee_sge_simple32 simple32;
+ struct leapraid_ieee_sge_simple64 simple64;
+};
+
+/**
+ * union leapraid_ieee_sge_chain_union - Unified IEEE SGE chain format
+ *
+ * @chain32: IEEE 32-bit chain SGE entry
+ * @chain64: IEEE 64-bit chain SGE entry
+ */
+union leapraid_ieee_sge_chain_union {
+ struct leapraid_ieee_sge_simple32 chain32;
+ struct leapraid_ieee_sge_simple64 chain64;
+};
+
+/**
+ * struct leapraid_chain64_ieee_sg - 64-bit IEEE chain SGE descriptor
+ *
+ * @addr: Physical address of the next chain segment
+ * @len: Length of the current SGE
+ * @r1: Reserved
+ * @next_chain_offset: Offset to the next chain element
+ * @flg: Flags that describe SGE attributes
+ */
+struct leapraid_chain64_ieee_sg {
+ __le64 addr;
+ __le32 len;
+ u8 r1[2];
+ u8 next_chain_offset;
+ u8 flg;
+} __packed __aligned(4);
+
+/**
+ * union leapraid_ieee_sge_io_union - IEEE-style SGE union for I/O
+ *
+ * @ieee_simple: Simple IEEE SGE descriptor
+ * @ieee_chain: IEEE chain SGE descriptor
+ */
+union leapraid_ieee_sge_io_union {
+ struct leapraid_ieee_sge_simple64 ieee_simple;
+ struct leapraid_chain64_ieee_sg ieee_chain;
+};
+
+/**
+ * union leapraid_simple_sge_union - Union of simple SGE descriptors
+ *
+ * @leapio_simple: LeapIO-style simple SGE
+ * @ieee_simple: IEEE-style simple SGE
+ */
+union leapraid_simple_sge_union {
+ struct leapraid_sge_simple_union leapio_simple;
+ union leapraid_ieee_sge_simple_union ieee_simple;
+};
+
+/**
+ * union leapraid_sge_io_union - Combined SGE union for all I/O types
+ *
+ * @leapio_simple: LeapIO simple SGE format
+ * @leapio_chain: LeapIO chain SGE format
+ * @ieee_simple: IEEE simple SGE format
+ * @ieee_chain: IEEE chain SGE format
+ */
+union leapraid_sge_io_union {
+ struct leapraid_sge_simple_union leapio_simple;
+ struct leapraid_sge_chain_union leapio_chain;
+ union leapraid_ieee_sge_simple_union ieee_simple;
+ union leapraid_ieee_sge_chain_union ieee_chain;
+};
+
+/**
+ * struct leapraid_cfg_pg_header - Standard configuration page header
+ *
+ * @r1: Reserved
+ * @page_len: Length of the page in 4-byte units
+ * @page_num: Page number
+ * @page_type: Page type
+ */
+struct leapraid_cfg_pg_header {
+ u8 r1;
+ u8 page_len;
+ u8 page_num;
+ u8 page_type;
+};
+
+/**
+ * struct leapraid_cfg_ext_pg_header - Extended configuration page header
+ *
+ * @r1: Reserved
+ * @r2: Reserved
+ * @page_num: Page number
+ * @page_type: Page type
+ * @ext_page_len: Extended page length
+ * @ext_page_type: Extended page type
+ * @r3: Reserved
+ */
+struct leapraid_cfg_ext_pg_header {
+ u8 r1;
+ u8 r2;
+ u8 page_num;
+ u8 page_type;
+ __le16 ext_page_len;
+ u8 ext_page_type;
+ u8 r3;
+};
+
+/**
+ * struct leapraid_cfg_req - Configuration request message
+ *
+ * @action: Requested action type
+ * @sgl_flag: SGL flag field
+ * @chain_offset: Offset to next chain SGE
+ * @func: Function code
+ * @ext_page_len: Extended page length
+ * @ext_page_type: Extended page type
+ * @msg_flag: Message flags
+ * @r1: Reserved
+ * @header: Configuration page header
+ * @page_addr: Address of the page buffer
+ * @page_buf_sge: SGE describing the page buffer
+ */
+struct leapraid_cfg_req {
+ u8 action;
+ u8 sgl_flag;
+ u8 chain_offset;
+ u8 func;
+ __le16 ext_page_len;
+ u8 ext_page_type;
+ u8 msg_flag;
+ u8 r1[12];
+ struct leapraid_cfg_pg_header header;
+ __le32 page_addr;
+ union leapraid_sge_io_union page_buf_sge;
+};
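+
+/*
+ * Illustrative sketch (not part of this patch): config pages are
+ * commonly fetched in two steps - ACT_PAGE_HEADER first to learn the
+ * page length, then ACT_PAGE_READ_CUR with a buffer of that size.
+ * "req" is a hypothetical struct leapraid_cfg_req pointer.
+ *
+ *	req->func = LEAPRAID_FUNC_CONFIG_OP;
+ *	req->action = LEAPRAID_CFG_ACT_PAGE_HEADER;
+ *	req->header.page_type = LEAPRAID_CFG_PT_BIOS;
+ *	req->header.page_num = LEAPRAID_CFG_PAGE_NUM_BIOS3;
+ *	// the reply returns the real header; reissue with
+ *	// LEAPRAID_CFG_ACT_PAGE_READ_CUR and a right-sized buffer
+ */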
+
+/**
+ * struct leapraid_cfg_rep - Configuration reply message
+ *
+ * @action: Action type from the request
+ * @r1: Reserved
+ * @msg_len: Message length in bytes
+ * @func: Function code
+ * @ext_page_len: Extended page length
+ * @ext_page_type: Extended page type
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @adapter_status: Adapter status code
+ * @r3: Reserved
+ * @header: Configuration page header
+ */
+struct leapraid_cfg_rep {
+ u8 action;
+ u8 r1;
+ u8 msg_len;
+ u8 func;
+ __le16 ext_page_len;
+ u8 ext_page_type;
+ u8 msg_flag;
+ u8 r2[6];
+ __le16 adapter_status;
+ u8 r3[4];
+ struct leapraid_cfg_pg_header header;
+};
+
+/**
+ * struct leapraid_boot_dev_format_sas_wwid - Boot device identified by SAS WWID
+ *
+ * @sas_addr: SAS address of the device
+ * @lun: Logical unit number
+ * @r1: Reserved
+ */
+struct leapraid_boot_dev_format_sas_wwid {
+ __le64 sas_addr;
+ u8 lun[8];
+ u8 r1[8];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_boot_dev_format_enc_slot - Boot device identified by enclosure slot
+ *
+ * @enc_lid: Enclosure logical ID
+ * @r1: Reserved
+ * @slot_num: Slot number in the enclosure
+ * @r2: Reserved
+ */
+struct leapraid_boot_dev_format_enc_slot {
+ __le64 enc_lid;
+ u8 r1[8];
+ __le16 slot_num;
+ u8 r2[6];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_boot_dev_format_dev_name - Boot device by device name
+ *
+ * @dev_name: Device name identifier
+ * @lun: Logical unit number
+ * @r1: Reserved
+ */
+struct leapraid_boot_dev_format_dev_name {
+ __le64 dev_name;
+ u8 lun[8];
+ u8 r1[8];
+} __packed __aligned(4);
+
+/**
+ * union leapraid_boot_dev_format - Boot device format union
+ *
+ * @sas_wwid: Format using SAS WWID and LUN
+ * @enc_slot: Format using enclosure slot and ID
+ * @dev_name: Format using device name and LUN
+ */
+union leapraid_boot_dev_format {
+ struct leapraid_boot_dev_format_sas_wwid sas_wwid;
+ struct leapraid_boot_dev_format_enc_slot enc_slot;
+ struct leapraid_boot_dev_format_dev_name dev_name;
+};
+
+/**
+ * struct leapraid_bios_page2 - BIOS configuration page 2
+ *
+ * @header: Configuration page header
+ * @r1: Reserved
+ * @requested_boot_dev_form: Format type of the requested boot device
+ * @r2: Reserved
+ * @requested_boot_dev: Boot device requested by BIOS or user
+ * @requested_alt_boot_dev_form: Format of the alternate boot device
+ * @r3: Reserved
+ * @requested_alt_boot_dev: Alternate boot device requested
+ * @current_boot_dev_form: Format type of the active boot device
+ * @r4: Reserved
+ * @current_boot_dev: Currently active boot device in use
+ */
+struct leapraid_bios_page2 {
+ struct leapraid_cfg_pg_header header;
+ u8 r1[24];
+ u8 requested_boot_dev_form;
+ u8 r2[3];
+ union leapraid_boot_dev_format requested_boot_dev;
+ u8 requested_alt_boot_dev_form;
+ u8 r3[3];
+ union leapraid_boot_dev_format requested_alt_boot_dev;
+ u8 current_boot_dev_form;
+ u8 r4[3];
+ union leapraid_boot_dev_format current_boot_dev;
+};
+
+/**
+ * struct leapraid_bios_page3 - BIOS configuration page 3
+ *
+ * @header: Configuration page header
+ * @r1: Reserved
+ * @bios_version: BIOS firmware version number
+ * @r2: Reserved
+ */
+struct leapraid_bios_page3 {
+ struct leapraid_cfg_pg_header header;
+ u8 r1[4];
+ __le32 bios_version;
+ u8 r2[84];
+};
+
+/**
+ * struct leapraid_raidvol0_phys_disk - Physical disk in RAID volume
+ *
+ * @r1: Reserved
+ * @phys_disk_num: Physical disk number within the RAID volume
+ * @r2: Reserved
+ */
+struct leapraid_raidvol0_phys_disk {
+ u8 r1[2];
+ u8 phys_disk_num;
+ u8 r2;
+};
+
+/**
+ * struct leapraid_raidvol_p0 - RAID volume configuration page 0
+ *
+ * @header: Configuration page header
+ * @dev_hdl: Device handle for the RAID volume
+ * @volume_state: State of the RAID volume
+ * @volume_type: RAID type
+ * @r1: Reserved
+ * @num_phys_disks: Number of physical disks in the volume
+ * @r2: Reserved
+ * @phys_disk: Array of physical disks in this volume
+ */
+struct leapraid_raidvol_p0 {
+ struct leapraid_cfg_pg_header header;
+ __le16 dev_hdl;
+ u8 volume_state;
+ u8 volume_type;
+ u8 r1[28];
+ u8 num_phys_disks;
+ u8 r2[3];
+ struct leapraid_raidvol0_phys_disk phys_disk[];
+};
+
+/**
+ * struct leapraid_raidvol_p1 - RAID volume configuration page 1
+ *
+ * @header: Configuration page header
+ * @dev_hdl: Device handle of the RAID volume
+ * @r1: Reserved
+ * @wwid: World-wide identifier for the volume
+ * @r2: Reserved
+ */
+struct leapraid_raidvol_p1 {
+ struct leapraid_cfg_pg_header header;
+ __le16 dev_hdl;
+ u8 r1[42];
+ __le64 wwid;
+ u8 r2[8];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_raidpd_p0 - Physical disk configuration page 0
+ *
+ * @header: Configuration page header
+ * @dev_hdl: Device handle of the physical disk
+ * @r1: Reserved
+ * @phys_disk_num: Physical disk number
+ * @r2: Reserved
+ */
+struct leapraid_raidpd_p0 {
+ struct leapraid_cfg_pg_header header;
+ __le16 dev_hdl;
+ u8 r1;
+ u8 phys_disk_num;
+ u8 r2[112];
+};
+
+/**
+ * struct leapraid_sas_io_unit0_phy_info - PHY info for SAS I/O unit
+ *
+ * @port: Port number the PHY belongs to
+ * @port_flg: Flags describing port status
+ * @phy_flg: Flags describing PHY status
+ * @neg_link_rate: Negotiated link rate of the PHY
+ * @controller_phy_dev_info: Controller PHY device info
+ * @attached_dev_hdl: Handle of attached device
+ * @controller_dev_hdl: Handle of the controller device
+ * @r1: Reserved
+ */
+struct leapraid_sas_io_unit0_phy_info {
+ u8 port;
+ u8 port_flg;
+ u8 phy_flg;
+ u8 neg_link_rate;
+ __le32 controller_phy_dev_info;
+ __le16 attached_dev_hdl;
+ __le16 controller_dev_hdl;
+ u8 r1[8];
+};
+
+/**
+ * struct leapraid_sas_io_unit_p0 - SAS I/O unit configuration page 0
+ *
+ * @header: Extended configuration page header
+ * @r1: Reserved
+ * @phy_num: Number of PHYs in this unit
+ * @r2: Reserved
+ * @phy_info: Array of PHY information
+ */
+struct leapraid_sas_io_unit_p0 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 r1[4];
+ u8 phy_num;
+ u8 r2[3];
+ struct leapraid_sas_io_unit0_phy_info phy_info[];
+};
+
+/**
+ * struct leapraid_sas_io_unit1_phy_info - Placeholder for SAS unit page 1 PHY
+ *
+ * @r1: Reserved
+ */
+struct leapraid_sas_io_unit1_phy_info {
+ u8 r1[12];
+};
+
+/**
+ * struct leapraid_sas_io_unit_page1 - SAS I/O unit configuration page 1
+ *
+ * @header: Extended configuration page header
+ * @r1: Reserved
+ * @narrowport_max_queue_depth: Maximum queue depth for narrow ports
+ * @r2: Reserved
+ * @wideport_max_queue_depth: Maximum queue depth for wide ports
+ * @r3: Reserved
+ * @sata_max_queue_depth: Maximum SATA queue depth
+ * @r4: Reserved
+ * @phy_info: Array of PHY info structures
+ */
+struct leapraid_sas_io_unit_page1 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 r1[2];
+ __le16 narrowport_max_queue_depth;
+ u8 r2[2];
+ __le16 wideport_max_queue_depth;
+ u8 r3;
+ u8 sata_max_queue_depth;
+ u8 r4[2];
+ struct leapraid_sas_io_unit1_phy_info phy_info[];
+};
+
+/**
+ * struct leapraid_exp_p0 - SAS expander page 0
+ *
+ * @header: Extended page header
+ * @physical_port: Physical port number
+ * @r1: Reserved
+ * @enc_hdl: Enclosure handle
+ * @sas_address: SAS address of the expander
+ * @r2: Reserved
+ * @dev_hdl: Device handle of this expander
+ * @parent_dev_hdl: Device handle of parent expander
+ * @r3: Reserved
+ * @phy_num: Number of PHYs
+ * @r4: Reserved
+ */
+struct leapraid_exp_p0 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 physical_port;
+ u8 r1;
+ __le16 enc_hdl;
+ __le64 sas_address;
+ u8 r2[4];
+ __le16 dev_hdl;
+ __le16 parent_dev_hdl;
+ u8 r3[4];
+ u8 phy_num;
+ u8 r4[27];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_exp_p1 - SAS expander page 1
+ *
+ * @header: Extended page header
+ * @r1: Reserved
+ * @p_link_rate: PHY link rate
+ * @hw_link_rate: Hardware supported link rate
+ * @attached_dev_hdl: Attached device handle
+ * @r2: Reserved
+ * @neg_link_rate: Negotiated link rate
+ * @r3: Reserved
+ */
+struct leapraid_exp_p1 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 r1[8];
+ u8 p_link_rate;
+ u8 hw_link_rate;
+ __le16 attached_dev_hdl;
+ u8 r2[11];
+ u8 neg_link_rate;
+ u8 r3[12];
+};
+
+/**
+ * struct leapraid_sas_dev_p0 - SAS device page 0
+ *
+ * @header: Extended configuration page header
+ * @slot: Slot number
+ * @enc_hdl: Enclosure handle
+ * @sas_address: SAS address
+ * @parent_dev_hdl: Parent device handle
+ * @phy_num: Number of PHYs
+ * @r1: Reserved
+ * @dev_hdl: Device handle
+ * @r2: Reserved
+ * @dev_info: Device information
+ * @flg: Flags
+ * @physical_port: Physical port number
+ * @max_port_connections: Maximum port connections
+ * @dev_name: Device name
+ * @port_groups: Number of port groups
+ * @r3: Reserved
+ * @enc_level: Enclosure level
+ * @connector_name: Connector identifier
+ * @r4: Reserved
+ */
+struct leapraid_sas_dev_p0 {
+ struct leapraid_cfg_ext_pg_header header;
+ __le16 slot;
+ __le16 enc_hdl;
+ __le64 sas_address;
+ __le16 parent_dev_hdl;
+ u8 phy_num;
+ u8 r1;
+ __le16 dev_hdl;
+ u8 r2[2];
+ __le32 dev_info;
+ __le16 flg;
+ u8 physical_port;
+ u8 max_port_connections;
+ __le64 dev_name;
+ u8 port_groups;
+ u8 r3[2];
+ u8 enc_level;
+ u8 connector_name[4];
+ u8 r4[4];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_sas_phy_p0 - SAS PHY configuration page 0
+ *
+ * @header: Extended configuration page header
+ * @r1: Reserved
+ * @attached_dev_hdl: Handle of attached device
+ * @r2: Reserved
+ * @p_link_rate: PHY link rate
+ * @hw_link_rate: Hardware supported link rate
+ * @r3: Reserved
+ * @phy_info: PHY information
+ * @neg_link_rate: Negotiated link rate
+ * @r4: Reserved
+ */
+struct leapraid_sas_phy_p0 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 r1[4];
+ __le16 attached_dev_hdl;
+ u8 r2[6];
+ u8 p_link_rate;
+ u8 hw_link_rate;
+ u8 r3[2];
+ __le32 phy_info;
+ u8 neg_link_rate;
+ u8 r4[3];
+};
+
+/**
+ * struct leapraid_enc_p0 - SAS enclosure page 0
+ *
+ * @header: Extended configuration page header
+ * @r1: Reserved
+ * @enc_lid: Enclosure logical ID
+ * @r2: Reserved
+ * @enc_hdl: Enclosure handle
+ * @r3: Reserved
+ */
+struct leapraid_enc_p0 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 r1[4];
+ __le64 enc_lid;
+ u8 r2[2];
+ __le16 enc_hdl;
+ u8 r3[15];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_raid_cfg_p0_element - RAID configuration element
+ *
+ * @element_flg: Element flags
+ * @vol_dev_hdl: Volume device handle
+ * @r1: Reserved
+ * @phys_disk_dev_hdl: Physical disk device handle
+ */
+struct leapraid_raid_cfg_p0_element {
+ __le16 element_flg;
+ __le16 vol_dev_hdl;
+ u8 r1[2];
+ __le16 phys_disk_dev_hdl;
+};
+
+/**
+ * struct leapraid_raid_cfg_p0 - RAID configuration page 0
+ *
+ * @header: Extended configuration page header
+ * @r1: Reserved
+ * @cfg_num: Configuration number
+ * @r2: Reserved
+ * @elements_num: Number of RAID elements
+ * @r3: Reserved
+ * @cfg_element: Array of RAID elements
+ */
+struct leapraid_raid_cfg_p0 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 r1[3];
+ u8 cfg_num;
+ u8 r2[32];
+ u8 elements_num;
+ u8 r3[3];
+ struct leapraid_raid_cfg_p0_element cfg_element[];
+};
+
+/**
+ * union leapraid_mpi_scsi_io_cdb_union - SCSI I/O CDB or simple SGE
+ *
+ * @cdb32: 32-byte SCSI command descriptor block
+ * @sge: Simple SGE format
+ */
+union leapraid_mpi_scsi_io_cdb_union {
+ u8 cdb32[32];
+ struct leapraid_sge_simple_union sge;
+};
+
+/**
+ * struct leapraid_mpi_scsiio_req - MPI SCSI I/O request
+ *
+ * @dev_hdl: Device handle for the target
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r1: Reserved
+ * @msg_flg: Message flags
+ * @r2: Reserved
+ * @sense_buffer_low_add: Lower 32-bit address of sense buffer
+ * @dma_flag: DMA flags
+ * @r3: Reserved
+ * @sense_buffer_len: Sense buffer length
+ * @r4: Reserved
+ * @sgl_offset0: SGL offset 0
+ * @sgl_offset1: SGL offset 1
+ * @sgl_offset2: SGL offset 2
+ * @sgl_offset3: SGL offset 3
+ * @skip_count: Bytes to skip before transfer
+ * @data_len: Length of data transfer
+ * @bi_dir_data_len: Bi-directional transfer length
+ * @io_flg: I/O flags
+ * @eedp_flag: EEDP flags
+ * @eedp_block_size: EEDP block size
+ * @r5: Reserved
+ * @secondary_ref_tag: Secondary reference tag
+ * @secondary_app_tag: Secondary application tag
+ * @app_tag_trans_mask: Application tag mask
+ * @lun: Logical Unit Number
+ * @ctrl: Control flags
+ * @cdb: SCSI Command Descriptor Block or simple SGE
+ * @sgl: Scatter-gather list
+ */
+struct leapraid_mpi_scsiio_req {
+ __le16 dev_hdl;
+ u8 chain_offset;
+ u8 func;
+ u8 r1[3];
+ u8 msg_flg;
+ u8 r2[4];
+ __le32 sense_buffer_low_add;
+ u8 dma_flag;
+ u8 r3;
+ u8 sense_buffer_len;
+ u8 r4;
+ u8 sgl_offset0;
+ u8 sgl_offset1;
+ u8 sgl_offset2;
+ u8 sgl_offset3;
+ __le32 skip_count;
+ __le32 data_len;
+ __le32 bi_dir_data_len;
+ __le16 io_flg;
+ __le16 eedp_flag;
+ __le16 eedp_block_size;
+ u8 r5[2];
+ __le32 secondary_ref_tag;
+ __le16 secondary_app_tag;
+ __le16 app_tag_trans_mask;
+ u8 lun[8];
+ __le32 ctrl;
+ union leapraid_mpi_scsi_io_cdb_union cdb;
+ union leapraid_sge_io_union sgl;
+};
+
+/**
+ * union leapraid_scsi_io_cdb_union - SCSI I/O CDB or IEEE simple SGE
+ *
+ * @cdb32: 32-byte SCSI CDB
+ * @sge: IEEE simple 64-bit SGE
+ */
+union leapraid_scsi_io_cdb_union {
+ u8 cdb32[32];
+ struct leapraid_ieee_sge_simple64 sge;
+};
+
+/**
+ * struct leapraid_scsiio_req - SCSI I/O request
+ *
+ * @dev_hdl: Device handle
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r1: Reserved
+ * @msg_flg: Message flags
+ * @r2: Reserved
+ * @sense_buffer_low_add: Lower 32-bit address of sense buffer
+ * @dma_flag: DMA flag
+ * @r3: Reserved
+ * @sense_buffer_len: Sense buffer length
+ * @r4: Reserved
+ * @sgl_offset0: SGL offset 0
+ * @sgl_offset1: SGL offset 1
+ * @sgl_offset2: SGL offset 2
+ * @sgl_offset3: SGL offset 3
+ * @skip_count: Bytes to skip before transfer
+ * @data_len: Length of data transfer
+ * @bi_dir_data_len: Bi-directional transfer length
+ * @io_flg: I/O flags
+ * @eedp_flag: EEDP flags
+ * @eedp_block_size: EEDP block size
+ * @r5: Reserved
+ * @secondary_ref_tag: Secondary reference tag
+ * @secondary_app_tag: Secondary application tag
+ * @app_tag_trans_mask: Application tag mask
+ * @lun: Logical Unit Number
+ * @ctrl: Control flags
+ * @cdb: SCSI Command Descriptor Block or simple SGE
+ * @sgl: Scatter-gather list
+ */
+struct leapraid_scsiio_req {
+ __le16 dev_hdl;
+ u8 chain_offset;
+ u8 func;
+ u8 r1[3];
+ u8 msg_flg;
+ u8 r2[4];
+ __le32 sense_buffer_low_add;
+ u8 dma_flag;
+ u8 r3;
+ u8 sense_buffer_len;
+ u8 r4;
+ u8 sgl_offset0;
+ u8 sgl_offset1;
+ u8 sgl_offset2;
+ u8 sgl_offset3;
+ __le32 skip_count;
+ __le32 data_len;
+ __le32 bi_dir_data_len;
+ __le16 io_flg;
+ __le16 eedp_flag;
+ __le16 eedp_block_size;
+ u8 r5[2];
+ __le32 secondary_ref_tag;
+ __le16 secondary_app_tag;
+ __le16 app_tag_trans_mask;
+ u8 lun[8];
+ __le32 ctrl;
+ union leapraid_scsi_io_cdb_union cdb;
+ union leapraid_ieee_sge_io_union sgl;
+};
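+
+/*
+ * Illustrative sketch (not part of this patch): the ctrl word of a SCSI
+ * I/O request combines the transfer direction with a queue attribute,
+ * e.g. for a simple-queued read of xfer_len bytes ("req" and "xfer_len"
+ * are hypothetical):
+ *
+ *	req->ctrl = cpu_to_le32(LEAPRAID_SCSIIO_CTRL_READ |
+ *				LEAPRAID_SCSIIO_CTRL_SIMPLEQ);
+ *	req->data_len = cpu_to_le32(xfer_len);
+ */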
+
+/**
+ * struct leapraid_scsiio_rep - SCSI I/O response
+ *
+ * @dev_hdl: Device handle
+ * @msg_len: Length of response message
+ * @func: Function code
+ * @r1: Reserved
+ * @msg_flg: Message flags
+ * @r2: Reserved
+ * @scsi_status: SCSI status
+ * @scsi_state: SCSI state
+ * @adapter_status: Adapter status
+ * @r3: Reserved
+ * @transfer_count: Number of bytes transferred
+ * @sense_count: Number of sense bytes
+ * @resp_info: Additional response info
+ * @task_tag: Task identifier
+ * @scsi_status_qualifier: SCSI status qualifier
+ * @bi_dir_trans_count: Bi-directional transfer count
+ * @r4: Reserved
+ */
+struct leapraid_scsiio_rep {
+ __le16 dev_hdl;
+ u8 msg_len;
+ u8 func;
+ u8 r1[3];
+ u8 msg_flg;
+ u8 r2[4];
+ u8 scsi_status;
+ u8 scsi_state;
+ __le16 adapter_status;
+ u8 r3[4];
+ __le32 transfer_count;
+ __le32 sense_count;
+ __le32 resp_info;
+ __le16 task_tag;
+ __le16 scsi_status_qualifier;
+ __le32 bi_dir_trans_count;
+ __le32 r4[3];
+};
+
+/**
+ * struct leapraid_scsi_tm_req - SCSI Task Management request
+ *
+ * @dev_hdl: Device handle
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r1: Reserved
+ * @task_type: Task management function type
+ * @r2: Reserved
+ * @msg_flg: Message flags
+ * @r3: Reserved
+ * @lun: Logical Unit Number
+ * @r4: Reserved
+ * @task_mid: Task identifier
+ * @r5: Reserved
+ */
+struct leapraid_scsi_tm_req {
+ __le16 dev_hdl;
+ u8 chain_offset;
+ u8 func;
+ u8 r1;
+ u8 task_type;
+ u8 r2;
+ u8 msg_flg;
+ u8 r3[4];
+ u8 lun[8];
+ u8 r4[28];
+ __le16 task_mid;
+ u8 r5[2];
+};
+
+/**
+ * struct leapraid_scsi_tm_rep - SCSI Task Management response
+ *
+ * @dev_hdl: Device handle
+ * @msg_len: Length of response message
+ * @func: Function code
+ * @resp_code: Response code
+ * @task_type: Task management type
+ * @r1: Reserved
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @adapter_status: Adapter status
+ * @r3: Reserved
+ * @termination_count: Count of terminated tasks
+ * @response_info: Additional response info
+ */
+struct leapraid_scsi_tm_rep {
+ __le16 dev_hdl;
+ u8 msg_len;
+ u8 func;
+ u8 resp_code;
+ u8 task_type;
+ u8 r1;
+ u8 msg_flag;
+ u8 r2[6];
+ __le16 adapter_status;
+ u8 r3[4];
+ __le32 termination_count;
+ __le32 response_info;
+};
+
+/**
+ * struct leapraid_sep_req - SEP (SCSI Enclosure Processor) request
+ *
+ * @dev_hdl: Device handle
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @act: Action to perform
+ * @flg: Flags
+ * @r1: Reserved
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @slot_status: Slot status
+ * @r3: Reserved
+ * @slot: Slot number
+ * @enc_hdl: Enclosure handle
+ */
+struct leapraid_sep_req {
+ __le16 dev_hdl;
+ u8 chain_offset;
+ u8 func;
+ u8 act;
+ u8 flg;
+ u8 r1;
+ u8 msg_flag;
+ u8 r2[4];
+ __le32 slot_status;
+ u8 r3[12];
+ __le16 slot;
+ __le16 enc_hdl;
+};
+
+/**
+ * struct leapraid_sep_rep - SEP response
+ *
+ * @dev_hdl: Device handle
+ * @msg_len: Message length
+ * @func: Function code
+ * @act: Action performed
+ * @flg: Flags
+ * @r1: Reserved
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @adapter_status: Adapter status
+ * @r3: Reserved
+ * @slot_status: Slot status
+ * @r4: Reserved
+ * @slot: Slot number
+ * @enc_hdl: Enclosure handle
+ */
+struct leapraid_sep_rep {
+ __le16 dev_hdl;
+ u8 msg_len;
+ u8 func;
+ u8 act;
+ u8 flg;
+ u8 r1;
+ u8 msg_flag;
+ u8 r2[6];
+ __le16 adapter_status;
+ u8 r3[4];
+ __le32 slot_status;
+ u8 r4[4];
+ __le16 slot;
+ __le16 enc_hdl;
+};
+
+/**
+ * struct leapraid_adapter_init_req - Adapter initialization request
+ *
+ * @who_init: Initiator of the initialization
+ * @r1: Reserved
+ * @chain_offset: Chain offset
+ * @func: Function code
+ * @r2: Reserved
+ * @msg_flg: Message flags
+ * @driver_ver: Driver version
+ * @msg_ver: Message version
+ * @header_ver: Header version
+ * @host_buf_addr: Host buffer address (non adapter-ref)
+ * @r4: Reserved
+ * @host_buf_size: Host buffer size (non adapter-ref)
+ * @host_msix_vectors: Number of host MSI-X vectors
+ * @r6: Reserved
+ * @req_frame_size: Request frame size
+ * @rep_desc_qd: Reply descriptor queue depth
+ * @rep_msg_qd: Reply message queue depth
+ * @sense_buffer_add_high: High 32-bit of sense buffer address
+ * @rep_msg_dma_high: High 32-bit of reply message DMA address
+ * @task_desc_base_addr: Base address of task descriptors
+ * @rep_desc_q_arr_addr: Address of reply descriptor queue array
+ * @rep_msg_addr_dma: Reply message DMA address
+ * @time_stamp: Timestamp
+ */
+struct leapraid_adapter_init_req {
+ u8 who_init;
+ u8 r1;
+ u8 chain_offset;
+ u8 func;
+ u8 r2[3];
+ u8 msg_flg;
+ __le32 driver_ver;
+ __le16 msg_ver;
+ __le16 header_ver;
+ __le32 host_buf_addr;
+ u8 r4[2];
+ u8 host_buf_size;
+ u8 host_msix_vectors;
+ u8 r6[2];
+ __le16 req_frame_size;
+ __le16 rep_desc_qd;
+ __le16 rep_msg_qd;
+ __le32 sense_buffer_add_high;
+ __le32 rep_msg_dma_high;
+ __le64 task_desc_base_addr;
+ __le64 rep_desc_q_arr_addr;
+ __le64 rep_msg_addr_dma;
+ __le64 time_stamp;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_rep_desc_q_arr - Reply descriptor queue array
+ *
+ * @rep_desc_base_addr: Base address of the reply descriptors
+ * @r1: Reserved
+ */
+struct leapraid_rep_desc_q_arr {
+ __le64 rep_desc_base_addr;
+ __le64 r1;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_adapter_init_rep - Adapter initialization reply
+ *
+ * @who_init: Initiator of the initialization
+ * @r1: Reserved
+ * @msg_len: Length of reply message
+ * @func: Function code
+ * @r2: Reserved
+ * @msg_flag: Message flags
+ * @r3: Reserved
+ * @adapter_status: Adapter status
+ * @r4: Reserved
+ */
+struct leapraid_adapter_init_rep {
+ u8 who_init;
+ u8 r1;
+ u8 msg_len;
+ u8 func;
+ u8 r2[3];
+ u8 msg_flag;
+ u8 r3[6];
+ __le16 adapter_status;
+ u8 r4[4];
+};
+
+/**
+ * struct leapraid_adapter_log_req - Adapter log request
+ *
+ * @action: Action code
+ * @type: Log type
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r1: Reserved
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @mbox: Mailbox for command-specific parameters
+ * @sge: Scatter-gather entry for data buffer
+ */
+struct leapraid_adapter_log_req {
+ u8 action;
+ u8 type;
+ u8 chain_offset;
+ u8 func;
+ u8 r1[3];
+ u8 msg_flag;
+ u8 r2[4];
+ union {
+ u8 b[12];
+ __le16 s[6];
+ __le32 w[3];
+ } mbox;
+ struct leapraid_sge_simple64 sge;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_adapter_log_rep - Adapter log reply
+ *
+ * @action: Action code echoed
+ * @type: Log type echoed
+ * @msg_len: Length of message
+ * @func: Function code
+ * @r1: Reserved
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @adapter_status: Status returned by adapter
+ */
+struct leapraid_adapter_log_rep {
+ u8 action;
+ u8 type;
+ u8 msg_len;
+ u8 func;
+ u8 r1[3];
+ u8 msg_flag;
+ u8 r2[6];
+ __le16 adapter_status;
+};
+
+/**
+ * struct leapraid_adapter_features_req - Request adapter features
+ *
+ * @r1: Reserved
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r2: Reserved
+ * @msg_flag: Message flags
+ * @r3: Reserved
+ */
+struct leapraid_adapter_features_req {
+ u8 r1[2];
+ u8 chain_offset;
+ u8 func;
+ u8 r2[3];
+ u8 msg_flag;
+ u8 r3[4];
+};
+
+/**
+ * struct leapraid_adapter_features_rep - Adapter features reply
+ *
+ * @msg_ver: Message version
+ * @msg_len: Length of reply message
+ * @func: Function code
+ * @header_ver: Header version
+ * @r1: Reserved
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @adapter_status: Adapter status
+ * @r3: Reserved
+ * @sata_max_qdepth: Maximum SATA queue depth
+ * @who_init: Who initialized the adapter
+ * @r4: Reserved
+ * @max_msix_vectors: Max MSI-X vectors supported
+ * @req_slot: Number of request slots
+ * @r5: Reserved
+ * @adapter_caps: Adapter capabilities
+ * @fw_version: Firmware version
+ * @sas_wide_max_qdepth: Max wide SAS queue depth
+ * @sas_narrow_max_qdepth: Max narrow SAS queue depth
+ * @r6: Reserved
+ * @hp_slot: Number of high-priority slots
+ * @r7: Reserved
+ * @max_volumes: Maximum supported volumes
+ * @max_dev_hdl: Maximum device handle
+ * @r8: Reserved
+ * @min_dev_hdl: Minimum device handle
+ * @r9: Reserved
+ */
+struct leapraid_adapter_features_rep {
+	__le16 msg_ver;
+	u8 msg_len;
+	u8 func;
+	__le16 header_ver;
+	u8 r1;
+	u8 msg_flag;
+	u8 r2[6];
+	__le16 adapter_status;
+ u8 r3[4];
+ u8 sata_max_qdepth;
+ u8 who_init;
+ u8 r4;
+ u8 max_msix_vectors;
+ __le16 req_slot;
+ u8 r5[2];
+ __le32 adapter_caps;
+ __le32 fw_version;
+ __le16 sas_wide_max_qdepth;
+ __le16 sas_narrow_max_qdepth;
+ u8 r6[10];
+ __le16 hp_slot;
+ u8 r7[3];
+ u8 max_volumes;
+ __le16 max_dev_hdl;
+ u8 r8[2];
+ __le16 min_dev_hdl;
+ u8 r9[6];
+};
+
+/**
+ * struct leapraid_scan_dev_req - Request to scan devices
+ *
+ * @r1: Reserved
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r2: Reserved
+ * @msg_flag: Message flags
+ * @r3: Reserved
+ */
+struct leapraid_scan_dev_req {
+ u8 r1[2];
+ u8 chain_offset;
+ u8 func;
+ u8 r2[3];
+ u8 msg_flag;
+ u8 r3[4];
+};
+
+/**
+ * struct leapraid_scan_dev_rep - Scan devices reply
+ *
+ * @r1: Reserved
+ * @msg_len: Length of message
+ * @func: Function code
+ * @r2: Reserved
+ * @msg_flag: Message flags
+ * @r3: Reserved
+ * @adapter_status: Adapter status
+ * @r4: Reserved
+ */
+struct leapraid_scan_dev_rep {
+ u8 r1[2];
+ u8 msg_len;
+ u8 func;
+ u8 r2[3];
+ u8 msg_flag;
+ u8 r3[6];
+ __le16 adapter_status;
+ u8 r4[4];
+};
+
+/**
+ * struct leapraid_evt_notify_req - Event notification request
+ *
+ * @r1: Reserved
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r2: Reserved
+ * @msg_flag: Message flags
+ * @r3: Reserved
+ * @evt_masks: Event masks to enable notifications
+ * @r4: Reserved
+ */
+struct leapraid_evt_notify_req {
+ u8 r1[2];
+ u8 chain_offset;
+ u8 func;
+ u8 r2[3];
+ u8 msg_flag;
+ u8 r3[12];
+ __le32 evt_masks[4];
+ u8 r4[8];
+};
+
+/**
+ * struct leapraid_evt_notify_rep - Event notification reply
+ *
+ * @evt_data_len: Length of event data
+ * @msg_len: Length of message
+ * @func: Function code
+ * @r1: Reserved
+ * @r2: Reserved
+ * @msg_flag: Message flags
+ * @r3: Reserved
+ * @adapter_status: Adapter status
+ * @r4: Reserved
+ * @evt: Event code
+ * @r5: Reserved
+ * @evt_data: Event data array
+ */
+struct leapraid_evt_notify_rep {
+ __le16 evt_data_len;
+ u8 msg_len;
+ u8 func;
+ u8 r1[2];
+ u8 r2;
+ u8 msg_flag;
+ u8 r3[6];
+ __le16 adapter_status;
+ u8 r4[4];
+ __le16 evt;
+ u8 r5[6];
+ __le32 evt_data[];
+};
+
+/**
+ * struct leapraid_evt_data_sas_dev_status_change - SAS device status change
+ *
+ * @task_tag: Task identifier
+ * @reason_code: Reason for status change
+ * @physical_port: Physical port number
+ * @r1: Reserved
+ * @dev_hdl: Device handle
+ * @r2: Reserved
+ * @sas_address: SAS address of device
+ * @lun: Logical Unit Number
+ */
+struct leapraid_evt_data_sas_dev_status_change {
+ __le16 task_tag;
+ u8 reason_code;
+ u8 physical_port;
+ u8 r1[2];
+ __le16 dev_hdl;
+ u8 r2[4];
+ __le64 sas_address;
+ u8 lun[8];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_evt_data_ir_change - IR (Integrated RAID) change event data
+ *
+ * @r1: Reserved
+ * @reason_code: Reason for IR change
+ * @r2: Reserved
+ * @vol_dev_hdl: Volume device handle
+ * @phys_disk_dev_hdl: Physical disk device handle
+ */
+struct leapraid_evt_data_ir_change {
+ u8 r1;
+ u8 reason_code;
+ u8 r2[2];
+ __le16 vol_dev_hdl;
+ __le16 phys_disk_dev_hdl;
+};
+
+/**
+ * struct leapraid_evt_data_sas_disc - SAS discovery event data
+ *
+ * @r1: Reserved
+ * @reason_code: Reason for discovery event
+ * @physical_port: Physical port number where event occurred
+ * @r2: Reserved
+ */
+struct leapraid_evt_data_sas_disc {
+ u8 r1;
+ u8 reason_code;
+ u8 physical_port;
+ u8 r2[5];
+};
+
+/**
+ * struct leapraid_evt_sas_topo_phy_entry - SAS topology PHY entry
+ *
+ * @attached_dev_hdl: Device handle attached to PHY
+ * @link_rate: Current link rate
+ * @phy_status: PHY status flags
+ */
+struct leapraid_evt_sas_topo_phy_entry {
+ __le16 attached_dev_hdl;
+ u8 link_rate;
+ u8 phy_status;
+};
+
+/**
+ * struct leapraid_evt_data_sas_topo_change_list - SAS topology change list
+ *
+ * @encl_hdl: Enclosure handle
+ * @exp_dev_hdl: Expander device handle
+ * @num_phys: Number of PHYs in this entry
+ * @r1: Reserved
+ * @entry_num: Entry index
+ * @start_phy_num: Start PHY number
+ * @exp_status: Expander status
+ * @physical_port: Physical port number
+ * @phy: Array of SAS PHY entries
+ */
+struct leapraid_evt_data_sas_topo_change_list {
+ __le16 encl_hdl;
+ __le16 exp_dev_hdl;
+ u8 num_phys;
+ u8 r1[3];
+ u8 entry_num;
+ u8 start_phy_num;
+ u8 exp_status;
+ u8 physical_port;
+ struct leapraid_evt_sas_topo_phy_entry phy[];
+};
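+
+/*
+ * Illustrative sketch (not part of this patch): each per-PHY entry in a
+ * topology change list carries its reason code in the low nibble of
+ * phy_status; "phy" is a hypothetical entry pointer.
+ *
+ *	switch (phy->phy_status & LEAPRAID_EVT_SAS_TOPO_RC_MASK) {
+ *	case LEAPRAID_EVT_SAS_TOPO_RC_TARG_ADDED:
+ *		// attached_dev_hdl names the newly added device
+ *		break;
+ *	case LEAPRAID_EVT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
+ *		// the device behind this PHY went away
+ *		break;
+ *	}
+ */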
+
+/**
+ * struct leapraid_evt_data_sas_enc_dev_status_change - SAS enclosure device status
+ *
+ * @enc_hdl: Enclosure handle
+ * @reason_code: Reason code for status change
+ * @physical_port: Physical port number
+ * @encl_logical_id: Enclosure logical ID
+ * @num_slots: Number of slots in enclosure
+ * @start_slot: First affected slot
+ * @phy_bits: Bitmap of affected PHYs
+ */
+struct leapraid_evt_data_sas_enc_dev_status_change {
+ __le16 enc_hdl;
+ u8 reason_code;
+ u8 physical_port;
+ __le64 encl_logical_id;
+ __le16 num_slots;
+ __le16 start_slot;
+ __le32 phy_bits;
+};
+
+/**
+ * struct leapraid_io_unit_ctrl_req - IO unit control request
+ *
+ * @op: Operation code
+ * @r1: Reserved
+ * @chain_offset: SGE chain offset
+ * @func: Function code
+ * @dev_hdl: Device handle
+ * @adapter_para: Adapter parameter selector
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @phy_num: PHY number
+ * @r3: Reserved
+ * @adapter_para_value: Value for adapter parameter
+ * @adapter_para_value2: Optional second parameter value
+ * @r4: Reserved
+ */
+struct leapraid_io_unit_ctrl_req {
+ u8 op;
+ u8 r1;
+ u8 chain_offset;
+ u8 func;
+	__le16 dev_hdl;
+ u8 adapter_para;
+ u8 msg_flag;
+ u8 r2[6];
+ u8 phy_num;
+ u8 r3[17];
+ __le32 adapter_para_value;
+ __le32 adapter_para_value2;
+ u8 r4[4];
+};
+
+/**
+ * struct leapraid_io_unit_ctrl_rep - IO unit control reply
+ *
+ * @op: Operation code echoed
+ * @r1: Reserved
+ * @func: Function code
+ * @dev_hdl: Device handle
+ * @r2: Reserved
+ */
+struct leapraid_io_unit_ctrl_rep {
+ u8 op;
+ u8 r1[2];
+ u8 func;
+ __le16 dev_hdl;
+ u8 r2[14];
+};
+
+/**
+ * struct leapraid_raid_act_req - RAID action request
+ *
+ * @act: RAID action code
+ * @r1: Reserved
+ * @func: Function code
+ * @r2: Reserved
+ * @phys_disk_num: Number of physical disks involved
+ * @r3: Reserved
+ * @action_data_sge: SGE describing action-specific data
+ */
+struct leapraid_raid_act_req {
+ u8 act;
+ u8 r1[2];
+ u8 func;
+ u8 r2[2];
+ u8 phys_disk_num;
+ u8 r3[13];
+ struct leapraid_sge_simple_union action_data_sge;
+};
+
+/**
+ * struct leapraid_raid_act_rep - RAID action reply
+ *
+ * @act: RAID action code echoed
+ * @r1: Reserved
+ * @func: Function code
+ * @vol_dev_hdl: Volume device handle
+ * @r2: Reserved
+ * @adapter_status: Status returned by adapter
+ * @r3: Reserved
+ */
+struct leapraid_raid_act_rep {
+ u8 act;
+ u8 r1[2];
+ u8 func;
+ __le16 vol_dev_hdl;
+ u8 r2[8];
+ __le16 adapter_status;
+ u8 r3[76];
+};
+
+/**
+ * struct leapraid_smp_passthrough_req - SMP passthrough request
+ *
+ * @passthrough_flg: Passthrough flags
+ * @physical_port: Target PHY port
+ * @r1: Reserved
+ * @func: Function code
+ * @req_data_len: Request data length
+ * @r2: Reserved
+ * @sas_address: SAS address of target device
+ * @r3: Reserved
+ * @sgl: Scatter-gather list describing request buffer
+ */
+struct leapraid_smp_passthrough_req {
+ u8 passthrough_flg;
+ u8 physical_port;
+ u8 r1;
+ u8 func;
+ __le16 req_data_len;
+ u8 r2[10];
+ __le64 sas_address;
+ u8 r3[8];
+ union leapraid_simple_sge_union sgl;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_smp_passthrough_rep - SMP passthrough reply
+ *
+ * @passthrough_flg: Passthrough flags echoed
+ * @physical_port: Target PHY port
+ * @r1: Reserved
+ * @func: Function code
+ * @resp_data_len: Length of response data
+ * @r2: Reserved
+ * @adapter_status: Adapter status
+ * @r3: Reserved
+ */
+struct leapraid_smp_passthrough_rep {
+ u8 passthrough_flg;
+ u8 physical_port;
+ u8 r1;
+ u8 func;
+ __le16 resp_data_len;
+ u8 r2[8];
+ __le16 adapter_status;
+ u8 r3[12];
+};
+
+/**
+ * struct leapraid_sas_io_unit_ctrl_req - SAS IO unit control request
+ *
+ * @op: Operation code
+ * @r1: Reserved
+ * @func: Function code
+ * @dev_hdl: Device handle
+ * @r2: Reserved
+ */
+struct leapraid_sas_io_unit_ctrl_req {
+ u8 op;
+ u8 r1[2];
+ u8 func;
+ __le16 dev_hdl;
+ u8 r2[38];
+};
+
+#endif /* LEAPRAID_H */
diff --git a/drivers/scsi/leapraid/leapraid_app.c b/drivers/scsi/leapraid/leapraid_app.c
new file mode 100644
index 000000000000..f838bd5aa20e
--- /dev/null
+++ b/drivers/scsi/leapraid/leapraid_app.c
@@ -0,0 +1,675 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 LeapIO Tech Inc.
+ *
+ * LeapRAID Storage and RAID Controller driver.
+ */
+
+#include <linux/compat.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+
+#include "leapraid_func.h"
+
+/* ioctl device file */
+#define LEAPRAID_DEV_NAME "leapraid_ctl"
+
+/* ioctl version */
+#define LEAPRAID_IOCTL_VERSION 0x07
+
+/* ioctl command */
+#define LEAPRAID_ADAPTER_INFO 17
+#define LEAPRAID_COMMAND 20
+#define LEAPRAID_EVENTQUERY 21
+#define LEAPRAID_EVENTREPORT 23
+
+/**
+ * struct leapraid_ioctl_header - IOCTL command header
+ * @adapter_id: Adapter identifier
+ * @port_number: Port identifier
+ * @max_data_size: Maximum data size for transfer
+ */
+struct leapraid_ioctl_header {
+ u32 adapter_id;
+ u32 port_number;
+ u32 max_data_size;
+};
+
+/**
+ * struct leapraid_ioctl_diag_reset - Diagnostic reset request
+ * @hdr: Common IOCTL header
+ */
+struct leapraid_ioctl_diag_reset {
+ struct leapraid_ioctl_header hdr;
+};
+
+/**
+ * struct leapraid_ioctl_pci_info - PCI device information
+ * @u: Union holding PCI bus/device/function information
+ * @u.bits.dev: PCI device number
+ * @u.bits.func: PCI function number
+ * @u.bits.bus: PCI bus number
+ * @u.word: Combined representation of PCI BDF
+ * @seg_id: PCI segment identifier
+ */
+struct leapraid_ioctl_pci_info {
+ union {
+ struct {
+ u32 dev:5;
+ u32 func:3;
+ u32 bus:24;
+ } bits;
+ u32 word;
+ } u;
+ u32 seg_id;
+};
+
+/**
+ * struct leapraid_ioctl_adapter_info - Adapter information for IOCTL
+ * @hdr: IOCTL header
+ * @adapter_type: Adapter type identifier
+ * @port_number: Port number
+ * @pci_id: PCI device ID
+ * @revision: Revision number
+ * @sub_dev: Subsystem device ID
+ * @sub_vendor: Subsystem vendor ID
+ * @r0: Reserved
+ * @fw_ver: Firmware version
+ * @bios_ver: BIOS version
+ * @driver_ver: Driver version
+ * @r1: Reserved
+ * @scsi_id: SCSI ID
+ * @r2: Reserved
+ * @pci_info: PCI information structure
+ */
+struct leapraid_ioctl_adapter_info {
+ struct leapraid_ioctl_header hdr;
+ u32 adapter_type;
+ u32 port_number;
+ u32 pci_id;
+ u32 revision;
+ u32 sub_dev;
+ u32 sub_vendor;
+ u32 r0;
+ u32 fw_ver;
+ u32 bios_ver;
+ u8 driver_ver[32];
+ u8 r1;
+ u8 scsi_id;
+ u16 r2;
+ struct leapraid_ioctl_pci_info pci_info;
+};
+
+/**
+ * struct leapraid_ioctl_command - IOCTL command structure
+ * @hdr: IOCTL header
+ * @timeout: Command timeout
+ * @rep_msg_buf_ptr: User pointer to reply message buffer
+ * @c2h_buf_ptr: User pointer to card-to-host data buffer
+ * @h2c_buf_ptr: User pointer to host-to-card data buffer
+ * @sense_data_ptr: User pointer to sense data buffer
+ * @max_rep_bytes: Maximum reply bytes
+ * @c2h_size: Card-to-host data size
+ * @h2c_size: Host-to-card data size
+ * @max_sense_bytes: Maximum sense data bytes
+ * @data_sge_offset: Data SGE offset
+ * @mf: Message frame data (flexible array)
+ */
+struct leapraid_ioctl_command {
+ struct leapraid_ioctl_header hdr;
+ u32 timeout;
+ void __user *rep_msg_buf_ptr;
+ void __user *c2h_buf_ptr;
+ void __user *h2c_buf_ptr;
+ void __user *sense_data_ptr;
+ u32 max_rep_bytes;
+ u32 c2h_size;
+ u32 h2c_size;
+ u32 max_sense_bytes;
+ u32 data_sge_offset;
+ u8 mf[];
+};
+
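+/**
+ * leapraid_ctl_lookup_adapter - find an adapter by its identifier
+ * @adapter_id: adapter identifier supplied by the ioctl caller
+ *
+ * Walks the global adapter list under leapraid_adapter_lock.
+ *
+ * Return: the matching adapter, or NULL if none is registered.
+ */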
+static struct leapraid_adapter *leapraid_ctl_lookup_adapter(int adapter_id)
+{
+ struct leapraid_adapter *adapter;
+
+ spin_lock(&leapraid_adapter_lock);
+ list_for_each_entry(adapter, &leapraid_adapter_list, list) {
+ if (adapter->adapter_attr.id == adapter_id) {
+ spin_unlock(&leapraid_adapter_lock);
+ return adapter;
+ }
+ }
+ spin_unlock(&leapraid_adapter_lock);
+
+ return NULL;
+}
+
+static void leapraid_ctl_scsiio_cmd(struct leapraid_adapter *adapter,
+ struct leapraid_req *ctl_sp_mpi_req, u16 taskid,
+ dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size,
+ u16 dev_hdl, void *psge)
+{
+ struct leapraid_mpi_scsiio_req *scsiio_request =
+ (struct leapraid_mpi_scsiio_req *)ctl_sp_mpi_req;
+
+ scsiio_request->sense_buffer_len = SCSI_SENSE_BUFFERSIZE;
+ scsiio_request->sense_buffer_low_add =
+ leapraid_get_sense_buffer_dma(adapter, taskid);
+ memset((void *)(&adapter->driver_cmds.ctl_cmd.sense),
+ 0, SCSI_SENSE_BUFFERSIZE);
+ leapraid_build_ieee_sg(adapter, psge, h2c_dma_addr,
+ h2c_size, c2h_dma_addr, c2h_size);
+ if (scsiio_request->func == LEAPRAID_FUNC_SCSIIO_REQ)
+ leapraid_fire_scsi_io(adapter, taskid, dev_hdl);
+ else
+ leapraid_fire_task(adapter, taskid);
+}
+
+static void leapraid_ctl_smp_passthrough_cmd(struct leapraid_adapter *adapter,
+ struct leapraid_req *ctl_sp_mpi_req,
+ u16 taskid,
+ dma_addr_t h2c_dma_addr,
+ size_t h2c_size,
+ dma_addr_t c2h_dma_addr,
+ size_t c2h_size,
+ void *psge, void *h2c)
+{
+ struct leapraid_smp_passthrough_req *smp_pt_req =
+ (struct leapraid_smp_passthrough_req *)ctl_sp_mpi_req;
+ u8 *data;
+
+ if (!adapter->adapter_attr.enable_mp)
+ smp_pt_req->physical_port = LEAPRAID_DISABLE_MP_PORT_ID;
+ if (smp_pt_req->passthrough_flg & LEAPRAID_SMP_PT_FLAG_SGL_PTR)
+ data = (u8 *)&smp_pt_req->sgl;
+ else
+ data = h2c;
+
+ if (data[1] == LEAPRAID_SMP_FN_REPORT_PHY_ERR_LOG &&
+ (data[10] == 1 || data[10] == 2))
+ adapter->reset_desc.adapter_link_resetting = true;
+ leapraid_build_ieee_sg(adapter, psge, h2c_dma_addr,
+ h2c_size, c2h_dma_addr, c2h_size);
+ leapraid_fire_task(adapter, taskid);
+}
+
+static void leapraid_ctl_fire_ieee_cmd(struct leapraid_adapter *adapter,
+ dma_addr_t h2c_dma_addr,
+ size_t h2c_size,
+ dma_addr_t c2h_dma_addr,
+ size_t c2h_size,
+ void *psge, u16 taskid)
+{
+ leapraid_build_ieee_sg(adapter, psge, h2c_dma_addr, h2c_size,
+ c2h_dma_addr, c2h_size);
+ leapraid_fire_task(adapter, taskid);
+}
+
+static void leapraid_ctl_sata_passthrough_cmd(struct leapraid_adapter *adapter,
+ dma_addr_t h2c_dma_addr,
+ size_t h2c_size,
+ dma_addr_t c2h_dma_addr,
+ size_t c2h_size,
+ void *psge, u16 taskid)
+{
+ leapraid_ctl_fire_ieee_cmd(adapter, h2c_dma_addr,
+ h2c_size, c2h_dma_addr,
+ c2h_size, psge, taskid);
+}
+
+static void leapraid_ctl_load_fw_cmd(struct leapraid_adapter *adapter,
+ dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size,
+ void *psge, u16 taskid)
+{
+ leapraid_ctl_fire_ieee_cmd(adapter, h2c_dma_addr,
+ h2c_size, c2h_dma_addr,
+ c2h_size, psge, taskid);
+}
+
+static void leapraid_ctl_fire_mpi_cmd(struct leapraid_adapter *adapter,
+ dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size,
+ void *psge, u16 taskid)
+{
+ leapraid_build_mpi_sg(adapter, psge, h2c_dma_addr,
+ h2c_size, c2h_dma_addr, c2h_size);
+ leapraid_fire_task(adapter, taskid);
+}
+
+static void leapraid_ctl_sas_io_unit_ctrl_cmd(struct leapraid_adapter *adapter,
+ struct leapraid_req *ctl_sp_mpi_req,
+ dma_addr_t h2c_dma_addr,
+ size_t h2c_size,
+ dma_addr_t c2h_dma_addr,
+ size_t c2h_size,
+ void *psge, u16 taskid)
+{
+ struct leapraid_sas_io_unit_ctrl_req *sas_io_unit_ctrl_req =
+ (struct leapraid_sas_io_unit_ctrl_req *)ctl_sp_mpi_req;
+
+ if (sas_io_unit_ctrl_req->op == LEAPRAID_SAS_OP_PHY_HARD_RESET ||
+ sas_io_unit_ctrl_req->op == LEAPRAID_SAS_OP_PHY_LINK_RESET)
+ adapter->reset_desc.adapter_link_resetting = true;
+ leapraid_ctl_fire_mpi_cmd(adapter, h2c_dma_addr,
+ h2c_size, c2h_dma_addr,
+ c2h_size, psge, taskid);
+}
+
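+/**
+ * leapraid_ctl_do_command - execute a pass-through request from user space
+ * @adapter: adapter that receives the request
+ * @karg: kernel copy of the ioctl command descriptor
+ * @mf: user pointer to the message frame to copy in
+ *
+ * Copies the message frame and any host-to-card payload into DMA-able
+ * buffers, fires the request, then waits for completion and copies the
+ * reply, card-to-host data and sense data back to user space. If the
+ * command times out, a target reset or full adapter reset is escalated
+ * depending on the function type.
+ */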
+static long leapraid_ctl_do_command(struct leapraid_adapter *adapter,
+ struct leapraid_ioctl_command *karg,
+ void __user *mf)
+{
+ struct leapraid_req *leap_mpi_req = NULL;
+ struct leapraid_req *ctl_sp_mpi_req = NULL;
+ u16 taskid;
+ void *h2c = NULL;
+ size_t h2c_size = 0;
+ dma_addr_t h2c_dma_addr = 0;
+ void *c2h = NULL;
+ size_t c2h_size = 0;
+ dma_addr_t c2h_dma_addr = 0;
+ void *psge;
+ unsigned long timeout;
+ u16 dev_hdl = LEAPRAID_INVALID_DEV_HANDLE;
+ bool issue_reset = false;
+ u32 sz;
+ long rc = 0;
+
+ rc = leapraid_check_adapter_is_op(adapter);
+ if (rc)
+ goto out;
+
+ leap_mpi_req = kzalloc(LEAPRAID_REQUEST_SIZE, GFP_KERNEL);
+ if (!leap_mpi_req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+	if (karg->data_sge_offset > (UINT_MAX / LEAPRAID_SGE_OFFSET_SIZE) ||
+	    karg->data_sge_offset * LEAPRAID_SGE_OFFSET_SIZE > LEAPRAID_REQUEST_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (copy_from_user(leap_mpi_req, mf,
+ karg->data_sge_offset * LEAPRAID_SGE_OFFSET_SIZE)) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ taskid = adapter->driver_cmds.ctl_cmd.taskid;
+
+ adapter->driver_cmds.ctl_cmd.status = LEAPRAID_CMD_PENDING;
+ memset((void *)(&adapter->driver_cmds.ctl_cmd.reply), 0,
+ LEAPRAID_REPLY_SIEZ);
+ ctl_sp_mpi_req = leapraid_get_task_desc(adapter, taskid);
+ memset(ctl_sp_mpi_req, 0, LEAPRAID_REQUEST_SIZE);
+ memcpy(ctl_sp_mpi_req,
+ leap_mpi_req,
+ karg->data_sge_offset * LEAPRAID_SGE_OFFSET_SIZE);
+
+ if (ctl_sp_mpi_req->func == LEAPRAID_FUNC_SCSIIO_REQ ||
+ ctl_sp_mpi_req->func == LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH ||
+ ctl_sp_mpi_req->func == LEAPRAID_FUNC_SATA_PASSTHROUGH) {
+ dev_hdl = le16_to_cpu(ctl_sp_mpi_req->func_dep1);
+ if (!dev_hdl || dev_hdl > adapter->adapter_attr.features.max_dev_handle) {
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
+	if (WARN_ON(ctl_sp_mpi_req->func == LEAPRAID_FUNC_SCSI_TMF)) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+ h2c_size = karg->h2c_size;
+ c2h_size = karg->c2h_size;
+ if (h2c_size) {
+ h2c = dma_alloc_coherent(&adapter->pdev->dev, h2c_size,
+ &h2c_dma_addr, GFP_ATOMIC);
+ if (!h2c) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (copy_from_user(h2c, karg->h2c_buf_ptr, h2c_size)) {
+ rc = -EFAULT;
+ goto out;
+ }
+ }
+ if (c2h_size) {
+ c2h = dma_alloc_coherent(&adapter->pdev->dev,
+ c2h_size, &c2h_dma_addr, GFP_ATOMIC);
+ if (!c2h) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ }
+
+ psge = (void *)ctl_sp_mpi_req + (karg->data_sge_offset *
+ LEAPRAID_SGE_OFFSET_SIZE);
+ init_completion(&adapter->driver_cmds.ctl_cmd.done);
+
+ switch (ctl_sp_mpi_req->func) {
+ case LEAPRAID_FUNC_SCSIIO_REQ:
+ case LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH:
+ if (test_bit(dev_hdl, (unsigned long *)adapter->dev_topo.dev_removing)) {
+ rc = -EINVAL;
+ goto out;
+ }
+		leapraid_ctl_scsiio_cmd(adapter, ctl_sp_mpi_req, taskid,
+ h2c_dma_addr, h2c_size,
+ c2h_dma_addr, c2h_size,
+ dev_hdl, psge);
+ break;
+ case LEAPRAID_FUNC_SMP_PASSTHROUGH:
+ if (!h2c) {
+ rc = -EINVAL;
+ goto out;
+ }
+ leapraid_ctl_smp_passthrough_cmd(adapter,
+ ctl_sp_mpi_req, taskid,
+ h2c_dma_addr, h2c_size,
+ c2h_dma_addr, c2h_size,
+ psge, h2c);
+ break;
+ case LEAPRAID_FUNC_SATA_PASSTHROUGH:
+ if (test_bit(dev_hdl, (unsigned long *)adapter->dev_topo.dev_removing)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ leapraid_ctl_sata_passthrough_cmd(adapter, h2c_dma_addr,
+ h2c_size, c2h_dma_addr,
+ c2h_size, psge, taskid);
+ break;
+ case LEAPRAID_FUNC_FW_DOWNLOAD:
+ case LEAPRAID_FUNC_FW_UPLOAD:
+ leapraid_ctl_load_fw_cmd(adapter, h2c_dma_addr,
+ h2c_size, c2h_dma_addr,
+ c2h_size, psge, taskid);
+ break;
+ case LEAPRAID_FUNC_SAS_IO_UNIT_CTRL:
+ leapraid_ctl_sas_io_unit_ctrl_cmd(adapter, ctl_sp_mpi_req,
+ h2c_dma_addr, h2c_size,
+ c2h_dma_addr, c2h_size,
+ psge, taskid);
+ break;
+ default:
+ leapraid_ctl_fire_mpi_cmd(adapter, h2c_dma_addr,
+ h2c_size, c2h_dma_addr,
+ c2h_size, psge, taskid);
+ break;
+ }
+
+ timeout = karg->timeout;
+ if (timeout < LEAPRAID_CTL_CMD_TIMEOUT)
+ timeout = LEAPRAID_CTL_CMD_TIMEOUT;
+ wait_for_completion_timeout(&adapter->driver_cmds.ctl_cmd.done,
+ timeout * HZ);
+
+ if ((leap_mpi_req->func == LEAPRAID_FUNC_SMP_PASSTHROUGH ||
+ leap_mpi_req->func == LEAPRAID_FUNC_SAS_IO_UNIT_CTRL) &&
+ adapter->reset_desc.adapter_link_resetting) {
+ adapter->reset_desc.adapter_link_resetting = false;
+ }
+ if (!(adapter->driver_cmds.ctl_cmd.status & LEAPRAID_CMD_DONE)) {
+ issue_reset =
+ leapraid_check_reset(
+ adapter->driver_cmds.ctl_cmd.status);
+ goto reset;
+ }
+
+ if (c2h_size) {
+ if (copy_to_user(karg->c2h_buf_ptr, c2h, c2h_size)) {
+ rc = -ENODATA;
+ goto out;
+ }
+ }
+ if (karg->max_rep_bytes) {
+ sz = min_t(u32, karg->max_rep_bytes, LEAPRAID_REPLY_SIEZ);
+ if (copy_to_user(karg->rep_msg_buf_ptr,
+ (void *)&adapter->driver_cmds.ctl_cmd.reply,
+ sz)) {
+ rc = -ENODATA;
+ goto out;
+ }
+ }
+
+ if (karg->max_sense_bytes &&
+ (leap_mpi_req->func == LEAPRAID_FUNC_SCSIIO_REQ ||
+ leap_mpi_req->func == LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH)) {
+ if (!karg->sense_data_ptr)
+ goto out;
+
+ sz = min_t(u32, karg->max_sense_bytes, SCSI_SENSE_BUFFERSIZE);
+ if (copy_to_user(karg->sense_data_ptr,
+ (void *)&adapter->driver_cmds.ctl_cmd.sense,
+ sz)) {
+ rc = -ENODATA;
+ goto out;
+ }
+ }
+reset:
+ if (issue_reset) {
+ rc = -ENODATA;
+ if (leap_mpi_req->func == LEAPRAID_FUNC_SCSIIO_REQ ||
+ leap_mpi_req->func == LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH ||
+ leap_mpi_req->func == LEAPRAID_FUNC_SATA_PASSTHROUGH) {
+ dev_err(&adapter->pdev->dev,
+ "fire tgt reset: hdl=0x%04x\n",
+ le16_to_cpu(leap_mpi_req->func_dep1));
+ leapraid_issue_locked_tm(adapter,
+ le16_to_cpu(leap_mpi_req->func_dep1), 0, 0, 0,
+ LEAPRAID_TM_TASKTYPE_TARGET_RESET, taskid,
+ LEAPRAID_TM_MSGFLAGS_LINK_RESET);
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, FULL_RESET);
+ }
+ }
+out:
+ if (c2h)
+ dma_free_coherent(&adapter->pdev->dev, c2h_size,
+ c2h, c2h_dma_addr);
+ if (h2c)
+ dma_free_coherent(&adapter->pdev->dev, h2c_size,
+ h2c, h2c_dma_addr);
+ kfree(leap_mpi_req);
+ adapter->driver_cmds.ctl_cmd.status = LEAPRAID_CMD_NOT_USED;
+ return rc;
+}
+
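+/**
+ * leapraid_ctl_get_adapter_info - report adapter and PCI details
+ * @adapter: adapter being queried
+ * @arg: user buffer receiving a struct leapraid_ioctl_adapter_info
+ *
+ * Return: 0 on success, -ENOMEM or -EFAULT on failure.
+ */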
+static long leapraid_ctl_get_adapter_info(struct leapraid_adapter *adapter,
+ void __user *arg)
+{
+ struct leapraid_ioctl_adapter_info *karg;
+ ssize_t __maybe_unused ret;
+ u8 revision;
+
+ karg = kzalloc(sizeof(*karg), GFP_KERNEL);
+ if (!karg)
+ return -ENOMEM;
+
+ pci_read_config_byte(adapter->pdev, PCI_CLASS_REVISION, &revision);
+ karg->revision = revision;
+ karg->pci_id = adapter->pdev->device;
+ karg->sub_dev = adapter->pdev->subsystem_device;
+ karg->sub_vendor = adapter->pdev->subsystem_vendor;
+ karg->pci_info.u.bits.bus = adapter->pdev->bus->number;
+ karg->pci_info.u.bits.dev = PCI_SLOT(adapter->pdev->devfn);
+ karg->pci_info.u.bits.func = PCI_FUNC(adapter->pdev->devfn);
+ karg->pci_info.seg_id = pci_domain_nr(adapter->pdev->bus);
+ karg->fw_ver = adapter->adapter_attr.features.fw_version;
+ ret = strscpy(karg->driver_ver, LEAPRAID_DRIVER_NAME,
+ sizeof(karg->driver_ver));
+ strcat(karg->driver_ver, "-");
+ strcat(karg->driver_ver, LEAPRAID_DRIVER_VERSION);
+ karg->adapter_type = LEAPRAID_IOCTL_VERSION;
+ karg->bios_ver = adapter->adapter_attr.bios_version;
+ if (copy_to_user(arg, karg,
+ sizeof(struct leapraid_ioctl_adapter_info))) {
+ kfree(karg);
+ return -EFAULT;
+ }
+
+ kfree(karg);
+ return 0;
+}
+
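+/**
+ * leapraid_ctl_ioctl_main - common ioctl dispatcher
+ * @file: character device file
+ * @cmd: ioctl command code
+ * @arg: user space argument
+ * @compat: non-zero when invoked from a 32-bit compat path
+ *
+ * Validates the header, waits for any host recovery to finish, then
+ * dispatches to the per-command handlers under the ctl command mutex.
+ */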
+static long leapraid_ctl_ioctl_main(struct file *file, unsigned int cmd,
+ void __user *arg, u8 compat)
+{
+ struct leapraid_ioctl_header ioctl_header;
+ struct leapraid_adapter *adapter;
+ long rc = -ENOIOCTLCMD;
+ int count;
+
+ if (copy_from_user(&ioctl_header, (char __user *)arg,
+ sizeof(struct leapraid_ioctl_header)))
+ return -EFAULT;
+
+ adapter = leapraid_ctl_lookup_adapter(ioctl_header.adapter_id);
+ if (!adapter)
+ return -EFAULT;
+
+ mutex_lock(&adapter->access_ctrl.pci_access_lock);
+
+ rc = leapraid_check_adapter_is_op(adapter);
+ if (rc)
+ goto out;
+
+ count = LEAPRAID_WAIT_SHOST_RECOVERY;
+ while (count--) {
+ if (!adapter->access_ctrl.shost_recovering)
+ break;
+ ssleep(1);
+ }
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering ||
+ adapter->scan_dev_desc.driver_loading ||
+ adapter->access_ctrl.host_removing) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if (file->f_flags & O_NONBLOCK) {
+ if (!mutex_trylock(&adapter->driver_cmds.ctl_cmd.mutex)) {
+ rc = -EAGAIN;
+ goto out;
+ }
+ } else if (mutex_lock_interruptible(&adapter->driver_cmds.ctl_cmd.mutex)) {
+ rc = -ERESTARTSYS;
+ goto out;
+ }
+
+ switch (_IOC_NR(cmd)) {
+ case LEAPRAID_ADAPTER_INFO:
+ if (_IOC_SIZE(cmd) == sizeof(struct leapraid_ioctl_adapter_info))
+ rc = leapraid_ctl_get_adapter_info(adapter, arg);
+ break;
+ case LEAPRAID_COMMAND:
+ {
+ struct leapraid_ioctl_command __user *uarg;
+ struct leapraid_ioctl_command karg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ rc = -EFAULT;
+ break;
+ }
+
+ if (karg.hdr.adapter_id != ioctl_header.adapter_id) {
+ rc = -EINVAL;
+ break;
+ }
+
+ if (_IOC_SIZE(cmd) == sizeof(struct leapraid_ioctl_command)) {
+ uarg = arg;
+ rc = leapraid_ctl_do_command(adapter, &karg,
+ &uarg->mf);
+ }
+ break;
+ }
+ case LEAPRAID_EVENTQUERY:
+ case LEAPRAID_EVENTREPORT:
+ rc = 0;
+ break;
+ default:
+ pr_err("unknown ioctl opcode=0x%08x\n", cmd);
+ break;
+ }
+ mutex_unlock(&adapter->driver_cmds.ctl_cmd.mutex);
+
+out:
+ mutex_unlock(&adapter->access_ctrl.pci_access_lock);
+ return rc;
+}
+
+static long leapraid_ctl_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return leapraid_ctl_ioctl_main(file, cmd,
+ (void __user *)arg, 0);
+}
+
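+/**
+ * leapraid_fw_mmap - map the firmware log buffer into user space
+ * @filp: character device file
+ * @vma: user virtual memory area to populate
+ *
+ * Return: 0 on success, -EINVAL or -EAGAIN on failure.
+ */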
+static int leapraid_fw_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct leapraid_adapter *adapter;
+ unsigned long length;
+ unsigned long pfn;
+
+ length = vma->vm_end - vma->vm_start;
+
+	adapter = list_first_entry_or_null(&leapraid_adapter_list,
+					   struct leapraid_adapter, list);
+	if (!adapter)
+		return -ENODEV;
+
+ if (length > (LEAPRAID_SYS_LOG_BUF_SIZE +
+ LEAPRAID_SYS_LOG_BUF_RESERVE)) {
+ dev_err(&adapter->pdev->dev,
+ "requested mapping size is too large!\n");
+ return -EINVAL;
+ }
+
+ if (!adapter->fw_log_desc.fw_log_buffer) {
+ dev_err(&adapter->pdev->dev, "no log buffer!\n");
+ return -EINVAL;
+ }
+
+ pfn = virt_to_phys(adapter->fw_log_desc.fw_log_buffer) >> PAGE_SHIFT;
+
+ if (remap_pfn_range(vma, vma->vm_start, pfn, length,
+ vma->vm_page_prot)) {
+ dev_err(&adapter->pdev->dev,
+ "failed to map memory to user space!\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static const struct file_operations leapraid_ctl_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = leapraid_ctl_ioctl,
+ .mmap = leapraid_fw_mmap,
+};
+
+static struct miscdevice leapraid_ctl_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = LEAPRAID_DEV_NAME,
+ .fops = &leapraid_ctl_fops,
+};
+
+void leapraid_ctl_init(void)
+{
+ if (misc_register(&leapraid_ctl_dev) < 0)
+ pr_err("%s can't register misc device\n", LEAPRAID_DRIVER_NAME);
+}
+
+void leapraid_ctl_exit(void)
+{
+ misc_deregister(&leapraid_ctl_dev);
+}
diff --git a/drivers/scsi/leapraid/leapraid_func.c b/drivers/scsi/leapraid/leapraid_func.c
new file mode 100644
index 000000000000..c83c30f56805
--- /dev/null
+++ b/drivers/scsi/leapraid/leapraid_func.c
@@ -0,0 +1,8264 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 LeapIO Tech Inc.
+ *
+ * LeapRAID Storage and RAID Controller driver.
+ */
+
+#include <linux/module.h>
+
+#include "leapraid_func.h"
+
+static int msix_disable;
+module_param(msix_disable, int, 0444);
+MODULE_PARM_DESC(msix_disable,
+ "disable msix routed interrupts (default=0)");
+
+static int smart_poll;
+module_param(smart_poll, int, 0444);
+MODULE_PARM_DESC(smart_poll,
+	"check SATA drive health via SMART polling (default=0)");
+
+static int interrupt_mode;
+module_param(interrupt_mode, int, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+ "intr mode: 0 for MSI-X, 1 for MSI, 2 for legacy. (default=0)");
+
+static int max_msix_vectors = -1;
+module_param(max_msix_vectors, int, 0444);
+MODULE_PARM_DESC(max_msix_vectors, " max msix vectors");
+
+static void leapraid_remove_device(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev);
+static void leapraid_set_led(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev, bool on);
+static void leapraid_ublk_io_dev(struct leapraid_adapter *adapter,
+ u64 sas_address,
+ struct leapraid_card_port *port);
+static int leapraid_make_adapter_available(struct leapraid_adapter *adapter);
+static int leapraid_fw_log_init(struct leapraid_adapter *adapter);
+static int leapraid_make_adapter_ready(struct leapraid_adapter *adapter,
+ enum reset_type type);
+
+static inline bool leapraid_is_end_dev(u32 dev_type)
+{
+ return (dev_type & LEAPRAID_DEVTYP_END_DEV) &&
+ ((dev_type & LEAPRAID_DEVTYP_SSP_TGT) ||
+ (dev_type & LEAPRAID_DEVTYP_STP_TGT) ||
+ (dev_type & LEAPRAID_DEVTYP_SATA_DEV));
+}
+
+bool leapraid_pci_removed(struct leapraid_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u32 vendor_id;
+
+ if (pci_bus_read_config_dword(pdev->bus, pdev->devfn, PCI_VENDOR_ID,
+ &vendor_id))
+ return true;
+
+ return ((vendor_id & LEAPRAID_PCI_VENDOR_ID_MASK) !=
+ LEAPRAID_VENDOR_ID);
+}
+
+static bool leapraid_pci_active(struct leapraid_adapter *adapter)
+{
+ return !(adapter->access_ctrl.pcie_recovering ||
+ leapraid_pci_removed(adapter));
+}
+
+void *leapraid_get_reply_vaddr(struct leapraid_adapter *adapter, u32 rep_paddr)
+{
+ if (!rep_paddr)
+ return NULL;
+
+ return adapter->mem_desc.rep_msg +
+ (rep_paddr - (u32)adapter->mem_desc.rep_msg_dma);
+}
+
+void *leapraid_get_task_desc(struct leapraid_adapter *adapter, u16 taskid)
+{
+ return (void *)(adapter->mem_desc.task_desc +
+ (taskid * LEAPRAID_REQUEST_SIZE));
+}
+
+void *leapraid_get_sense_buffer(struct leapraid_adapter *adapter, u16 taskid)
+{
+ return (void *)(adapter->mem_desc.sense_data +
+ ((taskid - 1) * SCSI_SENSE_BUFFERSIZE));
+}
+
+__le32 leapraid_get_sense_buffer_dma(struct leapraid_adapter *adapter,
+ u16 taskid)
+{
+ return cpu_to_le32(adapter->mem_desc.sense_data_dma +
+ ((taskid - 1) * SCSI_SENSE_BUFFERSIZE));
+}
+
+void leapraid_mask_int(struct leapraid_adapter *adapter)
+{
+ u32 reg;
+
+ adapter->mask_int = true;
+ reg = leapraid_readl(&adapter->iomem_base->host_int_mask);
+	reg |= LEAPRAID_TO_SYS_DB_MASK | LEAPRAID_REPLY_INT_MASK |
+	       LEAPRAID_RESET_IRQ_MASK;
+ writel(reg, &adapter->iomem_base->host_int_mask);
+ leapraid_readl(&adapter->iomem_base->host_int_mask);
+}
+
+void leapraid_unmask_int(struct leapraid_adapter *adapter)
+{
+ u32 reg;
+
+ reg = leapraid_readl(&adapter->iomem_base->host_int_mask);
+ reg &= ~LEAPRAID_REPLY_INT_MASK;
+ writel(reg, &adapter->iomem_base->host_int_mask);
+ adapter->mask_int = false;
+}
+
+static void leapraid_flush_io_and_panic(struct leapraid_adapter *adapter)
+{
+ adapter->access_ctrl.adapter_thermal_alert = true;
+ leapraid_smart_polling_stop(adapter);
+ leapraid_fw_log_stop(adapter);
+ leapraid_mq_polling_pause(adapter);
+ leapraid_clean_active_scsi_cmds(adapter);
+}
+
+static void leapraid_check_panic_needed(struct leapraid_adapter *adapter,
+ u32 db, u32 adapter_state)
+{
+ bool fault_1 = adapter_state == LEAPRAID_DB_MASK;
+ bool fault_2 = (adapter_state == LEAPRAID_DB_FAULT) &&
+ ((db & LEAPRAID_DB_DATA_MASK) == LEAPRAID_DB_OVER_TEMPERATURE);
+
+ if (!fault_1 && !fault_2)
+ return;
+
+ if (fault_1)
+ pr_err("%s, doorbell status 0xFFFF!\n", __func__);
+ else
+ pr_err("%s, adapter overheating detected!\n", __func__);
+
+ leapraid_flush_io_and_panic(adapter);
+	panic("%s: fatal adapter fault detected, panic now!\n", __func__);
+}
+
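+/**
+ * leapraid_get_adapter_state - read the doorbell state field
+ * @adapter: adapter to query
+ *
+ * Also panics the system when the doorbell reports an unrecoverable
+ * fault or an over-temperature condition.
+ */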
+u32 leapraid_get_adapter_state(struct leapraid_adapter *adapter)
+{
+ u32 db;
+ u32 adapter_state;
+
+ db = leapraid_readl(&adapter->iomem_base->db);
+ adapter_state = db & LEAPRAID_DB_MASK;
+ leapraid_check_panic_needed(adapter, db, adapter_state);
+ return adapter_state;
+}
+
+static bool leapraid_wait_adapter_ready(struct leapraid_adapter *adapter)
+{
+ u32 cur_state;
+ u32 cnt = LEAPRAID_ADAPTER_READY_MAX_RETRY;
+
+ do {
+ cur_state = leapraid_get_adapter_state(adapter);
+ if (cur_state == LEAPRAID_DB_READY)
+ return true;
+ if (cur_state == LEAPRAID_DB_FAULT)
+ break;
+ usleep_range(LEAPRAID_ADAPTER_READY_SLEEP_MIN_US,
+ LEAPRAID_ADAPTER_READY_SLEEP_MAX_US);
+ } while (--cnt);
+
+ return false;
+}
+
+static int leapraid_db_wait_int_host(struct leapraid_adapter *adapter)
+{
+ u32 cnt = LEAPRAID_DB_WAIT_MAX_RETRY;
+
+ do {
+ if (leapraid_readl(&adapter->iomem_base->host_int_status) &
+ LEAPRAID_ADAPTER2HOST_DB_STATUS)
+ return 0;
+ udelay(LEAPRAID_DB_WAIT_DELAY_US);
+ } while (--cnt);
+
+ return -EFAULT;
+}
+
+static int leapraid_db_wait_ack_and_clear_int(struct leapraid_adapter *adapter)
+{
+ u32 adapter_state;
+ u32 int_status;
+ u32 cnt;
+
+ cnt = LEAPRAID_ADAPTER_READY_MAX_RETRY;
+ do {
+ int_status =
+ leapraid_readl(&adapter->iomem_base->host_int_status);
+ if (!(int_status & LEAPRAID_HOST2ADAPTER_DB_STATUS)) {
+ return 0;
+ } else if (int_status & LEAPRAID_ADAPTER2HOST_DB_STATUS) {
+ adapter_state = leapraid_get_adapter_state(adapter);
+ if (adapter_state == LEAPRAID_DB_FAULT)
+ return -EFAULT;
+ } else if (int_status == 0xFFFFFFFF) {
+ goto out;
+ }
+
+ usleep_range(LEAPRAID_ADAPTER_READY_SLEEP_MIN_US,
+ LEAPRAID_ADAPTER_READY_SLEEP_MAX_US);
+ } while (--cnt);
+
+out:
+ return -EFAULT;
+}
+
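+/**
+ * leapraid_handshake_func - exchange a request and reply over the doorbell
+ * @adapter: adapter to talk to
+ * @req_bytes: request length in bytes
+ * @req: request dwords to write
+ * @rep_bytes: expected reply length in bytes
+ * @rep: buffer that receives the reply words
+ *
+ * Implements the slow doorbell handshake: the request is written one
+ * dword at a time, each write acknowledged through the host interrupt
+ * status register, and the reply is read back one word per doorbell
+ * interrupt.
+ */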
+static int leapraid_handshake_func(struct leapraid_adapter *adapter,
+ int req_bytes, u32 *req,
+ int rep_bytes, u16 *rep)
+{
+ int failed, i;
+
+ if ((leapraid_readl(&adapter->iomem_base->db) &
+ LEAPRAID_DB_USED)) {
+ dev_err(&adapter->pdev->dev, "doorbell used\n");
+ return -EFAULT;
+ }
+
+ if (leapraid_readl(&adapter->iomem_base->host_int_status) &
+ LEAPRAID_ADAPTER2HOST_DB_STATUS)
+ writel(0, &adapter->iomem_base->host_int_status);
+
+ writel(((LEAPRAID_FUNC_HANDSHAKE << LEAPRAID_DB_FUNC_SHIFT) |
+ ((req_bytes / LEAPRAID_DWORDS_BYTE_SIZE) <<
+ LEAPRAID_DB_ADD_DWORDS_SHIFT)),
+ &adapter->iomem_base->db);
+
+ if (leapraid_db_wait_int_host(adapter)) {
+ dev_err(&adapter->pdev->dev, "%d:wait db interrupt timeout\n",
+ __LINE__);
+ return -EFAULT;
+ }
+
+ writel(0, &adapter->iomem_base->host_int_status);
+
+ if (leapraid_db_wait_ack_and_clear_int(adapter)) {
+ dev_err(&adapter->pdev->dev, "%d:wait ack failure\n",
+ __LINE__);
+ return -EFAULT;
+ }
+
+ for (i = 0, failed = 0;
+ i < req_bytes / LEAPRAID_DWORDS_BYTE_SIZE && !failed;
+ i++) {
+ writel((u32)(req[i]), &adapter->iomem_base->db);
+ if (leapraid_db_wait_ack_and_clear_int(adapter))
+ failed = 1;
+ }
+ if (failed) {
+ dev_err(&adapter->pdev->dev, "%d:wait ack failure\n",
+ __LINE__);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < rep_bytes / LEAPRAID_WORD_BYTE_SIZE; i++) {
+ if (leapraid_db_wait_int_host(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%d:wait db interrupt timeout\n", __LINE__);
+ return -EFAULT;
+ }
+ rep[i] = (u16)(leapraid_readl(&adapter->iomem_base->db)
+ & LEAPRAID_DB_DATA_MASK);
+ writel(0, &adapter->iomem_base->host_int_status);
+ }
+
+ if (leapraid_db_wait_int_host(adapter)) {
+ dev_err(&adapter->pdev->dev, "%d:wait db interrupt timeout\n",
+ __LINE__);
+ return -EFAULT;
+ }
+
+ writel(0, &adapter->iomem_base->host_int_status);
+
+ return 0;
+}
+
+int leapraid_check_adapter_is_op(struct leapraid_adapter *adapter)
+{
+ int wait_count = LEAPRAID_DB_WAIT_OPERATIONAL;
+
+ do {
+ if (leapraid_pci_removed(adapter))
+ return -EFAULT;
+
+ if (leapraid_get_adapter_state(adapter) ==
+ LEAPRAID_DB_OPERATIONAL)
+ return 0;
+
+ dev_info(&adapter->pdev->dev,
+			 "waiting for adapter to become operational (cnt=%d)\n",
+ LEAPRAID_DB_WAIT_OPERATIONAL - wait_count);
+
+ ssleep(1);
+ } while (--wait_count);
+
+ dev_err(&adapter->pdev->dev,
+		"adapter failed to become operational, last state=%d\n",
+ leapraid_get_adapter_state(adapter));
+
+ return -EFAULT;
+}
+
+struct leapraid_io_req_tracker *leapraid_get_io_tracker_from_taskid(
+ struct leapraid_adapter *adapter, u16 taskid)
+{
+ struct scsi_cmnd *scmd;
+
+ if (WARN_ON(!taskid))
+ return NULL;
+
+ if (WARN_ON(taskid > adapter->shost->can_queue))
+ return NULL;
+
+ scmd = leapraid_get_scmd_from_taskid(adapter, taskid);
+ if (scmd)
+ return leapraid_get_scmd_priv(scmd);
+
+ return NULL;
+}
+
+static u8 leapraid_get_cb_idx(struct leapraid_adapter *adapter, u16 taskid)
+{
+ struct leapraid_driver_cmd *sp_cmd;
+ u8 cb_idx = 0xFF;
+
+ if (WARN_ON(!taskid))
+ return cb_idx;
+
+ list_for_each_entry(sp_cmd, &adapter->driver_cmds.special_cmd_list,
+ list)
+ if (taskid == sp_cmd->taskid ||
+ taskid == sp_cmd->hp_taskid ||
+ taskid == sp_cmd->inter_taskid)
+ return sp_cmd->cb_idx;
+
+ WARN_ON(cb_idx == 0xFF);
+ return cb_idx;
+}
+
+struct scsi_cmnd *leapraid_get_scmd_from_taskid(
+ struct leapraid_adapter *adapter, u16 taskid)
+{
+ struct leapraid_scsiio_req *leap_mpi_req;
+ struct leapraid_io_req_tracker *st;
+ struct scsi_cmnd *scmd;
+ u32 uniq_tag;
+
+	if (taskid == 0 || taskid > adapter->shost->can_queue)
+ return NULL;
+
+ uniq_tag = taskid - 1;
+ leap_mpi_req = leapraid_get_task_desc(adapter, taskid);
+ if (!leap_mpi_req->dev_hdl)
+ return NULL;
+
+ scmd = scsi_host_find_tag(adapter->shost, uniq_tag);
+ if (scmd) {
+ st = leapraid_get_scmd_priv(scmd);
+ if (st && st->taskid == taskid)
+ return scmd;
+ }
+
+ return NULL;
+}
+
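+/**
+ * leapraid_alloc_scsiio_taskid - derive a task id from a SCSI command tag
+ * @adapter: owning adapter
+ * @scmd: SCSI command being started
+ *
+ * Task ids are the block layer tag plus one, so task id 0 stays
+ * reserved as invalid.
+ */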
+u16 leapraid_alloc_scsiio_taskid(struct leapraid_adapter *adapter,
+ struct scsi_cmnd *scmd)
+{
+ struct leapraid_io_req_tracker *request;
+ u16 taskid;
+ u32 tag = scmd->request->tag;
+
+ scmd->host_scribble =
+ (unsigned char *)(&adapter->mem_desc.io_tracker[tag]);
+ request = leapraid_get_scmd_priv(scmd);
+ taskid = tag + 1;
+ request->taskid = taskid;
+ request->scmd = scmd;
+ return taskid;
+}
+
+static void leapraid_check_pending_io(struct leapraid_adapter *adapter)
+{
+ if (adapter->access_ctrl.shost_recovering &&
+ adapter->reset_desc.pending_io_cnt) {
+ if (adapter->reset_desc.pending_io_cnt == 1)
+ wake_up(&adapter->reset_desc.reset_wait_queue);
+ adapter->reset_desc.pending_io_cnt--;
+ }
+}
+
+static void leapraid_clear_io_tracker(struct leapraid_adapter *adapter,
+ struct leapraid_io_req_tracker *io_tracker)
+{
+ if (!io_tracker)
+ return;
+
+ if (WARN_ON(io_tracker->taskid == 0))
+ return;
+
+ io_tracker->scmd = NULL;
+}
+
+static bool leapraid_is_fixed_taskid(struct leapraid_adapter *adapter,
+ u16 taskid)
+{
+ return (taskid == adapter->driver_cmds.ctl_cmd.taskid ||
+ taskid == adapter->driver_cmds.driver_scsiio_cmd.taskid ||
+ taskid == adapter->driver_cmds.tm_cmd.hp_taskid ||
+ taskid == adapter->driver_cmds.ctl_cmd.hp_taskid ||
+ taskid == adapter->driver_cmds.scan_dev_cmd.inter_taskid ||
+ taskid == adapter->driver_cmds.timestamp_sync_cmd.inter_taskid ||
+ taskid == adapter->driver_cmds.raid_action_cmd.inter_taskid ||
+ taskid == adapter->driver_cmds.transport_cmd.inter_taskid ||
+ taskid == adapter->driver_cmds.cfg_op_cmd.inter_taskid ||
+ taskid == adapter->driver_cmds.enc_cmd.inter_taskid ||
+ taskid == adapter->driver_cmds.notify_event_cmd.inter_taskid);
+}
+
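+/**
+ * leapraid_free_taskid - release a task id after completion
+ * @adapter: owning adapter
+ * @taskid: task id to release
+ *
+ * Fixed driver-internal task ids are never recycled here. For regular
+ * I/O the request frame is cleared, the tracker is detached from its
+ * command and any reset waiter counting pending I/O is woken.
+ */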
+void leapraid_free_taskid(struct leapraid_adapter *adapter, u16 taskid)
+{
+ struct leapraid_io_req_tracker *io_tracker;
+ void *task_desc;
+
+ if (leapraid_is_fixed_taskid(adapter, taskid))
+ return;
+
+ if (taskid <= adapter->shost->can_queue) {
+ io_tracker = leapraid_get_io_tracker_from_taskid(adapter,
+ taskid);
+ if (!io_tracker) {
+ leapraid_check_pending_io(adapter);
+ return;
+ }
+
+ task_desc = leapraid_get_task_desc(adapter, taskid);
+ memset(task_desc, 0, LEAPRAID_REQUEST_SIZE);
+ leapraid_clear_io_tracker(adapter, io_tracker);
+ leapraid_check_pending_io(adapter);
+ }
+}
+
+static u8 leapraid_get_msix_idx(struct leapraid_adapter *adapter,
+ struct scsi_cmnd *scmd)
+{
+ return adapter->notification_desc.msix_cpu_map[raw_smp_processor_id()];
+}
+
+static u8 leapraid_get_and_set_msix_idx_from_taskid(
+ struct leapraid_adapter *adapter, u16 taskid)
+{
+ struct leapraid_io_req_tracker *io_tracker = NULL;
+
+ if (taskid <= adapter->shost->can_queue)
+ io_tracker = leapraid_get_io_tracker_from_taskid(adapter,
+ taskid);
+
+ if (!io_tracker)
+ return leapraid_get_msix_idx(adapter, NULL);
+
+ io_tracker->msix_io = leapraid_get_msix_idx(adapter, io_tracker->scmd);
+
+ return io_tracker->msix_io;
+}
+
+void leapraid_fire_scsi_io(struct leapraid_adapter *adapter, u16 taskid,
+ u16 handle)
+{
+ struct leapraid_atomic_req_desc desc;
+
+ desc.flg = LEAPRAID_REQ_DESC_FLG_SCSI_IO;
+ desc.msix_idx = leapraid_get_and_set_msix_idx_from_taskid(adapter,
+ taskid);
+ desc.taskid = cpu_to_le16(taskid);
+ writel((__force u32)cpu_to_le32(*((u32 *)&desc)),
+ &adapter->iomem_base->atomic_req_desc_post);
+}
+
+void leapraid_fire_hpr_task(struct leapraid_adapter *adapter, u16 taskid,
+ u16 msix_task)
+{
+ struct leapraid_atomic_req_desc desc;
+
+ desc.flg = LEAPRAID_REQ_DESC_FLG_HPR;
+ desc.msix_idx = msix_task;
+ desc.taskid = cpu_to_le16(taskid);
+ writel((__force u32)cpu_to_le32(*((u32 *)&desc)),
+ &adapter->iomem_base->atomic_req_desc_post);
+}
+
+void leapraid_fire_task(struct leapraid_adapter *adapter, u16 taskid)
+{
+ struct leapraid_atomic_req_desc desc;
+
+ desc.flg = LEAPRAID_REQ_DESC_FLG_DFLT_TYPE;
+ desc.msix_idx = leapraid_get_and_set_msix_idx_from_taskid(adapter,
+ taskid);
+ desc.taskid = cpu_to_le16(taskid);
+ writel((__force u32)cpu_to_le32(*((u32 *)&desc)),
+ &adapter->iomem_base->atomic_req_desc_post);
+}
+
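+/**
+ * leapraid_clean_active_scsi_cmds - fail all outstanding SCSI commands
+ * @adapter: adapter being reset or removed
+ *
+ * Completes every in-flight command with DID_RESET, or DID_NO_CONNECT
+ * when the adapter is gone, overheated or being removed.
+ */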
+void leapraid_clean_active_scsi_cmds(struct leapraid_adapter *adapter)
+{
+ struct leapraid_io_req_tracker *io_tracker;
+ struct scsi_cmnd *scmd;
+ u16 taskid;
+
+ for (taskid = 1; taskid <= adapter->shost->can_queue; taskid++) {
+ scmd = leapraid_get_scmd_from_taskid(adapter, taskid);
+ if (!scmd)
+ continue;
+
+ io_tracker = leapraid_get_scmd_priv(scmd);
+ if (io_tracker && io_tracker->taskid == 0)
+ continue;
+
+ scsi_dma_unmap(scmd);
+ leapraid_clear_io_tracker(adapter, io_tracker);
+ if (!leapraid_pci_active(adapter) ||
+ adapter->reset_desc.adapter_reset_results != 0 ||
+ adapter->access_ctrl.adapter_thermal_alert ||
+ adapter->access_ctrl.host_removing)
+			scmd->result = DID_NO_CONNECT << LEAPRAID_SCSI_HOST_SHIFT;
+ else
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ scmd->scsi_done(scmd);
+ }
+}
+
+static void leapraid_clean_active_driver_cmd(
+ struct leapraid_driver_cmd *driver_cmd)
+{
+ if (driver_cmd->status & LEAPRAID_CMD_PENDING) {
+ driver_cmd->status |= LEAPRAID_CMD_RESET;
+ complete(&driver_cmd->done);
+ }
+}
+
+static void leapraid_clean_active_driver_cmds(struct leapraid_adapter *adapter)
+{
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.timestamp_sync_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.raid_action_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.driver_scsiio_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.tm_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.transport_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.enc_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.notify_event_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.cfg_op_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.ctl_cmd);
+
+ if (adapter->driver_cmds.scan_dev_cmd.status & LEAPRAID_CMD_PENDING) {
+ adapter->scan_dev_desc.scan_dev_failed = true;
+ adapter->driver_cmds.scan_dev_cmd.status |= LEAPRAID_CMD_RESET;
+ if (adapter->scan_dev_desc.driver_loading) {
+ adapter->scan_dev_desc.scan_start_failed =
+ LEAPRAID_ADAPTER_STATUS_INTERNAL_ERROR;
+ adapter->scan_dev_desc.scan_start = false;
+ } else {
+ complete(&adapter->driver_cmds.scan_dev_cmd.done);
+ }
+ }
+}
+
+static void leapraid_clean_active_cmds(struct leapraid_adapter *adapter)
+{
+ leapraid_clean_active_driver_cmds(adapter);
+ memset(adapter->dev_topo.pending_dev_add, 0,
+ adapter->dev_topo.pending_dev_add_sz);
+ memset(adapter->dev_topo.dev_removing, 0,
+ adapter->dev_topo.dev_removing_sz);
+ leapraid_clean_active_fw_evt(adapter);
+ leapraid_clean_active_scsi_cmds(adapter);
+}
+
+static void leapraid_tgt_not_responding(struct leapraid_adapter *adapter,
+ u16 hdl)
+{
+ struct leapraid_starget_priv *starget_priv = NULL;
+ struct leapraid_sas_dev *sas_dev = NULL;
+ unsigned long flags = 0;
+ u32 adapter_state = 0;
+
+ if (adapter->access_ctrl.pcie_recovering)
+ return;
+
+ adapter_state = leapraid_get_adapter_state(adapter);
+ if (adapter_state != LEAPRAID_DB_OPERATIONAL)
+ return;
+
+ if (test_bit(hdl, (unsigned long *)adapter->dev_topo.pd_hdls))
+ return;
+
+ clear_bit(hdl, (unsigned long *)adapter->dev_topo.pending_dev_add);
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ if (sas_dev && sas_dev->starget && sas_dev->starget->hostdata) {
+ starget_priv = sas_dev->starget->hostdata;
+ starget_priv->deleted = true;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (starget_priv)
+ starget_priv->hdl = LEAPRAID_INVALID_DEV_HANDLE;
+
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+}
+
+static void leapraid_tgt_rst_send(struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_starget_priv *starget_priv = NULL;
+ struct leapraid_sas_dev *sas_dev = NULL;
+ struct leapraid_card_port *port = NULL;
+ u64 sas_address = 0;
+ unsigned long flags;
+ u32 adapter_state;
+
+ if (adapter->access_ctrl.pcie_recovering)
+ return;
+
+ adapter_state = leapraid_get_adapter_state(adapter);
+ if (adapter_state != LEAPRAID_DB_OPERATIONAL)
+ return;
+
+ if (test_bit(hdl, (unsigned long *)adapter->dev_topo.pd_hdls))
+ return;
+
+ clear_bit(hdl, (unsigned long *)adapter->dev_topo.pending_dev_add);
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ if (sas_dev && sas_dev->starget && sas_dev->starget->hostdata) {
+ starget_priv = sas_dev->starget->hostdata;
+ starget_priv->deleted = true;
+ sas_address = sas_dev->sas_addr;
+ port = sas_dev->card_port;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (starget_priv) {
+ leapraid_ublk_io_dev(adapter, sas_address, port);
+ starget_priv->hdl = LEAPRAID_INVALID_DEV_HANDLE;
+ }
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+}
+
+static inline void leapraid_single_mpi_sg_append(struct leapraid_adapter *adapter,
+ void *sge, u32 flag_and_len,
+ dma_addr_t dma_addr)
+{
+ if (adapter->adapter_attr.use_32_dma_mask) {
+ ((struct leapraid_sge_simple32 *)sge)->flg_and_len =
+ cpu_to_le32(flag_and_len |
+ (LEAPRAID_SGE_FLG_32 |
+ LEAPRAID_SGE_FLG_SYSTEM_ADDR) <<
+ LEAPRAID_SGE_FLG_SHIFT);
+ ((struct leapraid_sge_simple32 *)sge)->addr =
+ cpu_to_le32(dma_addr);
+ } else {
+ ((struct leapraid_sge_simple64 *)sge)->flg_and_len =
+ cpu_to_le32(flag_and_len |
+ (LEAPRAID_SGE_FLG_64 |
+ LEAPRAID_SGE_FLG_SYSTEM_ADDR) <<
+ LEAPRAID_SGE_FLG_SHIFT);
+ ((struct leapraid_sge_simple64 *)sge)->addr =
+ cpu_to_le64(dma_addr);
+ }
+}
+
+static inline void leapraid_single_ieee_sg_append(void *sge, u8 flag,
+ u8 next_chain_offset,
+ u32 len,
+ dma_addr_t dma_addr)
+{
+ ((struct leapraid_chain64_ieee_sg *)sge)->flg = flag;
+ ((struct leapraid_chain64_ieee_sg *)sge)->next_chain_offset =
+ next_chain_offset;
+ ((struct leapraid_chain64_ieee_sg *)sge)->len = cpu_to_le32(len);
+ ((struct leapraid_chain64_ieee_sg *)sge)->addr = cpu_to_le64(dma_addr);
+}
+
+static void leapraid_build_nodata_mpi_sg(struct leapraid_adapter *adapter,
+ void *sge)
+{
+ leapraid_single_mpi_sg_append(adapter,
+ sge,
+ (u32)((LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL |
+ LEAPRAID_SGE_FLG_SIMPLE_ONE) <<
+ LEAPRAID_SGE_FLG_SHIFT),
+ -1);
+}
+
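+/**
+ * leapraid_build_mpi_sg - build an MPI scatter-gather list
+ * @adapter: owning adapter
+ * @sge: SGE area inside the request frame
+ * @h2c_dma_addr: host-to-card buffer DMA address
+ * @h2c_size: host-to-card transfer length
+ * @c2h_dma_addr: card-to-host buffer DMA address
+ * @c2h_size: card-to-host transfer length
+ *
+ * Emits one or two simple SGEs depending on the transfer direction;
+ * with no data, an empty end-of-list element is emitted instead.
+ */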
+void leapraid_build_mpi_sg(struct leapraid_adapter *adapter, void *sge,
+ dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size)
+{
+ if (h2c_size && !c2h_size) {
+ leapraid_single_mpi_sg_append(adapter,
+ sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL |
+ LEAPRAID_SGE_FLG_H2C) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ h2c_size,
+ h2c_dma_addr);
+ } else if (!h2c_size && c2h_size) {
+ leapraid_single_mpi_sg_append(adapter,
+ sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ c2h_size,
+ c2h_dma_addr);
+ } else if (h2c_size && c2h_size) {
+ leapraid_single_mpi_sg_append(adapter,
+ sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_H2C) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ h2c_size,
+ h2c_dma_addr);
+ if (adapter->adapter_attr.use_32_dma_mask)
+ sge += sizeof(struct leapraid_sge_simple32);
+ else
+ sge += sizeof(struct leapraid_sge_simple64);
+ leapraid_single_mpi_sg_append(adapter,
+ sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ c2h_size,
+ c2h_dma_addr);
+ } else {
+ return leapraid_build_nodata_mpi_sg(adapter, sge);
+ }
+}
+
+void leapraid_build_ieee_nodata_sg(struct leapraid_adapter *adapter, void *sge)
+{
+ leapraid_single_ieee_sg_append(sge,
+ (LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR |
+ LEAPRAID_IEEE_SGE_FLG_EOL),
+ 0, 0, -1);
+}
+
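+/**
+ * leapraid_build_scmd_ieee_sg - map a SCSI command into IEEE SGEs
+ * @adapter: owning adapter
+ * @scmd: SCSI command whose scatterlist is mapped
+ * @taskid: task id that owns the request frame
+ *
+ * Small scatterlists are placed inline in the request frame; larger
+ * ones are chained through the per-command host chain segments.
+ *
+ * Return: 0 on success, -ENOMEM if DMA mapping fails.
+ */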
+int leapraid_build_scmd_ieee_sg(struct leapraid_adapter *adapter,
+ struct scsi_cmnd *scmd, u16 taskid)
+{
+ struct leapraid_scsiio_req *scsiio_req;
+ struct leapraid_io_req_tracker *io_tracker;
+ struct scatterlist *scmd_sg_cur;
+ int sg_entries_left;
+ void *sg_entry_cur;
+ void *host_chain;
+ dma_addr_t host_chain_dma;
+ u8 host_chain_cursor;
+ u32 sg_entries_in_cur_seg;
+ u32 chain_offset_in_cur_seg;
+ u32 chain_len_in_cur_seg;
+
+ io_tracker = leapraid_get_scmd_priv(scmd);
+ scsiio_req = leapraid_get_task_desc(adapter, taskid);
+ scmd_sg_cur = scsi_sglist(scmd);
+ sg_entries_left = scsi_dma_map(scmd);
+ if (sg_entries_left < 0)
+ return -ENOMEM;
+ sg_entry_cur = &scsiio_req->sgl;
+ if (sg_entries_left <= LEAPRAID_SGL_INLINE_THRESHOLD)
+ goto fill_last_seg;
+
+ scsiio_req->chain_offset = LEAPRAID_CHAIN_OFFSET_DWORDS;
+ leapraid_single_ieee_sg_append(sg_entry_cur,
+ LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR,
+ 0, sg_dma_len(scmd_sg_cur),
+ sg_dma_address(scmd_sg_cur));
+ scmd_sg_cur = sg_next(scmd_sg_cur);
+ sg_entry_cur += LEAPRAID_IEEE_SGE64_ENTRY_SIZE;
+ sg_entries_left--;
+
+ host_chain_cursor = 0;
+ host_chain = io_tracker->chain +
+ host_chain_cursor * LEAPRAID_CHAIN_SEG_SIZE;
+ host_chain_dma = io_tracker->chain_dma +
+ host_chain_cursor * LEAPRAID_CHAIN_SEG_SIZE;
+ host_chain_cursor += 1;
+ for (;;) {
+ sg_entries_in_cur_seg =
+ (sg_entries_left <= LEAPRAID_MAX_SGES_IN_CHAIN) ?
+ sg_entries_left : LEAPRAID_MAX_SGES_IN_CHAIN;
+ chain_offset_in_cur_seg =
+ (sg_entries_left == (int)sg_entries_in_cur_seg) ?
+ 0 : sg_entries_in_cur_seg;
+ chain_len_in_cur_seg = sg_entries_in_cur_seg *
+ LEAPRAID_IEEE_SGE64_ENTRY_SIZE;
+ if (chain_offset_in_cur_seg)
+ chain_len_in_cur_seg += LEAPRAID_IEEE_SGE64_ENTRY_SIZE;
+
+ leapraid_single_ieee_sg_append(sg_entry_cur,
+ LEAPRAID_IEEE_SGE_FLG_CHAIN_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR,
+ chain_offset_in_cur_seg, chain_len_in_cur_seg,
+ host_chain_dma);
+ sg_entry_cur = host_chain;
+ if (!chain_offset_in_cur_seg)
+ goto fill_last_seg;
+
+ while (sg_entries_in_cur_seg) {
+ leapraid_single_ieee_sg_append(sg_entry_cur,
+ LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR,
+ 0, sg_dma_len(scmd_sg_cur),
+ sg_dma_address(scmd_sg_cur));
+ scmd_sg_cur = sg_next(scmd_sg_cur);
+ sg_entry_cur += LEAPRAID_IEEE_SGE64_ENTRY_SIZE;
+ sg_entries_left--;
+ sg_entries_in_cur_seg--;
+ }
+ host_chain = io_tracker->chain +
+ host_chain_cursor * LEAPRAID_CHAIN_SEG_SIZE;
+ host_chain_dma = io_tracker->chain_dma +
+ host_chain_cursor * LEAPRAID_CHAIN_SEG_SIZE;
+ host_chain_cursor += 1;
+ }
+
+fill_last_seg:
+ while (sg_entries_left > 0) {
+ u32 flags = LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR;
+ if (sg_entries_left == 1)
+ flags |= LEAPRAID_IEEE_SGE_FLG_EOL;
+ leapraid_single_ieee_sg_append(sg_entry_cur, flags,
+ 0, sg_dma_len(scmd_sg_cur),
+ sg_dma_address(scmd_sg_cur));
+ scmd_sg_cur = sg_next(scmd_sg_cur);
+ sg_entry_cur += LEAPRAID_IEEE_SGE64_ENTRY_SIZE;
+ sg_entries_left--;
+ }
+ return 0;
+}
+
+void leapraid_build_ieee_sg(struct leapraid_adapter *adapter, void *sge,
+ dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size)
+{
+ if (h2c_size && !c2h_size) {
+ leapraid_single_ieee_sg_append(sge,
+ LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_EOL |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR,
+ 0,
+ h2c_size,
+ h2c_dma_addr);
+ } else if (!h2c_size && c2h_size) {
+ leapraid_single_ieee_sg_append(sge,
+ LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_EOL |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR,
+ 0,
+ c2h_size,
+ c2h_dma_addr);
+ } else if (h2c_size && c2h_size) {
+ leapraid_single_ieee_sg_append(sge,
+ LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR,
+ 0,
+ h2c_size,
+ h2c_dma_addr);
+ sge += LEAPRAID_IEEE_SGE64_ENTRY_SIZE;
+ leapraid_single_ieee_sg_append(sge,
+ LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR |
+ LEAPRAID_IEEE_SGE_FLG_EOL,
+ 0,
+ c2h_size,
+ c2h_dma_addr);
+ } else {
+ return leapraid_build_ieee_nodata_sg(adapter, sge);
+ }
+}
+
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_from_tgt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_starget_priv *tgt_priv)
+{
+ assert_spin_locked(&adapter->dev_topo.sas_dev_lock);
+ if (tgt_priv->sas_dev)
+ leapraid_sdev_get(tgt_priv->sas_dev);
+
+ return tgt_priv->sas_dev;
+}
+
+struct leapraid_sas_dev *leapraid_get_sas_dev_from_tgt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_starget_priv *tgt_priv)
+{
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_from_tgt(adapter, tgt_priv);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return sas_dev;
+}
+
+static struct leapraid_card_port *leapraid_get_port_by_id(
+ struct leapraid_adapter *adapter,
+ u8 port_id, bool skip_dirty)
+{
+ struct leapraid_card_port *port;
+ struct leapraid_card_port *dirty_port = NULL;
+
+ if (!adapter->adapter_attr.enable_mp)
+ port_id = LEAPRAID_DISABLE_MP_PORT_ID;
+
+ list_for_each_entry(port, &adapter->dev_topo.card_port_list, list) {
+ if (port->port_id != port_id)
+ continue;
+
+ if (!(port->flg & LEAPRAID_CARD_PORT_FLG_DIRTY))
+ return port;
+
+ if (skip_dirty && !dirty_port)
+ dirty_port = port;
+ }
+
+ if (dirty_port)
+ return dirty_port;
+
+ if (unlikely(!adapter->adapter_attr.enable_mp)) {
+ port = kzalloc(sizeof(*port), GFP_ATOMIC);
+ if (!port)
+ return NULL;
+
+ port->port_id = LEAPRAID_DISABLE_MP_PORT_ID;
+ list_add_tail(&port->list, &adapter->dev_topo.card_port_list);
+ return port;
+ }
+
+ return NULL;
+}
+
+struct leapraid_vphy *leapraid_get_vphy_by_phy(struct leapraid_card_port *port,
+ u32 phy_seq_num)
+{
+ struct leapraid_vphy *vphy;
+
+ if (!port || !port->vphys_mask)
+ return NULL;
+
+ list_for_each_entry(vphy, &port->vphys_list, list) {
+ if (vphy->phy_mask & BIT(phy_seq_num))
+ return vphy;
+ }
+
+ return NULL;
+}
+
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_by_addr_and_rphy(
+ struct leapraid_adapter *adapter, u64 sas_address,
+ struct sas_rphy *rphy)
+{
+ struct leapraid_sas_dev *sas_dev;
+
+ assert_spin_locked(&adapter->dev_topo.sas_dev_lock);
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_list, list)
+ if (sas_dev->sas_addr == sas_address &&
+ sas_dev->rphy == rphy) {
+ leapraid_sdev_get(sas_dev);
+ return sas_dev;
+ }
+
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_init_list,
+ list)
+ if (sas_dev->sas_addr == sas_address &&
+ sas_dev->rphy == rphy) {
+ leapraid_sdev_get(sas_dev);
+ return sas_dev;
+ }
+
+ return NULL;
+}
+
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_by_addr(
+ struct leapraid_adapter *adapter,
+ u64 sas_address, struct leapraid_card_port *port)
+{
+ struct leapraid_sas_dev *sas_dev;
+
+ if (!port)
+ return NULL;
+
+ assert_spin_locked(&adapter->dev_topo.sas_dev_lock);
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_list, list)
+ if (sas_dev->sas_addr == sas_address &&
+ sas_dev->card_port == port) {
+ leapraid_sdev_get(sas_dev);
+ return sas_dev;
+ }
+
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_init_list,
+ list)
+ if (sas_dev->sas_addr == sas_address &&
+ sas_dev->card_port == port) {
+ leapraid_sdev_get(sas_dev);
+ return sas_dev;
+ }
+
+ return NULL;
+}
+
+struct leapraid_sas_dev *leapraid_get_sas_dev_by_addr(
+ struct leapraid_adapter *adapter,
+ u64 sas_address, struct leapraid_card_port *port)
+{
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+
+ if (!port)
+ return NULL;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(adapter, sas_address,
+ port);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return sas_dev;
+}
+
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_by_hdl(
+ struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_sas_dev *sas_dev;
+
+ assert_spin_locked(&adapter->dev_topo.sas_dev_lock);
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_list, list)
+ if (sas_dev->hdl == hdl) {
+ leapraid_sdev_get(sas_dev);
+ return sas_dev;
+ }
+
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_init_list,
+ list)
+ if (sas_dev->hdl == hdl) {
+ leapraid_sdev_get(sas_dev);
+ return sas_dev;
+ }
+
+ return NULL;
+}
+
+struct leapraid_sas_dev *leapraid_get_sas_dev_by_hdl(
+ struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return sas_dev;
+}
+
+void leapraid_sas_dev_remove(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev)
+{
+ unsigned long flags;
+ bool del_from_list;
+
+ if (!sas_dev)
+ return;
+
+ del_from_list = false;
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ if (!list_empty(&sas_dev->list)) {
+ list_del_init(&sas_dev->list);
+ del_from_list = true;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (del_from_list)
+ leapraid_sdev_put(sas_dev);
+}
+
+static void leapraid_sas_dev_remove_by_hdl(struct leapraid_adapter *adapter,
+ u16 hdl)
+{
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ bool del_from_list;
+
+ if (adapter->access_ctrl.shost_recovering)
+ return;
+
+ del_from_list = false;
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ if (sas_dev) {
+ if (!list_empty(&sas_dev->list)) {
+ list_del_init(&sas_dev->list);
+ del_from_list = true;
+ leapraid_sdev_put(sas_dev);
+ }
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (del_from_list) {
+ leapraid_remove_device(adapter, sas_dev);
+ leapraid_sdev_put(sas_dev);
+ }
+}
+
+void leapraid_sas_dev_remove_by_sas_address(struct leapraid_adapter *adapter,
+ u64 sas_address,
+ struct leapraid_card_port *port)
+{
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ bool del_from_list;
+
+ if (adapter->access_ctrl.shost_recovering)
+ return;
+
+ del_from_list = false;
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(adapter, sas_address,
+ port);
+ if (sas_dev) {
+ if (!list_empty(&sas_dev->list)) {
+ list_del_init(&sas_dev->list);
+ del_from_list = true;
+ leapraid_sdev_put(sas_dev);
+ }
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (del_from_list) {
+ leapraid_remove_device(adapter, sas_dev);
+ leapraid_sdev_put(sas_dev);
+ }
+}
+
+struct leapraid_raid_volume *leapraid_raid_volume_find_by_id(
+ struct leapraid_adapter *adapter, uint id, uint channel)
+{
+ struct leapraid_raid_volume *raid_volume;
+
+ list_for_each_entry(raid_volume, &adapter->dev_topo.raid_volume_list,
+ list) {
+ if (raid_volume->id == id &&
+ raid_volume->channel == channel) {
+ return raid_volume;
+ }
+ }
+
+ return NULL;
+}
+
+struct leapraid_raid_volume *leapraid_raid_volume_find_by_hdl(
+ struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_raid_volume *raid_volume;
+
+ list_for_each_entry(raid_volume, &adapter->dev_topo.raid_volume_list,
+ list) {
+ if (raid_volume->hdl == hdl)
+ return raid_volume;
+ }
+
+ return NULL;
+}
+
+static struct leapraid_raid_volume *leapraid_raid_volume_find_by_wwid(
+ struct leapraid_adapter *adapter, u64 wwid)
+{
+ struct leapraid_raid_volume *raid_volume;
+
+ list_for_each_entry(raid_volume, &adapter->dev_topo.raid_volume_list,
+ list) {
+ if (raid_volume->wwid == wwid)
+ return raid_volume;
+ }
+
+ return NULL;
+}
+
+static void leapraid_raid_volume_add(struct leapraid_adapter *adapter,
+ struct leapraid_raid_volume *raid_volume)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ list_add_tail(&raid_volume->list, &adapter->dev_topo.raid_volume_list);
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+}
+
+void leapraid_raid_volume_remove(struct leapraid_adapter *adapter,
+ struct leapraid_raid_volume *raid_volume)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ list_del(&raid_volume->list);
+ kfree(raid_volume);
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+}
+
+static struct leapraid_enc_node *leapraid_enc_find_by_hdl(
+ struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_enc_node *enc_dev;
+
+ list_for_each_entry(enc_dev, &adapter->dev_topo.enc_list, list) {
+ if (le16_to_cpu(enc_dev->pg0.enc_hdl) == hdl)
+ return enc_dev;
+ }
+
+ return NULL;
+}
+
+struct leapraid_topo_node *leapraid_exp_find_by_sas_address(
+ struct leapraid_adapter *adapter,
+ u64 sas_address, struct leapraid_card_port *port)
+{
+ struct leapraid_topo_node *sas_exp;
+
+ if (!port)
+ return NULL;
+
+ list_for_each_entry(sas_exp, &adapter->dev_topo.exp_list, list) {
+ if (sas_exp->sas_address == sas_address &&
+ sas_exp->card_port == port)
+ return sas_exp;
+ }
+
+ return NULL;
+}
+
+bool leapraid_scmd_find_by_tgt(struct leapraid_adapter *adapter, uint id,
+ uint channel)
+{
+ struct scsi_cmnd *scmd;
+ int taskid;
+
+ for (taskid = 1; taskid <= adapter->shost->can_queue; taskid++) {
+ scmd = leapraid_get_scmd_from_taskid(adapter, taskid);
+ if (!scmd)
+ continue;
+
+ if (scmd->device->id == id && scmd->device->channel == channel)
+ return true;
+ }
+
+ return false;
+}
+
+bool leapraid_scmd_find_by_lun(struct leapraid_adapter *adapter, uint id,
+ unsigned int lun, uint channel)
+{
+ struct scsi_cmnd *scmd;
+ int taskid;
+
+ for (taskid = 1; taskid <= adapter->shost->can_queue; taskid++) {
+ scmd = leapraid_get_scmd_from_taskid(adapter, taskid);
+ if (!scmd)
+ continue;
+
+ if (scmd->device->id == id &&
+ scmd->device->channel == channel &&
+ scmd->device->lun == lun)
+ return true;
+ }
+
+ return false;
+}
+
+static struct leapraid_topo_node *leapraid_exp_find_by_hdl(
+ struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_topo_node *sas_exp;
+
+ list_for_each_entry(sas_exp, &adapter->dev_topo.exp_list, list) {
+ if (sas_exp->hdl == hdl)
+ return sas_exp;
+ }
+
+ return NULL;
+}
+
+static enum leapraid_card_port_checking_flg leapraid_get_card_port_feature(
+ struct leapraid_card_port *old_card_port,
+ struct leapraid_card_port *card_port,
+ struct leapraid_card_port_feature *feature)
+{
+ feature->dirty_flg =
+ old_card_port->flg & LEAPRAID_CARD_PORT_FLG_DIRTY;
+ feature->same_addr =
+ old_card_port->sas_address == card_port->sas_address;
+ feature->exact_phy =
+ old_card_port->phy_mask == card_port->phy_mask;
+ feature->phy_overlap =
+ old_card_port->phy_mask & card_port->phy_mask;
+ feature->same_port =
+ old_card_port->port_id == card_port->port_id;
+ feature->cur_chking_old_port = old_card_port;
+
+ if (!feature->dirty_flg || !feature->same_addr)
+ return CARD_PORT_SKIP_CHECKING;
+
+ return CARD_PORT_FURTHER_CHECKING_NEEDED;
+}
+
+static int leapraid_process_card_port_feature(
+ struct leapraid_card_port_feature *feature)
+{
+ struct leapraid_card_port *old_card_port;
+
+ old_card_port = feature->cur_chking_old_port;
+ if (feature->exact_phy) {
+ feature->checking_state = SAME_PORT_WITH_NOTHING_CHANGED;
+ feature->expected_old_port = old_card_port;
+ return 1;
+ } else if (feature->phy_overlap) {
+ if (feature->same_port) {
+ feature->checking_state =
+ SAME_PORT_WITH_PARTIALLY_CHANGED_PHYS;
+ feature->expected_old_port = old_card_port;
+ } else if (feature->checking_state !=
+ SAME_PORT_WITH_PARTIALLY_CHANGED_PHYS) {
+ feature->checking_state =
+ SAME_ADDR_WITH_PARTIALLY_CHANGED_PHYS;
+ feature->expected_old_port = old_card_port;
+ }
+ } else {
+ if (feature->checking_state !=
+ SAME_PORT_WITH_PARTIALLY_CHANGED_PHYS &&
+ feature->checking_state !=
+ SAME_ADDR_WITH_PARTIALLY_CHANGED_PHYS) {
+ feature->checking_state = SAME_ADDR_ONLY;
+ feature->expected_old_port = old_card_port;
+ feature->same_addr_port_count++;
+ }
+ }
+
+ return 0;
+}
+
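+/**
+ * leapraid_check_card_port - classify a discovered port against known ports
+ * @adapter: owning adapter
+ * @card_port: newly discovered card port
+ * @expected_card_port: filled with the best-matching dirty port, if any
+ * @count: number of old ports matching by SAS address only
+ *
+ * Return: the resulting checking state, e.g. NEW_CARD_PORT or
+ * SAME_PORT_WITH_NOTHING_CHANGED.
+ */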
+static int leapraid_check_card_port(struct leapraid_adapter *adapter,
+ struct leapraid_card_port *card_port,
+ struct leapraid_card_port **expected_card_port,
+ int *count)
+{
+ struct leapraid_card_port *old_card_port;
+ struct leapraid_card_port_feature feature;
+
+ *expected_card_port = NULL;
+ memset(&feature, 0, sizeof(struct leapraid_card_port_feature));
+ feature.expected_old_port = NULL;
+ feature.same_addr_port_count = 0;
+ feature.checking_state = NEW_CARD_PORT;
+
+ list_for_each_entry(old_card_port, &adapter->dev_topo.card_port_list,
+ list) {
+ if (leapraid_get_card_port_feature(old_card_port, card_port,
+ &feature))
+ continue;
+
+ if (leapraid_process_card_port_feature(&feature))
+ break;
+ }
+
+ if (feature.checking_state == SAME_ADDR_ONLY)
+ *count = feature.same_addr_port_count;
+
+ *expected_card_port = feature.expected_old_port;
+ return feature.checking_state;
+}
+
+static void leapraid_del_phy_part_of_another_port(
+ struct leapraid_adapter *adapter,
+ struct leapraid_card_port *card_port_table, int index,
+ u8 port_count, int offset)
+{
+ struct leapraid_topo_node *card_topo_node;
+ bool found = false;
+ int i;
+
+ card_topo_node = &adapter->dev_topo.card;
+ for (i = 0; i < port_count; i++) {
+ if (i == index)
+ continue;
+
+ if (card_port_table[i].phy_mask & BIT(offset)) {
+ leapraid_transport_detach_phy_to_port(adapter,
+ card_topo_node,
+ &card_topo_node->card_phy[offset]);
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ card_port_table[index].phy_mask |= BIT(offset);
+}
+
+static void leapraid_add_or_del_phys_from_existing_port(
+ struct leapraid_adapter *adapter,
+ struct leapraid_card_port *card_port,
+ struct leapraid_card_port *card_port_table,
+ int index, u8 port_count)
+{
+ struct leapraid_topo_node *card_topo_node;
+ u32 phy_mask_diff;
+ u32 offset = 0;
+
+ card_topo_node = &adapter->dev_topo.card;
+ phy_mask_diff = card_port->phy_mask ^
+ card_port_table[index].phy_mask;
+ for (offset = 0; offset < adapter->dev_topo.card.phys_num; offset++) {
+ if (!(phy_mask_diff & BIT(offset)))
+ continue;
+
+ if (!(card_port_table[index].phy_mask & BIT(offset))) {
+			leapraid_del_phy_part_of_another_port(adapter,
+ card_port_table,
+ index, port_count,
+ offset);
+ continue;
+ }
+
+ if (card_topo_node->card_phy[offset].phy_is_assigned)
+ leapraid_transport_detach_phy_to_port(adapter,
+ card_topo_node,
+ &card_topo_node->card_phy[offset]);
+
+ leapraid_transport_attach_phy_to_port(adapter,
+ card_topo_node, &card_topo_node->card_phy[offset],
+ card_port->sas_address,
+ card_port);
+ }
+}
+
+struct leapraid_sas_dev *leapraid_get_next_sas_dev_from_init_list(
+ struct leapraid_adapter *adapter)
+{
+ struct leapraid_sas_dev *sas_dev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ if (!list_empty(&adapter->dev_topo.sas_dev_init_list)) {
+ sas_dev = list_first_entry(&adapter->dev_topo.sas_dev_init_list,
+ struct leapraid_sas_dev, list);
+ leapraid_sdev_get(sas_dev);
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return sas_dev;
+}
+
+static bool leapraid_check_boot_dev_internal(u64 sas_address, u64 dev_name,
+ u64 enc_lid, u16 slot,
+ struct leapraid_boot_dev *boot_dev,
+ u8 form)
+{
+ if (!boot_dev)
+ return false;
+
+ switch (form & LEAPRAID_BOOTDEV_FORM_MASK) {
+ case LEAPRAID_BOOTDEV_FORM_SAS_WWID:
+ if (!sas_address)
+ return false;
+
+ return sas_address ==
+ le64_to_cpu(((struct leapraid_boot_dev_format_sas_wwid *)(
+ boot_dev->pg_dev))->sas_addr);
+ case LEAPRAID_BOOTDEV_FORM_ENC_SLOT:
+ if (!enc_lid)
+ return false;
+
+ return (enc_lid == le64_to_cpu(((struct leapraid_boot_dev_format_enc_slot *)(
+ boot_dev->pg_dev))->enc_lid) &&
+ slot == le16_to_cpu(((struct leapraid_boot_dev_format_enc_slot *)(
+ boot_dev->pg_dev))->slot_num));
+ case LEAPRAID_BOOTDEV_FORM_DEV_NAME:
+ if (!dev_name)
+ return false;
+
+ return dev_name == le64_to_cpu(((struct leapraid_boot_dev_format_dev_name *)(
+ boot_dev->pg_dev))->dev_name);
+ case LEAPRAID_BOOTDEV_FORM_NONE:
+ default:
+ return false;
+ }
+}
+
+static void leapraid_try_set_boot_dev(struct leapraid_boot_dev *boot_dev,
+ u64 sas_addr, u64 dev_name,
+ u64 enc_lid, u16 slot,
+ void *dev, u32 chnl)
+{
+ bool matched = false;
+
+ if (boot_dev->dev)
+ return;
+
+ matched = leapraid_check_boot_dev_internal(sas_addr, dev_name, enc_lid,
+ slot, boot_dev,
+ boot_dev->form);
+ if (matched) {
+ boot_dev->dev = dev;
+ boot_dev->chnl = chnl;
+ }
+}
+
+static void leapraid_check_boot_dev(struct leapraid_adapter *adapter,
+ void *dev, u32 chnl)
+{
+ u64 sas_addr = 0;
+ u64 dev_name = 0;
+ u64 enc_lid = 0;
+ u16 slot = 0;
+
+ if (!adapter->scan_dev_desc.driver_loading)
+ return;
+
+ switch (chnl) {
+ case RAID_CHANNEL:
+ {
+ struct leapraid_raid_volume *raid_volume =
+ (struct leapraid_raid_volume *)dev;
+
+ sas_addr = raid_volume->wwid;
+ break;
+ }
+ default:
+ {
+ struct leapraid_sas_dev *sas_dev =
+ (struct leapraid_sas_dev *)dev;
+ sas_addr = sas_dev->sas_addr;
+ dev_name = sas_dev->dev_name;
+ enc_lid = sas_dev->enc_lid;
+ slot = sas_dev->slot;
+ break;
+ }
+ }
+
+ leapraid_try_set_boot_dev(&adapter->boot_devs.requested_boot_dev,
+ sas_addr, dev_name, enc_lid,
+ slot, dev, chnl);
+ leapraid_try_set_boot_dev(&adapter->boot_devs.requested_alt_boot_dev,
+ sas_addr, dev_name, enc_lid,
+ slot, dev, chnl);
+ leapraid_try_set_boot_dev(&adapter->boot_devs.current_boot_dev,
+ sas_addr, dev_name, enc_lid,
+ slot, dev, chnl);
+}
+
+static void leapraid_build_and_fire_cfg_req(struct leapraid_adapter *adapter,
+ struct leapraid_cfg_req *leap_mpi_cfgp_req,
+ struct leapraid_cfg_rep *leap_mpi_cfgp_rep)
+{
+ struct leapraid_cfg_req *local_leap_cfg_req;
+
+ memset(leap_mpi_cfgp_rep, 0, sizeof(struct leapraid_cfg_rep));
+ memset((void *)(&adapter->driver_cmds.cfg_op_cmd.reply), 0,
+ sizeof(struct leapraid_cfg_rep));
+ adapter->driver_cmds.cfg_op_cmd.status = LEAPRAID_CMD_PENDING;
+ local_leap_cfg_req = leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.cfg_op_cmd.inter_taskid);
+ memcpy(local_leap_cfg_req, leap_mpi_cfgp_req,
+ sizeof(struct leapraid_cfg_req));
+ init_completion(&adapter->driver_cmds.cfg_op_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.cfg_op_cmd.inter_taskid);
+ wait_for_completion_timeout(&adapter->driver_cmds.cfg_op_cmd.done,
+ LEAPRAID_CFG_OP_TIMEOUT * HZ);
+}
+
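+/*
+ * Issue a config request and wait for its completion, retrying up to
+ * LEAPRAID_CFG_REQ_RETRY_TIMES when the command was lost to a hard
+ * reset, and escalating to a hard reset of our own on a plain timeout
+ * (unless a shost/PCIe recovery is already in progress).
+ */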
+static int leapraid_req_cfg_func(struct leapraid_adapter *adapter,
+ struct leapraid_cfg_req *leap_mpi_cfgp_req,
+ struct leapraid_cfg_rep *leap_mpi_cfgp_rep,
+ void *target_cfg_pg, void *real_cfg_pg_addr,
+ u16 target_real_cfg_pg_sz)
+{
+ u32 adapter_status = UINT_MAX;
+ bool issue_reset = false;
+ u8 retry_cnt;
+ int rc;
+
+ retry_cnt = 0;
+ mutex_lock(&adapter->driver_cmds.cfg_op_cmd.mutex);
+retry:
+ if (retry_cnt) {
+ if (retry_cnt > LEAPRAID_CFG_REQ_RETRY_TIMES) {
+ rc = -EFAULT;
+ goto out;
+ }
+ dev_warn(&adapter->pdev->dev,
+ "cfg-req: retry request, cnt=%u\n", retry_cnt);
+ }
+
+ rc = leapraid_check_adapter_is_op(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev,
+ "cfg-req: adapter not operational\n");
+ goto out;
+ }
+
+ leapraid_build_and_fire_cfg_req(adapter, leap_mpi_cfgp_req,
+ leap_mpi_cfgp_rep);
+ if (!(adapter->driver_cmds.cfg_op_cmd.status & LEAPRAID_CMD_DONE)) {
+ retry_cnt++;
+ if (adapter->driver_cmds.cfg_op_cmd.status &
+ LEAPRAID_CMD_RESET) {
+			dev_warn(&adapter->pdev->dev,
+				 "cfg-req: cmd terminated due to hard reset\n");
+ goto retry;
+ }
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering) {
+ dev_err(&adapter->pdev->dev,
+ "cfg-req: cmd not done during %s, skip reset\n",
+ adapter->access_ctrl.shost_recovering ?
+ "shost recovery" : "pcie recovery");
+ issue_reset = false;
+ rc = -EFAULT;
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "cfg-req: cmd timeout, issuing hard reset\n");
+ issue_reset = true;
+ }
+
+ goto out;
+ }
+
+ if (adapter->driver_cmds.cfg_op_cmd.status &
+ LEAPRAID_CMD_REPLY_VALID) {
+ memcpy(leap_mpi_cfgp_rep,
+ (void *)(&adapter->driver_cmds.cfg_op_cmd.reply),
+ sizeof(struct leapraid_cfg_rep));
+ adapter_status = le16_to_cpu(
+ leap_mpi_cfgp_rep->adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (adapter_status == LEAPRAID_ADAPTER_STATUS_SUCCESS) {
+ if (target_cfg_pg && real_cfg_pg_addr &&
+ target_real_cfg_pg_sz)
+ if (leap_mpi_cfgp_req->action ==
+ LEAPRAID_CFG_ACT_PAGE_READ_CUR)
+ memcpy(target_cfg_pg,
+ real_cfg_pg_addr,
+ target_real_cfg_pg_sz);
+ } else {
+ if (adapter_status !=
+ LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_PAGE)
+ dev_err(&adapter->pdev->dev,
+ "cfg-rep: adapter_status=0x%x\n",
+ adapter_status);
+ rc = -EFAULT;
+ }
+ } else {
+ dev_err(&adapter->pdev->dev, "cfg-rep: reply invalid\n");
+ rc = -EFAULT;
+ }
+
+out:
+ adapter->driver_cmds.cfg_op_cmd.status = LEAPRAID_CMD_NOT_USED;
+ mutex_unlock(&adapter->driver_cmds.cfg_op_cmd.mutex);
+ if (issue_reset) {
+ if (adapter->scan_dev_desc.first_scan_dev_fired) {
+ dev_info(&adapter->pdev->dev,
+ "%s:%d cfg-req: failure, issuing reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, FULL_RESET);
+ rc = -EFAULT;
+ } else {
+			dev_warn(&adapter->pdev->dev,
+				 "cfg-req: cmd failed during init, skip reset\n");
+ rc = -EFAULT;
+ }
+ }
+ return rc;
+}
+
+static int leapraid_request_cfg_pg_header(struct leapraid_adapter *adapter,
+ struct leapraid_cfg_req *leap_mpi_cfgp_req,
+ struct leapraid_cfg_rep *leap_mpi_cfgp_rep)
+{
+ return leapraid_req_cfg_func(adapter, leap_mpi_cfgp_req,
+ leap_mpi_cfgp_rep, NULL, NULL, 0);
+}
+
+static int leapraid_request_cfg_pg(struct leapraid_adapter *adapter,
+ struct leapraid_cfg_req *leap_mpi_cfgp_req,
+ struct leapraid_cfg_rep *leap_mpi_cfgp_rep,
+ void *target_cfg_pg, void *real_cfg_pg_addr,
+ u16 target_real_cfg_pg_sz)
+{
+ return leapraid_req_cfg_func(adapter, leap_mpi_cfgp_req,
+ leap_mpi_cfgp_rep, target_cfg_pg,
+ real_cfg_pg_addr, target_real_cfg_pg_sz);
+}
+
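+/*
+ * Two-step config page read: first fetch the page header to learn the
+ * real page length, then re-issue the request as PAGE_READ_CUR with a
+ * coherent DMA buffer sized from that header (page_len, or ext_page_len
+ * for extended pages, in 4-byte units).
+ */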
+int leapraid_op_config_page(struct leapraid_adapter *adapter,
+ void *target_cfg_pg, union cfg_param_1 cfgp1,
+ union cfg_param_2 cfgp2,
+ enum config_page_action cfg_op)
+{
+ struct leapraid_cfg_req leap_mpi_cfgp_req;
+ struct leapraid_cfg_rep leap_mpi_cfgp_rep;
+ u16 real_cfg_pg_sz = 0;
+ void *real_cfg_pg_addr = NULL;
+ dma_addr_t real_cfg_pg_dma = 0;
+ u32 __page_size;
+ int rc;
+
+ memset(&leap_mpi_cfgp_req, 0, sizeof(struct leapraid_cfg_req));
+ leap_mpi_cfgp_req.func = LEAPRAID_FUNC_CONFIG_OP;
+ leap_mpi_cfgp_req.action = LEAPRAID_CFG_ACT_PAGE_HEADER;
+
+ switch (cfg_op) {
+ case GET_BIOS_PG3:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_BIOS;
+ leap_mpi_cfgp_req.header.page_num =
+ LEAPRAID_CFG_PAGE_NUM_BIOS3;
+ __page_size = sizeof(struct leapraid_bios_page3);
+ break;
+ case GET_BIOS_PG2:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_BIOS;
+ leap_mpi_cfgp_req.header.page_num =
+ LEAPRAID_CFG_PAGE_NUM_BIOS2;
+ __page_size = sizeof(struct leapraid_bios_page2);
+ break;
+ case GET_SAS_DEVICE_PG0:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type = LEAPRAID_CFG_EXTPT_SAS_DEV;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_DEV0;
+ __page_size = sizeof(struct leapraid_sas_dev_p0);
+ break;
+ case GET_SAS_IOUNIT_PG0:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type =
+ LEAPRAID_CFG_EXTPT_SAS_IO_UNIT;
+ leap_mpi_cfgp_req.header.page_num =
+ LEAPRAID_CFG_PAGE_NUM_IOUNIT0;
+ __page_size = cfgp1.size;
+ break;
+ case GET_SAS_IOUNIT_PG1:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type =
+ LEAPRAID_CFG_EXTPT_SAS_IO_UNIT;
+ leap_mpi_cfgp_req.header.page_num =
+ LEAPRAID_CFG_PAGE_NUM_IOUNIT1;
+ __page_size = cfgp1.size;
+ break;
+ case GET_SAS_EXPANDER_PG0:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type = LEAPRAID_CFG_EXTPT_SAS_EXP;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_EXP0;
+ __page_size = sizeof(struct leapraid_exp_p0);
+ break;
+ case GET_SAS_EXPANDER_PG1:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type = LEAPRAID_CFG_EXTPT_SAS_EXP;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_EXP1;
+ __page_size = sizeof(struct leapraid_exp_p1);
+ break;
+ case GET_SAS_ENCLOSURE_PG0:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type = LEAPRAID_CFG_EXTPT_ENC;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_ENC0;
+ __page_size = sizeof(struct leapraid_enc_p0);
+ break;
+ case GET_PHY_PG0:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type = LEAPRAID_CFG_EXTPT_SAS_PHY;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_PHY0;
+ __page_size = sizeof(struct leapraid_sas_phy_p0);
+ break;
+ case GET_RAID_VOLUME_PG0:
+ leap_mpi_cfgp_req.header.page_type =
+ LEAPRAID_CFG_PT_RAID_VOLUME;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_VOL0;
+ __page_size = cfgp1.size;
+ break;
+ case GET_RAID_VOLUME_PG1:
+ leap_mpi_cfgp_req.header.page_type =
+ LEAPRAID_CFG_PT_RAID_VOLUME;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_VOL1;
+ __page_size = sizeof(struct leapraid_raidvol_p1);
+ break;
+ case GET_PHY_DISK_PG0:
+ leap_mpi_cfgp_req.header.page_type =
+ LEAPRAID_CFG_PT_RAID_PHYSDISK;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_PD0;
+ __page_size = sizeof(struct leapraid_raidpd_p0);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "unsupported config page action=%d!\n", cfg_op);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ leapraid_build_nodata_mpi_sg(adapter,
+ &leap_mpi_cfgp_req.page_buf_sge);
+ rc = leapraid_request_cfg_pg_header(adapter,
+ &leap_mpi_cfgp_req,
+ &leap_mpi_cfgp_rep);
+ if (rc) {
+		dev_err(&adapter->pdev->dev,
+			"cfg-req: header failed rc=%d\n", rc);
+ goto out;
+ }
+
+ if (cfg_op == GET_SAS_DEVICE_PG0 ||
+ cfg_op == GET_SAS_EXPANDER_PG0 ||
+ cfg_op == GET_SAS_ENCLOSURE_PG0 ||
+ cfg_op == GET_RAID_VOLUME_PG1)
+ leap_mpi_cfgp_req.page_addr = cpu_to_le32(cfgp1.form |
+ cfgp2.handle);
+ else if (cfg_op == GET_PHY_DISK_PG0)
+ leap_mpi_cfgp_req.page_addr = cpu_to_le32(cfgp1.form |
+ cfgp2.form_specific);
+ else if (cfg_op == GET_RAID_VOLUME_PG0)
+ leap_mpi_cfgp_req.page_addr =
+ cpu_to_le32(cfgp2.handle |
+ LEAPRAID_RAID_VOL_CFG_PGAD_HDL);
+ else if (cfg_op == GET_SAS_EXPANDER_PG1)
+ leap_mpi_cfgp_req.page_addr =
+ cpu_to_le32(cfgp2.handle |
+ (cfgp1.phy_number <<
+ LEAPRAID_SAS_EXP_CFG_PGAD_PHYNUM_SHIFT) |
+ LEAPRAID_SAS_EXP_CFG_PGAD_HDL_PHY_NUM);
+ else if (cfg_op == GET_PHY_PG0)
+ leap_mpi_cfgp_req.page_addr = cpu_to_le32(cfgp1.phy_number |
+ LEAPRAID_SAS_PHY_CFG_PGAD_PHY_NUMBER);
+
+ leap_mpi_cfgp_req.action = LEAPRAID_CFG_ACT_PAGE_READ_CUR;
+
+ leap_mpi_cfgp_req.header.page_num = leap_mpi_cfgp_rep.header.page_num;
+ leap_mpi_cfgp_req.header.page_type =
+ leap_mpi_cfgp_rep.header.page_type;
+ leap_mpi_cfgp_req.header.page_len = leap_mpi_cfgp_rep.header.page_len;
+ leap_mpi_cfgp_req.ext_page_len = leap_mpi_cfgp_rep.ext_page_len;
+ leap_mpi_cfgp_req.ext_page_type = leap_mpi_cfgp_rep.ext_page_type;
+
+	real_cfg_pg_sz = (leap_mpi_cfgp_req.header.page_len) ?
+			 leap_mpi_cfgp_req.header.page_len *
+			 LEAPRAID_CFG_UNIT_SIZE :
+			 le16_to_cpu(leap_mpi_cfgp_rep.ext_page_len) *
+			 LEAPRAID_CFG_UNIT_SIZE;
+ real_cfg_pg_addr = dma_alloc_coherent(&adapter->pdev->dev,
+ real_cfg_pg_sz,
+ &real_cfg_pg_dma,
+ GFP_KERNEL);
+ if (!real_cfg_pg_addr) {
+ dev_err(&adapter->pdev->dev, "cfg-req: dma alloc failed\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (leap_mpi_cfgp_req.action == LEAPRAID_CFG_ACT_PAGE_WRITE_CUR) {
+ leapraid_single_mpi_sg_append(adapter,
+ &leap_mpi_cfgp_req.page_buf_sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL |
+ LEAPRAID_SGE_FLG_H2C) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ real_cfg_pg_sz,
+ real_cfg_pg_dma);
+ memcpy(real_cfg_pg_addr, target_cfg_pg,
+ min_t(u16, real_cfg_pg_sz, __page_size));
+ } else {
+ memset(target_cfg_pg, 0, __page_size);
+ leapraid_single_mpi_sg_append(adapter,
+ &leap_mpi_cfgp_req.page_buf_sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ real_cfg_pg_sz,
+ real_cfg_pg_dma);
+ memset(real_cfg_pg_addr, 0,
+ min_t(u16, real_cfg_pg_sz, __page_size));
+ }
+
+ rc = leapraid_request_cfg_pg(adapter,
+ &leap_mpi_cfgp_req,
+ &leap_mpi_cfgp_rep,
+ target_cfg_pg,
+ real_cfg_pg_addr,
+ min_t(u16, real_cfg_pg_sz, __page_size));
+ if (rc) {
+ u32 adapter_status;
+
+ adapter_status = le16_to_cpu(leap_mpi_cfgp_rep.adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (adapter_status !=
+ LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_PAGE)
+ dev_err(&adapter->pdev->dev,
+ "cfg-req: rc=%d, pg_info: 0x%x, 0x%x, %d\n",
+ rc, leap_mpi_cfgp_req.header.page_type,
+ leap_mpi_cfgp_req.ext_page_type,
+ leap_mpi_cfgp_req.header.page_num);
+ }
+
+ if (real_cfg_pg_addr)
+ dma_free_coherent(&adapter->pdev->dev,
+ real_cfg_pg_sz,
+ real_cfg_pg_addr,
+ real_cfg_pg_dma);
+out:
+ return rc;
+}
+
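+/*
+ * Walk the RAID configuration pages (chained via GET_NEXT_LOOP) looking
+ * for the element whose physical-disk handle matches @pd_hdl; on a match,
+ * report the owning volume handle through @vol_hdl.
+ */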
+static int leapraid_cfg_get_volume_hdl_dispatch(
+ struct leapraid_adapter *adapter,
+ struct leapraid_cfg_req *cfg_req,
+ struct leapraid_cfg_rep *cfg_rep,
+ struct leapraid_raid_cfg_p0 *raid_cfg_p0,
+ void *real_cfg_pg_addr,
+ u16 real_cfg_pg_sz,
+ u16 raid_cfg_p0_sz,
+ u16 pd_hdl, u16 *vol_hdl)
+{
+ u16 phys_disk_dev_hdl;
+ u16 adapter_status;
+ u16 element_type;
+ int config_num;
+ int rc, i;
+
+ config_num = 0xff;
+ while (true) {
+ cfg_req->page_addr =
+ cpu_to_le32(config_num +
+ LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP);
+ rc = leapraid_request_cfg_pg(
+ adapter, cfg_req, cfg_rep,
+ raid_cfg_p0, real_cfg_pg_addr,
+ min_t(u16, real_cfg_pg_sz, raid_cfg_p0_sz));
+ adapter_status = le16_to_cpu(cfg_rep->adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (rc) {
+ if (adapter_status ==
+ LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_PAGE) {
+ *vol_hdl = 0;
+ return 0;
+ }
+ return rc;
+ }
+
+ if (adapter_status != LEAPRAID_ADAPTER_STATUS_SUCCESS)
+ return -1;
+
+ for (i = 0; i < raid_cfg_p0->elements_num; i++) {
+ element_type =
+ le16_to_cpu(raid_cfg_p0->cfg_element[i].element_flg) &
+ LEAPRAID_RAIDCFG_P0_EFLG_MASK_ELEMENT_TYPE;
+
+ switch (element_type) {
+ case LEAPRAID_RAIDCFG_P0_EFLG_VOL_PHYS_DISK_ELEMENT:
+ case LEAPRAID_RAIDCFG_P0_EFLG_OCE_ELEMENT:
+ phys_disk_dev_hdl =
+ le16_to_cpu(raid_cfg_p0->cfg_element[i]
+ .phys_disk_dev_hdl);
+ if (phys_disk_dev_hdl == pd_hdl) {
+ *vol_hdl =
+ le16_to_cpu
+ (raid_cfg_p0->cfg_element[i]
+ .vol_dev_hdl);
+ return 0;
+ }
+ break;
+
+ case LEAPRAID_RAIDCFG_P0_EFLG_HOT_SPARE_ELEMENT:
+ *vol_hdl = 0;
+ return 0;
+ default:
+ break;
+ }
+ }
+ config_num = raid_cfg_p0->cfg_num;
+ }
+}
+
+int leapraid_cfg_get_volume_hdl(struct leapraid_adapter *adapter,
+ u16 pd_hdl, u16 *vol_hdl)
+{
+ struct leapraid_raid_cfg_p0 *raid_cfg_p0 = NULL;
+ struct leapraid_cfg_req cfg_req;
+ struct leapraid_cfg_rep cfg_rep;
+ dma_addr_t real_cfg_pg_dma = 0;
+ void *real_cfg_pg_addr = NULL;
+ u16 real_cfg_pg_sz = 0;
+ int rc, raid_cfg_p0_sz;
+
+ *vol_hdl = 0;
+ memset(&cfg_req, 0, sizeof(struct leapraid_cfg_req));
+ cfg_req.func = LEAPRAID_FUNC_CONFIG_OP;
+ cfg_req.action = LEAPRAID_CFG_ACT_PAGE_HEADER;
+ cfg_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ cfg_req.ext_page_type = LEAPRAID_CFG_EXTPT_RAID_CONFIG;
+ cfg_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_VOL0;
+
+ leapraid_build_nodata_mpi_sg(adapter, &cfg_req.page_buf_sge);
+ rc = leapraid_request_cfg_pg_header(adapter, &cfg_req, &cfg_rep);
+ if (rc)
+ goto out;
+
+ cfg_req.action = LEAPRAID_CFG_ACT_PAGE_READ_CUR;
+ raid_cfg_p0_sz = le16_to_cpu(cfg_rep.ext_page_len) *
+ LEAPRAID_CFG_UNIT_SIZE;
+ raid_cfg_p0 = kmalloc(raid_cfg_p0_sz, GFP_KERNEL);
+ if (!raid_cfg_p0) {
+ rc = -1;
+ goto out;
+ }
+
+ real_cfg_pg_sz = (cfg_req.header.page_len) ?
+ cfg_req.header.page_len * LEAPRAID_CFG_UNIT_SIZE :
+ le16_to_cpu(cfg_rep.ext_page_len) * LEAPRAID_CFG_UNIT_SIZE;
+
+ real_cfg_pg_addr = dma_alloc_coherent(&adapter->pdev->dev,
+ real_cfg_pg_sz, &real_cfg_pg_dma,
+ GFP_KERNEL);
+ if (!real_cfg_pg_addr) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ memset(raid_cfg_p0, 0, raid_cfg_p0_sz);
+ leapraid_single_mpi_sg_append(adapter,
+ &cfg_req.page_buf_sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ real_cfg_pg_sz,
+ real_cfg_pg_dma);
+ memset(real_cfg_pg_addr, 0,
+ min_t(u16, real_cfg_pg_sz, raid_cfg_p0_sz));
+
+ rc = leapraid_cfg_get_volume_hdl_dispatch(adapter,
+ &cfg_req, &cfg_rep,
+ raid_cfg_p0,
+ real_cfg_pg_addr,
+ real_cfg_pg_sz,
+ raid_cfg_p0_sz,
+ pd_hdl, vol_hdl);
+
+out:
+ if (real_cfg_pg_addr)
+ dma_free_coherent(&adapter->pdev->dev,
+ real_cfg_pg_sz, real_cfg_pg_addr,
+ real_cfg_pg_dma);
+ kfree(raid_cfg_p0);
+ return rc;
+}
+
+static int leapraid_get_adapter_phys(struct leapraid_adapter *adapter,
+ u8 *nr_phys)
+{
+ struct leapraid_sas_io_unit_p0 sas_io_unit_page0;
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ int rc = 0;
+
+ *nr_phys = 0;
+ cfgp1.size = sizeof(struct leapraid_sas_io_unit_p0);
+ rc = leapraid_op_config_page(adapter, &sas_io_unit_page0, cfgp1,
+ cfgp2, GET_SAS_IOUNIT_PG0);
+ if (rc)
+ return rc;
+
+ *nr_phys = sas_io_unit_page0.phy_num;
+
+ return 0;
+}
+
+static int leapraid_cfg_get_number_pds(struct leapraid_adapter *adapter,
+ u16 hdl, u8 *num_pds)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_raidvol_p0 raidvol_p0;
+ int rc;
+
+ *num_pds = 0;
+ cfgp1.size = sizeof(struct leapraid_raidvol_p0);
+ cfgp2.handle = hdl;
+ rc = leapraid_op_config_page(adapter, &raidvol_p0, cfgp1,
+ cfgp2, GET_RAID_VOLUME_PG0);
+
+ if (!rc)
+ *num_pds = raidvol_p0.num_phys_disks;
+
+ return rc;
+}
+
+int leapraid_cfg_get_volume_wwid(struct leapraid_adapter *adapter,
+ u16 vol_hdl, u64 *wwid)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_raidvol_p1 raidvol_p1;
+ int rc;
+
+ *wwid = 0;
+ cfgp1.form = LEAPRAID_RAID_VOL_CFG_PGAD_HDL;
+ cfgp2.handle = vol_hdl;
+ rc = leapraid_op_config_page(adapter, &raidvol_p1, cfgp1,
+ cfgp2, GET_RAID_VOLUME_PG1);
+ if (!rc)
+ *wwid = le64_to_cpu(raidvol_p1.wwid);
+
+ return rc;
+}
+
+static int leapraid_get_sas_io_unit_page0(struct leapraid_adapter *adapter,
+ struct leapraid_sas_io_unit_p0 *sas_io_unit_p0,
+ u16 sas_iou_pg0_sz)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+
+ cfgp1.size = sas_iou_pg0_sz;
+ return leapraid_op_config_page(adapter, sas_io_unit_p0, cfgp1,
+ cfgp2, GET_SAS_IOUNIT_PG0);
+}
+
+static int leapraid_get_sas_address(struct leapraid_adapter *adapter,
+ u16 hdl, u64 *sas_address)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_dev_p0 sas_dev_p0;
+
+ *sas_address = 0;
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = hdl;
+ if ((leapraid_op_config_page(adapter, &sas_dev_p0, cfgp1,
+ cfgp2, GET_SAS_DEVICE_PG0)))
+ return -ENXIO;
+
+ if (hdl <= adapter->dev_topo.card.phys_num &&
+ (!(le32_to_cpu(sas_dev_p0.dev_info) & LEAPRAID_DEVTYP_SEP)))
+ *sas_address = adapter->dev_topo.card.sas_address;
+ else
+ *sas_address = le64_to_cpu(sas_dev_p0.sas_address);
+
+ return 0;
+}
+
+int leapraid_get_volume_cap(struct leapraid_adapter *adapter,
+ struct leapraid_raid_volume *raid_volume)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_raidvol_p0 *raidvol_p0;
+ struct leapraid_sas_dev_p0 sas_dev_p0;
+ struct leapraid_raidpd_p0 raidpd_p0;
+ u8 num_pds;
+ u16 sz;
+
+ if ((leapraid_cfg_get_number_pds(adapter, raid_volume->hdl,
+ &num_pds)) || !num_pds)
+ return -EFAULT;
+
+ raid_volume->pd_num = num_pds;
+ sz = offsetof(struct leapraid_raidvol_p0, phys_disk) +
+ (num_pds * sizeof(struct leapraid_raidvol0_phys_disk));
+ raidvol_p0 = kzalloc(sz, GFP_KERNEL);
+ if (!raidvol_p0)
+ return -EFAULT;
+
+ cfgp1.size = sz;
+ cfgp2.handle = raid_volume->hdl;
+ if ((leapraid_op_config_page(adapter, raidvol_p0, cfgp1, cfgp2,
+ GET_RAID_VOLUME_PG0))) {
+ kfree(raidvol_p0);
+ return -EFAULT;
+ }
+
+ raid_volume->vol_type = raidvol_p0->volume_type;
+ cfgp1.form = LEAPRAID_PHYSDISK_CFG_PGAD_PHYSDISKNUM;
+ cfgp2.form_specific = raidvol_p0->phys_disk[0].phys_disk_num;
+ if (!(leapraid_op_config_page(adapter, &raidpd_p0, cfgp1, cfgp2,
+ GET_PHY_DISK_PG0))) {
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = le16_to_cpu(raidpd_p0.dev_hdl);
+ if (!(leapraid_op_config_page(adapter, &sas_dev_p0, cfgp1,
+ cfgp2, GET_SAS_DEVICE_PG0))) {
+ raid_volume->dev_info =
+ le32_to_cpu(sas_dev_p0.dev_info);
+ }
+ }
+
+ kfree(raidvol_p0);
+ return 0;
+}
+
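+/*
+ * Periodic firmware-log poll: latch the initial host/adapter log buffer
+ * positions once, then keep echoing the host position back to the
+ * controller, refresh the adapter position, and re-arm the delayed work.
+ */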
+static void leapraid_fw_log_work(struct work_struct *work)
+{
+ struct leapraid_adapter *adapter = container_of(work,
+ struct leapraid_adapter, fw_log_desc.fw_log_work.work);
+ struct leapraid_fw_log_info *infom;
+ unsigned long flags;
+
+ infom = (struct leapraid_fw_log_info *)(adapter->fw_log_desc.fw_log_buffer +
+ LEAPRAID_SYS_LOG_BUF_SIZE);
+
+ if (adapter->fw_log_desc.fw_log_init_flag == 0) {
+ infom->user_position =
+ leapraid_readl(&adapter->iomem_base->host_log_buf_pos);
+ infom->adapter_position =
+ leapraid_readl(&adapter->iomem_base->adapter_log_buf_pos);
+ adapter->fw_log_desc.fw_log_init_flag++;
+ }
+
+ writel(infom->user_position, &adapter->iomem_base->host_log_buf_pos);
+ infom->adapter_position =
+ leapraid_readl(&adapter->iomem_base->adapter_log_buf_pos);
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ if (adapter->fw_log_desc.fw_log_wq)
+ queue_delayed_work(adapter->fw_log_desc.fw_log_wq,
+ &adapter->fw_log_desc.fw_log_work,
+ msecs_to_jiffies(LEAPRAID_PCIE_LOG_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+}
+
+void leapraid_fw_log_stop(struct leapraid_adapter *adapter)
+{
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ if (!adapter->fw_log_desc.open_pcie_trace)
+ return;
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ wq = adapter->fw_log_desc.fw_log_wq;
+ adapter->fw_log_desc.fw_log_wq = NULL;
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+ if (wq) {
+ if (!cancel_delayed_work_sync(&adapter->fw_log_desc.fw_log_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+ }
+}
+
+void leapraid_fw_log_start(struct leapraid_adapter *adapter)
+{
+ unsigned long flags;
+
+ if (!adapter->fw_log_desc.open_pcie_trace)
+ return;
+
+ if (adapter->fw_log_desc.fw_log_wq)
+ return;
+
+ INIT_DELAYED_WORK(&adapter->fw_log_desc.fw_log_work,
+ leapraid_fw_log_work);
+ snprintf(adapter->fw_log_desc.fw_log_wq_name,
+ sizeof(adapter->fw_log_desc.fw_log_wq_name),
+ "poll_%s%u_fw_log",
+ LEAPRAID_DRIVER_NAME, adapter->adapter_attr.id);
+ adapter->fw_log_desc.fw_log_wq =
+ create_singlethread_workqueue(
+ adapter->fw_log_desc.fw_log_wq_name);
+ if (!adapter->fw_log_desc.fw_log_wq)
+ return;
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ if (adapter->fw_log_desc.fw_log_wq)
+ queue_delayed_work(adapter->fw_log_desc.fw_log_wq,
+ &adapter->fw_log_desc.fw_log_work,
+ msecs_to_jiffies(LEAPRAID_PCIE_LOG_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+}
+
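+/*
+ * Push the current host wall-clock time (in milliseconds) to the
+ * controller, split across the two 32-bit adapter parameter values.
+ */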
+static void leapraid_timestamp_sync(struct leapraid_adapter *adapter)
+{
+ struct leapraid_io_unit_ctrl_req *io_unit_ctrl_req;
+ ktime_t current_time;
+ bool issue_reset = false;
+ u64 time_stamp = 0;
+
+ mutex_lock(&adapter->driver_cmds.timestamp_sync_cmd.mutex);
+ adapter->driver_cmds.timestamp_sync_cmd.status = LEAPRAID_CMD_PENDING;
+ io_unit_ctrl_req =
+ leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.timestamp_sync_cmd.inter_taskid);
+ memset(io_unit_ctrl_req, 0, sizeof(struct leapraid_io_unit_ctrl_req));
+ io_unit_ctrl_req->func = LEAPRAID_FUNC_SAS_IO_UNIT_CTRL;
+ io_unit_ctrl_req->op = LEAPRAID_SAS_OP_SET_PARAMETER;
+ io_unit_ctrl_req->adapter_para = LEAPRAID_SET_PARAMETER_SYNC_TIMESTAMP;
+
+ current_time = ktime_get_real();
+ time_stamp = ktime_to_ms(current_time);
+
+ io_unit_ctrl_req->adapter_para_value =
+ cpu_to_le32(time_stamp & 0xFFFFFFFF);
+ io_unit_ctrl_req->adapter_para_value2 =
+ cpu_to_le32(time_stamp >> 32);
+ init_completion(&adapter->driver_cmds.timestamp_sync_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.timestamp_sync_cmd.inter_taskid);
+ wait_for_completion_timeout(&adapter->driver_cmds.timestamp_sync_cmd.done,
+ LEAPRAID_TIMESTAMP_SYNC_CMD_TIMEOUT * HZ);
+ if (!(adapter->driver_cmds.timestamp_sync_cmd.status &
+ LEAPRAID_CMD_DONE))
+ issue_reset =
+ leapraid_check_reset(
+ adapter->driver_cmds.timestamp_sync_cmd.status);
+
+ if (issue_reset) {
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, FULL_RESET);
+ }
+
+ adapter->driver_cmds.timestamp_sync_cmd.status = LEAPRAID_CMD_NOT_USED;
+ mutex_unlock(&adapter->driver_cmds.timestamp_sync_cmd.mutex);
+}
+
+static bool leapraid_should_skip_fault_check(struct leapraid_adapter *adapter)
+{
+ unsigned long flags;
+ bool skip;
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ skip = adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering ||
+ adapter->access_ctrl.host_removing;
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+
+ return skip;
+}
+
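+/*
+ * Periodic fault poll: hard-reset the controller if it has left the
+ * OPERATIONAL state, resync the timestamp every
+ * LEAPRAID_TIMESTAMP_SYNC_INTERVAL polls, then re-arm the delayed work.
+ */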
+static void leapraid_check_scheduled_fault_work(struct work_struct *work)
+{
+ struct leapraid_adapter *adapter;
+ unsigned long flags;
+ u32 adapter_state;
+ int rc;
+
+ adapter = container_of(work, struct leapraid_adapter,
+ reset_desc.fault_reset_work.work);
+
+ if (leapraid_should_skip_fault_check(adapter))
+ goto scheduled_timer;
+
+ adapter_state = leapraid_get_adapter_state(adapter);
+ if (adapter_state != LEAPRAID_DB_OPERATIONAL) {
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ rc = leapraid_hard_reset_handler(adapter, FULL_RESET);
+ dev_warn(&adapter->pdev->dev, "%s: hard reset: %s\n",
+ __func__, (rc == 0) ? "success" : "failed");
+
+ adapter_state = leapraid_get_adapter_state(adapter);
+ if (rc && adapter_state != LEAPRAID_DB_OPERATIONAL)
+ return;
+ }
+
+ if (++adapter->timestamp_sync_cnt >=
+ LEAPRAID_TIMESTAMP_SYNC_INTERVAL) {
+ adapter->timestamp_sync_cnt = 0;
+ leapraid_timestamp_sync(adapter);
+ }
+
+scheduled_timer:
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ if (adapter->reset_desc.fault_reset_wq)
+ queue_delayed_work(adapter->reset_desc.fault_reset_wq,
+ &adapter->reset_desc.fault_reset_work,
+ msecs_to_jiffies(LEAPRAID_FAULT_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+}
+
+void leapraid_check_scheduled_fault_start(struct leapraid_adapter *adapter)
+{
+ unsigned long flags;
+
+ if (adapter->reset_desc.fault_reset_wq)
+ return;
+
+ adapter->timestamp_sync_cnt = 0;
+ INIT_DELAYED_WORK(&adapter->reset_desc.fault_reset_work,
+ leapraid_check_scheduled_fault_work);
+ snprintf(adapter->reset_desc.fault_reset_wq_name,
+ sizeof(adapter->reset_desc.fault_reset_wq_name),
+ "poll_%s%u_status",
+ LEAPRAID_DRIVER_NAME, adapter->adapter_attr.id);
+ adapter->reset_desc.fault_reset_wq =
+ create_singlethread_workqueue(
+ adapter->reset_desc.fault_reset_wq_name);
+ if (!adapter->reset_desc.fault_reset_wq) {
+ dev_err(&adapter->pdev->dev,
+ "create single thread workqueue failed!\n");
+ return;
+ }
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ if (adapter->reset_desc.fault_reset_wq)
+ queue_delayed_work(adapter->reset_desc.fault_reset_wq,
+ &adapter->reset_desc.fault_reset_work,
+ msecs_to_jiffies(LEAPRAID_FAULT_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+}
+
+void leapraid_check_scheduled_fault_stop(struct leapraid_adapter *adapter)
+{
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ wq = adapter->reset_desc.fault_reset_wq;
+ adapter->reset_desc.fault_reset_wq = NULL;
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+
+ if (!wq)
+ return;
+
+ if (!cancel_delayed_work_sync(&adapter->reset_desc.fault_reset_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+}
+
+static bool leapraid_ready_for_scsi_io(struct leapraid_adapter *adapter,
+ u16 hdl)
+{
+ if (adapter->access_ctrl.pcie_recovering ||
+ adapter->access_ctrl.shost_recovering)
+ return false;
+
+ if (leapraid_check_adapter_is_op(adapter))
+ return false;
+
+ if (hdl == LEAPRAID_INVALID_DEV_HANDLE)
+ return false;
+
+ if (test_bit(hdl, (unsigned long *)adapter->dev_topo.dev_removing))
+ return false;
+
+ return true;
+}
+
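+/*
+ * Send a driver-internal SCSI command through the normal queuecommand
+ * path: stage the payload in a single coherent DMA buffer, wire it up as
+ * a one-entry scatterlist on the reserved internal scmd, and wait for
+ * completion (escalating to a target reset on timeout).
+ */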
+static int leapraid_dispatch_scsi_io(struct leapraid_adapter *adapter,
+ struct leapraid_scsi_cmd_desc *cmd_desc)
+{
+ struct scsi_device *sdev;
+ struct leapraid_sdev_priv *sdev_priv;
+ struct scsi_cmnd *scmd;
+ void *dma_buffer = NULL;
+ dma_addr_t dma_addr = 0;
+ u8 sdev_flg = 0;
+ bool issue_reset = false;
+ int rc = 0;
+
+ if (WARN_ON(!adapter->driver_cmds.internal_scmd))
+ return -EINVAL;
+
+ if (!leapraid_ready_for_scsi_io(adapter, cmd_desc->hdl))
+ return -EINVAL;
+
+ mutex_lock(&adapter->driver_cmds.driver_scsiio_cmd.mutex);
+ if (adapter->driver_cmds.driver_scsiio_cmd.status !=
+ LEAPRAID_CMD_NOT_USED) {
+ rc = -EAGAIN;
+ goto out;
+ }
+ adapter->driver_cmds.driver_scsiio_cmd.status = LEAPRAID_CMD_PENDING;
+
+ __shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (sdev_priv->starget_priv->hdl == cmd_desc->hdl &&
+ sdev_priv->lun == cmd_desc->lun) {
+ sdev_flg = 1;
+ break;
+ }
+ }
+
+ if (!sdev_flg) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (cmd_desc->data_length) {
+ dma_buffer = dma_alloc_coherent(&adapter->pdev->dev,
+ cmd_desc->data_length,
+ &dma_addr, GFP_ATOMIC);
+ if (!dma_buffer) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (cmd_desc->dir == DMA_TO_DEVICE)
+ memcpy(dma_buffer, cmd_desc->data_buffer,
+ cmd_desc->data_length);
+ }
+
+ scmd = adapter->driver_cmds.internal_scmd;
+ scmd->device = sdev;
+ scmd->cmd_len = cmd_desc->cdb_length;
+ memcpy(scmd->cmnd, cmd_desc->cdb, cmd_desc->cdb_length);
+ scmd->sc_data_direction = cmd_desc->dir;
+ scmd->sdb.length = cmd_desc->data_length;
+ scmd->sdb.table.nents = 1;
+ scmd->sdb.table.orig_nents = 1;
+ sg_init_one(scmd->sdb.table.sgl, dma_buffer, cmd_desc->data_length);
+ init_completion(&adapter->driver_cmds.driver_scsiio_cmd.done);
+ if (leapraid_queuecommand(adapter->shost, scmd)) {
+ adapter->driver_cmds.driver_scsiio_cmd.status &=
+ ~LEAPRAID_CMD_PENDING;
+ complete(&adapter->driver_cmds.driver_scsiio_cmd.done);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ wait_for_completion_timeout(&adapter->driver_cmds.driver_scsiio_cmd.done,
+ cmd_desc->time_out * HZ);
+
+ if (!(adapter->driver_cmds.driver_scsiio_cmd.status &
+ LEAPRAID_CMD_DONE)) {
+ issue_reset =
+ leapraid_check_reset(
+ adapter->driver_cmds.driver_scsiio_cmd.status);
+ rc = -ENODATA;
+ goto reset;
+ }
+
+ rc = adapter->driver_cmds.internal_scmd->result;
+ if (!rc && cmd_desc->dir == DMA_FROM_DEVICE)
+ memcpy(cmd_desc->data_buffer, dma_buffer,
+ cmd_desc->data_length);
+
+reset:
+ if (issue_reset) {
+ rc = -ENODATA;
+ dev_err(&adapter->pdev->dev, "fire tgt reset: hdl=0x%04x\n",
+ cmd_desc->hdl);
+ leapraid_issue_locked_tm(adapter, cmd_desc->hdl, 0, 0, 0,
+ LEAPRAID_TM_TASKTYPE_TARGET_RESET,
+ adapter->driver_cmds.driver_scsiio_cmd.taskid,
+ LEAPRAID_TM_MSGFLAGS_LINK_RESET);
+ }
+out:
+ if (dma_buffer)
+ dma_free_coherent(&adapter->pdev->dev,
+ cmd_desc->data_length, dma_buffer, dma_addr);
+ adapter->driver_cmds.driver_scsiio_cmd.status = LEAPRAID_CMD_NOT_USED;
+ mutex_unlock(&adapter->driver_cmds.driver_scsiio_cmd.mutex);
+ return rc;
+}
+
+static int leapraid_dispatch_logsense(struct leapraid_adapter *adapter,
+ u16 hdl, u32 lun)
+{
+ struct leapraid_scsi_cmd_desc *desc;
+ int rc = 0;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->hdl = hdl;
+ desc->lun = lun;
+ desc->data_length = LEAPRAID_LOGSENSE_DATA_LENGTH;
+ desc->dir = DMA_FROM_DEVICE;
+ desc->cdb_length = LEAPRAID_LOGSENSE_CDB_LENGTH;
+ desc->cdb[0] = LOG_SENSE;
+ desc->cdb[2] = LEAPRAID_LOGSENSE_CDB_CODE;
+ desc->cdb[8] = desc->data_length;
+ desc->raid_member = false;
+ desc->time_out = LEAPRAID_LOGSENSE_TIMEOUT;
+
+ desc->data_buffer = kzalloc(desc->data_length, GFP_KERNEL);
+ if (!desc->data_buffer) {
+ kfree(desc);
+ return -ENOMEM;
+ }
+
+ rc = leapraid_dispatch_scsi_io(adapter, desc);
+ if (!rc) {
+ if (((char *)desc->data_buffer)[8] ==
+ LEAPRAID_LOGSENSE_SMART_CODE)
+ leapraid_smart_fault_detect(adapter, hdl);
+ }
+
+ kfree(desc->data_buffer);
+ kfree(desc);
+
+ return rc;
+}
+
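+/*
+ * Decide whether a device qualifies for this SMART polling pass: it must
+ * be a SMART-capable SAS device on a known card port, not LED-flagged
+ * from a previous fault, and neither blocked nor a RAID member/volume.
+ */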
+static bool leapraid_smart_poll_check(struct leapraid_adapter *adapter,
+ struct leapraid_sdev_priv *sdev_priv,
+ u32 reset_flg)
+{
+ struct leapraid_sas_dev *sas_dev = NULL;
+
+ if (!sdev_priv || !sdev_priv->starget_priv->card_port)
+ goto out;
+
+ sas_dev = leapraid_get_sas_dev_by_addr(adapter,
+ sdev_priv->starget_priv->sas_address,
+ sdev_priv->starget_priv->card_port);
+ if (!sas_dev || !sas_dev->support_smart)
+ goto out;
+
+ if (reset_flg)
+ sas_dev->led_on = false;
+ else if (sas_dev->led_on)
+ goto out;
+
+ if ((sdev_priv->starget_priv->flg & LEAPRAID_TGT_FLG_RAID_MEMBER) ||
+ (sdev_priv->starget_priv->flg & LEAPRAID_TGT_FLG_VOLUME) ||
+ sdev_priv->block)
+ goto out;
+
+ leapraid_sdev_put(sas_dev);
+ return true;
+
+out:
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+ return false;
+}
+
+static void leapraid_sata_smart_poll_work(struct work_struct *work)
+{
+ struct leapraid_adapter *adapter =
+ container_of(work, struct leapraid_adapter,
+ smart_poll_desc.smart_poll_work.work);
+ struct scsi_device *sdev;
+ struct leapraid_sdev_priv *sdev_priv;
+ static u32 reset_cnt;
+ bool reset_flg = false;
+
+ if (leapraid_check_adapter_is_op(adapter))
+ goto out;
+
+ reset_flg = (reset_cnt < adapter->reset_desc.reset_cnt);
+ reset_cnt = adapter->reset_desc.reset_cnt;
+
+ __shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (leapraid_smart_poll_check(adapter, sdev_priv, reset_flg))
+ leapraid_dispatch_logsense(adapter,
+ sdev_priv->starget_priv->hdl,
+ sdev_priv->lun);
+ }
+
+out:
+ if (adapter->smart_poll_desc.smart_poll_wq)
+ queue_delayed_work(adapter->smart_poll_desc.smart_poll_wq,
+ &adapter->smart_poll_desc.smart_poll_work,
+ msecs_to_jiffies(LEAPRAID_SMART_POLLING_INTERVAL));
+}
+
+void leapraid_smart_polling_start(struct leapraid_adapter *adapter)
+{
+ if (adapter->smart_poll_desc.smart_poll_wq || !smart_poll)
+ return;
+
+ INIT_DELAYED_WORK(&adapter->smart_poll_desc.smart_poll_work,
+ leapraid_sata_smart_poll_work);
+
+ snprintf(adapter->smart_poll_desc.smart_poll_wq_name,
+ sizeof(adapter->smart_poll_desc.smart_poll_wq_name),
+ "poll_%s%u_smart_poll",
+ LEAPRAID_DRIVER_NAME,
+ adapter->adapter_attr.id);
+ adapter->smart_poll_desc.smart_poll_wq =
+ create_singlethread_workqueue(
+ adapter->smart_poll_desc.smart_poll_wq_name);
+ if (!adapter->smart_poll_desc.smart_poll_wq)
+ return;
+ queue_delayed_work(adapter->smart_poll_desc.smart_poll_wq,
+ &adapter->smart_poll_desc.smart_poll_work,
+ msecs_to_jiffies(LEAPRAID_SMART_POLLING_INTERVAL));
+}
+
+void leapraid_smart_polling_stop(struct leapraid_adapter *adapter)
+{
+ struct workqueue_struct *wq;
+
+ if (!adapter->smart_poll_desc.smart_poll_wq)
+ return;
+
+ wq = adapter->smart_poll_desc.smart_poll_wq;
+ adapter->smart_poll_desc.smart_poll_wq = NULL;
+
+	if (!cancel_delayed_work_sync(&adapter->smart_poll_desc.smart_poll_work))
+		flush_workqueue(wq);
+	destroy_workqueue(wq);
+}
+
+static void leapraid_fw_work(struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt);
+
+static void leapraid_fw_evt_free(struct kref *r)
+{
+ struct leapraid_fw_evt_work *fw_evt;
+
+ fw_evt = container_of(r, struct leapraid_fw_evt_work, refcnt);
+
+ kfree(fw_evt->evt_data);
+ kfree(fw_evt);
+}
+
+static void leapraid_fw_evt_get(struct leapraid_fw_evt_work *fw_evt)
+{
+ kref_get(&fw_evt->refcnt);
+}
+
+static void leapraid_fw_evt_put(struct leapraid_fw_evt_work *fw_work)
+{
+ kref_put(&fw_work->refcnt, leapraid_fw_evt_free);
+}
+
+static struct leapraid_fw_evt_work *leapraid_alloc_fw_evt_work(void)
+{
+ struct leapraid_fw_evt_work *fw_evt =
+ kzalloc(sizeof(*fw_evt), GFP_ATOMIC);
+ if (!fw_evt)
+ return NULL;
+
+ kref_init(&fw_evt->refcnt);
+ return fw_evt;
+}
+
+static void leapraid_run_fw_evt_work(struct work_struct *work)
+{
+ struct leapraid_fw_evt_work *fw_evt =
+ container_of(work, struct leapraid_fw_evt_work, work);
+
+ leapraid_fw_work(fw_evt->adapter, fw_evt);
+}
+
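+/*
+ * Queue a firmware event: one reference is taken for its stay on
+ * fw_evt_list and a second for the workqueue item, each dropped by the
+ * corresponding consumer.
+ */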
+static void leapraid_fw_evt_add(struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt)
+{
+ unsigned long flags;
+
+ if (!adapter->fw_evt_s.fw_evt_thread)
+ return;
+
+ spin_lock_irqsave(&adapter->fw_evt_s.fw_evt_lock, flags);
+ leapraid_fw_evt_get(fw_evt);
+ INIT_LIST_HEAD(&fw_evt->list);
+ list_add_tail(&fw_evt->list, &adapter->fw_evt_s.fw_evt_list);
+ INIT_WORK(&fw_evt->work, leapraid_run_fw_evt_work);
+ leapraid_fw_evt_get(fw_evt);
+ queue_work(adapter->fw_evt_s.fw_evt_thread, &fw_evt->work);
+ spin_unlock_irqrestore(&adapter->fw_evt_s.fw_evt_lock, flags);
+}
+
+static void leapraid_del_fw_evt_from_list(struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->fw_evt_s.fw_evt_lock, flags);
+ if (!list_empty(&fw_evt->list)) {
+ list_del_init(&fw_evt->list);
+ leapraid_fw_evt_put(fw_evt);
+ }
+ spin_unlock_irqrestore(&adapter->fw_evt_s.fw_evt_lock, flags);
+}
+
+static struct leapraid_fw_evt_work *leapraid_next_fw_evt(
+ struct leapraid_adapter *adapter)
+{
+ struct leapraid_fw_evt_work *fw_evt = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->fw_evt_s.fw_evt_lock, flags);
+ if (!list_empty(&adapter->fw_evt_s.fw_evt_list)) {
+ fw_evt = list_first_entry(&adapter->fw_evt_s.fw_evt_list,
+ struct leapraid_fw_evt_work, list);
+ list_del_init(&fw_evt->list);
+ leapraid_fw_evt_put(fw_evt);
+ }
+ spin_unlock_irqrestore(&adapter->fw_evt_s.fw_evt_lock, flags);
+ return fw_evt;
+}
+
+void leapraid_clean_active_fw_evt(struct leapraid_adapter *adapter)
+{
+ struct leapraid_fw_evt_work *fw_evt;
+ bool rc = false;
+
+ if ((list_empty(&adapter->fw_evt_s.fw_evt_list) &&
+ !adapter->fw_evt_s.cur_evt) || !adapter->fw_evt_s.fw_evt_thread)
+ return;
+
+ adapter->fw_evt_s.fw_evt_cleanup = 1;
+ if (adapter->access_ctrl.shost_recovering &&
+ adapter->fw_evt_s.cur_evt)
+ adapter->fw_evt_s.cur_evt->ignore = 1;
+
+ while ((fw_evt = leapraid_next_fw_evt(adapter)) ||
+ (fw_evt = adapter->fw_evt_s.cur_evt)) {
+ if (fw_evt == adapter->fw_evt_s.cur_evt &&
+ adapter->fw_evt_s.cur_evt->evt_type !=
+ LEAPRAID_EVT_REMOVE_DEAD_DEV) {
+ adapter->fw_evt_s.cur_evt = NULL;
+ continue;
+ }
+
+ rc = cancel_work_sync(&fw_evt->work);
+
+ if (rc)
+ leapraid_fw_evt_put(fw_evt);
+ }
+ adapter->fw_evt_s.fw_evt_cleanup = 0;
+}
+
+static void leapraid_internal_dev_ublk(struct scsi_device *sdev,
+ struct leapraid_sdev_priv *sdev_priv)
+{
+ int rc = 0;
+
+ sdev_printk(KERN_WARNING, sdev,
+ "hdl 0x%04x: now internal unblkg dev\n",
+ sdev_priv->starget_priv->hdl);
+ sdev_priv->block = false;
+ rc = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
+ if (rc == -EINVAL) {
+ sdev_printk(KERN_WARNING, sdev,
+ "hdl 0x%04x: unblkg failed, rc=%d\n",
+ sdev_priv->starget_priv->hdl, rc);
+ sdev_priv->block = true;
+ rc = scsi_internal_device_block_nowait(sdev);
+ if (rc)
+ sdev_printk(KERN_WARNING, sdev,
+ "hdl 0x%04x: blkg failed: earlier unblkg err, rc=%d\n",
+ sdev_priv->starget_priv->hdl, rc);
+
+ sdev_priv->block = false;
+ rc = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
+ if (rc)
+ sdev_printk(KERN_WARNING, sdev,
+ "hdl 0x%04x: ublkg failed again, rc=%d\n",
+ sdev_priv->starget_priv->hdl, rc);
+ }
+}
+
+static void leapraid_internal_ublk_io_dev_to_running(struct scsi_device *sdev)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+
+ sdev_priv = sdev->hostdata;
+ sdev_priv->block = false;
+ scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
+ sdev_printk(KERN_WARNING, sdev, "%s: ublk hdl 0x%04x\n",
+ __func__, sdev_priv->starget_priv->hdl);
+}
+
+static void leapraid_ublk_io_dev_to_running(
+ struct leapraid_adapter *adapter, u64 sas_addr,
+ struct leapraid_card_port *card_port)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv)
+ continue;
+
+ if (sdev_priv->starget_priv->sas_address != sas_addr ||
+ sdev_priv->starget_priv->card_port != card_port)
+ continue;
+
+ if (sdev_priv->block)
+ leapraid_internal_ublk_io_dev_to_running(sdev);
+ }
+}
+
+static void leapraid_ublk_io_dev(struct leapraid_adapter *adapter,
+ u64 sas_addr,
+ struct leapraid_card_port *card_port)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv || !sdev_priv->starget_priv)
+ continue;
+
+ if (sdev_priv->starget_priv->sas_address != sas_addr)
+ continue;
+
+ if (sdev_priv->starget_priv->card_port != card_port)
+ continue;
+
+ if (sdev_priv->block)
+ leapraid_internal_dev_ublk(sdev, sdev_priv);
+
+ scsi_device_set_state(sdev, SDEV_OFFLINE);
+ }
+}
+
+static void leapraid_ublk_io_all_dev(struct leapraid_adapter *adapter)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct leapraid_starget_priv *stgt_priv;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+
+ if (!sdev_priv)
+ continue;
+
+ stgt_priv = sdev_priv->starget_priv;
+ if (!stgt_priv || stgt_priv->deleted)
+ continue;
+
+ if (!sdev_priv->block)
+ continue;
+
+		sdev_printk(KERN_WARNING, sdev, "hdl 0x%04x: ublkg...\n",
+			    sdev_priv->starget_priv->hdl);
+		leapraid_internal_dev_ublk(sdev, sdev_priv);
+ }
+}
+
+static void __maybe_unused leapraid_internal_dev_blk(
+ struct scsi_device *sdev,
+ struct leapraid_sdev_priv *sdev_priv)
+{
+ int rc = 0;
+
+ sdev_printk(KERN_INFO, sdev, "internal blkg hdl 0x%04x\n",
+ sdev_priv->starget_priv->hdl);
+ sdev_priv->block = true;
+ rc = scsi_internal_device_block_nowait(sdev);
+ if (rc == -EINVAL)
+		sdev_printk(KERN_WARNING, sdev,
+			    "hdl 0x%04x: blkg failed, rc=%d\n",
+			    sdev_priv->starget_priv->hdl, rc);
+}
+
+static void __maybe_unused leapraid_blkio_dev(struct leapraid_adapter *adapter,
+ u16 hdl)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct leapraid_sas_dev *sas_dev;
+ struct scsi_device *sdev;
+
+ sas_dev = leapraid_get_sas_dev_by_hdl(adapter, hdl);
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv)
+ continue;
+
+ if (sdev_priv->starget_priv->hdl != hdl)
+ continue;
+
+ if (sdev_priv->block)
+ continue;
+
+ if (sas_dev && sas_dev->pend_sas_rphy_add)
+ continue;
+
+ if (sdev_priv->sep) {
+ sdev_printk(KERN_INFO, sdev,
+ "sep hdl 0x%04x skip blkg\n",
+ sdev_priv->starget_priv->hdl);
+ continue;
+ }
+
+ leapraid_internal_dev_blk(sdev, sdev_priv);
+ }
+
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+}
+
+static void leapraid_imm_blkio_to_end_dev(struct leapraid_adapter *adapter,
+ struct leapraid_sas_port *sas_port)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct leapraid_sas_dev *sas_dev;
+ struct scsi_device *sdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(
+ adapter,
+ sas_port->remote_identify.sas_address,
+ sas_port->card_port);
+
+ if (sas_dev) {
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv)
+ continue;
+
+ if (sdev_priv->starget_priv->hdl != sas_dev->hdl)
+ continue;
+
+ if (sdev_priv->block)
+ continue;
+
+			if (sas_dev->pend_sas_rphy_add)
+ continue;
+
+ if (sdev_priv->sep) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s skip dev blk for sep hdl 0x%04x\n",
+ __func__,
+ sdev_priv->starget_priv->hdl);
+ continue;
+ }
+
+ leapraid_internal_dev_blk(sdev, sdev_priv);
+ }
+
+ leapraid_sdev_put(sas_dev);
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+}
+
+static void leapraid_imm_blkio_set_end_dev_blk_hdls(
+ struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node_exp)
+{
+ struct leapraid_sas_port *sas_port;
+
+ list_for_each_entry(sas_port,
+ &topo_node_exp->sas_port_list, port_list) {
+ if (sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE) {
+ leapraid_imm_blkio_to_end_dev(adapter, sas_port);
+ }
+ }
+}
+
+static void leapraid_imm_blkio_to_kids_attchd_to_ex(
+ struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node_exp);
+
+static void leapraid_imm_blkio_to_sib_exp(
+ struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node_exp)
+{
+ struct leapraid_topo_node *topo_node_exp_sib;
+ struct leapraid_sas_port *sas_port;
+
+ list_for_each_entry(sas_port,
+ &topo_node_exp->sas_port_list, port_list) {
+ if (sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE) {
+ topo_node_exp_sib =
+ leapraid_exp_find_by_sas_address(
+ adapter,
+ sas_port->remote_identify.sas_address,
+ sas_port->card_port);
+ leapraid_imm_blkio_to_kids_attchd_to_ex(
+ adapter,
+ topo_node_exp_sib);
+ }
+ }
+}
+
+static void leapraid_imm_blkio_to_kids_attchd_to_ex(
+ struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node_exp)
+{
+ if (!topo_node_exp)
+ return;
+
+ leapraid_imm_blkio_set_end_dev_blk_hdls(adapter, topo_node_exp);
+
+ leapraid_imm_blkio_to_sib_exp(adapter, topo_node_exp);
+}
+
+static void leapraid_report_sdev_directly(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev)
+{
+ struct leapraid_sas_port *sas_port;
+
+ sas_port = leapraid_transport_port_add(adapter,
+ sas_dev->hdl,
+ sas_dev->parent_sas_addr,
+ sas_dev->card_port);
+ if (!sas_port) {
+ leapraid_sas_dev_remove(adapter, sas_dev);
+ return;
+ }
+
+ if (!sas_dev->starget) {
+ if (!adapter->scan_dev_desc.driver_loading) {
+ leapraid_transport_port_remove(adapter,
+ sas_dev->sas_addr,
+ sas_dev->parent_sas_addr,
+ sas_dev->card_port);
+ leapraid_sas_dev_remove(adapter, sas_dev);
+ }
+ return;
+ }
+
+ clear_bit(sas_dev->hdl,
+ (unsigned long *)adapter->dev_topo.pending_dev_add);
+}
+
+static struct leapraid_sas_dev *leapraid_init_sas_dev(
+ struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev_p0 *sas_dev_pg0,
+ struct leapraid_card_port *card_port, u16 hdl,
+ u64 parent_sas_addr, u64 sas_addr, u32 dev_info)
+{
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_enc_node *enc_dev;
+
+ sas_dev = kzalloc(sizeof(*sas_dev), GFP_KERNEL);
+ if (!sas_dev)
+ return NULL;
+
+ kref_init(&sas_dev->refcnt);
+ sas_dev->hdl = hdl;
+ sas_dev->dev_info = dev_info;
+ sas_dev->sas_addr = sas_addr;
+ sas_dev->card_port = card_port;
+ sas_dev->parent_sas_addr = parent_sas_addr;
+ sas_dev->phy = sas_dev_pg0->phy_num;
+ sas_dev->enc_hdl = le16_to_cpu(sas_dev_pg0->enc_hdl);
+ sas_dev->dev_name = le64_to_cpu(sas_dev_pg0->dev_name);
+ sas_dev->port_type = sas_dev_pg0->max_port_connections;
+ sas_dev->slot = sas_dev->enc_hdl ? le16_to_cpu(sas_dev_pg0->slot) : 0;
+ sas_dev->support_smart = (le16_to_cpu(sas_dev_pg0->flg) &
+ LEAPRAID_SAS_DEV_P0_FLG_SATA_SMART);
+ if (le16_to_cpu(sas_dev_pg0->flg) &
+ LEAPRAID_SAS_DEV_P0_FLG_ENC_LEVEL_VALID) {
+ sas_dev->enc_level = sas_dev_pg0->enc_level;
+ memcpy(sas_dev->connector_name, sas_dev_pg0->connector_name, 4);
+ sas_dev->connector_name[4] = '\0';
+ } else {
+ sas_dev->enc_level = 0;
+ sas_dev->connector_name[0] = '\0';
+ }
+ if (le16_to_cpu(sas_dev_pg0->enc_hdl)) {
+ enc_dev = leapraid_enc_find_by_hdl(adapter,
+ le16_to_cpu(sas_dev_pg0->enc_hdl));
+ sas_dev->enc_lid = enc_dev ?
+ le64_to_cpu(enc_dev->pg0.enc_lid) : 0;
+ }
+	dev_info(&adapter->pdev->dev,
+		 "add dev: hdl=0x%04x, sas addr=0x%016llx, port_type=0x%x\n",
+		 hdl, sas_dev->sas_addr, sas_dev->port_type);
+
+ return sas_dev;
+}
+
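+/*
+ * Handle a newly reported device handle: read SAS_DEVICE page 0, ignore
+ * non end-devices, and either park the device on the init list while a
+ * device scan is pending or add it to the live list and report it to the
+ * transport layer immediately.
+ */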
+static void leapraid_add_dev(struct leapraid_adapter *adapter, u16 hdl)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_dev_p0 sas_dev_pg0;
+ struct leapraid_card_port *card_port;
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ u64 parent_sas_addr;
+ u32 dev_info;
+ u64 sas_addr;
+ u8 port_id;
+
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = hdl;
+ if ((leapraid_op_config_page(adapter, &sas_dev_pg0,
+ cfgp1, cfgp2, GET_SAS_DEVICE_PG0)))
+ return;
+
+ dev_info = le32_to_cpu(sas_dev_pg0.dev_info);
+ if (!(leapraid_is_end_dev(dev_info)))
+ return;
+
+ set_bit(hdl, (unsigned long *)adapter->dev_topo.pending_dev_add);
+ sas_addr = le64_to_cpu(sas_dev_pg0.sas_address);
+ if (!(le16_to_cpu(sas_dev_pg0.flg) &
+ LEAPRAID_SAS_DEV_P0_FLG_DEV_PRESENT))
+ return;
+
+ port_id = sas_dev_pg0.physical_port;
+ card_port = leapraid_get_port_by_id(adapter, port_id, false);
+ if (!card_port)
+ return;
+
+ sas_dev = leapraid_get_sas_dev_by_addr(adapter, sas_addr, card_port);
+ if (sas_dev) {
+ clear_bit(hdl,
+ (unsigned long *)adapter->dev_topo.pending_dev_add);
+ leapraid_sdev_put(sas_dev);
+ return;
+ }
+
+ if (leapraid_get_sas_address(adapter,
+ le16_to_cpu(sas_dev_pg0.parent_dev_hdl),
+ &parent_sas_addr))
+ return;
+
+ sas_dev = leapraid_init_sas_dev(adapter, &sas_dev_pg0, card_port,
+ hdl, parent_sas_addr, sas_addr,
+ dev_info);
+ if (!sas_dev)
+ return;
+ if (adapter->scan_dev_desc.wait_scan_dev_done) {
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ leapraid_sdev_get(sas_dev);
+ list_add_tail(&sas_dev->list,
+ &adapter->dev_topo.sas_dev_init_list);
+ leapraid_check_boot_dev(adapter, sas_dev, 0);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ } else {
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ leapraid_sdev_get(sas_dev);
+ list_add_tail(&sas_dev->list, &adapter->dev_topo.sas_dev_list);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ leapraid_report_sdev_directly(adapter, sas_dev);
+ }
+}
+
+static void leapraid_remove_device(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev)
+{
+ struct leapraid_starget_priv *starget_priv;
+
+ if (sas_dev->led_on) {
+ leapraid_set_led(adapter, sas_dev, false);
+ sas_dev->led_on = false;
+ }
+
+ if (sas_dev->starget && sas_dev->starget->hostdata) {
+ starget_priv = sas_dev->starget->hostdata;
+ starget_priv->deleted = true;
+ leapraid_ublk_io_dev(adapter,
+ sas_dev->sas_addr, sas_dev->card_port);
+ starget_priv->hdl = LEAPRAID_INVALID_DEV_HANDLE;
+ }
+
+ leapraid_transport_port_remove(adapter,
+ sas_dev->sas_addr,
+ sas_dev->parent_sas_addr,
+ sas_dev->card_port);
+
+ dev_info(&adapter->pdev->dev,
+ "remove dev: hdl=0x%04x, sas addr=0x%016llx\n",
+ sas_dev->hdl, (unsigned long long)sas_dev->sas_addr);
+}
+
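+/*
+ * Find or create the virtual-phy bookkeeping entry for @phy_num on the
+ * card port identified by @port_id, lazily initialising the port's
+ * vphys_list on first use.
+ */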
+static struct leapraid_vphy *leapraid_alloc_vphy(struct leapraid_adapter *adapter,
+ u8 port_id, u8 phy_num)
+{
+ struct leapraid_card_port *port;
+ struct leapraid_vphy *vphy;
+
+ port = leapraid_get_port_by_id(adapter, port_id, false);
+ if (!port)
+ return NULL;
+
+ vphy = leapraid_get_vphy_by_phy(port, phy_num);
+ if (vphy)
+ return vphy;
+
+ vphy = kzalloc(sizeof(*vphy), GFP_KERNEL);
+ if (!vphy)
+ return NULL;
+
+ if (!port->vphys_mask)
+ INIT_LIST_HEAD(&port->vphys_list);
+
+ port->vphys_mask |= BIT(phy_num);
+ vphy->phy_mask |= BIT(phy_num);
+ list_add_tail(&vphy->list, &port->vphys_list);
+ return vphy;
+}
+
+static int leapraid_add_port_to_card_port_list(struct leapraid_adapter *adapter,
+ u8 port_id, bool refresh)
+{
+ struct leapraid_card_port *card_port;
+
+ card_port = leapraid_get_port_by_id(adapter, port_id, false);
+ if (card_port)
+ return 0;
+
+ card_port = kzalloc(sizeof(*card_port), GFP_KERNEL);
+ if (!card_port)
+ return -ENOMEM;
+
+ card_port->port_id = port_id;
+ dev_info(&adapter->pdev->dev,
+ "port: %d is added to card_port list\n",
+ card_port->port_id);
+
+ if (refresh)
+ if (adapter->access_ctrl.shost_recovering)
+ card_port->flg = LEAPRAID_CARD_PORT_FLG_NEW;
+ list_add_tail(&card_port->list, &adapter->dev_topo.card_port_list);
+ return 0;
+}
+
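+/*
+ * Discover (or, with @refresh, re-scan) the controller's own phys:
+ * allocate the card_phy array on first add, bind each phy to its card
+ * port, create vphy entries where the phy info flags one, and on refresh
+ * propagate negotiated link-rate changes to the transport layer.
+ */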
+static void leapraid_sas_host_add(struct leapraid_adapter *adapter,
+ bool refresh)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_phy_p0 phy_pg0;
+ struct leapraid_sas_dev_p0 sas_dev_pg0;
+ struct leapraid_enc_p0 enc_pg0;
+ struct leapraid_sas_io_unit_p0 *sas_iou_pg0;
+ u16 sas_iou_pg0_sz;
+ u16 attached_hdl;
+ u8 phys_num;
+ u8 port_id;
+ u8 link_rate;
+ int i;
+
+ if (!refresh) {
+ if (leapraid_get_adapter_phys(adapter, &phys_num) || !phys_num)
+ return;
+
+ adapter->dev_topo.card.card_phy =
+ kcalloc(phys_num,
+ sizeof(struct leapraid_card_phy), GFP_KERNEL);
+ if (!adapter->dev_topo.card.card_phy)
+ return;
+
+ adapter->dev_topo.card.phys_num = phys_num;
+ }
+
+ sas_iou_pg0_sz = offsetof(struct leapraid_sas_io_unit_p0, phy_info) +
+ (adapter->dev_topo.card.phys_num *
+ sizeof(struct leapraid_sas_io_unit0_phy_info));
+ sas_iou_pg0 = kzalloc(sas_iou_pg0_sz, GFP_KERNEL);
+ if (!sas_iou_pg0)
+ goto out;
+
+ if (leapraid_get_sas_io_unit_page0(adapter,
+ sas_iou_pg0,
+ sas_iou_pg0_sz))
+ goto out;
+
+ adapter->dev_topo.card.parent_dev = &adapter->shost->shost_gendev;
+ adapter->dev_topo.card.hdl =
+ le16_to_cpu(sas_iou_pg0->phy_info[0].controller_dev_hdl);
+ for (i = 0; i < adapter->dev_topo.card.phys_num; i++) {
+ if (!refresh) { /* add */
+ cfgp1.phy_number = i;
+ if (leapraid_op_config_page(adapter, &phy_pg0, cfgp1,
+ cfgp2, GET_PHY_PG0))
+ goto out;
+
+ port_id = sas_iou_pg0->phy_info[i].port;
+ if (leapraid_add_port_to_card_port_list(adapter,
+ port_id,
+ false))
+ goto out;
+
+ if ((le32_to_cpu(phy_pg0.phy_info) &
+ LEAPRAID_SAS_PHYINFO_VPHY) &&
+ (phy_pg0.neg_link_rate >> 4) >=
+ LEAPRAID_SAS_NEG_LINK_RATE_1_5) {
+ if (!leapraid_alloc_vphy(adapter, port_id, i))
+ goto out;
+ adapter->dev_topo.card.card_phy[i].vphy = true;
+ }
+
+ adapter->dev_topo.card.card_phy[i].hdl =
+ adapter->dev_topo.card.hdl;
+ adapter->dev_topo.card.card_phy[i].phy_id = i;
+ adapter->dev_topo.card.card_phy[i].card_port =
+ leapraid_get_port_by_id(adapter,
+ port_id,
+ false);
+ leapraid_transport_add_card_phy(
+ adapter,
+ &adapter->dev_topo.card.card_phy[i],
+ &phy_pg0, adapter->dev_topo.card.parent_dev);
+ } else { /* refresh */
+ link_rate = sas_iou_pg0->phy_info[i].neg_link_rate >> 4;
+ port_id = sas_iou_pg0->phy_info[i].port;
+ if (leapraid_add_port_to_card_port_list(adapter,
+ port_id,
+ true))
+ goto out;
+
+ if (le32_to_cpu(sas_iou_pg0->phy_info[i]
+ .controller_phy_dev_info) &
+ LEAPRAID_DEVTYP_SEP &&
+ link_rate >= LEAPRAID_SAS_NEG_LINK_RATE_1_5) {
+ cfgp1.phy_number = i;
+ if ((leapraid_op_config_page(adapter, &phy_pg0,
+ cfgp1, cfgp2,
+ GET_PHY_PG0)))
+ continue;
+
+ if ((le32_to_cpu(phy_pg0.phy_info) &
+ LEAPRAID_SAS_PHYINFO_VPHY)) {
+ if (!leapraid_alloc_vphy(adapter,
+ port_id,
+ i))
+ goto out;
+ adapter->dev_topo.card.card_phy[i].vphy = true;
+ }
+ }
+
+ adapter->dev_topo.card.card_phy[i].hdl =
+ adapter->dev_topo.card.hdl;
+ attached_hdl =
+ le16_to_cpu(sas_iou_pg0->phy_info[i].attached_dev_hdl);
+ if (attached_hdl && link_rate < LEAPRAID_SAS_NEG_LINK_RATE_1_5)
+ link_rate = LEAPRAID_SAS_NEG_LINK_RATE_1_5;
+
+ adapter->dev_topo.card.card_phy[i].card_port =
+ leapraid_get_port_by_id(adapter,
+ port_id,
+ false);
+ if (!adapter->dev_topo.card.card_phy[i].phy) {
+ cfgp1.phy_number = i;
+ if ((leapraid_op_config_page(adapter, &phy_pg0,
+ cfgp1, cfgp2,
+ GET_PHY_PG0)))
+ continue;
+
+ adapter->dev_topo.card.card_phy[i].phy_id = i;
+ leapraid_transport_add_card_phy(adapter,
+ &adapter->dev_topo.card.card_phy[i],
+ &phy_pg0,
+ adapter->dev_topo.card.parent_dev);
+ continue;
+ }
+
+ leapraid_transport_update_links(adapter,
+ adapter->dev_topo.card.sas_address,
+ attached_hdl, i, link_rate,
+ adapter->dev_topo.card.card_phy[i].card_port);
+ }
+ }
+
+ if (!refresh) {
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = adapter->dev_topo.card.hdl;
+ if ((leapraid_op_config_page(adapter, &sas_dev_pg0, cfgp1,
+ cfgp2, GET_SAS_DEVICE_PG0)))
+ goto out;
+
+ adapter->dev_topo.card.enc_hdl =
+ le16_to_cpu(sas_dev_pg0.enc_hdl);
+ adapter->dev_topo.card.sas_address =
+ le64_to_cpu(sas_dev_pg0.sas_address);
+ dev_info(&adapter->pdev->dev,
+ "add host: devhdl=0x%04x, sas addr=0x%016llx, phynums=%d\n",
+ adapter->dev_topo.card.hdl,
+ (unsigned long long)adapter->dev_topo.card.sas_address,
+ adapter->dev_topo.card.phys_num);
+
+ if (adapter->dev_topo.card.enc_hdl) {
+ cfgp1.form = LEAPRAID_SAS_ENC_CFG_PGAD_HDL;
+ cfgp2.handle = adapter->dev_topo.card.enc_hdl;
+ if (!(leapraid_op_config_page(adapter, &enc_pg0,
+ cfgp1, cfgp2,
+ GET_SAS_ENCLOSURE_PG0)))
+ adapter->dev_topo.card.enc_lid =
+ le64_to_cpu(enc_pg0.enc_lid);
+ }
+ }
+out:
+ kfree(sas_iou_pg0);
+}
+
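+/*
+ * Allocate and initialize a topology node for a newly discovered expander,
+ * register its SAS port and per-phy transport objects, and queue it on the
+ * adapter's expander list; everything is torn down again on failure.
+ */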
+static int leapraid_internal_exp_add(struct leapraid_adapter *adapter,
+ struct leapraid_exp_p0 *exp_pg0,
+ union cfg_param_1 *cfgp1,
+ union cfg_param_2 *cfgp2,
+ u16 hdl)
+{
+ struct leapraid_topo_node *topo_node_exp;
+ struct leapraid_sas_port *sas_port = NULL;
+ struct leapraid_enc_node *enc_dev;
+ struct leapraid_exp_p1 exp_pg1;
+ int rc = 0;
+ unsigned long flags;
+ u8 port_id;
+ u16 parent_handle;
+ u64 sas_addr_parent = 0;
+ int i;
+
+ port_id = exp_pg0->physical_port;
+ parent_handle = le16_to_cpu(exp_pg0->parent_dev_hdl);
+
+ if (leapraid_get_sas_address(adapter, parent_handle, &sas_addr_parent))
+ return -1;
+
+ topo_node_exp = kzalloc(sizeof(*topo_node_exp), GFP_KERNEL);
+ if (!topo_node_exp)
+ return -1;
+
+ topo_node_exp->hdl = hdl;
+ topo_node_exp->phys_num = exp_pg0->phy_num;
+ topo_node_exp->sas_address_parent = sas_addr_parent;
+ topo_node_exp->sas_address = le64_to_cpu(exp_pg0->sas_address);
+ topo_node_exp->card_port =
+ leapraid_get_port_by_id(adapter, port_id, false);
+ if (!topo_node_exp->card_port) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ dev_info(&adapter->pdev->dev,
+ "add exp: sas addr=0x%016llx, hdl=0x%04x, phdl=0x%04x, phys=%d\n",
+ (unsigned long long)topo_node_exp->sas_address,
+ hdl, parent_handle,
+ topo_node_exp->phys_num);
+ if (!topo_node_exp->phys_num) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ topo_node_exp->card_phy =
+ kcalloc(topo_node_exp->phys_num,
+ sizeof(struct leapraid_card_phy), GFP_KERNEL);
+ if (!topo_node_exp->card_phy) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ INIT_LIST_HEAD(&topo_node_exp->sas_port_list);
+ sas_port = leapraid_transport_port_add(adapter, hdl, sas_addr_parent,
+ topo_node_exp->card_port);
+ if (!sas_port) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ topo_node_exp->parent_dev = &sas_port->rphy->dev;
+ topo_node_exp->rphy = sas_port->rphy;
+ for (i = 0; i < topo_node_exp->phys_num; i++) {
+ cfgp1->phy_number = i;
+ cfgp2->handle = hdl;
+ if ((leapraid_op_config_page(adapter, &exp_pg1, *cfgp1, *cfgp2,
+ GET_SAS_EXPANDER_PG1))) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ topo_node_exp->card_phy[i].hdl = hdl;
+ topo_node_exp->card_phy[i].phy_id = i;
+ topo_node_exp->card_phy[i].card_port =
+ leapraid_get_port_by_id(adapter, port_id, false);
+ if ((leapraid_transport_add_exp_phy(adapter,
+ &topo_node_exp->card_phy[i],
+ &exp_pg1,
+ topo_node_exp->parent_dev))) {
+ rc = -1;
+ goto out_fail;
+ }
+ }
+
+ if (topo_node_exp->enc_hdl) {
+ enc_dev = leapraid_enc_find_by_hdl(adapter,
+ topo_node_exp->enc_hdl);
+ if (enc_dev)
+ topo_node_exp->enc_lid =
+ le64_to_cpu(enc_dev->pg0.enc_lid);
+ }
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ list_add_tail(&topo_node_exp->list, &adapter->dev_topo.exp_list);
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+ return 0;
+
+out_fail:
+ if (sas_port)
+ leapraid_transport_port_remove(adapter,
+ topo_node_exp->sas_address,
+ sas_addr_parent,
+ topo_node_exp->card_port);
+ kfree(topo_node_exp);
+ return rc;
+}
+
+static int leapraid_exp_add(struct leapraid_adapter *adapter, u16 hdl)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_topo_node *topo_node_exp;
+ struct leapraid_exp_p0 exp_pg0;
+ u16 parent_handle;
+ u64 sas_addr, sas_addr_parent = 0;
+ unsigned long flags;
+ u8 port_id;
+ int rc = 0;
+
+ if (!hdl)
+ return -EPERM;
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering)
+ return -EPERM;
+
+ cfgp1.form = LEAPRAID_SAS_EXP_CFD_PGAD_HDL;
+ cfgp2.handle = hdl;
+ if ((leapraid_op_config_page(adapter, &exp_pg0, cfgp1, cfgp2,
+ GET_SAS_EXPANDER_PG0)))
+ return -EPERM;
+
+ parent_handle = le16_to_cpu(exp_pg0.parent_dev_hdl);
+ if (leapraid_get_sas_address(adapter, parent_handle, &sas_addr_parent))
+ return -EPERM;
+
+ port_id = exp_pg0.physical_port;
+ if (sas_addr_parent != adapter->dev_topo.card.sas_address) {
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ topo_node_exp =
+ leapraid_exp_find_by_sas_address(adapter,
+ sas_addr_parent,
+ leapraid_get_port_by_id(adapter, port_id, false));
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+ if (!topo_node_exp) {
+ rc = leapraid_exp_add(adapter, parent_handle);
+ if (rc != 0)
+ return rc;
+ }
+ }
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ sas_addr = le64_to_cpu(exp_pg0.sas_address);
+ topo_node_exp =
+ leapraid_exp_find_by_sas_address(adapter, sas_addr,
+ leapraid_get_port_by_id(adapter, port_id, false));
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ if (topo_node_exp)
+ return 0;
+
+ return leapraid_internal_exp_add(adapter, &exp_pg0, &cfgp1,
+ &cfgp2, hdl);
+}
+
+static void leapraid_exp_node_rm(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node_exp)
+{
+ struct leapraid_sas_port *sas_port, *sas_port_next;
+ unsigned long flags;
+ int port_id;
+
+ list_for_each_entry_safe(sas_port, sas_port_next,
+ &topo_node_exp->sas_port_list,
+ port_list) {
+ if (adapter->access_ctrl.shost_recovering)
+ return;
+
+ switch (sas_port->remote_identify.device_type) {
+ case SAS_END_DEVICE:
+ leapraid_sas_dev_remove_by_sas_address(
+ adapter,
+ sas_port->remote_identify.sas_address,
+ sas_port->card_port);
+ break;
+ case SAS_EDGE_EXPANDER_DEVICE:
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ leapraid_exp_rm(
+ adapter,
+ sas_port->remote_identify.sas_address,
+ sas_port->card_port);
+ break;
+ default:
+ break;
+ }
+ }
+
+ port_id = topo_node_exp->card_port->port_id;
+ leapraid_transport_port_remove(adapter, topo_node_exp->sas_address,
+ topo_node_exp->sas_address_parent,
+ topo_node_exp->card_port);
+ dev_info(&adapter->pdev->dev,
+ "removing exp: port=%d, sas addr=0x%016llx, hdl=0x%04x\n",
+ port_id, (unsigned long long)topo_node_exp->sas_address,
+ topo_node_exp->hdl);
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ list_del(&topo_node_exp->list);
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+ kfree(topo_node_exp->card_phy);
+ kfree(topo_node_exp);
+}
+
+void leapraid_exp_rm(struct leapraid_adapter *adapter, u64 sas_addr,
+ struct leapraid_card_port *port)
+{
+ struct leapraid_topo_node *topo_node_exp;
+ unsigned long flags;
+
+ if (adapter->access_ctrl.shost_recovering)
+ return;
+
+ if (!port)
+ return;
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ topo_node_exp = leapraid_exp_find_by_sas_address(adapter,
+ sas_addr,
+ port);
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ if (topo_node_exp)
+ leapraid_exp_node_rm(adapter, topo_node_exp);
+}
+
+static void leapraid_check_device(struct leapraid_adapter *adapter,
+ u64 parent_sas_address, u16 handle,
+ u8 phy_number, u8 link_rate)
+{
+ struct leapraid_sas_dev_p0 sas_device_pg0;
+ struct leapraid_sas_dev *sas_dev = NULL;
+ struct leapraid_enc_node *enclosure_dev = NULL;
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ unsigned long flags;
+ u64 sas_address;
+ struct scsi_target *starget;
+ struct leapraid_starget_priv *sas_target_priv_data;
+ u32 device_info;
+ struct leapraid_card_port *port;
+
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = handle;
+ if ((leapraid_op_config_page(adapter, &sas_device_pg0, cfgp1, cfgp2,
+ GET_SAS_DEVICE_PG0)))
+ return;
+
+ if (phy_number != sas_device_pg0.phy_num)
+ return;
+
+ device_info = le32_to_cpu(sas_device_pg0.dev_info);
+ if (!(leapraid_is_end_dev(device_info)))
+ return;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_address = le64_to_cpu(sas_device_pg0.sas_address);
+ port = leapraid_get_port_by_id(adapter, sas_device_pg0.physical_port,
+ false);
+ if (!port)
+ goto out_unlock;
+
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(adapter, sas_address,
+ port);
+ if (!sas_dev)
+ goto out_unlock;
+
+ if (unlikely(sas_dev->hdl != handle)) {
+ starget = sas_dev->starget;
+ sas_target_priv_data = starget->hostdata;
+ starget_printk(KERN_INFO, starget,
+ "hdl changed from 0x%04x to 0x%04x!\n",
+ sas_dev->hdl, handle);
+ sas_target_priv_data->hdl = handle;
+ sas_dev->hdl = handle;
+ if (le16_to_cpu(sas_device_pg0.flg) &
+ LEAPRAID_SAS_DEV_P0_FLG_ENC_LEVEL_VALID) {
+ sas_dev->enc_level =
+ sas_device_pg0.enc_level;
+ memcpy(sas_dev->connector_name,
+ sas_device_pg0.connector_name, 4);
+ sas_dev->connector_name[4] = '\0';
+ } else {
+ sas_dev->enc_level = 0;
+ sas_dev->connector_name[0] = '\0';
+ }
+ sas_dev->enc_hdl =
+ le16_to_cpu(sas_device_pg0.enc_hdl);
+ enclosure_dev =
+ leapraid_enc_find_by_hdl(adapter, sas_dev->enc_hdl);
+ if (enclosure_dev) {
+ sas_dev->enc_lid =
+ le64_to_cpu(enclosure_dev->pg0.enc_lid);
+ }
+ }
+
+ if (!(le16_to_cpu(sas_device_pg0.flg) &
+ LEAPRAID_SAS_DEV_P0_FLG_DEV_PRESENT))
+ goto out_unlock;
+
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ leapraid_ublk_io_dev_to_running(adapter, sas_address, port);
+ goto out;
+
+out_unlock:
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+out:
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+}
+
+static int leapraid_internal_sas_topo_chg_evt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_card_port *card_port,
+ struct leapraid_topo_node *topo_node_exp,
+ struct leapraid_fw_evt_work *fw_evt,
+ u64 sas_addr, u8 max_phys)
+{
+ struct leapraid_evt_data_sas_topo_change_list *evt_data;
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ u8 phy_number;
+ u8 link_rate, prev_link_rate;
+ u16 reason_code;
+ u16 hdl;
+ int i;
+
+ evt_data = fw_evt->evt_data;
+ for (i = 0; i < evt_data->entry_num; i++) {
+ if (fw_evt->ignore)
+ return 0;
+
+ if (adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering)
+ return 0;
+
+ phy_number = evt_data->start_phy_num + i;
+ if (phy_number >= max_phys)
+ continue;
+
+ reason_code = evt_data->phy[i].phy_status &
+ LEAPRAID_EVT_SAS_TOPO_RC_MASK;
+
+ hdl = le16_to_cpu(evt_data->phy[i].attached_dev_hdl);
+ if (!hdl)
+ continue;
+
+ link_rate = evt_data->phy[i].link_rate >> 4;
+ prev_link_rate = evt_data->phy[i].link_rate & 0xF;
+ switch (reason_code) {
+ case LEAPRAID_EVT_SAS_TOPO_RC_PHY_CHANGED:
+ if (adapter->access_ctrl.shost_recovering)
+ break;
+
+ if (link_rate == prev_link_rate)
+ break;
+
+ leapraid_transport_update_links(adapter, sas_addr,
+ hdl, phy_number,
+ link_rate, card_port);
+ if (link_rate < LEAPRAID_SAS_NEG_LINK_RATE_1_5)
+ break;
+
+ leapraid_check_device(adapter, sas_addr, hdl,
+ phy_number, link_rate);
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock,
+ flags);
+ sas_dev =
+ leapraid_hold_lock_get_sas_dev_by_hdl(
+ adapter, hdl);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock,
+ flags);
+ if (sas_dev) {
+ leapraid_sdev_put(sas_dev);
+ break;
+ }
+ if (!test_bit(hdl, (unsigned long *)adapter->dev_topo.pending_dev_add))
+ break;
+
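+			/*
+			 * The device is still queued for addition: recast
+			 * this phy event as "target added" and fall through
+			 * to the add path below.
+			 */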
+ evt_data->phy[i].phy_status &=
+ LEAPRAID_EVT_SAS_TOPO_RC_CLEAR_MASK;
+ evt_data->phy[i].phy_status |=
+ LEAPRAID_EVT_SAS_TOPO_RC_TARG_ADDED;
+ fallthrough;
+
+ case LEAPRAID_EVT_SAS_TOPO_RC_TARG_ADDED:
+ if (adapter->access_ctrl.shost_recovering)
+ break;
+ leapraid_transport_update_links(adapter, sas_addr,
+ hdl, phy_number,
+ link_rate, card_port);
+ if (link_rate < LEAPRAID_SAS_NEG_LINK_RATE_1_5)
+ break;
+ leapraid_add_dev(adapter, hdl);
+ break;
+ case LEAPRAID_EVT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
+ leapraid_sas_dev_remove_by_hdl(adapter, hdl);
+ break;
+ }
+ }
+
+ if (evt_data->exp_status == LEAPRAID_EVT_SAS_TOPO_ES_NOT_RESPONDING &&
+ topo_node_exp)
+ leapraid_exp_rm(adapter, sas_addr, card_port);
+
+ return 0;
+}
+
+static int leapraid_sas_topo_chg_evt(struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt)
+{
+ struct leapraid_topo_node *topo_node_exp;
+ struct leapraid_card_port *card_port;
+ struct leapraid_evt_data_sas_topo_change_list *evt_data;
+ u16 phdl;
+ u8 max_phys;
+ u64 sas_addr;
+ unsigned long flags;
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering)
+ return 0;
+
+ evt_data = fw_evt->evt_data;
+ leapraid_sas_host_add(adapter, adapter->dev_topo.card.phys_num);
+
+ if (fw_evt->ignore)
+ return 0;
+
+ phdl = le16_to_cpu(evt_data->exp_dev_hdl);
+ card_port = leapraid_get_port_by_id(adapter,
+ evt_data->physical_port,
+ false);
+ if (evt_data->exp_status == LEAPRAID_EVT_SAS_TOPO_ES_ADDED)
+ if (leapraid_exp_add(adapter, phdl) != 0)
+ return 0;
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ topo_node_exp = leapraid_exp_find_by_hdl(adapter, phdl);
+ if (topo_node_exp) {
+ sas_addr = topo_node_exp->sas_address;
+ max_phys = topo_node_exp->phys_num;
+ card_port = topo_node_exp->card_port;
+ } else if (phdl < adapter->dev_topo.card.phys_num) {
+ sas_addr = adapter->dev_topo.card.sas_address;
+ max_phys = adapter->dev_topo.card.phys_num;
+ } else {
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock,
+ flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ return leapraid_internal_sas_topo_chg_evt(adapter, card_port,
+ topo_node_exp, fw_evt,
+ sas_addr, max_phys);
+}
+
+static void leapraid_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
+{
+ sdev->no_uld_attach = no_uld_attach ? 1 : 0;
+	sdev_printk(KERN_INFO, sdev,
+		    "%s raid component\n",
+		    sdev->no_uld_attach ? "hiding" : "exposing");
+ WARN_ON(scsi_device_reprobe(sdev));
+}
+
+static void leapraid_sas_pd_add(struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_ir_change *evt_data)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_dev_p0 sas_dev_p0;
+ struct leapraid_sas_dev *sas_dev;
+ u64 sas_address;
+ u16 parent_hdl;
+ u16 hdl;
+
+ hdl = le16_to_cpu(evt_data->phys_disk_dev_hdl);
+ set_bit(hdl, (unsigned long *)adapter->dev_topo.pd_hdls);
+ sas_dev = leapraid_get_sas_dev_by_hdl(adapter, hdl);
+ if (sas_dev) {
+ leapraid_sdev_put(sas_dev);
+ dev_warn(&adapter->pdev->dev,
+ "dev handle 0x%x already exists\n", hdl);
+ return;
+ }
+
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = hdl;
+ if ((leapraid_op_config_page(adapter, &sas_dev_p0, cfgp1, cfgp2,
+ GET_SAS_DEVICE_PG0))) {
+ dev_warn(&adapter->pdev->dev, "failed to read dev page0\n");
+ return;
+ }
+
+ parent_hdl = le16_to_cpu(sas_dev_p0.parent_dev_hdl);
+ if (!leapraid_get_sas_address(adapter, parent_hdl, &sas_address))
+ leapraid_transport_update_links(adapter, sas_address, hdl,
+ sas_dev_p0.phy_num,
+ LEAPRAID_SAS_NEG_LINK_RATE_1_5,
+ leapraid_get_port_by_id(adapter,
+ sas_dev_p0.physical_port,
+ false));
+ leapraid_add_dev(adapter, hdl);
+}
+
+static void leapraid_sas_pd_delete(struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_ir_change *evt_data)
+{
+ u16 hdl;
+
+ hdl = le16_to_cpu(evt_data->phys_disk_dev_hdl);
+ leapraid_sas_dev_remove_by_hdl(adapter, hdl);
+}
+
+static void leapraid_sas_pd_hide(struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_ir_change *evt_data)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct scsi_target *starget = NULL;
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ u64 volume_wwid = 0;
+ u16 volume_hdl = 0;
+ u16 hdl;
+
+ hdl = le16_to_cpu(evt_data->phys_disk_dev_hdl);
+ leapraid_cfg_get_volume_hdl(adapter, hdl, &volume_hdl);
+ if (volume_hdl)
+ leapraid_cfg_get_volume_wwid(adapter,
+ volume_hdl,
+ &volume_wwid);
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ if (!sas_dev) {
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return;
+ }
+
+ set_bit(hdl, (unsigned long *)adapter->dev_topo.pd_hdls);
+ if (sas_dev->starget && sas_dev->starget->hostdata) {
+ starget = sas_dev->starget;
+ starget_priv = starget->hostdata;
+ starget_priv->flg |= LEAPRAID_TGT_FLG_RAID_MEMBER;
+ sas_dev->volume_hdl = volume_hdl;
+ sas_dev->volume_wwid = volume_wwid;
+ leapraid_sdev_put(sas_dev);
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ if (starget) {
+ dev_info(&adapter->pdev->dev, "hide sas_dev, hdl=0x%x\n", hdl);
+ starget_for_each_device(starget,
+ (void *)1, leapraid_reprobe_lun);
+ }
+}
+
+static void leapraid_sas_pd_expose(
+ struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_ir_change *evt_data)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct scsi_target *starget = NULL;
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ u16 hdl;
+
+ hdl = le16_to_cpu(evt_data->phys_disk_dev_hdl);
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ if (!sas_dev) {
+ dev_warn(&adapter->pdev->dev,
+ "%s:%d: sas_dev not found, hdl=0x%x\n",
+ __func__, __LINE__, hdl);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return;
+ }
+
+ sas_dev->volume_hdl = 0;
+ sas_dev->volume_wwid = 0;
+ clear_bit(hdl, (unsigned long *)adapter->dev_topo.pd_hdls);
+ if (sas_dev->starget && sas_dev->starget->hostdata) {
+ starget = sas_dev->starget;
+ starget_priv = starget->hostdata;
+ starget_priv->flg &= ~LEAPRAID_TGT_FLG_RAID_MEMBER;
+ sas_dev->led_on = false;
+ leapraid_sdev_put(sas_dev);
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (starget) {
+ dev_info(&adapter->pdev->dev,
+ "expose sas_dev, hdl=0x%x\n", hdl);
+ starget_for_each_device(starget, NULL, leapraid_reprobe_lun);
+ }
+}
+
+static void leapraid_sas_volume_add(struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_ir_change *evt_data)
+{
+ struct leapraid_raid_volume *raid_volume;
+ unsigned long flags;
+ u64 wwid;
+ u16 hdl;
+
+ hdl = le16_to_cpu(evt_data->vol_dev_hdl);
+
+ if (leapraid_cfg_get_volume_wwid(adapter, hdl, &wwid)) {
+ dev_warn(&adapter->pdev->dev, "failed to read volume page1\n");
+ return;
+ }
+
+ if (!wwid) {
+ dev_warn(&adapter->pdev->dev, "invalid WWID(handle=0x%x)\n",
+ hdl);
+ return;
+ }
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_wwid(adapter, wwid);
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+
+ if (raid_volume) {
+ dev_warn(&adapter->pdev->dev,
+ "volume handle 0x%x already exists\n", hdl);
+ return;
+ }
+
+ raid_volume = kzalloc(sizeof(*raid_volume), GFP_KERNEL);
+ if (!raid_volume)
+ return;
+
+ raid_volume->id = adapter->dev_topo.sas_id++;
+ raid_volume->channel = RAID_CHANNEL;
+ raid_volume->hdl = hdl;
+ raid_volume->wwid = wwid;
+ leapraid_raid_volume_add(adapter, raid_volume);
+ if (!adapter->scan_dev_desc.wait_scan_dev_done) {
+ if (scsi_add_device(adapter->shost, RAID_CHANNEL,
+ raid_volume->id, 0))
+ leapraid_raid_volume_remove(adapter, raid_volume);
+ dev_info(&adapter->pdev->dev,
+ "add raid volume: hdl=0x%x, wwid=0x%llx\n", hdl, wwid);
+ } else {
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ leapraid_check_boot_dev(adapter, raid_volume, RAID_CHANNEL);
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock,
+ flags);
+ }
+}
+
+static void leapraid_sas_volume_delete(struct leapraid_adapter *adapter,
+ u16 hdl)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_raid_volume *raid_volume;
+ struct scsi_target *starget = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_hdl(adapter, hdl);
+ if (!raid_volume) {
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock,
+ flags);
+ dev_warn(&adapter->pdev->dev,
+ "%s:%d: volume handle 0x%x not found\n",
+ __func__, __LINE__, hdl);
+ return;
+ }
+
+ if (raid_volume->starget) {
+ starget = raid_volume->starget;
+ starget_priv = starget->hostdata;
+ starget_priv->deleted = true;
+ }
+
+ dev_info(&adapter->pdev->dev,
+ "delete raid volume: hdl=0x%x, wwid=0x%llx\n",
+ raid_volume->hdl, raid_volume->wwid);
+ list_del(&raid_volume->list);
+ kfree(raid_volume);
+
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+
+ if (starget)
+ scsi_remove_target(&starget->dev);
+}
+
+static void leapraid_sas_ir_chg_evt(struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt)
+{
+ struct leapraid_evt_data_ir_change *evt_data;
+
+ evt_data = fw_evt->evt_data;
+
+ switch (evt_data->reason_code) {
+ case LEAPRAID_EVT_IR_RC_VOLUME_ADD:
+ leapraid_sas_volume_add(adapter, evt_data);
+ break;
+ case LEAPRAID_EVT_IR_RC_VOLUME_DELETE:
+ leapraid_sas_volume_delete(adapter,
+ le16_to_cpu(evt_data->vol_dev_hdl));
+ break;
+ case LEAPRAID_EVT_IR_RC_PD_HIDDEN_TO_ADD:
+ leapraid_sas_pd_add(adapter, evt_data);
+ break;
+ case LEAPRAID_EVT_IR_RC_PD_UNHIDDEN_TO_DELETE:
+ leapraid_sas_pd_delete(adapter, evt_data);
+ break;
+ case LEAPRAID_EVT_IR_RC_PD_CREATED_TO_HIDE:
+ leapraid_sas_pd_hide(adapter, evt_data);
+ break;
+ case LEAPRAID_EVT_IR_RC_PD_DELETED_TO_EXPOSE:
+ leapraid_sas_pd_expose(adapter, evt_data);
+ break;
+ default:
+ break;
+ }
+}
+
+static void leapraid_sas_enc_dev_stat_add_node(
+ struct leapraid_adapter *adapter, u16 hdl)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_enc_node *enc_node = NULL;
+ int rc;
+
+ enc_node = kzalloc(sizeof(*enc_node), GFP_KERNEL);
+ if (!enc_node)
+ return;
+
+ cfgp1.form = LEAPRAID_SAS_ENC_CFG_PGAD_HDL;
+ cfgp2.handle = hdl;
+ rc = leapraid_op_config_page(adapter, &enc_node->pg0, cfgp1, cfgp2,
+ GET_SAS_ENCLOSURE_PG0);
+ if (rc) {
+ kfree(enc_node);
+ return;
+ }
+ list_add_tail(&enc_node->list, &adapter->dev_topo.enc_list);
+}
+
+static void leapraid_sas_enc_dev_stat_del_node(
+ struct leapraid_enc_node *enc_node)
+{
+ if (!enc_node)
+ return;
+
+ list_del(&enc_node->list);
+ kfree(enc_node);
+}
+
+static void leapraid_sas_enc_dev_stat_chg_evt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt)
+{
+ struct leapraid_enc_node *enc_node = NULL;
+ struct leapraid_evt_data_sas_enc_dev_status_change *evt_data;
+ u16 enc_hdl;
+
+ if (adapter->access_ctrl.shost_recovering)
+ return;
+
+ evt_data = fw_evt->evt_data;
+ enc_hdl = le16_to_cpu(evt_data->enc_hdl);
+ if (enc_hdl)
+ enc_node = leapraid_enc_find_by_hdl(adapter, enc_hdl);
+ switch (evt_data->reason_code) {
+ case LEAPRAID_EVT_SAS_ENCL_RC_ADDED:
+ if (!enc_node)
+ leapraid_sas_enc_dev_stat_add_node(adapter, enc_hdl);
+ break;
+ case LEAPRAID_EVT_SAS_ENCL_RC_NOT_RESPONDING:
+ leapraid_sas_enc_dev_stat_del_node(enc_node);
+ break;
+ default:
+ break;
+ }
+}
+
+static void leapraid_remove_unresp_sas_end_dev(
+ struct leapraid_adapter *adapter)
+{
+ struct leapraid_sas_dev *sas_dev, *sas_dev_next;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ list_for_each_entry_safe(sas_dev, sas_dev_next,
+ &adapter->dev_topo.sas_dev_init_list, list) {
+ list_del_init(&sas_dev->list);
+ leapraid_sdev_put(sas_dev);
+ }
+ list_for_each_entry_safe(sas_dev, sas_dev_next,
+ &adapter->dev_topo.sas_dev_list, list) {
+ if (!sas_dev->resp)
+ list_move_tail(&sas_dev->list, &head);
+ else
+ sas_dev->resp = false;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ list_for_each_entry_safe(sas_dev, sas_dev_next, &head, list) {
+ leapraid_remove_device(adapter, sas_dev);
+ list_del_init(&sas_dev->list);
+ leapraid_sdev_put(sas_dev);
+ }
+
+	dev_info(&adapter->pdev->dev,
+		 "unresponsive sas end devices removed\n");
+}
+
+static void leapraid_remove_unresp_raid_volumes(
+ struct leapraid_adapter *adapter)
+{
+ struct leapraid_raid_volume *raid_volume, *raid_volume_next;
+
+ list_for_each_entry_safe(raid_volume, raid_volume_next,
+ &adapter->dev_topo.raid_volume_list, list) {
+ if (!raid_volume->resp)
+ leapraid_sas_volume_delete(adapter, raid_volume->hdl);
+ else
+ raid_volume->resp = false;
+ }
+	dev_info(&adapter->pdev->dev,
+		 "unresponsive raid volumes removed\n");
+}
+
+static void leapraid_remove_unresp_sas_exp(struct leapraid_adapter *adapter)
+{
+ struct leapraid_topo_node *topo_node_exp, *topo_node_exp_next;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ list_for_each_entry_safe(topo_node_exp, topo_node_exp_next,
+ &adapter->dev_topo.exp_list, list) {
+ if (!topo_node_exp->resp)
+ list_move_tail(&topo_node_exp->list, &head);
+ else
+ topo_node_exp->resp = false;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ list_for_each_entry_safe(topo_node_exp, topo_node_exp_next,
+ &head, list)
+ leapraid_exp_node_rm(adapter, topo_node_exp);
+
+	dev_info(&adapter->pdev->dev,
+		 "unresponsive sas expanders removed\n");
+}
+
+static void leapraid_remove_unresp_dev(struct leapraid_adapter *adapter)
+{
+ leapraid_remove_unresp_sas_end_dev(adapter);
+ if (adapter->adapter_attr.raid_support)
+ leapraid_remove_unresp_raid_volumes(adapter);
+ leapraid_remove_unresp_sas_exp(adapter);
+ leapraid_ublk_io_all_dev(adapter);
+}
+
+static void leapraid_del_dirty_vphy(struct leapraid_adapter *adapter)
+{
+ struct leapraid_card_port *card_port, *card_port_next;
+ struct leapraid_vphy *vphy, *vphy_next;
+
+ list_for_each_entry_safe(card_port, card_port_next,
+ &adapter->dev_topo.card_port_list, list) {
+ if (!card_port->vphys_mask)
+ continue;
+
+ list_for_each_entry_safe(vphy, vphy_next,
+ &card_port->vphys_list, list) {
+ if (!(vphy->flg & LEAPRAID_VPHY_FLG_DIRTY))
+ continue;
+
+ card_port->vphys_mask &= ~vphy->phy_mask;
+ list_del(&vphy->list);
+ kfree(vphy);
+ }
+
+ if (!card_port->vphys_mask && !card_port->sas_address)
+ card_port->flg |= LEAPRAID_CARD_PORT_FLG_DIRTY;
+ }
+}
+
+static void leapraid_del_dirty_card_port(struct leapraid_adapter *adapter)
+{
+ struct leapraid_card_port *card_port, *card_port_next;
+
+ list_for_each_entry_safe(card_port, card_port_next,
+ &adapter->dev_topo.card_port_list, list) {
+ if (!(card_port->flg & LEAPRAID_CARD_PORT_FLG_DIRTY) ||
+ card_port->flg & LEAPRAID_CARD_PORT_FLG_NEW)
+ continue;
+
+ list_del(&card_port->list);
+ kfree(card_port);
+ }
+}
+
+static void leapraid_update_dev_qdepth(struct leapraid_adapter *adapter)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct leapraid_sas_dev *sas_dev;
+ struct scsi_device *sdev;
+ u16 qdepth;
+
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv || !sdev_priv->starget_priv)
+ continue;
+ sas_dev = sdev_priv->starget_priv->sas_dev;
+ if (sas_dev && sas_dev->dev_info & LEAPRAID_DEVTYP_SSP_TGT)
+ qdepth = (sas_dev->port_type > 1) ?
+ adapter->adapter_attr.wideport_max_queue_depth :
+ adapter->adapter_attr.narrowport_max_queue_depth;
+ else if (sas_dev && sas_dev->dev_info &
+ LEAPRAID_DEVTYP_SATA_DEV)
+ qdepth = adapter->adapter_attr.sata_max_queue_depth;
+ else
+ continue;
+
+ leapraid_adjust_sdev_queue_depth(sdev, qdepth);
+ }
+}
+
+static void leapraid_update_exp_links(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node_exp,
+ u16 hdl)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_exp_p1 exp_p1;
+ int i;
+
+ cfgp2.handle = hdl;
+ for (i = 0; i < topo_node_exp->phys_num; i++) {
+ cfgp1.phy_number = i;
+ if ((leapraid_op_config_page(adapter, &exp_p1, cfgp1, cfgp2,
+ GET_SAS_EXPANDER_PG1)))
+ return;
+
+ leapraid_transport_update_links(adapter,
+ topo_node_exp->sas_address,
+ le16_to_cpu(exp_p1.attached_dev_hdl),
+ i,
+ exp_p1.neg_link_rate >> 4,
+ topo_node_exp->card_port);
+ }
+}
+
+static void leapraid_scan_exp_after_reset(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_topo_node *topo_node_exp;
+ struct leapraid_exp_p0 exp_p0;
+ unsigned long flags;
+ u16 hdl;
+ u8 port_id;
+
+ dev_info(&adapter->pdev->dev, "begin scanning expanders\n");
+
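+	/*
+	 * Iterate expander page 0 with the GET_NEXT form: seeding the
+	 * handle with 0xFFFF returns the first expander, and each pass
+	 * feeds the previously returned handle back to fetch the next one.
+	 */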
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (hdl = 0xFFFF, cfgp2.handle = hdl;
+ !leapraid_op_config_page(adapter, &exp_p0, cfgp1, cfgp2,
+ GET_SAS_EXPANDER_PG0);
+ cfgp2.handle = hdl) {
+ hdl = le16_to_cpu(exp_p0.dev_hdl);
+ port_id = exp_p0.physical_port;
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ topo_node_exp =
+ leapraid_exp_find_by_sas_address(adapter,
+ le64_to_cpu(exp_p0.sas_address),
+ leapraid_get_port_by_id(adapter,
+ port_id,
+ false));
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock,
+ flags);
+
+ if (topo_node_exp) {
+ leapraid_update_exp_links(adapter, topo_node_exp, hdl);
+ } else {
+ leapraid_exp_add(adapter, hdl);
+
+ dev_info(&adapter->pdev->dev,
+ "add exp: hdl=0x%04x, sas addr=0x%016llx\n",
+ hdl,
+ (unsigned long long)le64_to_cpu(
+ exp_p0.sas_address));
+ }
+ }
+
+ dev_info(&adapter->pdev->dev, "expanders scan complete\n");
+}
+
+static void leapraid_scan_phy_disks_after_reset(
+ struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ union cfg_param_1 cfgp1_extra = {0};
+ union cfg_param_2 cfgp2_extra = {0};
+ struct leapraid_sas_dev_p0 sas_dev_p0;
+ struct leapraid_raidpd_p0 raidpd_p0;
+ struct leapraid_sas_dev *sas_dev;
+ u8 phys_disk_num, port_id;
+ u16 hdl, parent_hdl;
+ u64 sas_addr;
+
+ dev_info(&adapter->pdev->dev, "begin scanning phys disk\n");
+
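+	/*
+	 * Same GET_NEXT iteration as the expander scan, but keyed on the
+	 * physical disk number (seeded with 0xFF) over phys disk page 0.
+	 */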
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (phys_disk_num = 0xFF, cfgp2.form_specific = phys_disk_num;
+ !leapraid_op_config_page(adapter, &raidpd_p0,
+ cfgp1, cfgp2, GET_PHY_DISK_PG0);
+ cfgp2.form_specific = phys_disk_num) {
+ phys_disk_num = raidpd_p0.phys_disk_num;
+ hdl = le16_to_cpu(raidpd_p0.dev_hdl);
+ sas_dev = leapraid_get_sas_dev_by_hdl(adapter, hdl);
+ if (sas_dev) {
+ leapraid_sdev_put(sas_dev);
+ continue;
+ }
+
+ cfgp1_extra.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2_extra.handle = hdl;
+ if (leapraid_op_config_page(adapter, &sas_dev_p0, cfgp1_extra,
+ cfgp2_extra, GET_SAS_DEVICE_PG0) !=
+ 0)
+ continue;
+
+ parent_hdl = le16_to_cpu(sas_dev_p0.parent_dev_hdl);
+ if (!leapraid_get_sas_address(adapter,
+ parent_hdl,
+ &sas_addr)) {
+ port_id = sas_dev_p0.physical_port;
+ leapraid_transport_update_links(
+ adapter, sas_addr, hdl,
+ sas_dev_p0.phy_num,
+ LEAPRAID_SAS_NEG_LINK_RATE_1_5,
+ leapraid_get_port_by_id(
+ adapter, port_id, false));
+ set_bit(hdl,
+ (unsigned long *)adapter->dev_topo.pd_hdls);
+
+ leapraid_add_dev(adapter, hdl);
+
+ dev_info(&adapter->pdev->dev,
+ "add phys disk: hdl=0x%04x, sas addr=0x%016llx\n",
+ hdl,
+ (unsigned long long)le64_to_cpu(
+ sas_dev_p0.sas_address));
+ }
+ }
+
+ dev_info(&adapter->pdev->dev, "phys disk scan complete\n");
+}
+
+static void leapraid_scan_vol_after_reset(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ union cfg_param_1 cfgp1_extra = {0};
+ union cfg_param_2 cfgp2_extra = {0};
+ struct leapraid_evt_data_ir_change evt_data;
+	struct leapraid_raid_volume *raid_volume;
+ struct leapraid_raidvol_p1 *vol_p1;
+ struct leapraid_raidvol_p0 *vol_p0;
+ unsigned long flags;
+ u16 hdl;
+
+ vol_p0 = kzalloc(sizeof(*vol_p0), GFP_KERNEL);
+ if (!vol_p0)
+ return;
+
+ vol_p1 = kzalloc(sizeof(*vol_p1), GFP_KERNEL);
+ if (!vol_p1) {
+ kfree(vol_p0);
+ return;
+ }
+
+ dev_info(&adapter->pdev->dev, "begin scanning volumes\n");
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (hdl = 0xFFFF, cfgp2.handle = hdl;
+ !leapraid_op_config_page(adapter, vol_p1, cfgp1,
+ cfgp2, GET_RAID_VOLUME_PG1);
+ cfgp2.handle = hdl) {
+ hdl = le16_to_cpu(vol_p1->dev_hdl);
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_wwid(
+ adapter,
+ le64_to_cpu(vol_p1->wwid));
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock,
+ flags);
+ if (raid_volume)
+ continue;
+
+ cfgp1_extra.size = sizeof(struct leapraid_raidvol_p0);
+ cfgp2_extra.handle = hdl;
+ if (leapraid_op_config_page(adapter, vol_p0, cfgp1_extra,
+ cfgp2_extra, GET_RAID_VOLUME_PG0))
+ continue;
+
+ if (vol_p0->volume_state == LEAPRAID_VOL_STATE_OPTIMAL ||
+ vol_p0->volume_state == LEAPRAID_VOL_STATE_ONLINE ||
+ vol_p0->volume_state == LEAPRAID_VOL_STATE_DEGRADED) {
+ memset(&evt_data, 0,
+ sizeof(struct leapraid_evt_data_ir_change));
+ evt_data.reason_code = LEAPRAID_EVT_IR_RC_VOLUME_ADD;
+ evt_data.vol_dev_hdl = vol_p1->dev_hdl;
+ leapraid_sas_volume_add(adapter, &evt_data);
+ dev_info(&adapter->pdev->dev,
+ "add volume: hdl=0x%04x\n",
+ vol_p1->dev_hdl);
+ }
+ }
+
+ kfree(vol_p0);
+ kfree(vol_p1);
+
+ dev_info(&adapter->pdev->dev, "volumes scan complete\n");
+}
+
+static void leapraid_scan_sas_dev_after_reset(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_dev_p0 sas_dev_p0;
+ struct leapraid_sas_dev *sas_dev;
+ u16 hdl, parent_hdl;
+ u64 sas_address;
+ u8 port_id;
+
+ dev_info(&adapter->pdev->dev,
+ "begin scanning sas end devices\n");
+
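+	/* GET_NEXT iteration over SAS device page 0, seeded with 0xFFFF. */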
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (hdl = 0xFFFF, cfgp2.handle = hdl;
+ !leapraid_op_config_page(adapter, &sas_dev_p0, cfgp1, cfgp2,
+ GET_SAS_DEVICE_PG0);
+ cfgp2.handle = hdl) {
+ hdl = le16_to_cpu(sas_dev_p0.dev_hdl);
+ if (!(leapraid_is_end_dev(le32_to_cpu(sas_dev_p0.dev_info))))
+ continue;
+
+ port_id = sas_dev_p0.physical_port;
+ sas_dev = leapraid_get_sas_dev_by_addr(
+ adapter,
+ le64_to_cpu(sas_dev_p0.sas_address),
+ leapraid_get_port_by_id(
+ adapter,
+ port_id,
+ false));
+ if (sas_dev) {
+ leapraid_sdev_put(sas_dev);
+ continue;
+ }
+
+ parent_hdl = le16_to_cpu(sas_dev_p0.parent_dev_hdl);
+ if (!leapraid_get_sas_address(adapter, parent_hdl,
+ &sas_address)) {
+ leapraid_transport_update_links(
+ adapter,
+ sas_address,
+ hdl,
+ sas_dev_p0.phy_num,
+ LEAPRAID_SAS_NEG_LINK_RATE_1_5,
+ leapraid_get_port_by_id(adapter,
+ port_id,
+ false));
+ leapraid_add_dev(adapter, hdl);
+ dev_info(&adapter->pdev->dev,
+ "add sas dev: hdl=0x%04x, sas addr=0x%016llx\n",
+ hdl,
+ (unsigned long long)le64_to_cpu(
+ sas_dev_p0.sas_address));
+ }
+ }
+
+ dev_info(&adapter->pdev->dev, "sas end devices scan complete\n");
+}
+
+static void leapraid_scan_all_dev_after_reset(struct leapraid_adapter *adapter)
+{
+ dev_info(&adapter->pdev->dev, "begin scanning devices\n");
+
+ leapraid_sas_host_add(adapter, adapter->dev_topo.card.phys_num);
+ leapraid_scan_exp_after_reset(adapter);
+ if (adapter->adapter_attr.raid_support) {
+ leapraid_scan_phy_disks_after_reset(adapter);
+ leapraid_scan_vol_after_reset(adapter);
+ }
+ leapraid_scan_sas_dev_after_reset(adapter);
+
+ dev_info(&adapter->pdev->dev, "devices scan complete\n");
+}
+
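+/*
+ * Post-hard-reset rebuild: drop devices that did not respond after the
+ * reset, prune dirty vphys and card ports, restore queue depths, and
+ * rescan the whole topology.
+ */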
+static void leapraid_hardreset_async_logic(struct leapraid_adapter *adapter)
+{
+ leapraid_remove_unresp_dev(adapter);
+ leapraid_del_dirty_vphy(adapter);
+ leapraid_del_dirty_card_port(adapter);
+ leapraid_update_dev_qdepth(adapter);
+ leapraid_scan_all_dev_after_reset(adapter);
+
+ if (adapter->scan_dev_desc.driver_loading)
+ leapraid_scan_dev_done(adapter);
+}
+
+static int leapraid_send_enc_cmd(struct leapraid_adapter *adapter,
+ struct leapraid_sep_rep *sep_rep,
+ struct leapraid_sep_req *sep_req)
+{
+ void *req;
+ bool reset_flg = false;
+ int rc = 0;
+
+ mutex_lock(&adapter->driver_cmds.enc_cmd.mutex);
+ rc = leapraid_check_adapter_is_op(adapter);
+ if (rc)
+ goto out;
+
+ adapter->driver_cmds.enc_cmd.status = LEAPRAID_CMD_PENDING;
+ req = leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.enc_cmd.inter_taskid);
+ memset(req, 0, LEAPRAID_REQUEST_SIZE);
+ memcpy(req, sep_req, sizeof(struct leapraid_sep_req));
+ init_completion(&adapter->driver_cmds.enc_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.enc_cmd.inter_taskid);
+ wait_for_completion_timeout(&adapter->driver_cmds.enc_cmd.done,
+ LEAPRAID_ENC_CMD_TIMEOUT * HZ);
+ if (!(adapter->driver_cmds.enc_cmd.status & LEAPRAID_CMD_DONE)) {
+ reset_flg =
+ leapraid_check_reset(
+ adapter->driver_cmds.enc_cmd.status);
+ rc = -EFAULT;
+ goto do_hard_reset;
+ }
+
+ if (adapter->driver_cmds.enc_cmd.status & LEAPRAID_CMD_REPLY_VALID)
+ memcpy(sep_rep, (void *)(&adapter->driver_cmds.enc_cmd.reply),
+ sizeof(struct leapraid_sep_rep));
+do_hard_reset:
+ if (reset_flg) {
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, FULL_RESET);
+ }
+
+ adapter->driver_cmds.enc_cmd.status = LEAPRAID_CMD_NOT_USED;
+out:
+ mutex_unlock(&adapter->driver_cmds.enc_cmd.mutex);
+ return rc;
+}
+
+static void leapraid_set_led(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev, bool on)
+{
+ struct leapraid_sep_rep sep_rep;
+ struct leapraid_sep_req sep_req;
+
+ if (!sas_dev)
+ return;
+
+ memset(&sep_req, 0, sizeof(struct leapraid_sep_req));
+ memset(&sep_rep, 0, sizeof(struct leapraid_sep_rep));
+ sep_req.func = LEAPRAID_FUNC_SCSI_ENC_PROCESSOR;
+ sep_req.act = LEAPRAID_SEP_REQ_ACT_WRITE_STATUS;
+ if (on) {
+ sep_req.slot_status =
+ cpu_to_le32(LEAPRAID_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
+ sep_req.dev_hdl = cpu_to_le16(sas_dev->hdl);
+ sep_req.flg = LEAPRAID_SEP_REQ_FLG_DEVHDL_ADDRESS;
+ if (leapraid_send_enc_cmd(adapter, &sep_rep, &sep_req)) {
+ leapraid_sdev_put(sas_dev);
+ return;
+ }
+
+ sas_dev->led_on = true;
+ if (sep_rep.adapter_status)
+ leapraid_sdev_put(sas_dev);
+ } else {
+ sep_req.slot_status = 0;
+ sep_req.slot = cpu_to_le16(sas_dev->slot);
+ sep_req.dev_hdl = 0;
+ sep_req.enc_hdl = cpu_to_le16(sas_dev->enc_hdl);
+ sep_req.flg = LEAPRAID_SEP_REQ_FLG_ENCLOSURE_SLOT_ADDRESS;
+ if ((leapraid_send_enc_cmd(adapter, &sep_rep, &sep_req))) {
+ leapraid_sdev_put(sas_dev);
+ return;
+ }
+
+ if (sep_rep.adapter_status) {
+ leapraid_sdev_put(sas_dev);
+ return;
+ }
+ }
+}
+
+static void leapraid_fw_work(struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt)
+{
+ struct leapraid_sas_dev *sas_dev;
+
+ adapter->fw_evt_s.cur_evt = fw_evt;
+ leapraid_del_fw_evt_from_list(adapter, fw_evt);
+ if (adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering) {
+ leapraid_fw_evt_put(fw_evt);
+ adapter->fw_evt_s.cur_evt = NULL;
+ return;
+ }
+ switch (fw_evt->evt_type) {
+ case LEAPRAID_EVT_SAS_DISCOVERY:
+ {
+ struct leapraid_evt_data_sas_disc *evt_data;
+
+ evt_data = fw_evt->evt_data;
+ if (evt_data->reason_code ==
+ LEAPRAID_EVT_SAS_DISC_RC_STARTED &&
+ !adapter->dev_topo.card.phys_num)
+ leapraid_sas_host_add(adapter, 0);
+ break;
+ }
+ case LEAPRAID_EVT_SAS_TOPO_CHANGE_LIST:
+ leapraid_sas_topo_chg_evt(adapter, fw_evt);
+ break;
+ case LEAPRAID_EVT_IR_CHANGE:
+ leapraid_sas_ir_chg_evt(adapter, fw_evt);
+ break;
+ case LEAPRAID_EVT_SAS_ENCL_DEV_STATUS_CHANGE:
+ leapraid_sas_enc_dev_stat_chg_evt(adapter, fw_evt);
+ break;
+ case LEAPRAID_EVT_REMOVE_DEAD_DEV:
+ while (scsi_host_in_recovery(adapter->shost) ||
+ adapter->access_ctrl.shost_recovering) {
+ if (adapter->access_ctrl.host_removing ||
+ adapter->fw_evt_s.fw_evt_cleanup)
+ goto out;
+
+ ssleep(1);
+ }
+ leapraid_hardreset_async_logic(adapter);
+ break;
+ case LEAPRAID_EVT_TURN_ON_PFA_LED:
+ sas_dev = leapraid_get_sas_dev_by_hdl(adapter,
+ fw_evt->dev_handle);
+ leapraid_set_led(adapter, sas_dev, true);
+ break;
+ case LEAPRAID_EVT_SCAN_DEV_DONE:
+ adapter->scan_dev_desc.scan_start = false;
+ break;
+ default:
+ break;
+ }
+out:
+ leapraid_fw_evt_put(fw_evt);
+ adapter->fw_evt_s.cur_evt = NULL;
+}
+
+static void leapraid_sas_dev_stat_chg_evt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_sas_dev_status_change *event_data)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_sas_dev *sas_dev = NULL;
+ u64 sas_address;
+ unsigned long flags;
+
+ switch (event_data->reason_code) {
+ case LEAPRAID_EVT_SAS_DEV_STAT_RC_INTERNAL_DEV_RESET:
+ case LEAPRAID_EVT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
+ break;
+ default:
+ return;
+ }
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+
+ sas_address = le64_to_cpu(event_data->sas_address);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(adapter,
+ sas_address,
+ leapraid_get_port_by_id(adapter,
+ event_data->physical_port,
+ false));
+
+ if (sas_dev && sas_dev->starget) {
+ starget_priv = sas_dev->starget->hostdata;
+ if (starget_priv) {
+ switch (event_data->reason_code) {
+ case LEAPRAID_EVT_SAS_DEV_STAT_RC_INTERNAL_DEV_RESET:
+ starget_priv->tm_busy = true;
+ break;
+ case LEAPRAID_EVT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
+ starget_priv->tm_busy = false;
+ break;
+ }
+ }
+ }
+
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+}
+
+static void leapraid_set_volume_delete_flag(struct leapraid_adapter *adapter,
+ u16 handle)
+{
+ struct leapraid_raid_volume *raid_volume;
+ struct leapraid_starget_priv *sas_target_priv_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_hdl(adapter, handle);
+ if (raid_volume && raid_volume->starget &&
+ raid_volume->starget->hostdata) {
+ sas_target_priv_data = raid_volume->starget->hostdata;
+ sas_target_priv_data->deleted = true;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+}
+
+static void leapraid_check_ir_change_evt(struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_ir_change *evt_data)
+{
+ u16 phys_disk_dev_hdl;
+
+ switch (evt_data->reason_code) {
+ case LEAPRAID_EVT_IR_RC_VOLUME_DELETE:
+ leapraid_set_volume_delete_flag(adapter,
+ le16_to_cpu(evt_data->vol_dev_hdl));
+ break;
+ case LEAPRAID_EVT_IR_RC_PD_UNHIDDEN_TO_DELETE:
+ phys_disk_dev_hdl =
+ le16_to_cpu(evt_data->phys_disk_dev_hdl);
+ clear_bit(phys_disk_dev_hdl,
+ (unsigned long *)adapter->dev_topo.pd_hdls);
+ leapraid_tgt_rst_send(adapter, phys_disk_dev_hdl);
+ break;
+ }
+}
+
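+/*
+ * If an expander has become unresponsive, mark any still-queued "added"
+ * or "responding" topology events for the same expander as ignorable so
+ * the worker does not re-add a device that is about to be removed.
+ */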
+static void leapraid_topo_del_evts_process_exp_status(
+ struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_sas_topo_change_list *evt_data)
+{
+ struct leapraid_fw_evt_work *fw_evt = NULL;
+ struct leapraid_evt_data_sas_topo_change_list *loc_evt_data = NULL;
+ unsigned long flags;
+ u16 exp_hdl;
+
+ exp_hdl = le16_to_cpu(evt_data->exp_dev_hdl);
+
+ switch (evt_data->exp_status) {
+ case LEAPRAID_EVT_SAS_TOPO_ES_NOT_RESPONDING:
+ spin_lock_irqsave(&adapter->fw_evt_s.fw_evt_lock, flags);
+ list_for_each_entry(fw_evt,
+ &adapter->fw_evt_s.fw_evt_list, list) {
+ if (fw_evt->evt_type !=
+ LEAPRAID_EVT_SAS_TOPO_CHANGE_LIST ||
+ fw_evt->ignore)
+ continue;
+
+ loc_evt_data = fw_evt->evt_data;
+ if ((loc_evt_data->exp_status ==
+ LEAPRAID_EVT_SAS_TOPO_ES_ADDED ||
+ loc_evt_data->exp_status ==
+ LEAPRAID_EVT_SAS_TOPO_ES_RESPONDING) &&
+ le16_to_cpu(loc_evt_data->exp_dev_hdl) == exp_hdl)
+ fw_evt->ignore = 1;
+ }
+ spin_unlock_irqrestore(&adapter->fw_evt_s.fw_evt_lock, flags);
+ break;
+ default:
+ break;
+ }
+}
+
+static void leapraid_check_topo_del_evts(struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_sas_topo_change_list *evt_data)
+{
+ int reason_code;
+ u16 hdl;
+ int i;
+
+ for (i = 0; i < evt_data->entry_num; i++) {
+ hdl = le16_to_cpu(evt_data->phy[i].attached_dev_hdl);
+ if (!hdl)
+ continue;
+
+ reason_code = evt_data->phy[i].phy_status &
+ LEAPRAID_EVT_SAS_TOPO_RC_MASK;
+ if (reason_code ==
+ LEAPRAID_EVT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
+ leapraid_tgt_not_responding(adapter, hdl);
+ }
+ leapraid_topo_del_evts_process_exp_status(adapter, evt_data);
+}
+
+static bool leapraid_async_process_evt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_evt_notify_rep *event_notify_rep)
+{
+ u16 evt = le16_to_cpu(event_notify_rep->evt);
+ bool exit_flag = false;
+
+ switch (evt) {
+ case LEAPRAID_EVT_SAS_DEV_STATUS_CHANGE:
+ leapraid_sas_dev_stat_chg_evt(adapter,
+ (struct leapraid_evt_data_sas_dev_status_change
+ *)event_notify_rep->evt_data);
+ break;
+ case LEAPRAID_EVT_IR_CHANGE:
+ leapraid_check_ir_change_evt(adapter,
+ (struct leapraid_evt_data_ir_change
+ *)event_notify_rep->evt_data);
+ break;
+ case LEAPRAID_EVT_SAS_TOPO_CHANGE_LIST:
+ leapraid_check_topo_del_evts(adapter,
+ (struct leapraid_evt_data_sas_topo_change_list
+ *)event_notify_rep->evt_data);
+ if (adapter->access_ctrl.shost_recovering) {
+ exit_flag = true;
+ return exit_flag;
+ }
+ break;
+ case LEAPRAID_EVT_SAS_DISCOVERY:
+ case LEAPRAID_EVT_SAS_ENCL_DEV_STATUS_CHANGE:
+ break;
+ default:
+ exit_flag = true;
+ return exit_flag;
+ }
+
+ return exit_flag;
+}
+
+static void leapraid_async_evt_cb_enqueue(
+ struct leapraid_adapter *adapter,
+ struct leapraid_evt_notify_rep *evt_notify_rep)
+{
+ struct leapraid_fw_evt_work *fw_evt;
+ u16 evt_sz;
+
+ fw_evt = leapraid_alloc_fw_evt_work();
+ if (!fw_evt)
+ return;
+
+ evt_sz = le16_to_cpu(evt_notify_rep->evt_data_len) * 4;
+ fw_evt->evt_data = kmemdup(evt_notify_rep->evt_data,
+ evt_sz, GFP_ATOMIC);
+ if (!fw_evt->evt_data) {
+ leapraid_fw_evt_put(fw_evt);
+ return;
+ }
+ fw_evt->adapter = adapter;
+ fw_evt->evt_type = le16_to_cpu(evt_notify_rep->evt);
+ leapraid_fw_evt_add(adapter, fw_evt);
+ leapraid_fw_evt_put(fw_evt);
+}
+
+static void leapraid_async_evt_cb(struct leapraid_adapter *adapter,
+ u8 msix_index, u32 rep_paddr)
+{
+ struct leapraid_evt_notify_rep *evt_notify_rep;
+
+ if (adapter->access_ctrl.pcie_recovering)
+ return;
+
+ evt_notify_rep = leapraid_get_reply_vaddr(adapter, rep_paddr);
+ if (unlikely(!evt_notify_rep))
+ return;
+
+ if (leapraid_async_process_evt(adapter, evt_notify_rep))
+ return;
+
+ leapraid_async_evt_cb_enqueue(adapter, evt_notify_rep);
+}
+
+static void leapraid_handle_async_event(struct leapraid_adapter *adapter,
+ u8 msix_index, u32 reply)
+{
+ struct leapraid_evt_notify_rep *leap_mpi_rep =
+ leapraid_get_reply_vaddr(adapter, reply);
+
+ if (!leap_mpi_rep)
+ return;
+
+ if (leap_mpi_rep->func != LEAPRAID_FUNC_EVENT_NOTIFY)
+ return;
+
+ leapraid_async_evt_cb(adapter, msix_index, reply);
+}
+
+void leapraid_async_turn_on_led(struct leapraid_adapter *adapter, u16 handle)
+{
+ struct leapraid_fw_evt_work *fw_event;
+
+ fw_event = leapraid_alloc_fw_evt_work();
+ if (!fw_event)
+ return;
+
+ fw_event->dev_handle = handle;
+ fw_event->adapter = adapter;
+ fw_event->evt_type = LEAPRAID_EVT_TURN_ON_PFA_LED;
+ leapraid_fw_evt_add(adapter, fw_event);
+ leapraid_fw_evt_put(fw_event);
+}
+
+static void leapraid_hardreset_barrier(struct leapraid_adapter *adapter)
+{
+ struct leapraid_fw_evt_work *fw_event;
+
+ fw_event = leapraid_alloc_fw_evt_work();
+ if (!fw_event)
+ return;
+
+ fw_event->adapter = adapter;
+ fw_event->evt_type = LEAPRAID_EVT_REMOVE_DEAD_DEV;
+ leapraid_fw_evt_add(adapter, fw_event);
+ leapraid_fw_evt_put(fw_event);
+}
+
+static void leapraid_scan_dev_complete(struct leapraid_adapter *adapter)
+{
+ struct leapraid_fw_evt_work *fw_evt;
+
+ fw_evt = leapraid_alloc_fw_evt_work();
+ if (!fw_evt)
+ return;
+
+ fw_evt->evt_type = LEAPRAID_EVT_SCAN_DEV_DONE;
+ fw_evt->adapter = adapter;
+ leapraid_fw_evt_add(adapter, fw_evt);
+ leapraid_fw_evt_put(fw_evt);
+}
+
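+/*
+ * Completion handler for internal driver commands: locate the command by
+ * callback index, stash the reply frame (plus sense data for SCSI
+ * pass-through), and wake up the waiter.
+ */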
+static u8 leapraid_driver_cmds_done(struct leapraid_adapter *adapter,
+ u16 taskid, u8 msix_index,
+ u32 rep_paddr, u8 cb_idx)
+{
+ struct leapraid_rep *leap_mpi_rep =
+ leapraid_get_reply_vaddr(adapter, rep_paddr);
+ struct leapraid_driver_cmd *sp_cmd, *_sp_cmd = NULL;
+
+ list_for_each_entry(sp_cmd, &adapter->driver_cmds.special_cmd_list,
+ list)
+ if (cb_idx == sp_cmd->cb_idx) {
+ _sp_cmd = sp_cmd;
+ break;
+ }
+
+ if (WARN_ON(!_sp_cmd))
+ return 1;
+ if (WARN_ON(_sp_cmd->status == LEAPRAID_CMD_NOT_USED))
+ return 1;
+ if (WARN_ON(taskid != _sp_cmd->hp_taskid &&
+ taskid != _sp_cmd->taskid &&
+ taskid != _sp_cmd->inter_taskid))
+ return 1;
+
+ _sp_cmd->status |= LEAPRAID_CMD_DONE;
+ if (leap_mpi_rep) {
+ memcpy((void *)(&_sp_cmd->reply), leap_mpi_rep,
+ leap_mpi_rep->msg_len * 4);
+ _sp_cmd->status |= LEAPRAID_CMD_REPLY_VALID;
+
+ if (_sp_cmd->cb_idx == LEAPRAID_SCAN_DEV_CB_IDX) {
+ u16 adapter_status;
+
+ _sp_cmd->status &= ~LEAPRAID_CMD_PENDING;
+ adapter_status =
+ le16_to_cpu(leap_mpi_rep->adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (adapter_status != LEAPRAID_ADAPTER_STATUS_SUCCESS)
+ adapter->scan_dev_desc.scan_dev_failed = true;
+
+ if (_sp_cmd->async_scan_dev) {
+ if (adapter_status ==
+ LEAPRAID_ADAPTER_STATUS_SUCCESS) {
+ leapraid_scan_dev_complete(adapter);
+ } else {
+ adapter->scan_dev_desc.scan_start_failed =
+ adapter_status;
+ }
+ return 1;
+ }
+
+ complete(&_sp_cmd->done);
+ return 1;
+ }
+
+ if (_sp_cmd->cb_idx == LEAPRAID_CTL_CB_IDX) {
+ struct leapraid_scsiio_rep *scsiio_reply;
+
+ if (leap_mpi_rep->function ==
+ LEAPRAID_FUNC_SCSIIO_REQ ||
+ leap_mpi_rep->function ==
+ LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH) {
+ scsiio_reply =
+ (struct leapraid_scsiio_rep *)leap_mpi_rep;
+ if (scsiio_reply->scsi_state &
+ LEAPRAID_SCSI_STATE_AUTOSENSE_VALID)
+ memcpy((void *)(&adapter->driver_cmds.ctl_cmd.sense),
+ leapraid_get_sense_buffer(adapter, taskid),
+ min_t(u32,
+ SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(scsiio_reply->sense_count)));
+ }
+ }
+ }
+
+ _sp_cmd->status &= ~LEAPRAID_CMD_PENDING;
+ complete(&_sp_cmd->done);
+
+ return 1;
+}
+
+static void leapraid_request_descript_handler(struct leapraid_adapter *adapter,
+ union leapraid_rep_desc_union *rpf,
+ u8 req_desc_type, u8 msix_idx)
+{
+ u32 rep;
+ u16 taskid;
+
+ rep = 0;
+ taskid = le16_to_cpu(rpf->dflt_rep.taskid);
+ switch (req_desc_type) {
+ case LEAPRAID_RPY_DESC_FLG_FP_SCSI_IO_SUCCESS:
+ case LEAPRAID_RPY_DESC_FLG_SCSI_IO_SUCCESS:
+ if (taskid <= adapter->shost->can_queue ||
+ taskid == adapter->driver_cmds.driver_scsiio_cmd.taskid) {
+ leapraid_scsiio_done(adapter, taskid, msix_idx, 0);
+ } else {
+ if (leapraid_driver_cmds_done(adapter, taskid,
+ msix_idx, 0,
+ leapraid_get_cb_idx(adapter,
+ taskid)))
+ leapraid_free_taskid(adapter, taskid);
+ }
+ break;
+ case LEAPRAID_RPY_DESC_FLG_ADDRESS_REPLY:
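+		/*
+		 * Only accept reply frame addresses that fall inside the
+		 * reply message pool; anything else is dropped and treated
+		 * as no reply.
+		 */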
+ rep = le32_to_cpu(rpf->addr_rep.rep_frame_addr);
+ if (rep > ((u32)adapter->mem_desc.rep_msg_dma +
+ adapter->adapter_attr.rep_msg_qd * LEAPRAID_REPLY_SIEZ) ||
+ rep < ((u32)adapter->mem_desc.rep_msg_dma))
+ rep = 0;
+ if (taskid) {
+ if (taskid <= adapter->shost->can_queue ||
+ taskid == adapter->driver_cmds.driver_scsiio_cmd.taskid) {
+ leapraid_scsiio_done(adapter, taskid,
+ msix_idx, rep);
+ } else {
+ if (leapraid_driver_cmds_done(adapter, taskid,
+ msix_idx, rep,
+ leapraid_get_cb_idx(adapter,
+ taskid)))
+ leapraid_free_taskid(adapter, taskid);
+ }
+ } else {
+ leapraid_handle_async_event(adapter, msix_idx, rep);
+ }
+
+ if (rep) {
+ adapter->rep_msg_host_idx =
+ (adapter->rep_msg_host_idx ==
+ (adapter->adapter_attr.rep_msg_qd - 1)) ?
+ 0 : adapter->rep_msg_host_idx + 1;
+ adapter->mem_desc.rep_msg_addr[adapter->rep_msg_host_idx] =
+ cpu_to_le32(rep);
+ wmb(); /* Make sure that all write ops are in order */
+ writel(adapter->rep_msg_host_idx,
+ &adapter->iomem_base->rep_msg_host_idx);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+int leapraid_rep_queue_handler(struct leapraid_rq *rq)
+{
+ struct leapraid_adapter *adapter = rq->adapter;
+ union leapraid_rep_desc_union *rep_desc;
+ u8 req_desc_type;
+ u64 finish_cmds;
+ u8 msix_idx;
+
+ msix_idx = rq->msix_idx;
+ finish_cmds = 0;
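+	/*
+	 * Take the per-queue busy token; bail out if another context is
+	 * already draining this reply queue.
+	 */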
+ if (!atomic_add_unless(&rq->busy, LEAPRAID_BUSY_LIMIT,
+ LEAPRAID_BUSY_LIMIT))
+ return finish_cmds;
+
+ rep_desc = &rq->rep_desc[rq->rep_post_host_idx];
+ req_desc_type = rep_desc->dflt_rep.rep_flg &
+ LEAPRAID_RPY_DESC_FLG_TYPE_MASK;
+ if (req_desc_type == LEAPRAID_RPY_DESC_FLG_UNUSED) {
+ atomic_dec(&rq->busy);
+ return finish_cmds;
+ }
+
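+	/*
+	 * Drain the ring: a descriptor that still reads as all ones has
+	 * not been posted by the firmware yet; each consumed entry is set
+	 * back to all ones below so it is recognized as unused next time.
+	 */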
+ for (;;) {
+ if (rep_desc->u.low == UINT_MAX ||
+ rep_desc->u.high == UINT_MAX)
+ break;
+
+ leapraid_request_descript_handler(adapter, rep_desc,
+ req_desc_type, msix_idx);
+		dev_dbg(&adapter->pdev->dev,
+			"LEAPRAID_SCSIIO: Handled Desc taskid %d, msix %d\n",
+			le16_to_cpu(rep_desc->dflt_rep.taskid), msix_idx);
+ rep_desc->words = cpu_to_le64(ULLONG_MAX);
+ rq->rep_post_host_idx =
+ (rq->rep_post_host_idx ==
+ (adapter->adapter_attr.rep_desc_qd -
+ LEAPRAID_BUSY_LIMIT)) ?
+ 0 : rq->rep_post_host_idx + 1;
+ req_desc_type =
+ rq->rep_desc[rq->rep_post_host_idx].dflt_rep.rep_flg &
+ LEAPRAID_RPY_DESC_FLG_TYPE_MASK;
+ finish_cmds++;
+ if (req_desc_type == LEAPRAID_RPY_DESC_FLG_UNUSED)
+ break;
+ rep_desc = rq->rep_desc + rq->rep_post_host_idx;
+ }
+
+ if (!finish_cmds) {
+ atomic_dec(&rq->busy);
+ return finish_cmds;
+ }
+
+ wmb(); /* Make sure that all write ops are in order */
+ writel(rq->rep_post_host_idx | ((msix_idx & LEAPRAID_MSIX_GROUP_MASK) <<
+ LEAPRAID_RPHI_MSIX_IDX_SHIFT),
+ &adapter->iomem_base->rep_post_reg_idx[msix_idx /
+ LEAPRAID_MSIX_GROUP_SIZE].idx);
+ atomic_dec(&rq->busy);
+ return finish_cmds;
+}
+
+static irqreturn_t leapraid_irq_handler(int irq, void *bus_id)
+{
+ struct leapraid_rq *rq = bus_id;
+ struct leapraid_adapter *adapter = rq->adapter;
+
+ dev_dbg(&adapter->pdev->dev,
+		"LEAPRAID_SCSIIO: Received an interrupt, irq %d msix %d\n",
+ irq, rq->msix_idx);
+
+ if (adapter->mask_int)
+ return IRQ_NONE;
+
+ return ((leapraid_rep_queue_handler(rq) > 0) ?
+ IRQ_HANDLED : IRQ_NONE);
+}
+
+void leapraid_sync_irqs(struct leapraid_adapter *adapter, bool poll)
+{
+ struct leapraid_int_rq *int_rq;
+ struct leapraid_blk_mq_poll_rq *blk_mq_poll_rq;
+ unsigned int i;
+
+ if (!adapter->notification_desc.msix_enable)
+ return;
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering)
+ return;
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qdex; i++) {
+ int_rq = &adapter->notification_desc.int_rqs[i];
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering)
+ return;
+
+ if (int_rq->rq.msix_idx == 0)
+ continue;
+
+ synchronize_irq(pci_irq_vector(adapter->pdev, int_rq->rq.msix_idx));
+ if (poll)
+ leapraid_rep_queue_handler(&int_rq->rq);
+ }
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qcnt; i++) {
+ blk_mq_poll_rq =
+ &adapter->notification_desc.blk_mq_poll_rqs[i];
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering)
+ return;
+
+ if (blk_mq_poll_rq->rq.msix_idx == 0)
+ continue;
+
+ leapraid_rep_queue_handler(&blk_mq_poll_rq->rq);
+ }
+}
+
+void leapraid_mq_polling_pause(struct leapraid_adapter *adapter)
+{
+ int iopoll_q_count =
+ adapter->adapter_attr.rq_cnt -
+ adapter->notification_desc.iopoll_qdex;
+ int qid;
+
+ for (qid = 0; qid < iopoll_q_count; qid++)
+ atomic_set(&adapter->notification_desc.blk_mq_poll_rqs[qid].pause, 1);
+
+ for (qid = 0; qid < iopoll_q_count; qid++) {
+ while (atomic_read(&adapter->notification_desc.blk_mq_poll_rqs[qid].busy)) {
+ cpu_relax();
+ udelay(LEAPRAID_IO_POLL_DELAY_US);
+ }
+ }
+}
+
+void leapraid_mq_polling_resume(struct leapraid_adapter *adapter)
+{
+ int iopoll_q_count =
+ adapter->adapter_attr.rq_cnt -
+ adapter->notification_desc.iopoll_qdex;
+ int qid;
+
+ for (qid = 0; qid < iopoll_q_count; qid++)
+ atomic_set(&adapter->notification_desc.blk_mq_poll_rqs[qid].pause, 0);
+}
+
+static int leapraid_unlock_host_diag(struct leapraid_adapter *adapter,
+ u32 *host_diag)
+{
+ const u32 unlock_seq[] = { 0x0, 0xF, 0x4, 0xB, 0x2, 0x7, 0xD };
+ const int max_retries = LEAPRAID_UNLOCK_RETRY_LIMIT;
+ int retry = 0;
+ unsigned int i;
+
+ *host_diag = 0;
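+	/*
+	 * Write the magic key sequence to the write-sequence register and
+	 * poll until the controller enables diagnostic register writes.
+	 */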
+ while (retry++ <= max_retries) {
+ for (i = 0; i < ARRAY_SIZE(unlock_seq); i++)
+ writel(unlock_seq[i], &adapter->iomem_base->ws);
+
+ msleep(LEAPRAID_UNLOCK_SLEEP_MS);
+
+ *host_diag = leapraid_readl(&adapter->iomem_base->host_diag);
+ if (*host_diag & LEAPRAID_DIAG_WRITE_ENABLE)
+ return 0;
+ }
+
+	dev_err(&adapter->pdev->dev, "host diag unlock timed out!\n");
+ return -EFAULT;
+}
+
+static int leapraid_host_diag_reset(struct leapraid_adapter *adapter)
+{
+ u32 host_diag;
+ u32 cnt;
+
+ dev_info(&adapter->pdev->dev, "entering host diag reset!\n");
+ pci_cfg_access_lock(adapter->pdev);
+
+ mutex_lock(&adapter->reset_desc.host_diag_mutex);
+ if (leapraid_unlock_host_diag(adapter, &host_diag))
+ goto out;
+
+ writel(host_diag | LEAPRAID_DIAG_RESET,
+ &adapter->iomem_base->host_diag);
+
+ msleep(LEAPRAID_MSLEEP_NORMAL_MS);
+ for (cnt = 0; cnt < LEAPRAID_RESET_LOOP_COUNT_DEFAULT; cnt++) {
+ host_diag = leapraid_readl(&adapter->iomem_base->host_diag);
+ if (host_diag == LEAPRAID_INVALID_HOST_DIAG_VAL)
+ goto out;
+
+ if (!(host_diag & LEAPRAID_DIAG_RESET))
+ break;
+
+ msleep(LEAPRAID_RESET_POLL_INTERVAL_MS);
+ }
+
+	writel(host_diag & ~LEAPRAID_DIAG_HOLD_ADAPTER_RESET,
+	       &adapter->iomem_base->host_diag);
+	writel(0x0, &adapter->iomem_base->ws);
+	mutex_unlock(&adapter->reset_desc.host_diag_mutex);
+	if (!leapraid_wait_adapter_ready(adapter))
+		goto out_unlocked;
+
+	pci_cfg_access_unlock(adapter->pdev);
+	dev_info(&adapter->pdev->dev, "host diag reset succeeded!\n");
+	return 0;
+out:
+	mutex_unlock(&adapter->reset_desc.host_diag_mutex);
+out_unlocked:
+	pci_cfg_access_unlock(adapter->pdev);
+	dev_info(&adapter->pdev->dev, "host diag reset failed!\n");
+	return -EFAULT;
+}
+
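+/*
+ * Return the index of the table entry matching both port_id and SAS
+ * address, or -1 if none matches.
+ */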
+static int leapraid_find_matching_port(
+ struct leapraid_card_port *card_port_table,
+ u8 count, u8 port_id, u64 sas_addr)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (card_port_table[i].port_id == port_id &&
+ card_port_table[i].sas_address == sas_addr)
+ return i;
+ }
+ return -1;
+}
+
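+/*
+ * Rebuild the card port table from SAS IO unit page 0: skip phys whose
+ * link is down, merge phys that share a port id and attached SAS address
+ * into a single entry's phy_mask, and return the number of entries.
+ */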
+static u8 leapraid_fill_card_port_table(
+ struct leapraid_adapter *adapter,
+ struct leapraid_sas_io_unit_p0 *sas_iounit_p0,
+ struct leapraid_card_port *new_card_port_table)
+{
+ u8 port_entry_num = 0, port_id;
+ u16 attached_hdl;
+ u64 attached_sas_addr;
+ int i, idx;
+
+ for (i = 0; i < adapter->dev_topo.card.phys_num; i++) {
+ if ((sas_iounit_p0->phy_info[i].neg_link_rate >> 4)
+ < LEAPRAID_SAS_NEG_LINK_RATE_1_5)
+ continue;
+
+ attached_hdl =
+ le16_to_cpu(sas_iounit_p0->phy_info[i].attached_dev_hdl);
+ if (leapraid_get_sas_address(adapter,
+ attached_hdl,
+ &attached_sas_addr) != 0)
+ continue;
+
+ port_id = sas_iounit_p0->phy_info[i].port;
+
+ idx = leapraid_find_matching_port(new_card_port_table,
+ port_entry_num,
+ port_id,
+ attached_sas_addr);
+ if (idx >= 0) {
+ new_card_port_table[idx].phy_mask |= BIT(i);
+ } else {
+ new_card_port_table[port_entry_num].port_id = port_id;
+ new_card_port_table[port_entry_num].phy_mask = BIT(i);
+ new_card_port_table[port_entry_num].sas_address =
+ attached_sas_addr;
+ port_entry_num++;
+ }
+ }
+
+ return port_entry_num;
+}
+
+static u8 leapraid_set_new_card_port_table_after_reset(
+ struct leapraid_adapter *adapter,
+ struct leapraid_card_port *new_card_port_table)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_io_unit_p0 *sas_iounit_p0 = NULL;
+ u8 port_entry_num = 0;
+ u16 sz;
+
+ sz = offsetof(struct leapraid_sas_io_unit_p0, phy_info) +
+ (adapter->dev_topo.card.phys_num *
+ sizeof(struct leapraid_sas_io_unit0_phy_info));
+ sas_iounit_p0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_p0)
+ return port_entry_num;
+
+ cfgp1.size = sz;
+ if ((leapraid_op_config_page(adapter, sas_iounit_p0, cfgp1, cfgp2,
+ GET_SAS_IOUNIT_PG0)) != 0)
+ goto out;
+
+ port_entry_num = leapraid_fill_card_port_table(adapter,
+ sas_iounit_p0,
+ new_card_port_table);
+out:
+ kfree(sas_iounit_p0);
+ return port_entry_num;
+}
+
+static void leapraid_update_existing_port(struct leapraid_adapter *adapter,
+ struct leapraid_card_port *new_table,
+ int entry_idx, int port_entry_num)
+{
+ struct leapraid_card_port *matched_card_port = NULL;
+ int matched_code;
+ int count = 0, lcount = 0;
+ u64 sas_addr;
+ int i;
+
+ matched_code = leapraid_check_card_port(adapter,
+ &new_table[entry_idx],
+ &matched_card_port,
+ &count);
+
+ if (!matched_card_port)
+ return;
+
+ if (matched_code == SAME_PORT_WITH_PARTIALLY_CHANGED_PHYS ||
+ matched_code == SAME_ADDR_WITH_PARTIALLY_CHANGED_PHYS) {
+ leapraid_add_or_del_phys_from_existing_port(adapter,
+ matched_card_port,
+ new_table,
+ entry_idx,
+ port_entry_num);
+ } else if (matched_code == SAME_ADDR_ONLY) {
+ sas_addr = new_table[entry_idx].sas_address;
+ for (i = 0; i < port_entry_num; i++) {
+ if (new_table[i].sas_address == sas_addr)
+ lcount++;
+ }
+ if (count > 1 || lcount > 1)
+ return;
+
+ leapraid_add_or_del_phys_from_existing_port(adapter,
+ matched_card_port,
+ new_table,
+ entry_idx,
+ port_entry_num);
+ }
+
+ if (matched_card_port->port_id != new_table[entry_idx].port_id)
+ matched_card_port->port_id = new_table[entry_idx].port_id;
+
+ matched_card_port->flg &= ~LEAPRAID_CARD_PORT_FLG_DIRTY;
+ matched_card_port->phy_mask = new_table[entry_idx].phy_mask;
+}
+
+static void leapraid_update_card_port_after_reset(
+ struct leapraid_adapter *adapter)
+{
+ struct leapraid_card_port *new_card_port_table;
+ struct leapraid_card_port *matched_card_port = NULL;
+ u8 port_entry_num = 0;
+ u8 nr_phys;
+ int i;
+
+ if (leapraid_get_adapter_phys(adapter, &nr_phys) || !nr_phys)
+ return;
+
+ adapter->dev_topo.card.phys_num = nr_phys;
+ new_card_port_table = kcalloc(adapter->dev_topo.card.phys_num,
+ sizeof(struct leapraid_card_port),
+ GFP_KERNEL);
+ if (!new_card_port_table)
+ return;
+
+ port_entry_num =
+ leapraid_set_new_card_port_table_after_reset(adapter,
+ new_card_port_table);
+ if (!port_entry_num)
+ return;
+
+ list_for_each_entry(matched_card_port,
+ &adapter->dev_topo.card_port_list, list) {
+ matched_card_port->flg |= LEAPRAID_CARD_PORT_FLG_DIRTY;
+ }
+
+ matched_card_port = NULL;
+ for (i = 0; i < port_entry_num; i++)
+ leapraid_update_existing_port(adapter,
+ new_card_port_table,
+ i, port_entry_num);
+}
+
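+/*
+ * A phy is treated as a virtual phy when its link is up, the attached
+ * device is a SEP, and PHY page 0 carries the VPHY flag.
+ */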
+static bool leapraid_is_valid_vphy(
+ struct leapraid_adapter *adapter,
+ struct leapraid_sas_io_unit_p0 *sas_io_unit_p0,
+ int phy_index)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_phy_p0 phy_p0;
+
+ if ((sas_io_unit_p0->phy_info[phy_index].neg_link_rate >> 4) <
+ LEAPRAID_SAS_NEG_LINK_RATE_1_5)
+ return false;
+
+ if (!(le32_to_cpu(sas_io_unit_p0->phy_info[phy_index].controller_phy_dev_info) &
+ LEAPRAID_DEVTYP_SEP))
+ return false;
+
+ cfgp1.phy_number = phy_index;
+ if (leapraid_op_config_page(adapter, &phy_p0, cfgp1, cfgp2,
+ GET_PHY_PG0))
+ return false;
+
+ if (!(le32_to_cpu(phy_p0.phy_info) & LEAPRAID_SAS_PHYINFO_VPHY))
+ return false;
+
+ return true;
+}
+
+static void leapraid_update_vphy_binding(struct leapraid_adapter *adapter,
+ struct leapraid_card_port *card_port,
+ struct leapraid_vphy *vphy,
+ int phy_index, u8 may_new_port_id,
+ u64 attached_sas_addr)
+{
+ struct leapraid_card_port *may_new_card_port;
+ struct leapraid_sas_dev *sas_dev;
+
+ may_new_card_port = leapraid_get_port_by_id(adapter,
+ may_new_port_id,
+ true);
+ if (!may_new_card_port) {
+ may_new_card_port = kzalloc(sizeof(*may_new_card_port),
+ GFP_KERNEL);
+ if (!may_new_card_port)
+ return;
+ may_new_card_port->port_id = may_new_port_id;
+		dev_info(&adapter->pdev->dev,
+			 "%s: new card port %p added, port=%d\n",
+			 __func__, may_new_card_port, may_new_port_id);
+ list_add_tail(&may_new_card_port->list,
+ &adapter->dev_topo.card_port_list);
+ }
+
+ if (card_port != may_new_card_port) {
+ if (!may_new_card_port->vphys_mask)
+ INIT_LIST_HEAD(&may_new_card_port->vphys_list);
+ may_new_card_port->vphys_mask |= BIT(phy_index);
+ card_port->vphys_mask &= ~BIT(phy_index);
+ list_move(&vphy->list, &may_new_card_port->vphys_list);
+
+ sas_dev = leapraid_get_sas_dev_by_addr(adapter,
+ attached_sas_addr,
+ card_port);
+ if (sas_dev)
+ sas_dev->card_port = may_new_card_port;
+ }
+
+ if (may_new_card_port->flg & LEAPRAID_CARD_PORT_FLG_DIRTY) {
+ may_new_card_port->sas_address = 0;
+ may_new_card_port->phy_mask = 0;
+ may_new_card_port->flg &= ~LEAPRAID_CARD_PORT_FLG_DIRTY;
+ }
+ vphy->flg &= ~LEAPRAID_VPHY_FLG_DIRTY;
+}
+
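+/*
+ * After a reset, mark every known vphy dirty, re-read SAS IO unit page 0,
+ * and rebind each vphy that is still present (matched by attached SAS
+ * address) to the port the firmware now reports.
+ */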
+static void leapraid_update_vphys_after_reset(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_io_unit_p0 *sas_iounit_p0 = NULL;
+ struct leapraid_card_port *card_port, *card_port_next;
+ struct leapraid_vphy *vphy, *vphy_next;
+ u64 attached_sas_addr;
+ u16 sz;
+ u16 attached_hdl;
+ bool found = false;
+ u8 port_id;
+ int i;
+
+ list_for_each_entry_safe(card_port, card_port_next,
+ &adapter->dev_topo.card_port_list, list) {
+ if (!card_port->vphys_mask)
+ continue;
+
+ list_for_each_entry_safe(vphy, vphy_next,
+ &card_port->vphys_list, list) {
+ vphy->flg |= LEAPRAID_VPHY_FLG_DIRTY;
+ }
+ }
+
+ sz = offsetof(struct leapraid_sas_io_unit_p0, phy_info) +
+ (adapter->dev_topo.card.phys_num *
+ sizeof(struct leapraid_sas_io_unit0_phy_info));
+ sas_iounit_p0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_p0)
+ return;
+
+ cfgp1.size = sz;
+ if ((leapraid_op_config_page(adapter, sas_iounit_p0, cfgp1, cfgp2,
+ GET_SAS_IOUNIT_PG0)) != 0)
+ goto out;
+
+ for (i = 0; i < adapter->dev_topo.card.phys_num; i++) {
+ if (!leapraid_is_valid_vphy(adapter, sas_iounit_p0, i))
+ continue;
+
+ attached_hdl =
+ le16_to_cpu(sas_iounit_p0->phy_info[i].attached_dev_hdl);
+ if (leapraid_get_sas_address(adapter, attached_hdl,
+ &attached_sas_addr) != 0)
+ continue;
+
+ found = false;
+ card_port = NULL;
+ card_port_next = NULL;
+ list_for_each_entry_safe(card_port, card_port_next,
+ &adapter->dev_topo.card_port_list,
+ list) {
+ if (!card_port->vphys_mask)
+ continue;
+
+ list_for_each_entry_safe(vphy, vphy_next,
+ &card_port->vphys_list,
+ list) {
+ if (!(vphy->flg & LEAPRAID_VPHY_FLG_DIRTY))
+ continue;
+
+ if (vphy->sas_address != attached_sas_addr)
+ continue;
+
+ if (!(vphy->phy_mask & BIT(i)))
+ vphy->phy_mask = BIT(i);
+
+ port_id = sas_iounit_p0->phy_info[i].port;
+
+ leapraid_update_vphy_binding(adapter,
+ card_port,
+ vphy,
+ i,
+ port_id,
+ attached_sas_addr);
+
+ found = true;
+ break;
+ }
+ if (found)
+ break;
+ }
+ }
+out:
+ kfree(sas_iounit_p0);
+}
+
+static void leapraid_mark_all_dev_deleted(struct leapraid_adapter *adapter)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (sdev_priv && sdev_priv->starget_priv)
+ sdev_priv->starget_priv->deleted = true;
+ }
+}
+
+static void leapraid_free_enc_list(struct leapraid_adapter *adapter)
+{
+ struct leapraid_enc_node *enc_dev, *enc_dev_next;
+
+ list_for_each_entry_safe(enc_dev, enc_dev_next,
+ &adapter->dev_topo.enc_list,
+ list) {
+ list_del(&enc_dev->list);
+ kfree(enc_dev);
+ }
+}
+
+static void leapraid_rebuild_enc_list_after_reset(
+ struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_enc_node *enc_node;
+ u16 enc_hdl;
+ int rc;
+
+ leapraid_free_enc_list(adapter);
+
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (enc_hdl = 0xFFFF; ; enc_hdl = le16_to_cpu(enc_node->pg0.enc_hdl)) {
+ enc_node = kzalloc(sizeof(*enc_node),
+ GFP_KERNEL);
+ if (!enc_node)
+ return;
+
+ cfgp2.handle = enc_hdl;
+ rc = leapraid_op_config_page(adapter, &enc_node->pg0, cfgp1,
+ cfgp2, GET_SAS_ENCLOSURE_PG0);
+ if (rc) {
+ kfree(enc_node);
+ return;
+ }
+
+ list_add_tail(&enc_node->list, &adapter->dev_topo.enc_list);
+ }
+}
+
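+/*
+ * Mark a SAS device that responded after reset: match by SAS address,
+ * slot and card port, clear its deleted/tm_busy state, refresh the
+ * enclosure info, and pick up a changed device handle.
+ */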
+static void leapraid_mark_resp_sas_dev(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev_p0 *sas_dev_p0)
+{
+ struct leapraid_starget_priv *starget_priv = NULL;
+ struct leapraid_enc_node *enc_node = NULL;
+ struct leapraid_card_port *card_port;
+ struct leapraid_sas_dev *sas_dev;
+ struct scsi_target *starget;
+ unsigned long flags;
+
+ card_port = leapraid_get_port_by_id(adapter, sas_dev_p0->physical_port,
+ false);
+ if (sas_dev_p0->enc_hdl) {
+ enc_node = leapraid_enc_find_by_hdl(adapter,
+ le16_to_cpu(
+ sas_dev_p0->enc_hdl));
+ if (!enc_node)
+ dev_info(&adapter->pdev->dev,
+ "enc hdl 0x%04x has no matched enc dev\n",
+ le16_to_cpu(sas_dev_p0->enc_hdl));
+ }
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_list, list) {
+ if (sas_dev->sas_addr == le64_to_cpu(sas_dev_p0->sas_address) &&
+ sas_dev->slot == le16_to_cpu(sas_dev_p0->slot) &&
+ sas_dev->card_port == card_port) {
+ sas_dev->resp = true;
+ starget = sas_dev->starget;
+ if (starget && starget->hostdata) {
+ starget_priv = starget->hostdata;
+ starget_priv->tm_busy = false;
+ starget_priv->deleted = false;
+ } else {
+ starget_priv = NULL;
+ }
+
+ if (starget) {
+ starget_printk(KERN_INFO, starget,
+ "dev: hdl=0x%04x, sas addr=0x%016llx, port_id=%d\n",
+ sas_dev->hdl,
+ (unsigned long long)sas_dev->sas_addr,
+ sas_dev->card_port->port_id);
+ if (sas_dev->enc_hdl != 0)
+ starget_printk(KERN_INFO, starget,
+ "enc info: enc_lid=0x%016llx, slot=%d\n",
+ (unsigned long long)sas_dev->enc_lid,
+ sas_dev->slot);
+ }
+
+ if (le16_to_cpu(sas_dev_p0->flg) &
+ LEAPRAID_SAS_DEV_P0_FLG_ENC_LEVEL_VALID) {
+ sas_dev->enc_level = sas_dev_p0->enc_level;
+ memcpy(sas_dev->connector_name,
+ sas_dev_p0->connector_name, 4);
+ sas_dev->connector_name[4] = '\0';
+ } else {
+ sas_dev->enc_level = 0;
+ sas_dev->connector_name[0] = '\0';
+ }
+
+ sas_dev->enc_hdl =
+ le16_to_cpu(sas_dev_p0->enc_hdl);
+ if (enc_node) {
+ sas_dev->enc_lid =
+ le64_to_cpu(enc_node->pg0.enc_lid);
+ }
+ if (sas_dev->hdl == le16_to_cpu(sas_dev_p0->dev_hdl))
+ goto out;
+
+			dev_info(&adapter->pdev->dev,
+				 "hdl changed: 0x%04x -> 0x%04x\n",
+				 sas_dev->hdl,
+				 le16_to_cpu(sas_dev_p0->dev_hdl));
+			sas_dev->hdl = le16_to_cpu(sas_dev_p0->dev_hdl);
+ if (starget_priv)
+ starget_priv->hdl =
+ le16_to_cpu(sas_dev_p0->dev_hdl);
+ goto out;
+ }
+ }
+out:
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+}
+
+static void leapraid_search_resp_sas_dev(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_dev_p0 sas_dev_p0;
+ u32 device_info;
+
+ dev_info(&adapter->pdev->dev,
+ "begin searching for sas end devices\n");
+
+ if (list_empty(&adapter->dev_topo.sas_dev_list))
+ goto out;
+
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (cfgp2.handle = 0xFFFF;
+ !leapraid_op_config_page(adapter, &sas_dev_p0,
+ cfgp1, cfgp2, GET_SAS_DEVICE_PG0);
+ cfgp2.handle = le16_to_cpu(sas_dev_p0.dev_hdl)) {
+ device_info = le32_to_cpu(sas_dev_p0.dev_info);
+ if (!(leapraid_is_end_dev(device_info)))
+ continue;
+
+ leapraid_mark_resp_sas_dev(adapter, &sas_dev_p0);
+ }
+out:
+ dev_info(&adapter->pdev->dev,
+ "sas end devices searching complete\n");
+}
+
+static void leapraid_mark_resp_raid_volume(struct leapraid_adapter *adapter,
+ u64 wwid, u16 hdl)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_raid_volume *raid_volume;
+ struct scsi_target *starget;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ list_for_each_entry(raid_volume,
+ &adapter->dev_topo.raid_volume_list, list) {
+ if (raid_volume->wwid == wwid && raid_volume->starget) {
+ starget = raid_volume->starget;
+ if (starget && starget->hostdata) {
+ starget_priv = starget->hostdata;
+ starget_priv->deleted = false;
+ } else {
+ starget_priv = NULL;
+ }
+
+ raid_volume->resp = true;
+ spin_unlock_irqrestore(
+ &adapter->dev_topo.raid_volume_lock,
+ flags);
+
+ starget_printk(
+ KERN_INFO, raid_volume->starget,
+ "raid volume: hdl=0x%04x, wwid=0x%016llx\n",
+ hdl, (unsigned long long)raid_volume->wwid);
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock,
+ flags);
+ if (raid_volume->hdl == hdl) {
+ spin_unlock_irqrestore(
+ &adapter->dev_topo.raid_volume_lock,
+ flags);
+ return;
+ }
+
+ dev_info(&adapter->pdev->dev,
+ "hdl changed: 0x%04x -> 0x%04x\n",
+ raid_volume->hdl, hdl);
+
+ raid_volume->hdl = hdl;
+ if (starget_priv)
+ starget_priv->hdl = hdl;
+ spin_unlock_irqrestore(
+ &adapter->dev_topo.raid_volume_lock,
+ flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+}
+
+static void leapraid_search_resp_raid_volume(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_1 cfgp1_extra = {0};
+ union cfg_param_2 cfgp2 = {0};
+ union cfg_param_2 cfgp2_extra = {0};
+ struct leapraid_raidvol_p1 raidvol_p1;
+ struct leapraid_raidvol_p0 raidvol_p0;
+ struct leapraid_raidpd_p0 raidpd_p0;
+ u16 hdl;
+ u8 phys_disk_num;
+
+ if (!adapter->adapter_attr.raid_support)
+ return;
+
+ dev_info(&adapter->pdev->dev,
+ "begin searching for raid volumes\n");
+
+ if (list_empty(&adapter->dev_topo.raid_volume_list))
+ goto out;
+
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (hdl = 0xFFFF, cfgp2.handle = hdl;
+ !leapraid_op_config_page(adapter, &raidvol_p1, cfgp1, cfgp2,
+ GET_RAID_VOLUME_PG1);
+ cfgp2.handle = hdl) {
+ hdl = le16_to_cpu(raidvol_p1.dev_hdl);
+ cfgp1_extra.size = sizeof(struct leapraid_raidvol_p0);
+ cfgp2_extra.handle = hdl;
+ if (leapraid_op_config_page(adapter, &raidvol_p0, cfgp1_extra,
+ cfgp2_extra, GET_RAID_VOLUME_PG0))
+ continue;
+
+ if (raidvol_p0.volume_state == LEAPRAID_VOL_STATE_OPTIMAL ||
+ raidvol_p0.volume_state == LEAPRAID_VOL_STATE_ONLINE ||
+ raidvol_p0.volume_state == LEAPRAID_VOL_STATE_DEGRADED)
+ leapraid_mark_resp_raid_volume(
+ adapter,
+ le64_to_cpu(raidvol_p1.wwid),
+ hdl);
+ }
+
+ memset(adapter->dev_topo.pd_hdls, 0, adapter->dev_topo.pd_hdls_sz);
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (phys_disk_num = 0xFF, cfgp2.form_specific = phys_disk_num;
+ !leapraid_op_config_page(adapter, &raidpd_p0, cfgp1, cfgp2,
+ GET_PHY_DISK_PG0);
+ cfgp2.form_specific = phys_disk_num) {
+ phys_disk_num = raidpd_p0.phys_disk_num;
+ hdl = le16_to_cpu(raidpd_p0.dev_hdl);
+ set_bit(hdl, (unsigned long *)adapter->dev_topo.pd_hdls);
+ }
+out:
+ dev_info(&adapter->pdev->dev,
+ "raid volumes searching complete\n");
+}
+
+static void leapraid_mark_resp_exp(struct leapraid_adapter *adapter,
+ struct leapraid_exp_p0 *exp_pg0)
+{
+ struct leapraid_enc_node *enc_node = NULL;
+ struct leapraid_topo_node *topo_node_exp;
+ u16 enc_hdl = le16_to_cpu(exp_pg0->enc_hdl);
+ u64 sas_address = le64_to_cpu(exp_pg0->sas_address);
+ u16 hdl = le16_to_cpu(exp_pg0->dev_hdl);
+ u8 port_id = exp_pg0->physical_port;
+ struct leapraid_card_port *card_port = leapraid_get_port_by_id(adapter,
+ port_id,
+ false);
+ unsigned long flags;
+ int i;
+
+ if (enc_hdl)
+ enc_node = leapraid_enc_find_by_hdl(adapter, enc_hdl);
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ list_for_each_entry(topo_node_exp, &adapter->dev_topo.exp_list, list) {
+ if (topo_node_exp->sas_address != sas_address ||
+ topo_node_exp->card_port != card_port)
+ continue;
+
+ topo_node_exp->resp = true;
+ if (enc_node) {
+ topo_node_exp->enc_lid =
+ le64_to_cpu(enc_node->pg0.enc_lid);
+ topo_node_exp->enc_hdl = le16_to_cpu(exp_pg0->enc_hdl);
+ }
+ if (topo_node_exp->hdl == hdl)
+ goto out;
+
+ dev_info(&adapter->pdev->dev,
+ "hdl changed: 0x%04x -> 0x%04x\n",
+ topo_node_exp->hdl, hdl);
+ topo_node_exp->hdl = hdl;
+ for (i = 0; i < topo_node_exp->phys_num; i++)
+ topo_node_exp->card_phy[i].hdl = hdl;
+ goto out;
+ }
+out:
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+}
+
+static void leapraid_search_resp_exp(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_exp_p0 exp_p0;
+ u64 sas_address;
+ u16 hdl;
+ u8 port;
+
+ dev_info(&adapter->pdev->dev,
+ "begin searching for expanders\n");
+ if (list_empty(&adapter->dev_topo.exp_list))
+ goto out;
+
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (hdl = 0xFFFF, cfgp2.handle = hdl;
+ !leapraid_op_config_page(adapter, &exp_p0, cfgp1, cfgp2,
+ GET_SAS_EXPANDER_PG0);
+ cfgp2.handle = hdl) {
+ hdl = le16_to_cpu(exp_p0.dev_hdl);
+ sas_address = le64_to_cpu(exp_p0.sas_address);
+ port = exp_p0.physical_port;
+
+ dev_info(&adapter->pdev->dev,
+			 "exp detected: hdl=0x%04x, sas=0x%016llx, port=%u\n",
+ hdl, (unsigned long long)sas_address,
+ ((adapter->adapter_attr.enable_mp) ? (port) :
+ (LEAPRAID_DISABLE_MP_PORT_ID)));
+ leapraid_mark_resp_exp(adapter, &exp_p0);
+ }
+out:
+ dev_info(&adapter->pdev->dev,
+ "expander searching complete\n");
+}
+
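+/*
+ * Count the SCSI commands still outstanding and, if any remain, wait up
+ * to 10 seconds for them to complete before the reset proceeds.
+ */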
+void leapraid_wait_cmds_done(struct leapraid_adapter *adapter)
+{
+ struct leapraid_io_req_tracker *io_req_tracker;
+ unsigned long flags;
+ u16 i;
+
+ adapter->reset_desc.pending_io_cnt = 0;
+ if (!leapraid_pci_active(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s %s: pci error, device reset or unplugged!\n",
+ adapter->adapter_attr.name, __func__);
+ return;
+ }
+
+ if (leapraid_get_adapter_state(adapter) != LEAPRAID_DB_OPERATIONAL)
+ return;
+
+ spin_lock_irqsave(&adapter->dynamic_task_desc.task_lock, flags);
+ for (i = 1; i <= adapter->shost->can_queue; i++) {
+ io_req_tracker = leapraid_get_io_tracker_from_taskid(adapter,
+ i);
+ if (io_req_tracker && io_req_tracker->taskid != 0)
+ if (io_req_tracker->scmd)
+ adapter->reset_desc.pending_io_cnt++;
+ }
+ spin_unlock_irqrestore(&adapter->dynamic_task_desc.task_lock, flags);
+
+ if (!adapter->reset_desc.pending_io_cnt)
+ return;
+
+ wait_event_timeout(adapter->reset_desc.reset_wait_queue,
+ adapter->reset_desc.pending_io_cnt == 0, 10 * HZ);
+}
+
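+/*
+ * Single-threaded hard reset entry point: a caller that loses the
+ * trylock waits for the in-flight reset and returns its result. On
+ * success the topology (ports, vphys, enclosures, SAS devices, RAID
+ * volumes, expanders) is re-discovered.
+ */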
+int leapraid_hard_reset_handler(struct leapraid_adapter *adapter,
+ enum reset_type type)
+{
+ unsigned long flags;
+ int rc;
+
+ if (!mutex_trylock(&adapter->reset_desc.adapter_reset_mutex)) {
+ do {
+ ssleep(1);
+ } while (adapter->access_ctrl.shost_recovering);
+ return adapter->reset_desc.adapter_reset_results;
+ }
+
+ if (!leapraid_pci_active(adapter)) {
+ if (leapraid_pci_removed(adapter)) {
+ dev_info(&adapter->pdev->dev,
+ "pci_dev removed, pausing polling and cleaning cmds\n");
+ leapraid_mq_polling_pause(adapter);
+ leapraid_clean_active_scsi_cmds(adapter);
+ leapraid_mq_polling_resume(adapter);
+ }
+ rc = 0;
+ goto exit_pci_unavailable;
+ }
+
+ dev_info(&adapter->pdev->dev, "starting hard reset\n");
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ adapter->access_ctrl.shost_recovering = true;
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+
+ leapraid_wait_cmds_done(adapter);
+ leapraid_mask_int(adapter);
+ leapraid_mq_polling_pause(adapter);
+ rc = leapraid_make_adapter_ready(adapter, type);
+ if (rc) {
+ dev_err(&adapter->pdev->dev,
+ "failed to make adapter ready, rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = leapraid_fw_log_init(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "firmware log init failed\n");
+ goto out;
+ }
+
+ leapraid_clean_active_cmds(adapter);
+ if (adapter->scan_dev_desc.driver_loading &&
+ adapter->scan_dev_desc.scan_dev_failed) {
+		dev_err(&adapter->pdev->dev,
+			"device scan failed while driver was loading\n");
+ adapter->access_ctrl.host_removing = true;
+ rc = -EFAULT;
+ goto out;
+ }
+
+ rc = leapraid_make_adapter_available(adapter);
+ if (!rc) {
+ dev_info(&adapter->pdev->dev,
+ "adapter is now available, rebuilding topology\n");
+ if (adapter->adapter_attr.enable_mp) {
+ leapraid_update_card_port_after_reset(adapter);
+ leapraid_update_vphys_after_reset(adapter);
+ }
+ leapraid_mark_all_dev_deleted(adapter);
+ leapraid_rebuild_enc_list_after_reset(adapter);
+ leapraid_search_resp_sas_dev(adapter);
+ leapraid_search_resp_raid_volume(adapter);
+ leapraid_search_resp_exp(adapter);
+ leapraid_hardreset_barrier(adapter);
+ }
+out:
+ dev_info(&adapter->pdev->dev, "hard reset %s\n",
+ ((rc == 0) ? "SUCCESS" : "FAILED"));
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ adapter->reset_desc.adapter_reset_results = rc;
+ adapter->access_ctrl.shost_recovering = false;
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+ adapter->reset_desc.reset_cnt++;
+ mutex_unlock(&adapter->reset_desc.adapter_reset_mutex);
+
+ if (rc)
+ leapraid_clean_active_scsi_cmds(adapter);
+ leapraid_mq_polling_resume(adapter);
+
+	return rc;
+
+exit_pci_unavailable:
+	dev_info(&adapter->pdev->dev, "pcie unavailable!\n");
+ return rc;
+}
+
+static int leapraid_get_adapter_features(struct leapraid_adapter *adapter)
+{
+ struct leapraid_adapter_features_req leap_mpi_req;
+ struct leapraid_adapter_features_rep leap_mpi_rep;
+ u8 fw_major, fw_minor, fw_build, fw_release;
+ u32 db;
+ int r;
+
+ db = leapraid_readl(&adapter->iomem_base->db);
+ if (db & LEAPRAID_DB_USED ||
+ (db & LEAPRAID_DB_MASK) == LEAPRAID_DB_FAULT)
+ return -EFAULT;
+
+ if (((db & LEAPRAID_DB_MASK) != LEAPRAID_DB_READY) &&
+ ((db & LEAPRAID_DB_MASK) != LEAPRAID_DB_OPERATIONAL)) {
+ if (!leapraid_wait_adapter_ready(adapter))
+ return -EFAULT;
+ }
+
+ memset(&leap_mpi_req, 0, sizeof(struct leapraid_adapter_features_req));
+ memset(&leap_mpi_rep, 0, sizeof(struct leapraid_adapter_features_rep));
+ leap_mpi_req.func = LEAPRAID_FUNC_GET_ADAPTER_FEATURES;
+ r = leapraid_handshake_func(adapter,
+ sizeof(struct leapraid_adapter_features_req),
+ (u32 *)&leap_mpi_req,
+ sizeof(struct leapraid_adapter_features_rep),
+ (u16 *)&leap_mpi_rep);
+ if (r) {
+ dev_err(&adapter->pdev->dev,
+ "%s %s: handshake failed, r=%d\n",
+ adapter->adapter_attr.name, __func__, r);
+ return r;
+ }
+
+ memset(&adapter->adapter_attr.features, 0,
+ sizeof(struct leapraid_adapter_features));
+ adapter->adapter_attr.features.req_slot =
+ le16_to_cpu(leap_mpi_rep.req_slot);
+ adapter->adapter_attr.features.hp_slot =
+ le16_to_cpu(leap_mpi_rep.hp_slot);
+ adapter->adapter_attr.features.adapter_caps =
+ le32_to_cpu(leap_mpi_rep.adapter_caps);
+ adapter->adapter_attr.features.max_volumes =
+ leap_mpi_rep.max_volumes;
+ if (!adapter->adapter_attr.features.max_volumes)
+ adapter->adapter_attr.features.max_volumes =
+ LEAPRAID_MAX_VOLUMES_DEFAULT;
+ adapter->adapter_attr.features.max_dev_handle =
+ le16_to_cpu(leap_mpi_rep.max_dev_hdl);
+ if (!adapter->adapter_attr.features.max_dev_handle)
+ adapter->adapter_attr.features.max_dev_handle =
+ LEAPRAID_MAX_DEV_HANDLE_DEFAULT;
+ adapter->adapter_attr.features.min_dev_handle =
+ le16_to_cpu(leap_mpi_rep.min_dev_hdl);
+ if ((adapter->adapter_attr.features.adapter_caps &
+ LEAPRAID_ADAPTER_FEATURES_CAP_INTEGRATED_RAID))
+ adapter->adapter_attr.raid_support = true;
+ if (WARN_ON(!(adapter->adapter_attr.features.adapter_caps &
+ LEAPRAID_ADAPTER_FEATURES_CAP_ATOMIC_REQ)))
+ return -EFAULT;
+ adapter->adapter_attr.features.fw_version =
+ le32_to_cpu(leap_mpi_rep.fw_version);
+
+ fw_major = (adapter->adapter_attr.features.fw_version >> 24) & 0xFF;
+ fw_minor = (adapter->adapter_attr.features.fw_version >> 16) & 0xFF;
+ fw_build = (adapter->adapter_attr.features.fw_version >> 8) & 0xFF;
+ fw_release = adapter->adapter_attr.features.fw_version & 0xFF;
+
+ dev_info(&adapter->pdev->dev,
+ "Firmware version: %u.%u.%u.%u (0x%08x)\n",
+ fw_major, fw_minor, fw_build, fw_release,
+ adapter->adapter_attr.features.fw_version);
+
+ if (fw_major < 2) {
+ dev_err(&adapter->pdev->dev,
+ "Unsupported firmware major version, requires >= 2\n");
+ return -EFAULT;
+ }
+ adapter->shost->max_id = -1;
+
+ return 0;
+}
+
+static inline void leapraid_disable_pcie(struct leapraid_adapter *adapter)
+{
+ mutex_lock(&adapter->access_ctrl.pci_access_lock);
+ if (adapter->iomem_base) {
+ iounmap(adapter->iomem_base);
+ adapter->iomem_base = NULL;
+ }
+ if (pci_is_enabled(adapter->pdev)) {
+ pci_disable_pcie_error_reporting(adapter->pdev);
+ pci_release_regions(adapter->pdev);
+ pci_disable_device(adapter->pdev);
+ }
+ mutex_unlock(&adapter->access_ctrl.pci_access_lock);
+}
+
+static int leapraid_enable_pcie(struct leapraid_adapter *adapter)
+{
+ u64 dma_mask;
+ int rc;
+
+ rc = pci_enable_device(adapter->pdev);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "failed to enable PCI device\n");
+ return rc;
+ }
+
+ rc = pci_request_regions(adapter->pdev, LEAPRAID_DRIVER_NAME);
+ if (rc) {
+ dev_err(&adapter->pdev->dev,
+ "failed to obtain PCI resources\n");
+		goto disable_device;
+ }
+
+ if (sizeof(dma_addr_t) > 4) {
+ dma_mask = DMA_BIT_MASK(64);
+ adapter->adapter_attr.use_32_dma_mask = false;
+ } else {
+ dma_mask = DMA_BIT_MASK(32);
+ adapter->adapter_attr.use_32_dma_mask = true;
+ }
+
+ rc = dma_set_mask_and_coherent(&adapter->pdev->dev, dma_mask);
+ if (rc) {
+		dev_err(&adapter->pdev->dev,
+			"failed to set DMA mask 0x%llx\n", dma_mask);
+ goto disable_pcie;
+ }
+ adapter->iomem_base = ioremap(pci_resource_start(adapter->pdev, 0),
+ sizeof(struct leapraid_reg_base));
+ if (!adapter->iomem_base) {
+ dev_err(&adapter->pdev->dev,
+ "failed to map memory for controller registers\n");
+ rc = -ENOMEM;
+ goto disable_pcie;
+ }
+
+ pci_enable_pcie_error_reporting(adapter->pdev);
+ pci_set_master(adapter->pdev);
+
+ return 0;
+
+disable_pcie:
+	pci_release_regions(adapter->pdev);
+disable_device:
+	pci_disable_device(adapter->pdev);
+	return rc;
+}
+
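+/*
+ * Spread online CPUs across the interrupt reply queues: each queue gets
+ * nr_cpus / total_msix CPUs, and the first nr_cpus % total_msix queues
+ * get one extra.
+ */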
+static void leapraid_cpus_on_irq(struct leapraid_adapter *adapter)
+{
+ struct leapraid_int_rq *int_rq;
+ unsigned int i, base_group, this_group;
+ unsigned int cpu, nr_cpus, total_msix, index = 0;
+
+ total_msix = adapter->notification_desc.iopoll_qdex;
+ nr_cpus = num_online_cpus();
+
+ if (!nr_cpus || !total_msix)
+ return;
+ base_group = nr_cpus / total_msix;
+
+ cpu = cpumask_first(cpu_online_mask);
+ for (index = 0; index < adapter->notification_desc.iopoll_qdex;
+ index++) {
+ int_rq = &adapter->notification_desc.int_rqs[index];
+
+ if (cpu >= nr_cpus)
+ break;
+
+ this_group = base_group +
+ (index < (nr_cpus % total_msix) ? 1 : 0);
+
+ for (i = 0 ; i < this_group ; i++) {
+ adapter->notification_desc.msix_cpu_map[cpu] =
+ int_rq->rq.msix_idx;
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+ }
+}
+
+static void leapraid_map_msix_to_cpu(struct leapraid_adapter *adapter)
+{
+ struct leapraid_int_rq *int_rq;
+ const cpumask_t *affinity_mask;
+ u32 i;
+ u16 cpu;
+
+ if (!adapter->adapter_attr.rq_cnt)
+ return;
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qdex; i++) {
+ int_rq = &adapter->notification_desc.int_rqs[i];
+ affinity_mask = pci_irq_get_affinity(adapter->pdev,
+ int_rq->rq.msix_idx);
+ if (!affinity_mask)
+ goto out;
+
+ for_each_cpu_and(cpu, affinity_mask, cpu_online_mask) {
+ if (cpu >= adapter->notification_desc.msix_cpu_map_sz)
+ break;
+
+ adapter->notification_desc.msix_cpu_map[cpu] =
+ int_rq->rq.msix_idx;
+ }
+ }
+out:
+ leapraid_cpus_on_irq(adapter);
+}
+
+static void leapraid_configure_reply_queue_affinity(
+ struct leapraid_adapter *adapter)
+{
+ if (!adapter || !adapter->notification_desc.msix_enable)
+ return;
+
+ leapraid_map_msix_to_cpu(adapter);
+}
+
+static void leapraid_free_irq(struct leapraid_adapter *adapter)
+{
+ struct leapraid_int_rq *int_rq;
+ unsigned int i;
+
+ if (!adapter->notification_desc.int_rqs)
+ return;
+
+ for (i = 0; i < adapter->notification_desc.int_rqs_allocated; i++) {
+ int_rq = &adapter->notification_desc.int_rqs[i];
+ if (!int_rq)
+ continue;
+
+ irq_set_affinity_hint(pci_irq_vector(adapter->pdev,
+ int_rq->rq.msix_idx), NULL);
+ free_irq(pci_irq_vector(adapter->pdev, int_rq->rq.msix_idx),
+ &int_rq->rq);
+ }
+ adapter->notification_desc.int_rqs_allocated = 0;
+
+ if (!adapter->notification_desc.msix_enable)
+ return;
+
+ pci_free_irq_vectors(adapter->pdev);
+ adapter->notification_desc.msix_enable = false;
+
+ kfree(adapter->notification_desc.blk_mq_poll_rqs);
+ adapter->notification_desc.blk_mq_poll_rqs = NULL;
+
+ kfree(adapter->notification_desc.int_rqs);
+ adapter->notification_desc.int_rqs = NULL;
+
+ kfree(adapter->notification_desc.msix_cpu_map);
+ adapter->notification_desc.msix_cpu_map = NULL;
+}
+
+static inline int leapraid_msix_cnt(struct pci_dev *pdev)
+{
+ return pci_msix_vec_count(pdev);
+}
+
+static inline int leapraid_msi_cnt(struct pci_dev *pdev)
+{
+ return pci_msi_vec_count(pdev);
+}
+
+static int leapraid_setup_irqs(struct leapraid_adapter *adapter)
+{
+ unsigned int i;
+ int rc = 0;
+
+ if (interrupt_mode == 0) {
+ rc = pci_alloc_irq_vectors_affinity(
+ adapter->pdev,
+ adapter->notification_desc.iopoll_qdex,
+ adapter->notification_desc.iopoll_qdex,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, NULL);
+
+ if (rc < 0) {
+			dev_err(&adapter->pdev->dev,
+				"failed to allocate %d MSI-X vectors!\n",
+				adapter->notification_desc.iopoll_qdex);
+ return rc;
+ }
+ }
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qdex; i++) {
+ adapter->notification_desc.int_rqs[i].rq.adapter = adapter;
+ adapter->notification_desc.int_rqs[i].rq.msix_idx = i;
+ atomic_set(&adapter->notification_desc.int_rqs[i].rq.busy, 0);
+ if (interrupt_mode == 0)
+ snprintf(adapter->notification_desc.int_rqs[i].rq.name,
+ LEAPRAID_NAME_LENGTH, "%s%u-MSIx%u",
+ LEAPRAID_DRIVER_NAME,
+ adapter->adapter_attr.id, i);
+ else if (interrupt_mode == 1)
+ snprintf(adapter->notification_desc.int_rqs[i].rq.name,
+ LEAPRAID_NAME_LENGTH, "%s%u-MSI%u",
+ LEAPRAID_DRIVER_NAME,
+ adapter->adapter_attr.id, i);
+
+ rc = request_irq(pci_irq_vector(adapter->pdev, i),
+ leapraid_irq_handler,
+ IRQF_SHARED,
+ adapter->notification_desc.int_rqs[i].rq.name,
+ &adapter->notification_desc.int_rqs[i].rq);
+ if (rc) {
+ dev_err(&adapter->pdev->dev,
+ "MSI/MSIx: request_irq %s failed!\n",
+ adapter->notification_desc.int_rqs[i].rq.name);
+ return rc;
+ }
+ adapter->notification_desc.int_rqs_allocated++;
+ }
+
+ return 0;
+}
+
+static int leapraid_setup_legacy_int(struct leapraid_adapter *adapter)
+{
+ int rc;
+
+ adapter->notification_desc.int_rqs[0].rq.adapter = adapter;
+ adapter->notification_desc.int_rqs[0].rq.msix_idx = 0;
+ atomic_set(&adapter->notification_desc.int_rqs[0].rq.busy, 0);
+ snprintf(adapter->notification_desc.int_rqs[0].rq.name,
+ LEAPRAID_NAME_LENGTH, "%s%d-LegacyInt",
+ LEAPRAID_DRIVER_NAME, adapter->adapter_attr.id);
+
+ rc = pci_alloc_irq_vectors_affinity(
+ adapter->pdev,
+ adapter->notification_desc.iopoll_qdex,
+ adapter->notification_desc.iopoll_qdex,
+ PCI_IRQ_LEGACY | PCI_IRQ_AFFINITY,
+ NULL);
+ if (rc < 0) {
+		dev_err(&adapter->pdev->dev,
+			"legacy irq allocation failed!\n");
+ return rc;
+ }
+
+ rc = request_irq(pci_irq_vector(adapter->pdev, 0),
+ leapraid_irq_handler,
+ IRQF_SHARED,
+ adapter->notification_desc.int_rqs[0].rq.name,
+ &adapter->notification_desc.int_rqs[0].rq);
+ if (rc) {
+ irq_set_affinity_hint(pci_irq_vector(adapter->pdev, 0), NULL);
+ pci_free_irq_vectors(adapter->pdev);
+ dev_err(&adapter->pdev->dev,
+			"Legacy Int: request_irq %s failed!\n",
+ adapter->notification_desc.int_rqs[0].rq.name);
+ return -EBUSY;
+ }
+ adapter->notification_desc.int_rqs_allocated = 1;
+ return rc;
+}
+
+static int leapraid_set_legacy_int(struct leapraid_adapter *adapter)
+{
+ int rc;
+
+ adapter->notification_desc.msix_cpu_map_sz = num_online_cpus();
+ adapter->notification_desc.msix_cpu_map =
+ kzalloc(adapter->notification_desc.msix_cpu_map_sz,
+ GFP_KERNEL);
+ if (!adapter->notification_desc.msix_cpu_map)
+ return -ENOMEM;
+
+ adapter->adapter_attr.rq_cnt = 1;
+ adapter->notification_desc.iopoll_qdex =
+ adapter->adapter_attr.rq_cnt;
+ adapter->notification_desc.iopoll_qcnt = 0;
+ dev_info(&adapter->pdev->dev,
+ "Legacy Intr: req queue cnt=%d, intr=%d/poll=%d rep queues!\n",
+ adapter->adapter_attr.rq_cnt,
+ adapter->notification_desc.iopoll_qdex,
+ adapter->notification_desc.iopoll_qcnt);
+ adapter->notification_desc.int_rqs =
+ kcalloc(adapter->notification_desc.iopoll_qdex,
+ sizeof(struct leapraid_int_rq), GFP_KERNEL);
+ if (!adapter->notification_desc.int_rqs) {
+ dev_err(&adapter->pdev->dev,
+ "Legacy Intr: allocate %d intr rep queues failed!\n",
+ adapter->notification_desc.iopoll_qdex);
+ return -ENOMEM;
+ }
+
+ rc = leapraid_setup_legacy_int(adapter);
+
+ return rc;
+}
+
+static int leapraid_set_msix(struct leapraid_adapter *adapter)
+{
+ int iopoll_qcnt = 0;
+ unsigned int i;
+ int rc, msix_cnt;
+
+ if (msix_disable == 1)
+ goto legacy_int;
+
+ msix_cnt = leapraid_msix_cnt(adapter->pdev);
+ if (msix_cnt <= 0) {
+ dev_info(&adapter->pdev->dev, "msix unsupported!\n");
+ goto legacy_int;
+ }
+
+ if (reset_devices)
+ adapter->adapter_attr.rq_cnt = 1;
+ else
+ adapter->adapter_attr.rq_cnt = min_t(int,
+ num_online_cpus(),
+ msix_cnt);
+
+ if (max_msix_vectors > 0)
+ adapter->adapter_attr.rq_cnt = min_t(
+ int, max_msix_vectors, adapter->adapter_attr.rq_cnt);
+
+ if (iopoll_qcnt) {
+ adapter->notification_desc.blk_mq_poll_rqs =
+ kcalloc(iopoll_qcnt,
+ sizeof(struct leapraid_blk_mq_poll_rq),
+ GFP_KERNEL);
+ if (!adapter->notification_desc.blk_mq_poll_rqs)
+ return -ENOMEM;
+ adapter->adapter_attr.rq_cnt =
+ min(adapter->adapter_attr.rq_cnt + iopoll_qcnt,
+ msix_cnt);
+ }
+
+ adapter->notification_desc.iopoll_qdex =
+ adapter->adapter_attr.rq_cnt - iopoll_qcnt;
+
+ adapter->notification_desc.iopoll_qcnt = iopoll_qcnt;
+ dev_info(&adapter->pdev->dev,
+ "MSIx: req queue cnt=%d, intr=%d/poll=%d rep queues!\n",
+ adapter->adapter_attr.rq_cnt,
+ adapter->notification_desc.iopoll_qdex,
+ adapter->notification_desc.iopoll_qcnt);
+
+ adapter->notification_desc.int_rqs =
+ kcalloc(adapter->notification_desc.iopoll_qdex,
+ sizeof(struct leapraid_int_rq), GFP_KERNEL);
+ if (!adapter->notification_desc.int_rqs) {
+ dev_err(&adapter->pdev->dev,
+ "MSIx: allocate %d interrupt reply queues failed!\n",
+ adapter->notification_desc.iopoll_qdex);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qcnt; i++) {
+ adapter->notification_desc.blk_mq_poll_rqs[i].rq.adapter =
+ adapter;
+ adapter->notification_desc.blk_mq_poll_rqs[i].rq.msix_idx =
+ i + adapter->notification_desc.iopoll_qdex;
+ atomic_set(&adapter->notification_desc.blk_mq_poll_rqs[i].rq.busy, 0);
+ snprintf(adapter->notification_desc.blk_mq_poll_rqs[i].rq.name,
+ LEAPRAID_NAME_LENGTH,
+ "%s%u-MQ-Poll%u", LEAPRAID_DRIVER_NAME,
+ adapter->adapter_attr.id, i);
+ atomic_set(&adapter->notification_desc.blk_mq_poll_rqs[i].busy, 0);
+ atomic_set(&adapter->notification_desc.blk_mq_poll_rqs[i].pause, 0);
+ }
+
+ adapter->notification_desc.msix_cpu_map_sz =
+ num_online_cpus();
+ adapter->notification_desc.msix_cpu_map =
+ kzalloc(adapter->notification_desc.msix_cpu_map_sz,
+ GFP_KERNEL);
+ if (!adapter->notification_desc.msix_cpu_map)
+ return -ENOMEM;
+
+ adapter->notification_desc.msix_enable = true;
+ rc = leapraid_setup_irqs(adapter);
+ if (rc) {
+ leapraid_free_irq(adapter);
+ adapter->notification_desc.msix_enable = false;
+ goto legacy_int;
+ }
+
+ return 0;
+
+legacy_int:
+ rc = leapraid_set_legacy_int(adapter);
+
+ return rc;
+}
+
+static int leapraid_set_msi(struct leapraid_adapter *adapter)
+{
+ int iopoll_qcnt = 0;
+ unsigned int i;
+ int rc, msi_cnt;
+
+ if (msix_disable == 1)
+ goto legacy_int1;
+
+ msi_cnt = leapraid_msi_cnt(adapter->pdev);
+ if (msi_cnt <= 0) {
+		dev_info(&adapter->pdev->dev, "msi unsupported!\n");
+ goto legacy_int1;
+ }
+
+ if (reset_devices)
+ adapter->adapter_attr.rq_cnt = 1;
+ else
+ adapter->adapter_attr.rq_cnt = min_t(int,
+ num_online_cpus(),
+ msi_cnt);
+
+ if (max_msix_vectors > 0)
+ adapter->adapter_attr.rq_cnt = min_t(
+ int, max_msix_vectors, adapter->adapter_attr.rq_cnt);
+
+ if (iopoll_qcnt) {
+ adapter->notification_desc.blk_mq_poll_rqs =
+ kcalloc(iopoll_qcnt,
+ sizeof(struct leapraid_blk_mq_poll_rq),
+ GFP_KERNEL);
+ if (!adapter->notification_desc.blk_mq_poll_rqs)
+ return -ENOMEM;
+
+ adapter->adapter_attr.rq_cnt =
+ min(adapter->adapter_attr.rq_cnt + iopoll_qcnt,
+ msi_cnt);
+ }
+
+ adapter->notification_desc.iopoll_qdex =
+ adapter->adapter_attr.rq_cnt - iopoll_qcnt;
+ rc = pci_alloc_irq_vectors_affinity(
+ adapter->pdev,
+ 1,
+ adapter->notification_desc.iopoll_qdex,
+ PCI_IRQ_MSI | PCI_IRQ_AFFINITY, NULL);
+ if (rc < 0) {
+		dev_err(&adapter->pdev->dev,
+			"failed to allocate %d MSI vectors!\n",
+			adapter->notification_desc.iopoll_qdex);
+ goto legacy_int1;
+ }
+ if (rc != adapter->notification_desc.iopoll_qdex) {
+ adapter->notification_desc.iopoll_qdex = rc;
+ adapter->adapter_attr.rq_cnt =
+ adapter->notification_desc.iopoll_qdex + iopoll_qcnt;
+ }
+ adapter->notification_desc.iopoll_qcnt = iopoll_qcnt;
+ dev_info(&adapter->pdev->dev,
+ "MSI: req queue cnt=%d, intr=%d/poll=%d rep queues!\n",
+ adapter->adapter_attr.rq_cnt,
+ adapter->notification_desc.iopoll_qdex,
+ adapter->notification_desc.iopoll_qcnt);
+
+ adapter->notification_desc.int_rqs =
+ kcalloc(adapter->notification_desc.iopoll_qdex,
+ sizeof(struct leapraid_int_rq),
+ GFP_KERNEL);
+ if (!adapter->notification_desc.int_rqs) {
+ dev_err(&adapter->pdev->dev,
+ "MSI: allocate %d interrupt reply queues failed!\n",
+ adapter->notification_desc.iopoll_qdex);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qcnt; i++) {
+ adapter->notification_desc.blk_mq_poll_rqs[i].rq.adapter =
+ adapter;
+ adapter->notification_desc.blk_mq_poll_rqs[i].rq.msix_idx =
+ i + adapter->notification_desc.iopoll_qdex;
+ atomic_set(
+ &adapter->notification_desc.blk_mq_poll_rqs[i].rq.busy,
+ 0);
+ snprintf(adapter->notification_desc.blk_mq_poll_rqs[i].rq.name,
+ LEAPRAID_NAME_LENGTH,
+ "%s%u-MQ-Poll%u", LEAPRAID_DRIVER_NAME,
+ adapter->adapter_attr.id, i);
+ atomic_set(
+ &adapter->notification_desc.blk_mq_poll_rqs[i].busy,
+ 0);
+ atomic_set(
+ &adapter->notification_desc.blk_mq_poll_rqs[i].pause,
+ 0);
+ }
+
+ adapter->notification_desc.msix_cpu_map_sz = num_online_cpus();
+ adapter->notification_desc.msix_cpu_map =
+ kzalloc(adapter->notification_desc.msix_cpu_map_sz,
+ GFP_KERNEL);
+ if (!adapter->notification_desc.msix_cpu_map)
+ return -ENOMEM;
+
+ adapter->notification_desc.msix_enable = true;
+ rc = leapraid_setup_irqs(adapter);
+ if (rc) {
+ leapraid_free_irq(adapter);
+ adapter->notification_desc.msix_enable = false;
+ goto legacy_int1;
+ }
+
+ return 0;
+
+legacy_int1:
+ rc = leapraid_set_legacy_int(adapter);
+
+ return rc;
+}
+
+static int leapraid_set_notification(struct leapraid_adapter *adapter)
+{
+ int rc = 0;
+
+ if (interrupt_mode == 0) {
+ rc = leapraid_set_msix(adapter);
+ if (rc)
+ pr_err("%s enable MSI-X irq failed!\n", __func__);
+ } else if (interrupt_mode == 1) {
+ rc = leapraid_set_msi(adapter);
+ if (rc)
+ pr_err("%s enable MSI irq failed!\n", __func__);
+ } else if (interrupt_mode == 2) {
+ rc = leapraid_set_legacy_int(adapter);
+ if (rc)
+ pr_err("%s enable legacy irq failed!\n", __func__);
+ }
+
+ return rc;
+}
+
+static void leapraid_disable_pcie_and_notification(
+ struct leapraid_adapter *adapter)
+{
+ leapraid_free_irq(adapter);
+ leapraid_disable_pcie(adapter);
+}
+
+int leapraid_set_pcie_and_notification(struct leapraid_adapter *adapter)
+{
+ int rc;
+
+ rc = leapraid_enable_pcie(adapter);
+ if (rc)
+ goto out_fail;
+
+ leapraid_mask_int(adapter);
+
+ rc = leapraid_set_notification(adapter);
+ if (rc)
+ goto out_fail;
+
+ pci_save_state(adapter->pdev);
+
+ return 0;
+
+out_fail:
+ leapraid_disable_pcie_and_notification(adapter);
+ return rc;
+}
+
+void leapraid_disable_controller(struct leapraid_adapter *adapter)
+{
+ if (!adapter->iomem_base)
+ return;
+
+ leapraid_mask_int(adapter);
+
+ adapter->access_ctrl.shost_recovering = true;
+ leapraid_make_adapter_ready(adapter, PART_RESET);
+ adapter->access_ctrl.shost_recovering = false;
+
+ leapraid_disable_pcie_and_notification(adapter);
+}
+
+static int leapraid_adapter_unit_reset(struct leapraid_adapter *adapter)
+{
+ int rc = 0;
+
+ dev_info(&adapter->pdev->dev, "fire unit reset\n");
+ writel(LEAPRAID_FUNC_ADAPTER_UNIT_RESET << LEAPRAID_DB_FUNC_SHIFT,
+ &adapter->iomem_base->db);
+ if (leapraid_db_wait_ack_and_clear_int(adapter))
+ rc = -EFAULT;
+
+ if (!leapraid_wait_adapter_ready(adapter)) {
+ rc = -EFAULT;
+ goto out;
+ }
+out:
+ dev_info(&adapter->pdev->dev, "unit reset: %s\n",
+ ((rc == 0) ? "SUCCESS" : "FAILED"));
+ return rc;
+}
+
+static int leapraid_make_adapter_ready(struct leapraid_adapter *adapter,
+ enum reset_type type)
+{
+ u32 db;
+ int rc;
+ int count;
+
+ if (!leapraid_pci_active(adapter))
+ return 0;
+
+ count = 0;
+ db = leapraid_readl(&adapter->iomem_base->db);
+ if ((db & LEAPRAID_DB_MASK) == LEAPRAID_DB_RESET) {
+ while ((db & LEAPRAID_DB_MASK) != LEAPRAID_DB_READY) {
+ if (count++ == LEAPRAID_DB_RETRY_COUNT_MAX) {
+ dev_err(&adapter->pdev->dev,
+ "wait adapter ready timeout\n");
+ return -EFAULT;
+ }
+ ssleep(1);
+ db = leapraid_readl(&adapter->iomem_base->db);
+ dev_info(&adapter->pdev->dev,
+ "wait adapter ready, count=%d, db=0x%x\n",
+ count, db);
+ }
+ }
+ if ((db & LEAPRAID_DB_MASK) == LEAPRAID_DB_READY)
+ return 0;
+
+ if (db & LEAPRAID_DB_USED)
+ goto full_reset;
+
+ if ((db & LEAPRAID_DB_MASK) == LEAPRAID_DB_FAULT)
+ goto full_reset;
+
+ if (type == FULL_RESET)
+ goto full_reset;
+
+ if ((db & LEAPRAID_DB_MASK) == LEAPRAID_DB_OPERATIONAL)
+ if (!(leapraid_adapter_unit_reset(adapter)))
+ return 0;
+
+full_reset:
+ rc = leapraid_host_diag_reset(adapter);
+ return rc;
+}
+
+static void leapraid_fw_log_exit(struct leapraid_adapter *adapter)
+{
+ if (!adapter->fw_log_desc.open_pcie_trace)
+ return;
+
+ if (adapter->fw_log_desc.fw_log_buffer) {
+ dma_free_coherent(&adapter->pdev->dev,
+ (LEAPRAID_SYS_LOG_BUF_SIZE +
+ LEAPRAID_SYS_LOG_BUF_RESERVE),
+ adapter->fw_log_desc.fw_log_buffer,
+ adapter->fw_log_desc.fw_log_buffer_dma);
+ adapter->fw_log_desc.fw_log_buffer = NULL;
+ }
+}
+
+static int leapraid_fw_log_init(struct leapraid_adapter *adapter)
+{
+ struct leapraid_adapter_log_req adapter_log_req;
+ struct leapraid_adapter_log_rep adapter_log_rep;
+ u16 adapter_status;
+ u64 buf_addr;
+ u32 rc;
+
+ if (!adapter->fw_log_desc.open_pcie_trace)
+ return 0;
+
+ if (!adapter->fw_log_desc.fw_log_buffer) {
+ adapter->fw_log_desc.fw_log_buffer =
+ dma_alloc_coherent(
+ &adapter->pdev->dev,
+ (LEAPRAID_SYS_LOG_BUF_SIZE +
+ LEAPRAID_SYS_LOG_BUF_RESERVE),
+ &adapter->fw_log_desc.fw_log_buffer_dma,
+ GFP_KERNEL);
+ if (!adapter->fw_log_desc.fw_log_buffer) {
+ dev_err(&adapter->pdev->dev,
+ "%s: log buf alloc failed.\n",
+ __func__);
+ return -ENOMEM;
+ }
+ }
+
+ memset(&adapter_log_req, 0, sizeof(struct leapraid_adapter_log_req));
+ adapter_log_req.func = LEAPRAID_FUNC_LOGBUF_INIT;
+ buf_addr = adapter->fw_log_desc.fw_log_buffer_dma;
+
+ adapter_log_req.mbox.w[0] =
+ cpu_to_le32((u32)(buf_addr & 0xFFFFFFFF));
+ adapter_log_req.mbox.w[1] =
+ cpu_to_le32((u32)((buf_addr >> 32) & 0xFFFFFFFF));
+ adapter_log_req.mbox.w[2] =
+ cpu_to_le32(LEAPRAID_SYS_LOG_BUF_SIZE);
+ rc = leapraid_handshake_func(adapter,
+ sizeof(struct leapraid_adapter_log_req),
+ (u32 *)&adapter_log_req,
+ sizeof(struct leapraid_adapter_log_rep),
+ (u16 *)&adapter_log_rep);
+ if (rc != 0) {
+ dev_err(&adapter->pdev->dev, "%s: handshake failed, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ adapter_status = le16_to_cpu(adapter_log_rep.adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (adapter_status != LEAPRAID_ADAPTER_STATUS_SUCCESS) {
+ dev_err(&adapter->pdev->dev, "%s: failed!\n", __func__);
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
+static void leapraid_free_host_memory(struct leapraid_adapter *adapter)
+{
+ unsigned int i;
+
+ if (adapter->mem_desc.task_desc) {
+ dma_free_coherent(&adapter->pdev->dev,
+ adapter->adapter_attr.task_desc_dma_size,
+ adapter->mem_desc.task_desc,
+ adapter->mem_desc.task_desc_dma);
+ adapter->mem_desc.task_desc = NULL;
+ }
+
+ if (adapter->mem_desc.sense_data) {
+ dma_free_coherent(
+ &adapter->pdev->dev,
+ adapter->adapter_attr.io_qd * SCSI_SENSE_BUFFERSIZE,
+ adapter->mem_desc.sense_data,
+ adapter->mem_desc.sense_data_dma);
+ adapter->mem_desc.sense_data = NULL;
+ }
+
+ if (adapter->mem_desc.rep_msg) {
+ dma_free_coherent(
+ &adapter->pdev->dev,
+ adapter->adapter_attr.rep_msg_qd * LEAPRAID_REPLY_SIEZ,
+ adapter->mem_desc.rep_msg,
+ adapter->mem_desc.rep_msg_dma);
+ adapter->mem_desc.rep_msg = NULL;
+ }
+
+ if (adapter->mem_desc.rep_msg_addr) {
+ dma_free_coherent(&adapter->pdev->dev,
+ adapter->adapter_attr.rep_msg_qd *
+ LEAPRAID_REP_MSG_ADDR_SIZE,
+ adapter->mem_desc.rep_msg_addr,
+ adapter->mem_desc.rep_msg_addr_dma);
+ adapter->mem_desc.rep_msg_addr = NULL;
+ }
+
+ if (adapter->mem_desc.rep_desc_seg_maint) {
+ for (i = 0; i < adapter->adapter_attr.rep_desc_q_seg_cnt;
+ i++) {
+ if (adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg) {
+ dma_free_coherent(
+ &adapter->pdev->dev,
+ (adapter->adapter_attr.rep_desc_qd *
+ LEAPRAID_REP_DESC_ENTRY_SIZE) *
+ LEAPRAID_REP_DESC_CHUNK_SIZE,
+ adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg,
+ adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg_dma);
+ adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg = NULL;
+ }
+ }
+
+ if (adapter->mem_desc.rep_desc_q_arr) {
+ dma_free_coherent(
+ &adapter->pdev->dev,
+ adapter->adapter_attr.rq_cnt *
+ LEAPRAID_REP_RQ_CNT_SIZE,
+ adapter->mem_desc.rep_desc_q_arr,
+ adapter->mem_desc.rep_desc_q_arr_dma);
+ adapter->mem_desc.rep_desc_q_arr = NULL;
+ }
+
+ for (i = 0; i < adapter->adapter_attr.rep_desc_q_seg_cnt; i++)
+ kfree(adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_maint);
+ kfree(adapter->mem_desc.rep_desc_seg_maint);
+ }
+
+ if (adapter->mem_desc.io_tracker) {
+ for (i = 0; i < (unsigned int)adapter->shost->can_queue; i++)
+ leapraid_internal_exit_cmd_priv(
+ adapter,
+ adapter->mem_desc.io_tracker + i);
+ kfree(adapter->mem_desc.io_tracker);
+ adapter->mem_desc.io_tracker = NULL;
+ }
+
+ dma_pool_destroy(adapter->mem_desc.sg_chain_pool);
+}
+
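+/*
+ * Check that a DMA buffer does not cross a 4 GB boundary, i.e. its first
+ * and last bytes share the same upper 32 address bits; the sense data
+ * and reply buffers below require this.
+ */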
+static inline bool leapraid_is_in_same_4g_seg(dma_addr_t start, u32 size)
+{
+ return (upper_32_bits(start) == upper_32_bits(start + size - 1));
+}
+
+int leapraid_internal_init_cmd_priv(struct leapraid_adapter *adapter,
+ struct leapraid_io_req_tracker *io_tracker)
+{
+ io_tracker->chain =
+ dma_pool_alloc(adapter->mem_desc.sg_chain_pool,
+ GFP_KERNEL,
+ &io_tracker->chain_dma);
+
+ if (!io_tracker->chain)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int leapraid_internal_exit_cmd_priv(struct leapraid_adapter *adapter,
+ struct leapraid_io_req_tracker *io_tracker)
+{
+ if (io_tracker && io_tracker->chain)
+ dma_pool_free(adapter->mem_desc.sg_chain_pool,
+ io_tracker->chain,
+ io_tracker->chain_dma);
+
+ return 0;
+}
+
+static int leapraid_request_host_memory(struct leapraid_adapter *adapter)
+{
+ struct leapraid_adapter_features *facts =
+ &adapter->adapter_attr.features;
+ u16 rep_desc_q_cnt_allocated;
+ unsigned int i, j;
+ int rc;
+
+ /* sg table size */
+ adapter->shost->sg_tablesize = LEAPRAID_SG_DEPTH;
+ if (reset_devices)
+ adapter->shost->sg_tablesize =
+ LEAPRAID_KDUMP_MIN_PHYS_SEGMENTS;
+	/* high priority cmds queue depth (fixed) */
+	adapter->dynamic_task_desc.hp_cmd_qd = LEAPRAID_FIXED_HP_CMDS;
+ /* internal cmds queue depth */
+ adapter->dynamic_task_desc.inter_cmd_qd = LEAPRAID_FIXED_INTER_CMDS;
+ /* adapter cmds total queue depth */
+ if (reset_devices)
+ adapter->adapter_attr.adapter_total_qd =
+ LEAPRAID_DEFAULT_CMD_QD_OFFSET +
+ adapter->dynamic_task_desc.inter_cmd_qd +
+ adapter->dynamic_task_desc.hp_cmd_qd;
+ else
+ adapter->adapter_attr.adapter_total_qd = facts->req_slot +
+ adapter->dynamic_task_desc.hp_cmd_qd;
+ /* reply message queue depth */
+ adapter->adapter_attr.rep_msg_qd =
+ adapter->adapter_attr.adapter_total_qd +
+ LEAPRAID_DEFAULT_CMD_QD_OFFSET;
+ /* reply descriptor queue depth */
+ adapter->adapter_attr.rep_desc_qd =
+ round_up(adapter->adapter_attr.adapter_total_qd +
+ adapter->adapter_attr.rep_msg_qd +
+ LEAPRAID_TASKID_OFFSET_CTRL_CMD,
+ LEAPRAID_REPLY_QD_ALIGNMENT);
+ /* scsi cmd io depth */
+ adapter->adapter_attr.io_qd =
+ adapter->adapter_attr.adapter_total_qd -
+ adapter->dynamic_task_desc.hp_cmd_qd -
+ adapter->dynamic_task_desc.inter_cmd_qd;
+ /* scsi host can queue */
+ adapter->shost->can_queue = adapter->adapter_attr.io_qd -
+ LEAPRAID_TASKID_OFFSET_SCSIIO_CMD;
+ adapter->driver_cmds.ctl_cmd.taskid = adapter->shost->can_queue +
+ LEAPRAID_TASKID_OFFSET_CTRL_CMD;
+ adapter->driver_cmds.driver_scsiio_cmd.taskid =
+ adapter->shost->can_queue +
+ LEAPRAID_TASKID_OFFSET_SCSIIO_CMD;
+
+ /* allocate task descriptor */
+try_again:
+ adapter->adapter_attr.task_desc_dma_size =
+ (adapter->adapter_attr.adapter_total_qd +
+ LEAPRAID_TASKID_OFFSET_CTRL_CMD) *
+ LEAPRAID_REQUEST_SIZE;
+ adapter->mem_desc.task_desc =
+ dma_alloc_coherent(&adapter->pdev->dev,
+ adapter->adapter_attr.task_desc_dma_size,
+ &adapter->mem_desc.task_desc_dma,
+ GFP_KERNEL);
+ if (!adapter->mem_desc.task_desc) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate task descriptor DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ /* allocate chain message pool */
+ adapter->mem_desc.sg_chain_pool_size =
+ LEAPRAID_DEFAULT_CHAINS_PER_IO * LEAPRAID_CHAIN_SEG_SIZE;
+ adapter->mem_desc.sg_chain_pool =
+ dma_pool_create("leapraid chain pool",
+ &adapter->pdev->dev,
+ adapter->mem_desc.sg_chain_pool_size, 16, 0);
+ if (!adapter->mem_desc.sg_chain_pool) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate chain message DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* allocate io tracker to ref scsi io */
+ adapter->mem_desc.io_tracker =
+ kcalloc(adapter->shost->can_queue,
+ sizeof(struct leapraid_io_req_tracker),
+ GFP_KERNEL);
+ if (!adapter->mem_desc.io_tracker) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; (int)i < adapter->shost->can_queue; i++) {
+ rc = leapraid_internal_init_cmd_priv(
+ adapter,
+ adapter->mem_desc.io_tracker + i);
+ if (rc)
+ goto out;
+ }
+
+ adapter->dynamic_task_desc.hp_taskid =
+ adapter->adapter_attr.io_qd +
+ LEAPRAID_HP_TASKID_OFFSET_CTL_CMD;
+ /* allocate static hp taskid */
+ adapter->driver_cmds.ctl_cmd.hp_taskid =
+ adapter->dynamic_task_desc.hp_taskid;
+ adapter->driver_cmds.tm_cmd.hp_taskid =
+ adapter->dynamic_task_desc.hp_taskid +
+ LEAPRAID_HP_TASKID_OFFSET_TM_CMD;
+
+ adapter->dynamic_task_desc.inter_taskid =
+ adapter->dynamic_task_desc.hp_taskid +
+ adapter->dynamic_task_desc.hp_cmd_qd;
+ adapter->driver_cmds.scan_dev_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid;
+ adapter->driver_cmds.cfg_op_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid +
+ LEAPRAID_TASKID_OFFSET_CFG_OP_CMD;
+ adapter->driver_cmds.transport_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid +
+ LEAPRAID_TASKID_OFFSET_TRANSPORT_CMD;
+ adapter->driver_cmds.timestamp_sync_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid +
+ LEAPRAID_TASKID_OFFSET_TIMESTAMP_SYNC_CMD;
+ adapter->driver_cmds.raid_action_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid +
+ LEAPRAID_TASKID_OFFSET_RAID_ACTION_CMD;
+ adapter->driver_cmds.enc_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid +
+ LEAPRAID_TASKID_OFFSET_ENC_CMD;
+ adapter->driver_cmds.notify_event_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid +
+ LEAPRAID_TASKID_OFFSET_NOTIFY_EVENT_CMD;
+ dev_info(&adapter->pdev->dev, "queue depth:\n");
+ dev_info(&adapter->pdev->dev, " host->can_queue: %d\n",
+ adapter->shost->can_queue);
+ dev_info(&adapter->pdev->dev, " io_qd: %d\n",
+ adapter->adapter_attr.io_qd);
+ dev_info(&adapter->pdev->dev, " hpr_cmd_qd: %d\n",
+ adapter->dynamic_task_desc.hp_cmd_qd);
+ dev_info(&adapter->pdev->dev, " inter_cmd_qd: %d\n",
+ adapter->dynamic_task_desc.inter_cmd_qd);
+ dev_info(&adapter->pdev->dev, " adapter_total_qd: %d\n",
+ adapter->adapter_attr.adapter_total_qd);
+
+ dev_info(&adapter->pdev->dev, "taskid range:\n");
+ dev_info(&adapter->pdev->dev,
+ " adapter->dynamic_task_desc.hp_taskid: %d\n",
+ adapter->dynamic_task_desc.hp_taskid);
+ dev_info(&adapter->pdev->dev,
+ " adapter->dynamic_task_desc.inter_taskid: %d\n",
+ adapter->dynamic_task_desc.inter_taskid);
+
+ /*
+ * allocate sense dma, driver maintain
+ * need in same 4GB segment
+ */
+ adapter->mem_desc.sense_data =
+ dma_alloc_coherent(
+ &adapter->pdev->dev,
+ adapter->adapter_attr.io_qd * SCSI_SENSE_BUFFERSIZE,
+ &adapter->mem_desc.sense_data_dma, GFP_KERNEL);
+ if (!adapter->mem_desc.sense_data) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate sense data DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (!leapraid_is_in_same_4g_seg(adapter->mem_desc.sense_data_dma,
+ adapter->adapter_attr.io_qd *
+ SCSI_SENSE_BUFFERSIZE)) {
+		dev_warn(&adapter->pdev->dev,
+			 "trying 32-bit DMA: sense data is not in the same 4GB segment!\n");
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ /* reply frame, need in same 4GB segment */
+ adapter->mem_desc.rep_msg =
+ dma_alloc_coherent(&adapter->pdev->dev,
+ adapter->adapter_attr.rep_msg_qd *
+ LEAPRAID_REPLY_SIEZ,
+ &adapter->mem_desc.rep_msg_dma,
+ GFP_KERNEL);
+ if (!adapter->mem_desc.rep_msg) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate reply message DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (!leapraid_is_in_same_4g_seg(adapter->mem_desc.rep_msg_dma,
+ adapter->adapter_attr.rep_msg_qd *
+ LEAPRAID_REPLY_SIEZ)) {
+		dev_warn(&adapter->pdev->dev,
+			 "trying 32-bit DMA: reply msg is not in the same 4GB segment!\n");
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ /* address of reply frame */
+ adapter->mem_desc.rep_msg_addr =
+ dma_alloc_coherent(&adapter->pdev->dev,
+ adapter->adapter_attr.rep_msg_qd *
+ LEAPRAID_REP_MSG_ADDR_SIZE,
+ &adapter->mem_desc.rep_msg_addr_dma,
+ GFP_KERNEL);
+ if (!adapter->mem_desc.rep_msg_addr) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate reply message address DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ adapter->adapter_attr.rep_desc_q_seg_cnt =
+ DIV_ROUND_UP(adapter->adapter_attr.rq_cnt,
+ LEAPRAID_REP_DESC_CHUNK_SIZE);
+ adapter->mem_desc.rep_desc_seg_maint =
+ kcalloc(adapter->adapter_attr.rep_desc_q_seg_cnt,
+ sizeof(struct leapraid_rep_desc_seg_maint),
+ GFP_KERNEL);
+ if (!adapter->mem_desc.rep_desc_seg_maint) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rep_desc_q_cnt_allocated = 0;
+ for (i = 0; i < adapter->adapter_attr.rep_desc_q_seg_cnt; i++) {
+ adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_maint =
+ kcalloc(LEAPRAID_REP_DESC_CHUNK_SIZE,
+ sizeof(struct leapraid_rep_desc_maint),
+ GFP_KERNEL);
+ if (!adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_maint) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg =
+ dma_alloc_coherent(
+ &adapter->pdev->dev,
+ (adapter->adapter_attr.rep_desc_qd *
+ LEAPRAID_REP_DESC_ENTRY_SIZE) *
+ LEAPRAID_REP_DESC_CHUNK_SIZE,
+ &adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg_dma,
+ GFP_KERNEL);
+ if (!adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate reply descriptor segment DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (j = 0; j < LEAPRAID_REP_DESC_CHUNK_SIZE; j++) {
+ if (rep_desc_q_cnt_allocated >=
+ adapter->adapter_attr.rq_cnt)
+ break;
+ adapter->mem_desc
+ .rep_desc_seg_maint[i]
+ .rep_desc_maint[j]
+ .rep_desc =
+ (void *)((u8 *)(
+ adapter->mem_desc
+ .rep_desc_seg_maint[i]
+ .rep_desc_seg) +
+ j *
+ (adapter->adapter_attr.rep_desc_qd *
+ LEAPRAID_REP_DESC_ENTRY_SIZE));
+ adapter->mem_desc
+ .rep_desc_seg_maint[i]
+ .rep_desc_maint[j]
+ .rep_desc_dma =
+ adapter->mem_desc
+ .rep_desc_seg_maint[i]
+ .rep_desc_seg_dma +
+ j *
+ (adapter->adapter_attr.rep_desc_qd *
+ LEAPRAID_REP_DESC_ENTRY_SIZE);
+ rep_desc_q_cnt_allocated++;
+ }
+ }
+
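+	/*
+	 * The reply descriptor queue address array (RDPQ mode) is only
+	 * needed when not booting with reset_devices (e.g. kdump).
+	 */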
+ if (!reset_devices) {
+ adapter->mem_desc.rep_desc_q_arr =
+ dma_alloc_coherent(
+ &adapter->pdev->dev,
+ adapter->adapter_attr.rq_cnt *
+ LEAPRAID_REP_RQ_CNT_SIZE,
+ &adapter->mem_desc.rep_desc_q_arr_dma,
+ GFP_KERNEL);
+ if (!adapter->mem_desc.rep_desc_q_arr) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate reply descriptor queue array DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ }
+
+ return 0;
+out:
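+	/* on -EAGAIN, retry all allocations under a 32-bit DMA mask */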
+ if (rc == -EAGAIN) {
+ leapraid_free_host_memory(adapter);
+ adapter->adapter_attr.use_32_dma_mask = true;
+ rc = dma_set_mask_and_coherent(&adapter->pdev->dev,
+ DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(&adapter->pdev->dev,
+ "failed to set 32 DMA mask\n");
+ return rc;
+ }
+ goto try_again;
+ }
+ return rc;
+}
+
+static int leapraid_alloc_dev_topo_bitmaps(struct leapraid_adapter *adapter)
+{
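+	/* one bit per device handle, rounded up to whole bytes */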
+	adapter->dev_topo.pd_hdls_sz =
+		DIV_ROUND_UP(adapter->adapter_attr.features.max_dev_handle,
+			     LEAPRAID_BITS_PER_BYTE);
+ adapter->dev_topo.pd_hdls =
+ kzalloc(adapter->dev_topo.pd_hdls_sz, GFP_KERNEL);
+ if (!adapter->dev_topo.pd_hdls)
+ return -ENOMEM;
+
+ adapter->dev_topo.blocking_hdls =
+ kzalloc(adapter->dev_topo.pd_hdls_sz, GFP_KERNEL);
+ if (!adapter->dev_topo.blocking_hdls)
+ return -ENOMEM;
+
+	adapter->dev_topo.pending_dev_add_sz =
+		DIV_ROUND_UP(adapter->adapter_attr.features.max_dev_handle,
+			     LEAPRAID_BITS_PER_BYTE);
+ adapter->dev_topo.pending_dev_add =
+ kzalloc(adapter->dev_topo.pending_dev_add_sz, GFP_KERNEL);
+ if (!adapter->dev_topo.pending_dev_add)
+ return -ENOMEM;
+
+ adapter->dev_topo.dev_removing_sz =
+ adapter->dev_topo.pending_dev_add_sz;
+ adapter->dev_topo.dev_removing =
+ kzalloc(adapter->dev_topo.dev_removing_sz, GFP_KERNEL);
+ if (!adapter->dev_topo.dev_removing)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void leapraid_free_dev_topo_bitmaps(struct leapraid_adapter *adapter)
+{
+ kfree(adapter->dev_topo.pd_hdls);
+ kfree(adapter->dev_topo.blocking_hdls);
+ kfree(adapter->dev_topo.pending_dev_add);
+ kfree(adapter->dev_topo.dev_removing);
+}
+
+static int leapraid_init_driver_cmds(struct leapraid_adapter *adapter)
+{
+ u32 buffer_size = 0;
+ void *buffer;
+
+ INIT_LIST_HEAD(&adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.scan_dev_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.scan_dev_cmd.cb_idx = LEAPRAID_SCAN_DEV_CB_IDX;
+ list_add_tail(&adapter->driver_cmds.scan_dev_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.cfg_op_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.cfg_op_cmd.cb_idx = LEAPRAID_CONFIG_CB_IDX;
+ mutex_init(&adapter->driver_cmds.cfg_op_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.cfg_op_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.transport_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.transport_cmd.cb_idx = LEAPRAID_TRANSPORT_CB_IDX;
+ mutex_init(&adapter->driver_cmds.transport_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.transport_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.timestamp_sync_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.timestamp_sync_cmd.cb_idx =
+ LEAPRAID_TIMESTAMP_SYNC_CB_IDX;
+ mutex_init(&adapter->driver_cmds.timestamp_sync_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.timestamp_sync_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.raid_action_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.raid_action_cmd.cb_idx =
+ LEAPRAID_RAID_ACTION_CB_IDX;
+ mutex_init(&adapter->driver_cmds.raid_action_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.raid_action_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.driver_scsiio_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.driver_scsiio_cmd.cb_idx =
+ LEAPRAID_DRIVER_SCSIIO_CB_IDX;
+ mutex_init(&adapter->driver_cmds.driver_scsiio_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.driver_scsiio_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ buffer_size = sizeof(struct scsi_cmnd) +
+ sizeof(struct leapraid_io_req_tracker) +
+ SCSI_SENSE_BUFFERSIZE +
+ sizeof(struct scatterlist);
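+	/* plus 32 trailing bytes that back internal_scmd->cmnd (the CDB) */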
+ buffer_size += 32;
+ buffer = kzalloc(buffer_size, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
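+	/* carve the single allocation into scmd, sense, SGL and CDB regions */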
+ adapter->driver_cmds.internal_scmd = buffer;
+ buffer = (void *)((u8 *)buffer +
+ sizeof(struct scsi_cmnd) +
+ sizeof(struct leapraid_io_req_tracker));
+ adapter->driver_cmds.internal_scmd->sense_buffer =
+ (unsigned char *)buffer;
+ buffer = (void *)((u8 *)buffer + SCSI_SENSE_BUFFERSIZE);
+ adapter->driver_cmds.internal_scmd->sdb.table.sgl =
+ (struct scatterlist *)buffer;
+ buffer = (void *)((u8 *)buffer + sizeof(struct scatterlist));
+ adapter->driver_cmds.internal_scmd->cmnd = buffer;
+ adapter->driver_cmds.internal_scmd->host_scribble =
+ (unsigned char *)(adapter->driver_cmds.internal_scmd + 1);
+
+ adapter->driver_cmds.enc_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.enc_cmd.cb_idx = LEAPRAID_ENC_CB_IDX;
+ mutex_init(&adapter->driver_cmds.enc_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.enc_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.notify_event_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.notify_event_cmd.cb_idx =
+ LEAPRAID_NOTIFY_EVENT_CB_IDX;
+ mutex_init(&adapter->driver_cmds.notify_event_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.notify_event_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.ctl_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.ctl_cmd.cb_idx = LEAPRAID_CTL_CB_IDX;
+ mutex_init(&adapter->driver_cmds.ctl_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.ctl_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.tm_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.tm_cmd.cb_idx = LEAPRAID_TM_CB_IDX;
+ mutex_init(&adapter->driver_cmds.tm_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.tm_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ return 0;
+}
+
+static void leapraid_unmask_evts(struct leapraid_adapter *adapter, u16 evt)
+{
+ if (evt >= LEAPRAID_MAX_EVENT_NUM)
+ return;
+
+ clear_bit(evt, (unsigned long *)adapter->fw_evt_s.leapraid_evt_masks);
+}
+
+static void leapraid_init_event_mask(struct leapraid_adapter *adapter)
+{
+ int i;
+
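+	/* mask all events by default, then unmask those the driver handles */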
+ for (i = 0; i < LEAPRAID_EVT_MASK_COUNT; i++)
+ adapter->fw_evt_s.leapraid_evt_masks[i] = -1;
+ leapraid_unmask_evts(adapter, LEAPRAID_EVT_SAS_DISCOVERY);
+ leapraid_unmask_evts(adapter, LEAPRAID_EVT_SAS_TOPO_CHANGE_LIST);
+ leapraid_unmask_evts(adapter, LEAPRAID_EVT_SAS_ENCL_DEV_STATUS_CHANGE);
+ leapraid_unmask_evts(adapter, LEAPRAID_EVT_SAS_DEV_STATUS_CHANGE);
+ leapraid_unmask_evts(adapter, LEAPRAID_EVT_IR_CHANGE);
+}
+
+static void leapraid_prepare_adp_init_req(
+ struct leapraid_adapter *adapter,
+ struct leapraid_adapter_init_req *init_req)
+{
+ ktime_t cur_time;
+ int i;
+ u32 reply_post_free_ary_sz;
+
+ memset(init_req, 0, sizeof(struct leapraid_adapter_init_req));
+ init_req->func = LEAPRAID_FUNC_ADAPTER_INIT;
+ init_req->who_init = LEAPRAID_WHOINIT_LINUX_DRIVER;
+ init_req->msg_ver = cpu_to_le16(0x0100);
+ init_req->header_ver = cpu_to_le16(0x0000);
+
+ init_req->driver_ver = cpu_to_le32((LEAPRAID_MAJOR_VERSION << 24) |
+ (LEAPRAID_MINOR_VERSION << 16) |
+ (LEAPRAID_BUILD_VERSION << 8) |
+ LEAPRAID_RELEASE_VERSION);
+ if (adapter->notification_desc.msix_enable)
+ init_req->host_msix_vectors = adapter->adapter_attr.rq_cnt;
+
+ init_req->req_frame_size =
+ cpu_to_le16(LEAPRAID_REQUEST_SIZE / LEAPRAID_DWORDS_BYTE_SIZE);
+ init_req->rep_desc_qd =
+ cpu_to_le16(adapter->adapter_attr.rep_desc_qd);
+ init_req->rep_msg_qd =
+ cpu_to_le16(adapter->adapter_attr.rep_msg_qd);
+ init_req->sense_buffer_add_high =
+ cpu_to_le32((u64)adapter->mem_desc.sense_data_dma >> 32);
+ init_req->rep_msg_dma_high =
+ cpu_to_le32((u64)adapter->mem_desc.rep_msg_dma >> 32);
+ init_req->task_desc_base_addr =
+ cpu_to_le64((u64)adapter->mem_desc.task_desc_dma);
+ init_req->rep_msg_addr_dma =
+ cpu_to_le64((u64)adapter->mem_desc.rep_msg_addr_dma);
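+	/*
+	 * Normal boots describe the reply descriptor queues with an RDPQ
+	 * array; under reset_devices a single contiguous region is used.
+	 */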
+ if (!reset_devices) {
+ reply_post_free_ary_sz =
+ adapter->adapter_attr.rq_cnt * LEAPRAID_REP_RQ_CNT_SIZE;
+ memset(adapter->mem_desc.rep_desc_q_arr, 0,
+ reply_post_free_ary_sz);
+
+ for (i = 0; i < adapter->adapter_attr.rq_cnt; i++) {
+ adapter->mem_desc
+ .rep_desc_q_arr[i]
+ .rep_desc_base_addr =
+				cpu_to_le64(
+ (u64)adapter->mem_desc
+ .rep_desc_seg_maint[i /
+ LEAPRAID_REP_DESC_CHUNK_SIZE]
+ .rep_desc_maint[i %
+ LEAPRAID_REP_DESC_CHUNK_SIZE]
+ .rep_desc_dma);
+ }
+
+ init_req->msg_flg =
+ LEAPRAID_ADAPTER_INIT_MSGFLG_RDPQ_ARRAY_MODE;
+ init_req->rep_desc_q_arr_addr =
+ cpu_to_le64((u64)adapter->mem_desc.rep_desc_q_arr_dma);
+ } else {
+ init_req->rep_desc_q_arr_addr =
+ cpu_to_le64((u64)adapter->mem_desc
+ .rep_desc_seg_maint[0]
+ .rep_desc_maint[0]
+ .rep_desc_dma);
+ }
+ cur_time = ktime_get_real();
+ init_req->time_stamp = cpu_to_le64(ktime_to_ms(cur_time));
+}
+
+static int leapraid_send_adapter_init(struct leapraid_adapter *adapter)
+{
+ struct leapraid_adapter_init_req init_req;
+ struct leapraid_adapter_init_rep init_rep;
+ u16 adapter_status;
+ int rc = 0;
+
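+	/* adapter init is delivered through the synchronous handshake path */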
+ leapraid_prepare_adp_init_req(adapter, &init_req);
+
+ rc = leapraid_handshake_func(adapter,
+ sizeof(struct leapraid_adapter_init_req),
+ (u32 *)&init_req,
+ sizeof(struct leapraid_adapter_init_rep),
+ (u16 *)&init_rep);
+ if (rc != 0) {
+ dev_err(&adapter->pdev->dev, "%s: handshake failed, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ adapter_status =
+ le16_to_cpu(init_rep.adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (adapter_status != LEAPRAID_ADAPTER_STATUS_SUCCESS) {
+ dev_err(&adapter->pdev->dev, "%s: failed\n", __func__);
+ rc = -EIO;
+ }
+
+ adapter->timestamp_sync_cnt = 0;
+ return rc;
+}
+
+static int leapraid_cfg_pages(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_io_unit_page1 *sas_io_unit_page1 = NULL;
+ struct leapraid_bios_page3 bios_page3;
+ struct leapraid_bios_page2 bios_page2;
+ int rc = 0;
+ int sz;
+
+ rc = leapraid_op_config_page(adapter, &bios_page3, cfgp1,
+ cfgp2, GET_BIOS_PG3);
+ if (rc)
+ return rc;
+
+ rc = leapraid_op_config_page(adapter, &bios_page2, cfgp1,
+ cfgp2, GET_BIOS_PG2);
+ if (rc)
+ return rc;
+
+ adapter->adapter_attr.bios_version =
+ le32_to_cpu(bios_page3.bios_version);
+ adapter->adapter_attr.wideport_max_queue_depth =
+ LEAPRAID_SAS_QUEUE_DEPTH;
+ adapter->adapter_attr.narrowport_max_queue_depth =
+ LEAPRAID_SAS_QUEUE_DEPTH;
+ adapter->adapter_attr.sata_max_queue_depth =
+ LEAPRAID_SATA_QUEUE_DEPTH;
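+	/* the defaults above may be overridden by SAS IO unit page 1 below */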
+
+ adapter->boot_devs.requested_boot_dev.form =
+ bios_page2.requested_boot_dev_form;
+ memcpy((void *)adapter->boot_devs.requested_boot_dev.pg_dev,
+ (void *)&bios_page2.requested_boot_dev,
+ LEAPRAID_BOOT_DEV_SIZE);
+ adapter->boot_devs.requested_alt_boot_dev.form =
+ bios_page2.requested_alt_boot_dev_form;
+ memcpy((void *)adapter->boot_devs.requested_alt_boot_dev.pg_dev,
+ (void *)&bios_page2.requested_alt_boot_dev,
+ LEAPRAID_BOOT_DEV_SIZE);
+ adapter->boot_devs.current_boot_dev.form =
+ bios_page2.current_boot_dev_form;
+ memcpy((void *)adapter->boot_devs.current_boot_dev.pg_dev,
+ (void *)&bios_page2.current_boot_dev,
+ LEAPRAID_BOOT_DEV_SIZE);
+
+ sz = offsetof(struct leapraid_sas_io_unit_page1, phy_info);
+ sas_io_unit_page1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_page1) {
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ cfgp1.size = sz;
+
+ rc = leapraid_op_config_page(adapter, sas_io_unit_page1, cfgp1,
+ cfgp2, GET_SAS_IOUNIT_PG1);
+ if (rc)
+ goto out;
+
+ if (le16_to_cpu(sas_io_unit_page1->wideport_max_queue_depth))
+ adapter->adapter_attr.wideport_max_queue_depth =
+ le16_to_cpu(
+ sas_io_unit_page1->wideport_max_queue_depth);
+
+ if (le16_to_cpu(sas_io_unit_page1->narrowport_max_queue_depth))
+ adapter->adapter_attr.narrowport_max_queue_depth =
+ le16_to_cpu(
+ sas_io_unit_page1->narrowport_max_queue_depth);
+
+ if (sas_io_unit_page1->sata_max_queue_depth)
+ adapter->adapter_attr.sata_max_queue_depth =
+ sas_io_unit_page1->sata_max_queue_depth;
+
+out:
+ kfree(sas_io_unit_page1);
+ dev_info(&adapter->pdev->dev,
+ "max wp qd=%d, max np qd=%d, max sata qd=%d\n",
+ adapter->adapter_attr.wideport_max_queue_depth,
+ adapter->adapter_attr.narrowport_max_queue_depth,
+ adapter->adapter_attr.sata_max_queue_depth);
+ return rc;
+}
+
+static int leapraid_evt_notify(struct leapraid_adapter *adapter)
+{
+ struct leapraid_evt_notify_req *evt_notify_req;
+ int rc = 0;
+ int i;
+
+ mutex_lock(&adapter->driver_cmds.notify_event_cmd.mutex);
+ adapter->driver_cmds.notify_event_cmd.status = LEAPRAID_CMD_PENDING;
+ evt_notify_req =
+ leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.notify_event_cmd.inter_taskid);
+ memset(evt_notify_req, 0, sizeof(struct leapraid_evt_notify_req));
+ evt_notify_req->func = LEAPRAID_FUNC_EVENT_NOTIFY;
+ for (i = 0; i < LEAPRAID_EVT_MASK_COUNT; i++)
+ evt_notify_req->evt_masks[i] =
+ cpu_to_le32(adapter->fw_evt_s.leapraid_evt_masks[i]);
+ init_completion(&adapter->driver_cmds.notify_event_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.notify_event_cmd.inter_taskid);
+ wait_for_completion_timeout(
+ &adapter->driver_cmds.notify_event_cmd.done,
+ LEAPRAID_NOTIFY_EVENT_CMD_TIMEOUT * HZ);
+	if (!(adapter->driver_cmds.notify_event_cmd.status &
+	      LEAPRAID_CMD_DONE) &&
+	    (adapter->driver_cmds.notify_event_cmd.status &
+	     LEAPRAID_CMD_RESET))
+		rc = -EFAULT;
+ adapter->driver_cmds.notify_event_cmd.status = LEAPRAID_CMD_NOT_USED;
+ mutex_unlock(&adapter->driver_cmds.notify_event_cmd.mutex);
+
+ return rc;
+}
+
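+/**
+ * leapraid_scan_dev - request a firmware device scan
+ * @adapter: adapter to scan
+ * @async_scan_dev: fire the scan without waiting for completion
+ *
+ * Return: 0 on success, -ETIME on timeout, -EFAULT on reset or scan failure.
+ */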
+int leapraid_scan_dev(struct leapraid_adapter *adapter, bool async_scan_dev)
+{
+ struct leapraid_scan_dev_req *scan_dev_req;
+ struct leapraid_scan_dev_rep *scan_dev_rep;
+ u16 adapter_status;
+ int rc = 0;
+
+ dev_info(&adapter->pdev->dev,
+ "send device scan, async_scan_dev=%d!\n", async_scan_dev);
+
+ adapter->driver_cmds.scan_dev_cmd.status = LEAPRAID_CMD_PENDING;
+ adapter->driver_cmds.scan_dev_cmd.async_scan_dev = async_scan_dev;
+ scan_dev_req = leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.scan_dev_cmd.inter_taskid);
+ memset(scan_dev_req, 0, sizeof(struct leapraid_scan_dev_req));
+ scan_dev_req->func = LEAPRAID_FUNC_SCAN_DEV;
+
+ if (async_scan_dev) {
+ adapter->scan_dev_desc.first_scan_dev_fired = true;
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.scan_dev_cmd.inter_taskid);
+ return 0;
+ }
+
+ init_completion(&adapter->driver_cmds.scan_dev_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.scan_dev_cmd.inter_taskid);
+ wait_for_completion_timeout(&adapter->driver_cmds.scan_dev_cmd.done,
+ LEAPRAID_SCAN_DEV_CMD_TIMEOUT * HZ);
+ if (!(adapter->driver_cmds.scan_dev_cmd.status & LEAPRAID_CMD_DONE)) {
+ dev_err(&adapter->pdev->dev, "device scan timeout!\n");
+ if (adapter->driver_cmds.scan_dev_cmd.status &
+ LEAPRAID_CMD_RESET)
+ rc = -EFAULT;
+ else
+ rc = -ETIME;
+ goto out;
+ }
+
+ scan_dev_rep = (void *)(&adapter->driver_cmds.scan_dev_cmd.reply);
+ adapter_status =
+ le16_to_cpu(scan_dev_rep->adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (adapter_status != LEAPRAID_ADAPTER_STATUS_SUCCESS) {
+ dev_err(&adapter->pdev->dev, "device scan failure!\n");
+ rc = -EFAULT;
+	}
+
+out:
+ adapter->driver_cmds.scan_dev_cmd.status = LEAPRAID_CMD_NOT_USED;
+ dev_info(&adapter->pdev->dev,
+ "device scan %s\n", ((rc == 0) ? "SUCCESS" : "FAILED"));
+ return rc;
+}
+
+static void leapraid_init_task_tracker(struct leapraid_adapter *adapter)
+{
+ unsigned long flags;
+ u16 taskid;
+ int i;
+
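+	/* taskids are handed out starting at 1 */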
+ spin_lock_irqsave(&adapter->dynamic_task_desc.task_lock, flags);
+ taskid = 1;
+ for (i = 0; i < adapter->shost->can_queue; i++, taskid++) {
+ adapter->mem_desc.io_tracker[i].taskid = taskid;
+ adapter->mem_desc.io_tracker[i].scmd = NULL;
+ }
+
+ spin_unlock_irqrestore(&adapter->dynamic_task_desc.task_lock, flags);
+}
+
+static void leapraid_init_rep_msg_addr(struct leapraid_adapter *adapter)
+{
+ u32 reply_address;
+ unsigned int i;
+
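+	/* fill the reply address table with 32-bit reply buffer addresses */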
+ for (i = 0, reply_address = (u32)adapter->mem_desc.rep_msg_dma;
+ i < adapter->adapter_attr.rep_msg_qd;
+ i++, reply_address += LEAPRAID_REPLY_SIEZ) {
+ adapter->mem_desc.rep_msg_addr[i] = cpu_to_le32(reply_address);
+ }
+}
+
+static void init_rep_desc(struct leapraid_rq *rq, int index,
+ union leapraid_rep_desc_union *reply_post_free_contig)
+{
+ struct leapraid_adapter *adapter = rq->adapter;
+ unsigned int i;
+
+ if (!reset_devices)
+ rq->rep_desc =
+ adapter->mem_desc
+ .rep_desc_seg_maint[index /
+ LEAPRAID_REP_DESC_CHUNK_SIZE]
+ .rep_desc_maint[index %
+ LEAPRAID_REP_DESC_CHUNK_SIZE]
+ .rep_desc;
+ else
+ rq->rep_desc = reply_post_free_contig;
+
+ rq->rep_post_host_idx = 0;
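+	/* an all-ones descriptor marks an unused ring entry */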
+ for (i = 0; i < adapter->adapter_attr.rep_desc_qd; i++)
+ rq->rep_desc[i].words = cpu_to_le64(ULLONG_MAX);
+}
+
+static void leapraid_init_rep_desc(struct leapraid_adapter *adapter)
+{
+ union leapraid_rep_desc_union *reply_post_free_contig;
+ struct leapraid_int_rq *int_rq;
+ struct leapraid_blk_mq_poll_rq *blk_mq_poll_rq;
+ unsigned int i;
+ int index;
+
+ index = 0;
+ reply_post_free_contig = adapter->mem_desc
+ .rep_desc_seg_maint[0]
+ .rep_desc_maint[0]
+ .rep_desc;
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qdex; i++) {
+ int_rq = &adapter->notification_desc.int_rqs[i];
+ init_rep_desc(&int_rq->rq, index, reply_post_free_contig);
+ if (!reset_devices)
+ index++;
+ else
+ reply_post_free_contig +=
+ adapter->adapter_attr.rep_desc_qd;
+ }
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qcnt; i++) {
+ blk_mq_poll_rq = &adapter->notification_desc.blk_mq_poll_rqs[i];
+ init_rep_desc(&blk_mq_poll_rq->rq,
+ index, reply_post_free_contig);
+ if (!reset_devices)
+ index++;
+ else
+ reply_post_free_contig +=
+ adapter->adapter_attr.rep_desc_qd;
+ }
+}
+
+static void leapraid_init_bar_idx_regs(struct leapraid_adapter *adapter)
+{
+ struct leapraid_int_rq *int_rq;
+ struct leapraid_blk_mq_poll_rq *blk_mq_poll_rq;
+ unsigned int i, j;
+
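+	/* program reply message and reply post host index registers */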
+ adapter->rep_msg_host_idx = adapter->adapter_attr.rep_msg_qd - 1;
+ writel(adapter->rep_msg_host_idx,
+ &adapter->iomem_base->rep_msg_host_idx);
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qdex; i++) {
+ int_rq = &adapter->notification_desc.int_rqs[i];
+ for (j = 0; j < REP_POST_HOST_IDX_REG_CNT; j++)
+ writel((int_rq->rq.msix_idx & 7) <<
+ LEAPRAID_RPHI_MSIX_IDX_SHIFT,
+ &adapter->iomem_base->rep_post_reg_idx[j].idx);
+ }
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qcnt; i++) {
+ blk_mq_poll_rq =
+ &adapter->notification_desc.blk_mq_poll_rqs[i];
+ for (j = 0; j < REP_POST_HOST_IDX_REG_CNT; j++)
+ writel((blk_mq_poll_rq->rq.msix_idx & 7) <<
+ LEAPRAID_RPHI_MSIX_IDX_SHIFT,
+ &adapter->iomem_base->rep_post_reg_idx[j].idx);
+ }
+}
+
+static int leapraid_make_adapter_available(struct leapraid_adapter *adapter)
+{
+ int rc = 0;
+
+ leapraid_init_task_tracker(adapter);
+ leapraid_init_rep_msg_addr(adapter);
+
+ if (adapter->scan_dev_desc.driver_loading)
+ leapraid_configure_reply_queue_affinity(adapter);
+
+ leapraid_init_rep_desc(adapter);
+ rc = leapraid_send_adapter_init(adapter);
+ if (rc)
+ return rc;
+
+ leapraid_init_bar_idx_regs(adapter);
+ leapraid_unmask_int(adapter);
+ rc = leapraid_cfg_pages(adapter);
+ if (rc)
+ return rc;
+
+ rc = leapraid_evt_notify(adapter);
+ if (rc)
+ return rc;
+
+ if (!adapter->access_ctrl.shost_recovering) {
+ adapter->scan_dev_desc.wait_scan_dev_done = true;
+ return 0;
+ }
+
+ rc = leapraid_scan_dev(adapter, false);
+ if (rc)
+ return rc;
+
+ return rc;
+}
+
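+/**
+ * leapraid_ctrl_init - bring the controller to an operational state
+ * @adapter: adapter to initialize
+ *
+ * Return: 0 on success, negative errno on failure; on failure any
+ * resources acquired here are released before returning.
+ */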
+int leapraid_ctrl_init(struct leapraid_adapter *adapter)
+{
+ u32 cap;
+ int rc = 0;
+
+ rc = leapraid_set_pcie_and_notification(adapter);
+ if (rc)
+ goto out_free_resources;
+
+ pci_set_drvdata(adapter->pdev, adapter->shost);
+
+ pcie_capability_read_dword(adapter->pdev, PCI_EXP_DEVCAP, &cap);
+
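+	/* enable PCIe extended tags when the device advertises support */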
+ if (cap & PCI_EXP_DEVCAP_EXT_TAG) {
+ pcie_capability_set_word(adapter->pdev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_EXT_TAG);
+ }
+
+ rc = leapraid_make_adapter_ready(adapter, PART_RESET);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "make adapter ready failure\n");
+ goto out_free_resources;
+ }
+
+ rc = leapraid_get_adapter_features(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "get adapter feature failure\n");
+ goto out_free_resources;
+ }
+
+ rc = leapraid_fw_log_init(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "fw log init failure\n");
+ goto out_free_resources;
+ }
+
+ rc = leapraid_request_host_memory(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "request host memory failure\n");
+ goto out_free_resources;
+ }
+
+ init_waitqueue_head(&adapter->reset_desc.reset_wait_queue);
+
+ rc = leapraid_alloc_dev_topo_bitmaps(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "alloc topo bitmaps failure\n");
+ goto out_free_resources;
+ }
+
+ rc = leapraid_init_driver_cmds(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "init driver cmds failure\n");
+ goto out_free_resources;
+ }
+
+ leapraid_init_event_mask(adapter);
+
+ rc = leapraid_make_adapter_available(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev,
+ "make adapter available failure\n");
+ goto out_free_resources;
+ }
+ return 0;
+
+out_free_resources:
+ adapter->access_ctrl.host_removing = true;
+ leapraid_fw_log_exit(adapter);
+ leapraid_disable_controller(adapter);
+ leapraid_free_host_memory(adapter);
+ leapraid_free_dev_topo_bitmaps(adapter);
+ pci_set_drvdata(adapter->pdev, NULL);
+ return rc;
+}
+
+void leapraid_remove_ctrl(struct leapraid_adapter *adapter)
+{
+ leapraid_check_scheduled_fault_stop(adapter);
+ leapraid_fw_log_stop(adapter);
+ leapraid_fw_log_exit(adapter);
+ leapraid_disable_controller(adapter);
+ leapraid_free_host_memory(adapter);
+ leapraid_free_dev_topo_bitmaps(adapter);
+ leapraid_free_enc_list(adapter);
+ pci_set_drvdata(adapter->pdev, NULL);
+}
+
+void leapraid_free_internal_scsi_cmd(struct leapraid_adapter *adapter)
+{
+ mutex_lock(&adapter->driver_cmds.driver_scsiio_cmd.mutex);
+ kfree(adapter->driver_cmds.internal_scmd);
+ adapter->driver_cmds.internal_scmd = NULL;
+ mutex_unlock(&adapter->driver_cmds.driver_scsiio_cmd.mutex);
+}
diff --git a/drivers/scsi/leapraid/leapraid_func.h b/drivers/scsi/leapraid/leapraid_func.h
new file mode 100644
index 000000000000..9f42763bda72
--- /dev/null
+++ b/drivers/scsi/leapraid/leapraid_func.h
@@ -0,0 +1,1423 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 LeapIO Tech Inc.
+ *
+ * LeapRAID Storage and RAID Controller driver.
+ */
+
+#ifndef LEAPRAID_FUNC_H_INCLUDED
+#define LEAPRAID_FUNC_H_INCLUDED
+
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/poll.h>
+#include <linux/errno.h>
+#include <linux/ktime.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsicam.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_sas.h>
+
+#include "leapraid.h"
+
+/* request and reply buffer sizes */
+#define LEAPRAID_REQUEST_SIZE 128
+#define LEAPRAID_REPLY_SIEZ 128
+#define LEAPRAID_CHAIN_SEG_SIZE 128
+#define LEAPRAID_MAX_SGES_IN_CHAIN 7
+#define LEAPRAID_DEFAULT_CHAINS_PER_IO 19
+#define LEAPRAID_DEFAULT_DIX_CHAINS_PER_IO \
+ (2 * LEAPRAID_DEFAULT_CHAINS_PER_IO) /* TODO DIX */
+#define LEAPRAID_IEEE_SGE64_ENTRY_SIZE 16
+#define LEAPRAID_REP_DESC_CHUNK_SIZE 16
+#define LEAPRAID_REP_DESC_ENTRY_SIZE 8
+#define LEAPRAID_REP_MSG_ADDR_SIZE 4
+#define LEAPRAID_REP_RQ_CNT_SIZE 16
+
+#define LEAPRAID_SYS_LOG_BUF_SIZE 0x200000
+#define LEAPRAID_SYS_LOG_BUF_RESERVE 0x1000
+
+/* Driver version and name */
+#define LEAPRAID_DRIVER_NAME "LeapRaid"
+#define LEAPRAID_NAME_LENGTH 48
+#define LEAPRAID_AUTHOR "LeapIO Inc."
+#define LEAPRAID_DESCRIPTION "LeapRaid Driver"
+#define LEAPRAID_DRIVER_VERSION "2.00.00.05"
+#define LEAPRAID_MAJOR_VERSION 2
+#define LEAPRAID_MINOR_VERSION 00
+#define LEAPRAID_BUILD_VERSION 00
+#define LEAPRAID_RELEASE_VERSION 05
+
+/* Device ID */
+#define LEAPRAID_VENDOR_ID 0xD405
+#define LEAPRAID_DEVID_HBA 0x8200
+#define LEAPRAID_DEVID_RAID 0x8201
+
+#define LEAPRAID_PCI_VENDOR_ID_MASK 0xFFFF
+
+/* RAID virtual channel ID */
+#define RAID_CHANNEL 1
+
+/* Scatter/Gather (SG) segment limits */
+#define LEAPRAID_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE
+
+#define LEAPRAID_KDUMP_MIN_PHYS_SEGMENTS 32
+#define LEAPRAID_SG_DEPTH LEAPRAID_MAX_PHYS_SEGMENTS
+
+/* firmware / config page operations */
+#define LEAPRAID_SET_PARAMETER_SYNC_TIMESTAMP 0x81
+#define LEAPRAID_CFG_REQ_RETRY_TIMES 2
+
+/* Hardware access helpers */
+#define leapraid_readl(addr) readl(addr)
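+/* true when the command was not interrupted by a controller reset */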
+#define leapraid_check_reset(status) \
+ (!((status) & LEAPRAID_CMD_RESET))
+
+/* Polling intervals */
+#define LEAPRAID_PCIE_LOG_POLLING_INTERVAL 1
+#define LEAPRAID_FAULT_POLLING_INTERVAL 1000
+#define LEAPRAID_TIMESTAMP_SYNC_INTERVAL 900
+#define LEAPRAID_SMART_POLLING_INTERVAL (300 * 1000)
+
+/* init mask */
+#define LEAPRAID_RESET_IRQ_MASK 0x40000000
+#define LEAPRAID_REPLY_INT_MASK 0x00000008
+#define LEAPRAID_TO_SYS_DB_MASK 0x00000001
+
+/* queue depth */
+#define LEAPRAID_SATA_QUEUE_DEPTH 32
+#define LEAPRAID_SAS_QUEUE_DEPTH 254
+#define LEAPRAID_RAID_QUEUE_DEPTH 128
+
+/* SCSI device and queue limits */
+#define LEAPRAID_MAX_SECTORS 8192
+#define LEAPRAID_DEF_MAX_SECTORS 32767
+#define LEAPRAID_MAX_CDB_LEN 32
+#define LEAPRAID_MAX_LUNS 16384
+#define LEAPRAID_CAN_QUEUE_MIN 1
+#define LEAPRAID_THIS_ID_NONE -1
+#define LEAPRAID_CMD_PER_LUN 128
+#define LEAPRAID_MAX_SEGMENT_SIZE 0xffffffff
+
+/* SCSI sense and ASC/ASCQ and disk geometry configuration */
+#define DESC_FORMAT_THRESHOLD 0x72
+#define SENSE_KEY_MASK 0x0F
+#define SCSI_SENSE_RESPONSE_CODE_MASK 0x7F
+#define ASC_FAILURE_PREDICTION_THRESHOLD_EXCEEDED 0x5D
+#define LEAPRAID_LARGE_DISK_THRESHOLD 0x200000UL /* in sectors, 1GB */
+#define LEAPRAID_LARGE_DISK_HEADS 255
+#define LEAPRAID_LARGE_DISK_SECTORS 63
+#define LEAPRAID_SMALL_DISK_HEADS 64
+#define LEAPRAID_SMALL_DISK_SECTORS 32
+
+/* SMP (Serial Management Protocol) */
+#define LEAPRAID_SMP_PT_FLAG_SGL_PTR 0x80
+#define LEAPRAID_SMP_FN_REPORT_PHY_ERR_LOG 0x91
+#define LEAPRAID_SMP_FRAME_HEADER_SIZE 4
+#define LEAPRAID_SCSI_HOST_SHIFT 16
+#define LEAPRAID_SCSI_DRIVER_SHIFT 24
+
+/* SCSI ASC/ASCQ definitions */
+#define LEAPRAID_SCSI_ASCQ_DEFAULT 0x00
+#define LEAPRAID_SCSI_ASC_POWER_ON_RESET 0x29
+#define LEAPRAID_SCSI_ASC_INVALID_CMD_CODE 0x20
+#define LEAPRAID_SCSI_ASCQ_POWER_ON_RESET 0x07
+
+/* ---- VPD Page 0x89 (ATA Information) ---- */
+#define LEAPRAID_VPD_PAGE_ATA_INFO 0x89
+#define LEAPRAID_VPD_PG89_MAX_LEN 255
+#define LEAPRAID_VPD_PG89_MIN_LEN 214
+
+/* Byte index for NCQ support flag in VPD Page 0x89 */
+#define LEAPRAID_VPD_PG89_NCQ_BYTE_IDX 213
+#define LEAPRAID_VPD_PG89_NCQ_BIT_SHIFT 4
+#define LEAPRAID_VPD_PG89_NCQ_BIT_MASK 0x1
+
+/* readiness polling: max retries, sleep µs between */
+#define LEAPRAID_ADAPTER_READY_MAX_RETRY 15000
+#define LEAPRAID_ADAPTER_READY_SLEEP_MIN_US 1000
+#define LEAPRAID_ADAPTER_READY_SLEEP_MAX_US 1100
+
+/* Doorbell wait parameters */
+#define LEAPRAID_DB_WAIT_MAX_RETRY 20000
+#define LEAPRAID_DB_WAIT_DELAY_US 500
+
+/* Basic data size definitions */
+#define LEAPRAID_DWORDS_BYTE_SIZE 4
+#define LEAPRAID_WORD_BYTE_SIZE 2
+
+/* SGL threshold and chain offset */
+#define LEAPRAID_SGL_INLINE_THRESHOLD 2
+#define LEAPRAID_CHAIN_OFFSET_DWORDS 7
+
+/* MSI-X group size and mask */
+#define LEAPRAID_MSIX_GROUP_SIZE 8
+#define LEAPRAID_MSIX_GROUP_MASK 7
+
+/* basic constants and limits */
+#define LEAPRAID_BUSY_LIMIT 1
+#define LEAPRAID_INDEX_FIRST 0
+#define LEAPRAID_BITS_PER_BYTE 8
+#define LEAPRAID_INVALID_HOST_DIAG_VAL 0xFFFFFFFF
+
+/* retry / sleep configuration */
+#define LEAPRAID_UNLOCK_RETRY_LIMIT 20
+#define LEAPRAID_UNLOCK_SLEEP_MS 100
+#define LEAPRAID_MSLEEP_SHORT_MS 50
+#define LEAPRAID_MSLEEP_NORMAL_MS 100
+#define LEAPRAID_MSLEEP_LONG_MS 256
+#define LEAPRAID_MSLEEP_EXTRA_LONG_MS 500
+#define LEAPRAID_IO_POLL_DELAY_US 500
+
+/* controller reset loop parameters */
+#define LEAPRAID_RESET_LOOP_COUNT_REF (300000 / 256)
+#define LEAPRAID_RESET_LOOP_COUNT_DEFAULT 10000
+#define LEAPRAID_RESET_POLL_INTERVAL_MS 500
+
+/* Device / Volume configuration */
+#define LEAPRAID_MAX_VOLUMES_DEFAULT 32
+#define LEAPRAID_MAX_DEV_HANDLE_DEFAULT 2048
+#define LEAPRAID_INVALID_DEV_HANDLE 0xFFFF
+
+/* cmd queue depth */
+#define LEAPRAID_COALESCING_DEPTH_MAX 256
+#define LEAPRAID_DEFAULT_CMD_QD_OFFSET 64
+#define LEAPRAID_REPLY_QD_ALIGNMENT 16
+/* task id offset */
+#define LEAPRAID_TASKID_OFFSET_CTRL_CMD 1
+#define LEAPRAID_TASKID_OFFSET_SCSIIO_CMD 2
+#define LEAPRAID_TASKID_OFFSET_CFG_OP_CMD 1
+#define LEAPRAID_TASKID_OFFSET_TRANSPORT_CMD 2
+#define LEAPRAID_TASKID_OFFSET_TIMESTAMP_SYNC_CMD 3
+#define LEAPRAID_TASKID_OFFSET_RAID_ACTION_CMD 4
+#define LEAPRAID_TASKID_OFFSET_ENC_CMD 5
+#define LEAPRAID_TASKID_OFFSET_NOTIFY_EVENT_CMD 6
+
+/* task id offset for high-priority */
+#define LEAPRAID_HP_TASKID_OFFSET_CTL_CMD 0
+#define LEAPRAID_HP_TASKID_OFFSET_TM_CMD 1
+
+/* Event / Boot configuration */
+#define LEAPRAID_EVT_MASK_COUNT 4
+#define LEAPRAID_BOOT_DEV_SIZE 24
+
+/* logsense command definitions */
+#define LEAPRAID_LOGSENSE_DATA_LENGTH 16
+#define LEAPRAID_LOGSENSE_CDB_LENGTH 10
+#define LEAPRAID_LOGSENSE_CDB_CODE 0x6F
+#define LEAPRAID_LOGSENSE_TIMEOUT 5
+#define LEAPRAID_LOGSENSE_SMART_CODE 0x5D
+
+/* cmd timeout */
+#define LEAPRAID_DRIVER_SCSIIO_CMD_TIMEOUT LEAPRAID_LOGSENSE_TIMEOUT
+#define LEAPRAID_CFG_OP_TIMEOUT 15
+#define LEAPRAID_CTL_CMD_TIMEOUT 10
+#define LEAPRAID_SCAN_DEV_CMD_TIMEOUT 300
+#define LEAPRAID_TIMESTAMP_SYNC_CMD_TIMEOUT 10
+#define LEAPRAID_RAID_ACTION_CMD_TIMEOUT 10
+#define LEAPRAID_ENC_CMD_TIMEOUT 10
+#define LEAPRAID_NOTIFY_EVENT_CMD_TIMEOUT 30
+#define LEAPRAID_TM_CMD_TIMEOUT 30
+#define LEAPRAID_TRANSPORT_CMD_TIMEOUT 10
+
+/**
+ * struct leapraid_adapter_features - Features and
+ * capabilities of a LeapRAID adapter
+ *
+ * @req_slot: Number of request slots supported by the adapter
+ * @hp_slot: Number of high-priority slots supported by the adapter
+ * @adapter_caps: Adapter capabilities
+ * @fw_version: Firmware version of the adapter
+ * @max_volumes: Maximum number of RAID volumes supported by the adapter
+ * @max_dev_handle: Maximum device handle supported by the adapter
+ * @min_dev_handle: Minimum device handle supported by the adapter
+ */
+struct leapraid_adapter_features {
+ u16 req_slot;
+ u16 hp_slot;
+ u32 adapter_caps;
+ u32 fw_version;
+ u8 max_volumes;
+ u16 max_dev_handle;
+ u16 min_dev_handle;
+};
+
+/**
+ * struct leapraid_adapter_attr - Adapter attributes and capabilities
+ *
+ * @id: Adapter identifier
+ * @raid_support: Indicates if RAID is supported
+ * @bios_version: Version of the adapter BIOS
+ * @enable_mp: Indicates if multipath (MP) support is enabled
+ * @wideport_max_queue_depth: Maximum queue depth for wide ports
+ * @narrowport_max_queue_depth: Maximum queue depth for narrow ports
+ * @sata_max_queue_depth: Maximum queue depth for SATA
+ * @features: Detailed features of the adapter
+ * @adapter_total_qd: Total queue depth available on the adapter
+ * @io_qd: Queue depth allocated for I/O operations
+ * @rep_msg_qd: Queue depth for reply messages
+ * @rep_desc_qd: Queue depth for reply descriptors
+ * @rep_desc_q_seg_cnt: Number of segments in a reply descriptor queue
+ * @rq_cnt: Number of request queues
+ * @task_desc_dma_size: Size of task descriptor DMA memory
+ * @use_32_dma_mask: Indicates if 32-bit DMA mask is used
+ * @name: Adapter name string
+ */
+struct leapraid_adapter_attr {
+ u8 id;
+ bool raid_support;
+ u32 bios_version;
+ bool enable_mp;
+ u32 wideport_max_queue_depth;
+ u32 narrowport_max_queue_depth;
+ u32 sata_max_queue_depth;
+ struct leapraid_adapter_features features;
+ u32 adapter_total_qd;
+ u32 io_qd;
+ u32 rep_msg_qd;
+ u32 rep_desc_qd;
+ u32 rep_desc_q_seg_cnt;
+ u16 rq_cnt;
+ u32 task_desc_dma_size;
+ bool use_32_dma_mask;
+ char name[LEAPRAID_NAME_LENGTH];
+};
+
+/**
+ * struct leapraid_io_req_tracker - Track a SCSI I/O request
+ * for the adapter
+ *
+ * @taskid: Unique task ID for this I/O request
+ * @scmd: Pointer to the associated SCSI command
+ * @chain_list: List of chain frames associated with this request
+ * @msix_io: MSI-X vector assigned to this I/O request
+ * @chain: Pointer to the chain memory for this request
+ * @chain_dma: DMA address of the chain memory
+ */
+struct leapraid_io_req_tracker {
+ u16 taskid;
+ struct scsi_cmnd *scmd;
+ struct list_head chain_list;
+ u16 msix_io;
+ void *chain;
+ dma_addr_t chain_dma;
+};
+
+/**
+ * struct leapraid_task_tracker - Tracks a task in the adapter
+ *
+ * @taskid: Unique task ID for this tracker
+ * @cb_idx: Callback index associated with this task
+ * @tracker_list: Linked list node to chain this tracker in lists
+ */
+struct leapraid_task_tracker {
+ u16 taskid;
+ u8 cb_idx;
+ struct list_head tracker_list;
+};
+
+/**
+ * struct leapraid_rep_desc_maint - Maintains reply descriptor
+ * memory
+ *
+ * @rep_desc: Pointer to the reply descriptor
+ * @rep_desc_dma: DMA address of the reply descriptor
+ */
+struct leapraid_rep_desc_maint {
+ union leapraid_rep_desc_union *rep_desc;
+ dma_addr_t rep_desc_dma;
+};
+
+/**
+ * struct leapraid_rep_desc_seg_maint - Maintains reply descriptor
+ * segment memory
+ *
+ * @rep_desc_seg: Pointer to the reply descriptor segment
+ * @rep_desc_seg_dma: DMA address of the reply descriptor segment
+ * @rep_desc_maint: Pointer to the main reply descriptor structure
+ */
+struct leapraid_rep_desc_seg_maint {
+ void *rep_desc_seg;
+ dma_addr_t rep_desc_seg_dma;
+ struct leapraid_rep_desc_maint *rep_desc_maint;
+};
+
+/**
+ * struct leapraid_mem_desc - Memory descriptor for LeapRaid adapter
+ *
+ * @task_desc: Pointer to task descriptor
+ * @task_desc_dma: DMA address of task descriptor
+ * @sg_chain_pool: DMA pool for SGL chain allocations
+ * @sg_chain_pool_size: Size of the sg_chain_pool
+ * @io_tracker: IO request tracker array
+ * @sense_data: Buffer for SCSI sense data
+ * @sense_data_dma: DMA address of sense_data buffer
+ * @rep_msg: Buffer for reply message
+ * @rep_msg_dma: DMA address of reply message buffer
+ * @rep_msg_addr: Pointer to reply message address
+ * @rep_msg_addr_dma: DMA address of reply message address
+ * @rep_desc_seg_maint: Pointer to reply descriptor segment
+ * @rep_desc_q_arr: Pointer to reply descriptor queue array
+ * @rep_desc_q_arr_dma: DMA address of reply descriptor queue array
+ */
+struct leapraid_mem_desc {
+ void *task_desc;
+ dma_addr_t task_desc_dma;
+ struct dma_pool *sg_chain_pool;
+ u16 sg_chain_pool_size;
+ struct leapraid_io_req_tracker *io_tracker;
+ u8 *sense_data;
+ dma_addr_t sense_data_dma;
+ u8 *rep_msg;
+ dma_addr_t rep_msg_dma;
+ __le32 *rep_msg_addr;
+ dma_addr_t rep_msg_addr_dma;
+ struct leapraid_rep_desc_seg_maint *rep_desc_seg_maint;
+ struct leapraid_rep_desc_q_arr *rep_desc_q_arr;
+ dma_addr_t rep_desc_q_arr_dma;
+};
+
+#define LEAPRAID_FIXED_INTER_CMDS 7
+#define LEAPRAID_FIXED_HP_CMDS 2
+#define LEAPRAID_INTER_HP_CMDS_DIF \
+ (LEAPRAID_FIXED_INTER_CMDS - LEAPRAID_FIXED_HP_CMDS)
+
+#define LEAPRAID_CMD_NOT_USED 0x8000
+#define LEAPRAID_CMD_DONE 0x0001
+#define LEAPRAID_CMD_PENDING 0x0002
+#define LEAPRAID_CMD_REPLY_VALID 0x0004
+#define LEAPRAID_CMD_RESET 0x0008
+
+/**
+ * enum LEAPRAID_CB_INDEX - Callback index for LeapRaid driver
+ *
+ * @LEAPRAID_SCAN_DEV_CB_IDX: Scan device callback index
+ * @LEAPRAID_CONFIG_CB_IDX: Configuration callback index
+ * @LEAPRAID_TRANSPORT_CB_IDX: Transport callback index
+ * @LEAPRAID_TIMESTAMP_SYNC_CB_IDX: Timestamp sync callback index
+ * @LEAPRAID_RAID_ACTION_CB_IDX: RAID action callback index
+ * @LEAPRAID_DRIVER_SCSIIO_CB_IDX: Driver SCSI I/O callback index
+ * @LEAPRAID_SAS_CTRL_CB_IDX: SAS controller callback index
+ * @LEAPRAID_ENC_CB_IDX: Enclosure callback index
+ * @LEAPRAID_NOTIFY_EVENT_CB_IDX: Notify event callback index
+ * @LEAPRAID_CTL_CB_IDX: Control callback index
+ * @LEAPRAID_TM_CB_IDX: Task management callback index
+ * @LEAPRAID_NUM_CB_IDXS: Number of callback indexes
+ */
+enum LEAPRAID_CB_INDEX {
+ LEAPRAID_SCAN_DEV_CB_IDX = 0x1,
+ LEAPRAID_CONFIG_CB_IDX = 0x2,
+ LEAPRAID_TRANSPORT_CB_IDX = 0x3,
+ LEAPRAID_TIMESTAMP_SYNC_CB_IDX = 0x4,
+ LEAPRAID_RAID_ACTION_CB_IDX = 0x5,
+ LEAPRAID_DRIVER_SCSIIO_CB_IDX = 0x6,
+ LEAPRAID_SAS_CTRL_CB_IDX = 0x7,
+ LEAPRAID_ENC_CB_IDX = 0x8,
+ LEAPRAID_NOTIFY_EVENT_CB_IDX = 0x9,
+ LEAPRAID_CTL_CB_IDX = 0xA,
+ LEAPRAID_TM_CB_IDX = 0xB,
+ LEAPRAID_NUM_CB_IDXS
+};
+
+struct leapraid_default_reply {
+ u8 pad[LEAPRAID_REPLY_SIEZ];
+};
+
+struct leapraid_sense_buffer {
+ u8 pad[SCSI_SENSE_BUFFERSIZE];
+};
+
+/**
+ * struct leapraid_driver_cmd - Driver command tracking structure
+ *
+ * @reply: Default reply structure returned by the adapter
+ * @done: Completion object used to signal command completion
+ * @status: Status code returned by the firmware
+ * @taskid: Unique task identifier for this command
+ * @hp_taskid: Task identifier for high-priority commands
+ * @inter_taskid: Task identifier for internal commands
+ * @cb_idx: Callback index used to identify completion context
+ * @async_scan_dev: True if this command is for asynchronous device scan
+ * @sense: Sense buffer holding error information from device
+ * @mutex: Mutex to protect access to this command structure
+ * @list: List node for linking driver commands into lists
+ */
+struct leapraid_driver_cmd {
+ struct leapraid_default_reply reply;
+ struct completion done;
+ u16 status;
+ u16 taskid;
+ u16 hp_taskid;
+ u16 inter_taskid;
+ u8 cb_idx;
+ bool async_scan_dev;
+ struct leapraid_sense_buffer sense;
+ struct mutex mutex;
+ struct list_head list;
+};
+
+/**
+ * struct leapraid_driver_cmds - Collection of driver command objects
+ *
+ * @special_cmd_list: List head for tracking special driver commands
+ * @scan_dev_cmd: Command used for device scan operations
+ * @cfg_op_cmd: Command for configuration operations
+ * @transport_cmd: Command for transport-level operations
+ * @timestamp_sync_cmd: Command for synchronizing timestamp with firmware
+ * @raid_action_cmd: Command for RAID-related management or action requests
+ * @driver_scsiio_cmd: Command used for internal SCSI I/O processing
+ * @enc_cmd: Command for enclosure management operations
+ * @notify_event_cmd: Command for asynchronous event notification handling
+ * @ctl_cmd: Command for generic control or maintenance operations
+ * @tm_cmd: Task management command
+ * @internal_scmd: Pointer to internal SCSI command used by the driver
+ */
+struct leapraid_driver_cmds {
+ struct list_head special_cmd_list;
+ struct leapraid_driver_cmd scan_dev_cmd;
+ struct leapraid_driver_cmd cfg_op_cmd;
+ struct leapraid_driver_cmd transport_cmd;
+ struct leapraid_driver_cmd timestamp_sync_cmd;
+ struct leapraid_driver_cmd raid_action_cmd;
+ struct leapraid_driver_cmd driver_scsiio_cmd;
+ struct leapraid_driver_cmd enc_cmd;
+ struct leapraid_driver_cmd notify_event_cmd;
+ struct leapraid_driver_cmd ctl_cmd;
+ struct leapraid_driver_cmd tm_cmd;
+ struct scsi_cmnd *internal_scmd;
+};
+
+/**
+ * struct leapraid_dynamic_task_desc - Dynamic task descriptor
+ *
+ * @task_lock: Spinlock to protect concurrent access
+ * @hp_taskid: Current high-priority task ID
+ * @hp_cmd_qd: Fixed command queue depth for high-priority tasks
+ * @inter_taskid: Current internal task ID
+ * @inter_cmd_qd: Fixed command queue depth for internal tasks
+ */
+struct leapraid_dynamic_task_desc {
+ spinlock_t task_lock;
+ u16 hp_taskid;
+ u16 hp_cmd_qd;
+ u16 inter_taskid;
+ u16 inter_cmd_qd;
+};
+
+/**
+ * struct leapraid_fw_evt_work - Firmware event work structure
+ *
+ * @list: Linked list node for queuing the work
+ * @adapter: Pointer to the associated LeapRaid adapter
+ * @work: Work structure used by the kernel workqueue
+ * @refcnt: Reference counter for managing the lifetime of this work
+ * @evt_data: Pointer to firmware event data
+ * @dev_handle: Device handle associated with the event
+ * @evt_type: Type of firmware event
+ * @ignore: Flag indicating whether the event should be ignored
+ */
+struct leapraid_fw_evt_work {
+ struct list_head list;
+ struct leapraid_adapter *adapter;
+ struct work_struct work;
+ struct kref refcnt;
+ void *evt_data;
+ u16 dev_handle;
+ u16 evt_type;
+ u8 ignore;
+};
+
+/**
+ * struct leapraid_fw_evt_struct - Firmware event handling structure
+ *
+ * @fw_evt_name: Name of the firmware event
+ * @fw_evt_thread: Workqueue used for processing firmware events
+ * @fw_evt_lock: Spinlock protecting access to the firmware event list
+ * @fw_evt_list: Linked list of pending firmware events
+ * @cur_evt: Pointer to the currently processing firmware event
+ * @fw_evt_cleanup: Flag indicating whether cleanup of events is in progress
+ * @leapraid_evt_masks: Array of event masks for filtering firmware events
+ */
+struct leapraid_fw_evt_struct {
+ char fw_evt_name[48];
+ struct workqueue_struct *fw_evt_thread;
+ spinlock_t fw_evt_lock;
+ struct list_head fw_evt_list;
+ struct leapraid_fw_evt_work *cur_evt;
+ int fw_evt_cleanup;
+	u32 leapraid_evt_masks[LEAPRAID_EVT_MASK_COUNT];
+};
+
+/**
+ * struct leapraid_rq - Represents a LeapRaid request queue
+ *
+ * @adapter: Pointer to the associated LeapRaid adapter
+ * @msix_idx: MSI-X vector index used by this queue
+ * @rep_post_host_idx: Index of the last processed reply descriptor
+ * @rep_desc: Pointer to the reply descriptor associated with this queue
+ * @name: Name of the request queue
+ * @busy: Atomic counter indicating if the queue is busy
+ */
+struct leapraid_rq {
+ struct leapraid_adapter *adapter;
+ u8 msix_idx;
+ u32 rep_post_host_idx;
+ union leapraid_rep_desc_union *rep_desc;
+ char name[LEAPRAID_NAME_LENGTH];
+ atomic_t busy;
+};
+
+/**
+ * struct leapraid_int_rq - Internal request queue for a CPU
+ *
+ * @affinity_hint: CPU affinity mask for the queue
+ * @rq: Underlying LeapRaid request queue structure
+ */
+struct leapraid_int_rq {
+ cpumask_var_t affinity_hint;
+ struct leapraid_rq rq;
+};
+
+/**
+ * struct leapraid_blk_mq_poll_rq - Polling request for LeapRaid blk-mq
+ *
+ * @busy: Atomic flag indicating request is being processed
+ * @pause: Atomic flag to temporarily suspend polling
+ * @rq: The underlying LeapRaid request structure
+ */
+struct leapraid_blk_mq_poll_rq {
+ atomic_t busy;
+ atomic_t pause;
+ struct leapraid_rq rq;
+};
+
+/**
+ * struct leapraid_notification_desc - Notification
+ * descriptor for LeapRaid
+ *
+ * @iopoll_qdex: Starting index of the I/O polling queues
+ * @iopoll_qcnt: Count of I/O polling queues
+ * @msix_enable: Flag indicating MSI-X is enabled
+ * @msix_cpu_map: CPU map for MSI-X interrupts
+ * @msix_cpu_map_sz: Size of the MSI-X CPU map
+ * @int_rqs: Array of interrupt request queues
+ * @int_rqs_allocated: Count of allocated interrupt request queues
+ * @blk_mq_poll_rqs: Array of blk-mq polling requests
+ */
+struct leapraid_notification_desc {
+ u32 iopoll_qdex;
+ u32 iopoll_qcnt;
+ bool msix_enable;
+ u8 *msix_cpu_map;
+ u32 msix_cpu_map_sz;
+ struct leapraid_int_rq *int_rqs;
+ u32 int_rqs_allocated;
+ struct leapraid_blk_mq_poll_rq *blk_mq_poll_rqs;
+};
+
+/**
+ * struct leapraid_reset_desc - Reset descriptor for LeapRaid
+ *
+ * @fault_reset_wq: Workqueue for fault reset operations
+ * @fault_reset_work: Delayed work structure for fault reset
+ * @fault_reset_wq_name: Name of the fault reset workqueue
+ * @host_diag_mutex: Mutex for host diagnostic operations
+ * @adapter_reset_lock: Spinlock for adapter reset operations
+ * @adapter_reset_mutex: Mutex for adapter reset operations
+ * @adapter_link_resetting: Flag indicating if adapter link is resetting
+ * @adapter_reset_results: Results of the adapter reset operation
+ * @pending_io_cnt: Count of pending I/O operations
+ * @reset_wait_queue: Wait queue for reset operations
+ * @reset_cnt: Counter for reset operations
+ */
+struct leapraid_reset_desc {
+ struct workqueue_struct *fault_reset_wq;
+ struct delayed_work fault_reset_work;
+ char fault_reset_wq_name[48];
+ struct mutex host_diag_mutex;
+ spinlock_t adapter_reset_lock;
+ struct mutex adapter_reset_mutex;
+ bool adapter_link_resetting;
+ int adapter_reset_results;
+ int pending_io_cnt;
+ wait_queue_head_t reset_wait_queue;
+ u32 reset_cnt;
+};
+
+/**
+ * struct leapraid_scan_dev_desc - Scan device descriptor
+ * for LeapRaid
+ *
+ * @wait_scan_dev_done: Flag indicating if scan device operation is done
+ * @driver_loading: Flag indicating if driver is loading
+ * @first_scan_dev_fired: Flag indicating if first scan device operation fired
+ * @scan_dev_failed: Flag indicating if scan device operation failed
+ * @scan_start: Flag indicating if scan operation started
+ * @scan_start_failed: Count of failed scan start operations
+ */
+struct leapraid_scan_dev_desc {
+ bool wait_scan_dev_done;
+ bool driver_loading;
+ bool first_scan_dev_fired;
+ bool scan_dev_failed;
+ bool scan_start;
+ u16 scan_start_failed;
+};
+
+/**
+ * struct leapraid_access_ctrl - Access control structure for LeapRaid
+ *
+ * @pci_access_lock: Mutex for PCI access control
+ * @adapter_thermal_alert: Flag indicating if adapter thermal alert is active
+ * @shost_recovering: Flag indicating if host is recovering
+ * @host_removing: Flag indicating if host is being removed
+ * @pcie_recovering: Flag indicating if PCIe is recovering
+ */
+struct leapraid_access_ctrl {
+ struct mutex pci_access_lock;
+ bool adapter_thermal_alert;
+ bool shost_recovering;
+ bool host_removing;
+ bool pcie_recovering;
+};
+
+/**
+ * struct leapraid_fw_log_desc - Firmware log descriptor for LeapRaid
+ *
+ * @fw_log_buffer: Buffer for firmware log data
+ * @fw_log_buffer_dma: DMA address of the firmware log buffer
+ * @fw_log_wq_name: Name of the firmware log workqueue
+ * @fw_log_wq: Workqueue for firmware log operations
+ * @fw_log_work: Delayed work structure for firmware log
+ * @open_pcie_trace: Flag indicating if PCIe tracing is open
+ * @fw_log_init_flag: Flag indicating if firmware log is initialized
+ */
+struct leapraid_fw_log_desc {
+ u8 *fw_log_buffer;
+ dma_addr_t fw_log_buffer_dma;
+ char fw_log_wq_name[48];
+ struct workqueue_struct *fw_log_wq;
+ struct delayed_work fw_log_work;
+ int open_pcie_trace;
+ int fw_log_init_flag;
+};
+
+#define LEAPRAID_CARD_PORT_FLG_DIRTY 0x01
+#define LEAPRAID_CARD_PORT_FLG_NEW 0x02
+#define LEAPRAID_DISABLE_MP_PORT_ID 0xFF
+/**
+ * struct leapraid_card_port - Card port structure for LeapRaid
+ *
+ * @list: List head for card port
+ * @vphys_list: List head for virtual phy list
+ * @port_id: Port ID
+ * @sas_address: SAS address
+ * @phy_mask: Mask of phy
+ * @vphys_mask: Mask of virtual phy
+ * @flg: Flags for the port
+ */
+struct leapraid_card_port {
+ struct list_head list;
+ struct list_head vphys_list;
+ u8 port_id;
+ u64 sas_address;
+ u32 phy_mask;
+ u32 vphys_mask;
+ u8 flg;
+};
+
+/**
+ * struct leapraid_card_phy - Card phy structure for LeapRaid
+ *
+ * @port_siblings: List head for port siblings
+ * @card_port: Pointer to the card port
+ * @identify: SAS identify structure
+ * @remote_identify: Remote SAS identify structure
+ * @phy: SAS phy structure
+ * @phy_id: Phy ID
+ * @hdl: Handle for the port
+ * @attached_hdl: Handle for the attached port
+ * @phy_is_assigned: Flag indicating if phy is assigned
+ * @vphy: Flag indicating if virtual phy
+ */
+struct leapraid_card_phy {
+ struct list_head port_siblings;
+ struct leapraid_card_port *card_port;
+ struct sas_identify identify;
+ struct sas_identify remote_identify;
+ struct sas_phy *phy;
+ u8 phy_id;
+ u16 hdl;
+ u16 attached_hdl;
+ bool phy_is_assigned;
+ bool vphy;
+};
+
+/**
+ * struct leapraid_topo_node - SAS topology node for LeapRaid
+ *
+ * @list: List head for linking nodes
+ * @sas_port_list: List of SAS ports
+ * @card_port: Associated card port
+ * @card_phy: Associated card PHY
+ * @rphy: SAS remote PHY device
+ * @parent_dev: Parent device pointer
+ * @sas_address: SAS address of this node
+ * @sas_address_parent: Parent node's SAS address
+ * @phys_num: Number of physical links
+ * @hdl: Handle identifier
+ * @enc_hdl: Enclosure handle
+ * @enc_lid: Enclosure logical identifier
+ * @resp: Response status flag
+ */
+struct leapraid_topo_node {
+ struct list_head list;
+ struct list_head sas_port_list;
+ struct leapraid_card_port *card_port;
+ struct leapraid_card_phy *card_phy;
+ struct sas_rphy *rphy;
+ struct device *parent_dev;
+ u64 sas_address;
+ u64 sas_address_parent;
+ u8 phys_num;
+ u16 hdl;
+ u16 enc_hdl;
+ u64 enc_lid;
+ bool resp;
+};
+
+/**
+ * struct leapraid_dev_topo - LeapRaid device topology management structure
+ *
+ * @topo_node_lock: Spinlock for protecting topology node operations
+ * @sas_dev_lock: Spinlock for SAS device list access
+ * @raid_volume_lock: Spinlock for RAID volume list access
+ * @sas_id: SAS domain identifier
+ * @card: Main card topology node
+ * @exp_list: List of expander devices
+ * @enc_list: List of enclosure devices
+ * @sas_dev_list: List of SAS devices
+ * @sas_dev_init_list: List of SAS devices being initialized
+ * @raid_volume_list: List of RAID volumes
+ * @card_port_list: List of card ports
+ * @pd_hdls_sz: Size in bytes of the physical disk handle bitmap
+ * @pd_hdls: Bitmap of physical disk handles
+ * @blocking_hdls: Bitmap of blocking handles
+ * @pending_dev_add_sz: Size in bytes of the pending-add bitmap
+ * @pending_dev_add: Bitmap tracking devices pending addition
+ * @dev_removing_sz: Size in bytes of the removal bitmap
+ * @dev_removing: Bitmap tracking devices being removed
+ */
+struct leapraid_dev_topo {
+ spinlock_t topo_node_lock;
+ spinlock_t sas_dev_lock;
+ spinlock_t raid_volume_lock;
+ int sas_id;
+ struct leapraid_topo_node card;
+ struct list_head exp_list;
+ struct list_head enc_list;
+ struct list_head sas_dev_list;
+ struct list_head sas_dev_init_list;
+ struct list_head raid_volume_list;
+ struct list_head card_port_list;
+ u16 pd_hdls_sz;
+ void *pd_hdls;
+ void *blocking_hdls;
+ u16 pending_dev_add_sz;
+ void *pending_dev_add;
+ u16 dev_removing_sz;
+ void *dev_removing;
+};
+
+/**
+ * struct leapraid_boot_dev - Boot device structure for LeapRaid
+ *
+ * @dev: Device pointer
+ * @chnl: Channel number
+ * @form: Form factor
+ * @pg_dev: Config page device content
+ */
+struct leapraid_boot_dev {
+ void *dev;
+ u8 chnl;
+ u8 form;
+	u8 pg_dev[LEAPRAID_BOOT_DEV_SIZE];
+};
+
+/**
+ * struct leapraid_boot_devs - Boot device management structure
+ * @requested_boot_dev: Requested primary boot device
+ * @requested_alt_boot_dev: Requested alternate boot device
+ * @current_boot_dev: Currently active boot device
+ */
+struct leapraid_boot_devs {
+ struct leapraid_boot_dev requested_boot_dev;
+ struct leapraid_boot_dev requested_alt_boot_dev;
+ struct leapraid_boot_dev current_boot_dev;
+};
+
+/**
+ * struct leapraid_smart_poll_desc - SMART polling descriptor
+ * @smart_poll_wq: Workqueue for SMART polling tasks
+ * @smart_poll_work: Delayed work for SMART polling operations
+ * @smart_poll_wq_name: Workqueue name string
+ */
+struct leapraid_smart_poll_desc {
+ struct workqueue_struct *smart_poll_wq;
+ struct delayed_work smart_poll_work;
+ char smart_poll_wq_name[48];
+};
+
+/**
+ * struct leapraid_adapter - Main LeapRaid adapter structure
+ * @list: List head for adapter management
+ * @shost: SCSI host structure
+ * @pdev: PCI device structure
+ * @iomem_base: I/O memory mapped base address
+ * @rep_msg_host_idx: Host index for reply messages
+ * @mask_int: Interrupt masking flag
+ * @timestamp_sync_cnt: Timestamp synchronization counter
+ * @adapter_attr: Adapter attributes
+ * @mem_desc: Memory descriptor
+ * @driver_cmds: Driver commands
+ * @dynamic_task_desc: Dynamic task descriptor
+ * @fw_evt_s: Firmware event structure
+ * @notification_desc: Notification descriptor
+ * @reset_desc: Reset descriptor
+ * @scan_dev_desc: Device scan descriptor
+ * @access_ctrl: Access control
+ * @fw_log_desc: Firmware log descriptor
+ * @dev_topo: Device topology
+ * @boot_devs: Boot devices
+ * @smart_poll_desc: SMART polling descriptor
+ */
+struct leapraid_adapter {
+ struct list_head list;
+ struct Scsi_Host *shost;
+ struct pci_dev *pdev;
+ struct leapraid_reg_base __iomem *iomem_base;
+ u32 rep_msg_host_idx;
+ bool mask_int;
+ u32 timestamp_sync_cnt;
+
+ struct leapraid_adapter_attr adapter_attr;
+ struct leapraid_mem_desc mem_desc;
+ struct leapraid_driver_cmds driver_cmds;
+ struct leapraid_dynamic_task_desc dynamic_task_desc;
+ struct leapraid_fw_evt_struct fw_evt_s;
+ struct leapraid_notification_desc notification_desc;
+ struct leapraid_reset_desc reset_desc;
+ struct leapraid_scan_dev_desc scan_dev_desc;
+ struct leapraid_access_ctrl access_ctrl;
+ struct leapraid_fw_log_desc fw_log_desc;
+ struct leapraid_dev_topo dev_topo;
+ struct leapraid_boot_devs boot_devs;
+ struct leapraid_smart_poll_desc smart_poll_desc;
+};
+
+union cfg_param_1 {
+ u32 form;
+ u32 size;
+ u32 phy_number;
+};
+
+union cfg_param_2 {
+ u32 handle;
+ u32 form_specific;
+};
+
+enum config_page_action {
+ GET_BIOS_PG2,
+ GET_BIOS_PG3,
+ GET_SAS_DEVICE_PG0,
+ GET_SAS_IOUNIT_PG0,
+ GET_SAS_IOUNIT_PG1,
+ GET_SAS_EXPANDER_PG0,
+ GET_SAS_EXPANDER_PG1,
+ GET_SAS_ENCLOSURE_PG0,
+ GET_PHY_PG0,
+ GET_RAID_VOLUME_PG0,
+ GET_RAID_VOLUME_PG1,
+ GET_PHY_DISK_PG0,
+};
+
+/**
+ * struct leapraid_enc_node - Enclosure node structure
+ * @list: List head for enclosure management
+ * @pg0: Enclosure page 0 data
+ */
+struct leapraid_enc_node {
+ struct list_head list;
+ struct leapraid_enc_p0 pg0;
+};
+
+/**
+ * struct leapraid_raid_volume - RAID volume structure
+ * @list: List head for volume management
+ * @starget: SCSI target structure
+ * @sdev: SCSI device structure
+ * @id: Volume ID
+ * @channel: SCSI channel
+ * @wwid: World Wide Identifier
+ * @hdl: Volume handle
+ * @vol_type: Volume type
+ * @pd_num: Number of physical disks
+ * @resp: Response status
+ * @dev_info: Device information
+ */
+struct leapraid_raid_volume {
+ struct list_head list;
+ struct scsi_target *starget;
+ struct scsi_device *sdev;
+ unsigned int id;
+ unsigned int channel;
+ u64 wwid;
+ u16 hdl;
+ u8 vol_type;
+ u8 pd_num;
+ u8 resp;
+ u32 dev_info;
+};
+
+#define LEAPRAID_TGT_FLG_RAID_MEMBER 0x01
+#define LEAPRAID_TGT_FLG_VOLUME 0x02
+#define LEAPRAID_NO_ULD_ATTACH 1
+/**
+ * struct leapraid_starget_priv - SCSI target private data
+ * @starget: SCSI target structure
+ * @sas_address: SAS address
+ * @hdl: Device handle
+ * @num_luns: Number of LUNs
+ * @flg: Flags
+ * @deleted: Deletion flag
+ * @tm_busy: Task management busy flag
+ * @card_port: Associated card port
+ * @sas_dev: SAS device structure
+ */
+struct leapraid_starget_priv {
+ struct scsi_target *starget;
+ u64 sas_address;
+ u16 hdl;
+ int num_luns;
+ u32 flg;
+ bool deleted;
+ bool tm_busy;
+ struct leapraid_card_port *card_port;
+ struct leapraid_sas_dev *sas_dev;
+};
+
+#define LEAPRAID_DEVICE_FLG_INIT 0x01
+/**
+ * struct leapraid_sdev_priv - SCSI device private data
+ * @starget_priv: Associated target private data
+ * @lun: Logical Unit Number
+ * @flg: Flags
+ * @ncq: NCQ support flag
+ * @block: Block flag
+ * @deleted: Deletion flag
+ * @sep: SEP flag
+ */
+struct leapraid_sdev_priv {
+ struct leapraid_starget_priv *starget_priv;
+ unsigned int lun;
+ u32 flg;
+ bool ncq;
+ bool block;
+ bool deleted;
+ bool sep;
+};
+
+/**
+ * struct leapraid_sas_dev - SAS device structure
+ * @list: List head for device management
+ * @starget: SCSI target structure
+ * @card_port: Associated card port
+ * @rphy: SAS remote PHY
+ * @refcnt: Reference count
+ * @id: Device ID
+ * @channel: SCSI channel
+ * @slot: Slot number
+ * @phy: PHY identifier
+ * @resp: Response status
+ * @led_on: LED state
+ * @sas_addr: SAS address
+ * @dev_name: Device name
+ * @hdl: Device handle
+ * @parent_sas_addr: Parent SAS address
+ * @enc_hdl: Enclosure handle
+ * @enc_lid: Enclosure logical ID
+ * @volume_hdl: Volume handle
+ * @volume_wwid: Volume WWID
+ * @dev_info: Device information
+ * @pend_sas_rphy_add: Pending SAS rphy addition flag
+ * @enc_level: Enclosure level
+ * @port_type: Port type
+ * @connector_name: Connector name
+ * @support_smart: SMART support flag
+ */
+struct leapraid_sas_dev {
+ struct list_head list;
+ struct scsi_target *starget;
+ struct leapraid_card_port *card_port;
+ struct sas_rphy *rphy;
+ struct kref refcnt;
+ unsigned int id;
+ unsigned int channel;
+ u16 slot;
+ u8 phy;
+ bool resp;
+ bool led_on;
+ u64 sas_addr;
+ u64 dev_name;
+ u16 hdl;
+ u64 parent_sas_addr;
+ u16 enc_hdl;
+ u64 enc_lid;
+ u16 volume_hdl;
+ u64 volume_wwid;
+ u32 dev_info;
+ u8 pend_sas_rphy_add;
+ u8 enc_level;
+ u8 port_type;
+ u8 connector_name[5];
+ bool support_smart;
+};
+
+static inline void leapraid_sdev_free(struct kref *ref)
+{
+ kfree(container_of(ref, struct leapraid_sas_dev, refcnt));
+}
+
+#define leapraid_sdev_get(sdev) kref_get(&(sdev)->refcnt)
+#define leapraid_sdev_put(sdev) kref_put(&(sdev)->refcnt, leapraid_sdev_free)
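+
+/*
+ * Reference-counting convention (inferred from the lookup helpers
+ * declared below): each leapraid_*get_sas_dev* helper returns a device
+ * with an extra reference taken via leapraid_sdev_get(); callers drop
+ * it with leapraid_sdev_put() when they are done.
+ */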
+
+/**
+ * struct leapraid_sas_port - SAS port structure
+ * @port_list: List head for port management
+ * @phy_list: List of PHYs in this port
+ * @port: SAS port structure
+ * @card_port: Associated card port
+ * @remote_identify: Remote device identification
+ * @rphy: SAS remote PHY
+ * @phys_num: Number of PHYs in this port
+ */
+struct leapraid_sas_port {
+ struct list_head port_list;
+ struct list_head phy_list;
+ struct sas_port *port;
+ struct leapraid_card_port *card_port;
+ struct sas_identify remote_identify;
+ struct sas_rphy *rphy;
+ u8 phys_num;
+};
+
+#define LEAPRAID_VPHY_FLG_DIRTY 0x01
+/**
+ * struct leapraid_vphy - Virtual PHY structure
+ * @list: List head for PHY management
+ * @sas_address: SAS address
+ * @phy_mask: PHY mask
+ * @flg: Flags
+ */
+struct leapraid_vphy {
+ struct list_head list;
+ u64 sas_address;
+ u32 phy_mask;
+ u8 flg;
+};
+
+struct leapraid_tgt_rst_list {
+ struct list_head list;
+ u16 handle;
+ u16 state;
+};
+
+struct leapraid_sc_list {
+ struct list_head list;
+ u16 handle;
+};
+
+struct sense_info {
+ u8 sense_key;
+ u8 asc;
+ u8 ascq;
+};
+
+struct leapraid_fw_log_info {
+ u32 user_position;
+ u32 adapter_position;
+};
+
+/**
+ * enum reset_type - Reset type enumeration
+ * @FULL_RESET: Full hardware reset
+ * @PART_RESET: Partial reset
+ */
+enum reset_type {
+ FULL_RESET,
+ PART_RESET,
+};
+
+enum leapraid_card_port_checking_flg {
+ CARD_PORT_FURTHER_CHECKING_NEEDED = 0,
+ CARD_PORT_SKIP_CHECKING,
+};
+
+enum leapraid_port_checking_state {
+ NEW_CARD_PORT = 0,
+ SAME_PORT_WITH_NOTHING_CHANGED,
+ SAME_PORT_WITH_PARTIALLY_CHANGED_PHYS,
+ SAME_ADDR_WITH_PARTIALLY_CHANGED_PHYS,
+ SAME_ADDR_ONLY,
+};
+
+/**
+ * struct leapraid_card_port_feature - Card port feature
+ * @dirty_flg: Dirty flag indicator
+ * @same_addr: Same address flag
+ * @exact_phy: Exact PHY match flag
+ * @phy_overlap: PHY overlap bitmap
+ * @same_port: Same port flag
+ * @cur_chking_old_port: Current checking old port
+ * @expected_old_port: Expected old port
+ * @same_addr_port_count: Same address port count
+ * @checking_state: Port checking state
+ */
+struct leapraid_card_port_feature {
+ u8 dirty_flg;
+ bool same_addr;
+ bool exact_phy;
+ u32 phy_overlap;
+ bool same_port;
+ struct leapraid_card_port *cur_chking_old_port;
+ struct leapraid_card_port *expected_old_port;
+ int same_addr_port_count;
+ enum leapraid_port_checking_state checking_state;
+};
+
+#define SMP_REPORT_MANUFACTURER_INFORMATION_FRAME_TYPE 0x40
+#define SMP_REPORT_MANUFACTURER_INFORMATION_FUNC 0x01
+
+/* ref: SAS-2 (INCITS 457-2010) 10.4.3.5 */
+struct leapraid_rep_manu_request {
+ u8 smp_frame_type;
+ u8 function;
+ u8 allocated_response_length;
+ u8 request_length;
+};
+
+/* ref: SAS-2 (INCITS 457-2010) 10.4.3.5 */
+struct leapraid_rep_manu_reply {
+ u8 smp_frame_type;
+ u8 function;
+ u8 function_result;
+ u8 response_length;
+ u16 expander_change_count;
+ u8 r1[2];
+ u8 sas_format;
+ u8 r2[3];
+ u8 vendor_identification[SAS_EXPANDER_VENDOR_ID_LEN];
+ u8 product_identification[SAS_EXPANDER_PRODUCT_ID_LEN];
+ u8 product_revision_level[SAS_EXPANDER_PRODUCT_REV_LEN];
+ u8 component_vendor_identification[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u16 component_id;
+ u8 component_revision_level;
+ u8 r3;
+ u8 vendor_specific[8];
+};
+
+/**
+ * struct leapraid_scsi_cmd_desc - SCSI command descriptor
+ * @hdl: Device handle
+ * @lun: Logical Unit Number
+ * @raid_member: RAID member flag
+ * @dir: DMA data direction
+ * @data_length: Data transfer length
+ * @data_buffer: Data buffer pointer
+ * @cdb_length: CDB length
+ * @cdb: Command Descriptor Block
+ * @time_out: Timeout
+ */
+struct leapraid_scsi_cmd_desc {
+ u16 hdl;
+ u32 lun;
+ bool raid_member;
+ enum dma_data_direction dir;
+ u32 data_length;
+ void *data_buffer;
+ u8 cdb_length;
+ u8 cdb[32];
+ u8 time_out;
+};
+
+extern struct list_head leapraid_adapter_list;
+extern spinlock_t leapraid_adapter_lock;
+extern char driver_name[LEAPRAID_NAME_LENGTH];
+
+int leapraid_ctrl_init(struct leapraid_adapter *adapter);
+void leapraid_remove_ctrl(struct leapraid_adapter *adapter);
+void leapraid_check_scheduled_fault_start(struct leapraid_adapter *adapter);
+void leapraid_check_scheduled_fault_stop(struct leapraid_adapter *adapter);
+void leapraid_fw_log_start(struct leapraid_adapter *adapter);
+void leapraid_fw_log_stop(struct leapraid_adapter *adapter);
+int leapraid_set_pcie_and_notification(struct leapraid_adapter *adapter);
+void leapraid_disable_controller(struct leapraid_adapter *adapter);
+int leapraid_hard_reset_handler(struct leapraid_adapter *adapter,
+ enum reset_type type);
+void leapraid_mask_int(struct leapraid_adapter *adapter);
+void leapraid_unmask_int(struct leapraid_adapter *adapter);
+u32 leapraid_get_adapter_state(struct leapraid_adapter *adapter);
+bool leapraid_pci_removed(struct leapraid_adapter *adapter);
+int leapraid_check_adapter_is_op(struct leapraid_adapter *adapter);
+void *leapraid_get_task_desc(struct leapraid_adapter *adapter, u16 taskid);
+void *leapraid_get_sense_buffer(struct leapraid_adapter *adapter, u16 taskid);
+__le32 leapraid_get_sense_buffer_dma(struct leapraid_adapter *adapter,
+ u16 taskid);
+void *leapraid_get_reply_vaddr(struct leapraid_adapter *adapter,
+ u32 phys_addr);
+u16 leapraid_alloc_scsiio_taskid(struct leapraid_adapter *adapter,
+ struct scsi_cmnd *scmd);
+void leapraid_free_taskid(struct leapraid_adapter *adapter, u16 taskid);
+struct leapraid_io_req_tracker *leapraid_get_io_tracker_from_taskid(
+ struct leapraid_adapter *adapter, u16 taskid);
+struct leapraid_io_req_tracker *leapraid_get_scmd_priv(struct scsi_cmnd *scmd);
+struct scsi_cmnd *leapraid_get_scmd_from_taskid(
+ struct leapraid_adapter *adapter, u16 taskid);
+int leapraid_scan_dev(struct leapraid_adapter *adapter, bool async_scan_dev);
+void leapraid_scan_dev_done(struct leapraid_adapter *adapter);
+void leapraid_wait_cmds_done(struct leapraid_adapter *adapter);
+void leapraid_clean_active_scsi_cmds(struct leapraid_adapter *adapter);
+void leapraid_sync_irqs(struct leapraid_adapter *adapter, bool poll);
+int leapraid_rep_queue_handler(struct leapraid_rq *rq);
+void leapraid_mq_polling_pause(struct leapraid_adapter *adapter);
+void leapraid_mq_polling_resume(struct leapraid_adapter *adapter);
+void leapraid_set_tm_flg(struct leapraid_adapter *adapter, u16 handle);
+void leapraid_clear_tm_flg(struct leapraid_adapter *adapter, u16 handle);
+void leapraid_async_turn_on_led(struct leapraid_adapter *adapter, u16 handle);
+int leapraid_issue_locked_tm(struct leapraid_adapter *adapter, u16 handle,
+ uint channel, uint id, uint lun, u8 type,
+ u16 taskid_task, u8 tr_method);
+int leapraid_issue_tm(struct leapraid_adapter *adapter, u16 handle,
+ uint channel, uint id, uint lun, u8 type,
+ u16 taskid_task, u8 tr_method);
+u8 leapraid_scsiio_done(struct leapraid_adapter *adapter, u16 taskid,
+ u8 msix_index, u32 rep);
+int leapraid_get_volume_cap(struct leapraid_adapter *adapter,
+ struct leapraid_raid_volume *raid_volume);
+int leapraid_internal_init_cmd_priv(struct leapraid_adapter *adapter,
+ struct leapraid_io_req_tracker *io_tracker);
+int leapraid_internal_exit_cmd_priv(struct leapraid_adapter *adapter,
+ struct leapraid_io_req_tracker *io_tracker);
+void leapraid_clean_active_fw_evt(struct leapraid_adapter *adapter);
+bool leapraid_scmd_find_by_lun(struct leapraid_adapter *adapter,
+ uint id, unsigned int lun, uint channel);
+bool leapraid_scmd_find_by_tgt(struct leapraid_adapter *adapter,
+ uint id, uint channel);
+struct leapraid_vphy *leapraid_get_vphy_by_phy(struct leapraid_card_port *port,
+ u32 phy);
+struct leapraid_raid_volume *leapraid_raid_volume_find_by_id(
+ struct leapraid_adapter *adapter, uint id, uint channel);
+struct leapraid_raid_volume *leapraid_raid_volume_find_by_hdl(
+ struct leapraid_adapter *adapter, u16 handle);
+struct leapraid_topo_node *leapraid_exp_find_by_sas_address(
+ struct leapraid_adapter *adapter, u64 sas_address,
+ struct leapraid_card_port *port);
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_by_addr_and_rphy(
+ struct leapraid_adapter *adapter,
+ u64 sas_address, struct sas_rphy *rphy);
+struct leapraid_sas_dev *leapraid_get_sas_dev_by_addr(
+ struct leapraid_adapter *adapter, u64 sas_address,
+ struct leapraid_card_port *port);
+struct leapraid_sas_dev *leapraid_get_sas_dev_by_hdl(
+ struct leapraid_adapter *adapter, u16 handle);
+struct leapraid_sas_dev *leapraid_get_sas_dev_from_tgt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_starget_priv *tgt_priv);
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_from_tgt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_starget_priv *tgt_priv);
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_by_hdl(
+ struct leapraid_adapter *adapter, u16 handle);
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_by_addr(
+ struct leapraid_adapter *adapter, u64 sas_address,
+ struct leapraid_card_port *port);
+struct leapraid_sas_dev *leapraid_get_next_sas_dev_from_init_list(
+ struct leapraid_adapter *adapter);
+void leapraid_sas_dev_remove_by_sas_address(
+ struct leapraid_adapter *adapter,
+ u64 sas_address, struct leapraid_card_port *port);
+void leapraid_sas_dev_remove(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev);
+void leapraid_raid_volume_remove(struct leapraid_adapter *adapter,
+ struct leapraid_raid_volume *raid_volume);
+void leapraid_exp_rm(struct leapraid_adapter *adapter,
+ u64 sas_address, struct leapraid_card_port *port);
+void leapraid_build_mpi_sg(struct leapraid_adapter *adapter,
+ void *sge, dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size);
+void leapraid_build_ieee_nodata_sg(struct leapraid_adapter *adapter,
+ void *sge);
+void leapraid_build_ieee_sg(struct leapraid_adapter *adapter,
+ void *psge, dma_addr_t h2c_dma_addr,
+ size_t h2c_size, dma_addr_t c2h_dma_addr,
+ size_t c2h_size);
+int leapraid_build_scmd_ieee_sg(struct leapraid_adapter *adapter,
+ struct scsi_cmnd *scmd, u16 taskid);
+void leapraid_fire_scsi_io(struct leapraid_adapter *adapter,
+ u16 taskid, u16 handle);
+void leapraid_fire_hpr_task(struct leapraid_adapter *adapter, u16 taskid,
+ u16 msix_task);
+void leapraid_fire_task(struct leapraid_adapter *adapter, u16 taskid);
+int leapraid_cfg_get_volume_hdl(struct leapraid_adapter *adapter,
+ u16 pd_handle, u16 *volume_handle);
+int leapraid_cfg_get_volume_wwid(struct leapraid_adapter *adapter,
+ u16 volume_handle, u64 *wwid);
+int leapraid_op_config_page(struct leapraid_adapter *adapter,
+ void *cfgp, union cfg_param_1 cfgp1,
+ union cfg_param_2 cfgp2,
+ enum config_page_action cfg_op);
+void leapraid_adjust_sdev_queue_depth(struct scsi_device *sdev, int qdepth);
+
+int leapraid_ctl_release(struct inode *inode, struct file *filep);
+void leapraid_ctl_init(void);
+void leapraid_ctl_exit(void);
+
+extern struct sas_function_template leapraid_transport_functions;
+extern struct scsi_transport_template *leapraid_transport_template;
+struct leapraid_sas_port *leapraid_transport_port_add(
+ struct leapraid_adapter *adapter, u16 handle, u64 sas_address,
+ struct leapraid_card_port *card_port);
+void leapraid_transport_port_remove(struct leapraid_adapter *adapter,
+ u64 sas_address, u64 sas_address_parent,
+ struct leapraid_card_port *card_port);
+void leapraid_transport_add_card_phy(struct leapraid_adapter *adapter,
+ struct leapraid_card_phy *card_phy,
+ struct leapraid_sas_phy_p0 *phy_pg0,
+ struct device *parent_dev);
+int leapraid_transport_add_exp_phy(struct leapraid_adapter *adapter,
+ struct leapraid_card_phy *card_phy,
+ struct leapraid_exp_p1 *exp_pg1,
+ struct device *parent_dev);
+void leapraid_transport_update_links(struct leapraid_adapter *adapter,
+ u64 sas_address, u16 handle,
+ u8 phy_number, u8 link_rate,
+ struct leapraid_card_port *card_port);
+void leapraid_transport_detach_phy_to_port(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node,
+ struct leapraid_card_phy *card_phy);
+void leapraid_transport_attach_phy_to_port(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *sas_node,
+ struct leapraid_card_phy *card_phy,
+ u64 sas_address,
+ struct leapraid_card_port *card_port);
+int leapraid_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmd);
+void leapraid_smart_polling_start(struct leapraid_adapter *adapter);
+void leapraid_smart_polling_stop(struct leapraid_adapter *adapter);
+void leapraid_smart_fault_detect(struct leapraid_adapter *adapter, u16 hdl);
+void leapraid_free_internal_scsi_cmd(struct leapraid_adapter *adapter);
+
+#endif /* LEAPRAID_FUNC_H_INCLUDED */
diff --git a/drivers/scsi/leapraid/leapraid_os.c b/drivers/scsi/leapraid/leapraid_os.c
new file mode 100644
index 000000000000..44ec2615648f
--- /dev/null
+++ b/drivers/scsi/leapraid/leapraid_os.c
@@ -0,0 +1,2271 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 LeapIO Tech Inc.
+ *
+ * LeapRAID Storage and RAID Controller driver.
+ */
+
+#include <linux/module.h>
+
+#include "leapraid_func.h"
+#include "leapraid.h"
+
+LIST_HEAD(leapraid_adapter_list);
+DEFINE_SPINLOCK(leapraid_adapter_lock);
+
+MODULE_AUTHOR(LEAPRAID_AUTHOR);
+MODULE_DESCRIPTION(LEAPRAID_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(LEAPRAID_DRIVER_VERSION);
+
+static int leapraid_ids;
+
+static int open_pcie_trace = 1;
+module_param(open_pcie_trace, int, 0644);
+MODULE_PARM_DESC(open_pcie_trace,
+ "enable PCIe trace: 1=enabled (default), 0=disabled");
+
+static int enable_mp = 1;
+module_param(enable_mp, int, 0444);
+MODULE_PARM_DESC(enable_mp,
+ "enable multipath on target devices: 1=enabled (default), 0=disabled");
+
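+/*
+ * Fixed-format sense data (response codes 0x70/0x71) carries the sense
+ * key at byte 2 and ASC/ASCQ at bytes 12/13; descriptor format
+ * (0x72/0x73) carries them at bytes 1-3. DESC_FORMAT_THRESHOLD is
+ * assumed to be the first descriptor-format response code (0x72).
+ */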
+static inline void leapraid_get_sense_data(char *sense,
+ struct sense_info *data)
+{
+ bool desc_format = (sense[0] & SCSI_SENSE_RESPONSE_CODE_MASK) >=
+ DESC_FORMAT_THRESHOLD;
+
+ if (desc_format) {
+ data->sense_key = sense[1] & SENSE_KEY_MASK;
+ data->asc = sense[2];
+ data->ascq = sense[3];
+ } else {
+ data->sense_key = sense[2] & SENSE_KEY_MASK;
+ data->asc = sense[12];
+ data->ascq = sense[13];
+ }
+}
+
+static struct Scsi_Host *pdev_to_shost(struct pci_dev *pdev)
+{
+ return pci_get_drvdata(pdev);
+}
+
+static struct leapraid_adapter *pdev_to_adapter(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+
+ if (!shost)
+ return NULL;
+
+ return shost_priv(shost);
+}
+
+struct leapraid_io_req_tracker *leapraid_get_scmd_priv(struct scsi_cmnd *scmd)
+{
+ return (struct leapraid_io_req_tracker *)scmd->host_scribble;
+}
+
+void leapraid_set_tm_flg(struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct scsi_device *sdev;
+ bool skip = false;
+
+ /*
+ * Don't break out of the loop: shost_for_each_device() holds a
+ * reference on the current device that is only released by running
+ * the iteration to completion.
+ */
+ shost_for_each_device(sdev, adapter->shost) {
+ if (skip)
+ continue;
+
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv)
+ continue;
+
+ if (sdev_priv->starget_priv->hdl == hdl) {
+ sdev_priv->starget_priv->tm_busy = true;
+ skip = true;
+ }
+ }
+}
+
+void leapraid_clear_tm_flg(struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct scsi_device *sdev;
+ bool skip = false;
+
+ /*
+ * Don't break out of the loop: shost_for_each_device() holds a
+ * reference on the current device that is only released by running
+ * the iteration to completion.
+ */
+ shost_for_each_device(sdev, adapter->shost) {
+ if (skip)
+ continue;
+
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv)
+ continue;
+
+ if (sdev_priv->starget_priv->hdl == hdl) {
+ sdev_priv->starget_priv->tm_busy = false;
+ skip = true;
+ }
+ }
+}
+
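+/*
+ * Translate post-TM command state into a SCSI EH verdict: SUCCESS only
+ * if no matching command is still outstanding and any internal SCSI IO
+ * or ctl command has completed (intent inferred from the checks below).
+ */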
+static int leapraid_tm_cmd_map_status(struct leapraid_adapter *adapter,
+ uint channel,
+ uint id,
+ uint lun,
+ u8 type,
+ u16 taskid_task)
+{
+ int rc = FAILED;
+
+ if (taskid_task <= adapter->shost->can_queue) {
+ switch (type) {
+ case LEAPRAID_TM_TASKTYPE_ABRT_TASK_SET:
+ case LEAPRAID_TM_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (!leapraid_scmd_find_by_lun(adapter, id, lun,
+ channel))
+ rc = SUCCESS;
+ break;
+ case LEAPRAID_TM_TASKTYPE_TARGET_RESET:
+ if (!leapraid_scmd_find_by_tgt(adapter, id, channel))
+ rc = SUCCESS;
+ break;
+ default:
+ rc = SUCCESS;
+ }
+ }
+
+ if (taskid_task == adapter->driver_cmds.driver_scsiio_cmd.taskid) {
+ if ((adapter->driver_cmds.driver_scsiio_cmd.status &
+ LEAPRAID_CMD_DONE) ||
+ (adapter->driver_cmds.driver_scsiio_cmd.status &
+ LEAPRAID_CMD_NOT_USED))
+ rc = SUCCESS;
+ }
+
+ if (taskid_task == adapter->driver_cmds.ctl_cmd.hp_taskid) {
+ if ((adapter->driver_cmds.ctl_cmd.status &
+ LEAPRAID_CMD_DONE) ||
+ (adapter->driver_cmds.ctl_cmd.status &
+ LEAPRAID_CMD_NOT_USED))
+ rc = SUCCESS;
+ }
+
+ return rc;
+}
+
+static int leapraid_tm_post_processing(struct leapraid_adapter *adapter,
+ u16 hdl, uint channel, uint id,
+ uint lun, u8 type, u16 taskid_task)
+{
+ int rc;
+
+ rc = leapraid_tm_cmd_map_status(adapter, channel, id, lun,
+ type, taskid_task);
+ if (rc == SUCCESS)
+ return rc;
+
+ leapraid_mask_int(adapter);
+ leapraid_sync_irqs(adapter, true);
+ leapraid_unmask_int(adapter);
+
+ rc = leapraid_tm_cmd_map_status(adapter, channel, id, lun, type,
+ taskid_task);
+ return rc;
+}
+
+static void leapraid_build_tm_req(struct leapraid_scsi_tm_req *scsi_tm_req,
+ u16 hdl, uint lun, u8 type, u8 tr_method,
+ u16 target_taskid)
+{
+ memset(scsi_tm_req, 0, sizeof(*scsi_tm_req));
+ scsi_tm_req->func = LEAPRAID_FUNC_SCSI_TMF;
+ scsi_tm_req->dev_hdl = cpu_to_le16(hdl);
+ scsi_tm_req->task_type = type;
+ scsi_tm_req->msg_flg = tr_method;
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK ||
+ type == LEAPRAID_TM_TASKTYPE_QUERY_TASK)
+ scsi_tm_req->task_mid = cpu_to_le16(target_taskid);
+ int_to_scsilun(lun, (struct scsi_lun *)scsi_tm_req->lun);
+}
+
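+/*
+ * Issue a task management request to the adapter. Callers must hold
+ * adapter->driver_cmds.tm_cmd.mutex (enforced by lockdep_assert_held()
+ * below); leapraid_issue_locked_tm() is the self-locking wrapper.
+ */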
+int leapraid_issue_tm(struct leapraid_adapter *adapter, u16 hdl, uint channel,
+ uint id, uint lun, u8 type,
+ u16 target_taskid, u8 tr_method)
+{
+ struct leapraid_scsi_tm_req *scsi_tm_req;
+ struct leapraid_scsiio_req *scsiio_req;
+ struct leapraid_io_req_tracker *io_req_tracker = NULL;
+ u16 msix_task = 0;
+ bool issue_reset = false;
+ u32 db;
+ int rc;
+
+ lockdep_assert_held(&adapter->driver_cmds.tm_cmd.mutex);
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering) {
+ dev_info(&adapter->pdev->dev,
+ "%s %s: host is recovering, skip tm command!\n",
+ __func__, adapter->adapter_attr.name);
+ return FAILED;
+ }
+
+ db = leapraid_readl(&adapter->iomem_base->db);
+ if (db & LEAPRAID_DB_USED) {
+ dev_info(&adapter->pdev->dev,
+ "%s unexpected db status, issuing hard reset!\n",
+ adapter->adapter_attr.name);
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ rc = leapraid_hard_reset_handler(adapter, FULL_RESET);
+ return (!rc) ? SUCCESS : FAILED;
+ }
+
+ if ((db & LEAPRAID_DB_MASK) == LEAPRAID_DB_FAULT) {
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ rc = leapraid_hard_reset_handler(adapter, FULL_RESET);
+ return (!rc) ? SUCCESS : FAILED;
+ }
+
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK)
+ io_req_tracker = leapraid_get_io_tracker_from_taskid(adapter,
+ target_taskid);
+
+ adapter->driver_cmds.tm_cmd.status = LEAPRAID_CMD_PENDING;
+ scsi_tm_req =
+ leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.tm_cmd.hp_taskid);
+ leapraid_build_tm_req(scsi_tm_req, hdl, lun, type, tr_method,
+ target_taskid);
+ memset((void *)(&adapter->driver_cmds.tm_cmd.reply), 0,
+ sizeof(struct leapraid_scsi_tm_rep));
+ leapraid_set_tm_flg(adapter, hdl);
+ init_completion(&adapter->driver_cmds.tm_cmd.done);
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK &&
+ io_req_tracker &&
+ io_req_tracker->msix_io < adapter->adapter_attr.rq_cnt)
+ msix_task = io_req_tracker->msix_io;
+ else
+ msix_task = 0;
+ leapraid_fire_hpr_task(adapter,
+ adapter->driver_cmds.tm_cmd.hp_taskid,
+ msix_task);
+ wait_for_completion_timeout(&adapter->driver_cmds.tm_cmd.done,
+ LEAPRAID_TM_CMD_TIMEOUT * HZ);
+ if (!(adapter->driver_cmds.tm_cmd.status & LEAPRAID_CMD_DONE)) {
+ issue_reset =
+ leapraid_check_reset(
+ adapter->driver_cmds.tm_cmd.status);
+ if (issue_reset) {
+ dev_info(&adapter->pdev->dev,
+ "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ rc = leapraid_hard_reset_handler(adapter, FULL_RESET);
+ rc = (!rc) ? SUCCESS : FAILED;
+ goto out;
+ }
+ }
+
+ leapraid_sync_irqs(adapter, false);
+
+ switch (type) {
+ case LEAPRAID_TM_TASKTYPE_TARGET_RESET:
+ case LEAPRAID_TM_TASKTYPE_ABRT_TASK_SET:
+ case LEAPRAID_TM_TASKTYPE_LOGICAL_UNIT_RESET:
+ rc = leapraid_tm_post_processing(adapter, hdl, channel, id, lun,
+ type, target_taskid);
+ break;
+ case LEAPRAID_TM_TASKTYPE_ABORT_TASK:
+ rc = SUCCESS;
+ scsiio_req = leapraid_get_task_desc(adapter, target_taskid);
+ if (le16_to_cpu(scsiio_req->dev_hdl) != hdl)
+ break;
+ dev_err(&adapter->pdev->dev, "%s abort failed, hdl=0x%04x\n",
+ adapter->adapter_attr.name, hdl);
+ rc = FAILED;
+ break;
+ case LEAPRAID_TM_TASKTYPE_QUERY_TASK:
+ rc = SUCCESS;
+ break;
+ default:
+ rc = FAILED;
+ break;
+ }
+
+out:
+ leapraid_clear_tm_flg(adapter, hdl);
+ adapter->driver_cmds.tm_cmd.status = LEAPRAID_CMD_NOT_USED;
+ return rc;
+}
+
+int leapraid_issue_locked_tm(struct leapraid_adapter *adapter, u16 hdl,
+ uint channel, uint id, uint lun, u8 type,
+ u16 target_taskid, u8 tr_method)
+{
+ int rc;
+
+ mutex_lock(&adapter->driver_cmds.tm_cmd.mutex);
+ rc = leapraid_issue_tm(adapter, hdl, channel, id, lun, type,
+ target_taskid, tr_method);
+ mutex_unlock(&adapter->driver_cmds.tm_cmd.mutex);
+
+ return rc;
+}
+
+void leapraid_smart_fault_detect(struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_sas_dev *sas_dev;
+ struct scsi_target *starget;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ if (!sas_dev) {
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ goto out;
+ }
+
+ starget = sas_dev->starget;
+ starget_priv = starget->hostdata;
+ if ((starget_priv->flg & LEAPRAID_TGT_FLG_RAID_MEMBER) ||
+ (starget_priv->flg & LEAPRAID_TGT_FLG_VOLUME)) {
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ goto out;
+ }
+
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ leapraid_async_turn_on_led(adapter, hdl);
+out:
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+}
+
+static void leapraid_process_sense_data(struct leapraid_adapter *adapter,
+ struct leapraid_scsiio_rep *scsiio_rep,
+ struct scsi_cmnd *scmd, u16 taskid)
+{
+ struct sense_info data;
+ const void *sense_data;
+ u32 sz;
+
+ if (!(scsiio_rep->scsi_state & LEAPRAID_SCSI_STATE_AUTOSENSE_VALID))
+ return;
+
+ sense_data = leapraid_get_sense_buffer(adapter, taskid);
+ sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(scsiio_rep->sense_count));
+
+ memcpy(scmd->sense_buffer, sense_data, sz);
+ leapraid_get_sense_data(scmd->sense_buffer, &data);
+ if (data.asc == ASC_FAILURE_PREDICTION_THRESHOLD_EXCEEDED)
+ leapraid_smart_fault_detect(adapter,
+ le16_to_cpu(scsiio_rep->dev_hdl));
+}
+
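+/*
+ * Data-underrun policy, as implemented below: a transfer shorter than
+ * scmd->underflow becomes a soft error (BUSY is passed through), and a
+ * zero-byte REPORT LUNS is rewritten into an ILLEGAL REQUEST check
+ * condition, presumably so the midlayer falls back to sequential LUN
+ * scanning.
+ */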
+static void leapraid_handle_data_underrun(
+ struct leapraid_scsiio_rep *scsiio_rep,
+ struct scsi_cmnd *scmd, u32 xfer_cnt)
+{
+ u8 scsi_status = scsiio_rep->scsi_status;
+ u8 scsi_state = scsiio_rep->scsi_state;
+
+ scmd->result = (DID_OK << LEAPRAID_SCSI_HOST_SHIFT) | scsi_status;
+
+ if (scsi_state & LEAPRAID_SCSI_STATE_AUTOSENSE_VALID)
+ return;
+
+ if (xfer_cnt < scmd->underflow) {
+ if (scsi_status == SAM_STAT_BUSY)
+ scmd->result = SAM_STAT_BUSY;
+ else
+ scmd->result = DID_SOFT_ERROR <<
+ LEAPRAID_SCSI_HOST_SHIFT;
+ } else if (scsi_state & (LEAPRAID_SCSI_STATE_AUTOSENSE_FAILED |
+ LEAPRAID_SCSI_STATE_NO_SCSI_STATUS)) {
+ scmd->result = DID_SOFT_ERROR << LEAPRAID_SCSI_HOST_SHIFT;
+ } else if (scsi_state & LEAPRAID_SCSI_STATE_TERMINATED) {
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ } else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
+ scsiio_rep->scsi_state = LEAPRAID_SCSI_STATE_AUTOSENSE_VALID;
+ scsiio_rep->scsi_status = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ LEAPRAID_SCSI_ASC_INVALID_CMD_CODE,
+ LEAPRAID_SCSI_ASCQ_DEFAULT);
+ scmd->result = (DRIVER_SENSE << LEAPRAID_SCSI_DRIVER_SHIFT) |
+ (DID_OK << LEAPRAID_SCSI_HOST_SHIFT) |
+ SAM_STAT_CHECK_CONDITION;
+ }
+}
+
+static void leapraid_handle_success_status(
+ struct leapraid_scsiio_rep *scsiio_rep,
+ struct scsi_cmnd *scmd,
+ u32 response_code)
+{
+ u8 scsi_status = scsiio_rep->scsi_status;
+ u8 scsi_state = scsiio_rep->scsi_state;
+
+ scmd->result = (DID_OK << LEAPRAID_SCSI_HOST_SHIFT) | scsi_status;
+
+ if (response_code == LEAPRAID_TM_RSP_INVALID_FRAME ||
+ (scsi_state & (LEAPRAID_SCSI_STATE_AUTOSENSE_FAILED |
+ LEAPRAID_SCSI_STATE_NO_SCSI_STATUS)))
+ scmd->result = DID_SOFT_ERROR << LEAPRAID_SCSI_HOST_SHIFT;
+ else if (scsi_state & LEAPRAID_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+}
+
+static void leapraid_scsiio_done_dispatch(struct leapraid_adapter *adapter,
+ struct leapraid_scsiio_rep *scsiio_rep,
+ struct leapraid_sdev_priv *sdev_priv,
+ struct scsi_cmnd *scmd,
+ u16 taskid, u32 response_code)
+{
+ u8 scsi_status = scsiio_rep->scsi_status;
+ u8 scsi_state = scsiio_rep->scsi_state;
+ u16 adapter_status;
+ u32 xfer_cnt;
+ u32 sz;
+
+ adapter_status = le16_to_cpu(scsiio_rep->adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+
+ xfer_cnt = le32_to_cpu(scsiio_rep->transfer_count);
+ scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
+
+ if (adapter_status == LEAPRAID_ADAPTER_STATUS_SCSI_DATA_UNDERRUN &&
+ xfer_cnt == 0 &&
+ (scsi_status == LEAPRAID_SCSI_STATUS_BUSY ||
+ scsi_status == LEAPRAID_SCSI_STATUS_RESERVATION_CONFLICT ||
+ scsi_status == LEAPRAID_SCSI_STATUS_TASK_SET_FULL)) {
+ adapter_status = LEAPRAID_ADAPTER_STATUS_SUCCESS;
+ }
+
+ switch (adapter_status) {
+ case LEAPRAID_ADAPTER_STATUS_SCSI_DEVICE_NOT_THERE:
+ scmd->result = DID_NO_CONNECT << LEAPRAID_SCSI_HOST_SHIFT;
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_BUSY:
+ case LEAPRAID_ADAPTER_STATUS_INSUFFICIENT_RESOURCES:
+ scmd->result = SAM_STAT_BUSY;
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_SCSI_RESIDUAL_MISMATCH:
+ if (xfer_cnt == 0 || scmd->underflow > xfer_cnt)
+ scmd->result = DID_SOFT_ERROR <<
+ LEAPRAID_SCSI_HOST_SHIFT;
+ else
+ scmd->result = (DID_OK << LEAPRAID_SCSI_HOST_SHIFT) |
+ scsi_status;
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_SCSI_ADAPTER_TERMINATED:
+ if (sdev_priv->block) {
+ scmd->result = DID_TRANSPORT_DISRUPTED <<
+ LEAPRAID_SCSI_HOST_SHIFT;
+ return;
+ }
+
+ if (scmd->device->channel == RAID_CHANNEL &&
+ scsi_state == (LEAPRAID_SCSI_STATE_TERMINATED |
+ LEAPRAID_SCSI_STATE_NO_SCSI_STATUS)) {
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ break;
+ }
+
+ scmd->result = DID_SOFT_ERROR << LEAPRAID_SCSI_HOST_SHIFT;
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_SCSI_TASK_TERMINATED:
+ case LEAPRAID_ADAPTER_STATUS_SCSI_EXT_TERMINATED:
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_SCSI_DATA_UNDERRUN:
+ leapraid_handle_data_underrun(scsiio_rep, scmd, xfer_cnt);
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_SCSI_DATA_OVERRUN:
+ scsi_set_resid(scmd, 0);
+ leapraid_handle_success_status(scsiio_rep, scmd,
+ response_code);
+ break;
+ case LEAPRAID_ADAPTER_STATUS_SCSI_RECOVERED_ERROR:
+ case LEAPRAID_ADAPTER_STATUS_SUCCESS:
+ leapraid_handle_success_status(scsiio_rep, scmd,
+ response_code);
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_SCSI_PROTOCOL_ERROR:
+ case LEAPRAID_ADAPTER_STATUS_INTERNAL_ERROR:
+ case LEAPRAID_ADAPTER_STATUS_SCSI_IO_DATA_ERROR:
+ case LEAPRAID_ADAPTER_STATUS_SCSI_TASK_MGMT_FAILED:
+ default:
+ scmd->result = DID_SOFT_ERROR << LEAPRAID_SCSI_HOST_SHIFT;
+ break;
+ }
+
+ if (!scmd->result)
+ return;
+
+ scsi_print_command(scmd);
+ dev_warn(&adapter->pdev->dev,
+ "scsiio warn: hdl=0x%x, adapter_status=0x%x, scsi_status=0x%x, scsi_state=0x%x\n",
+ le16_to_cpu(scsiio_rep->dev_hdl), adapter_status,
+ scsi_status, scsi_state);
+
+ if (scsi_state & LEAPRAID_SCSI_STATE_AUTOSENSE_VALID) {
+ struct scsi_sense_hdr sshdr;
+
+ sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(scsiio_rep->sense_count));
+ if (scsi_normalize_sense(scmd->sense_buffer, sz,
+ &sshdr)) {
+ dev_warn(&adapter->pdev->dev,
+ "sense: key=0x%x asc=0x%x ascq=0x%x\n",
+ sshdr.sense_key, sshdr.asc,
+ sshdr.ascq);
+ } else {
+ dev_warn(&adapter->pdev->dev,
+ "sense: invalid sense data\n");
+ }
+ }
+}
+
+u8 leapraid_scsiio_done(struct leapraid_adapter *adapter, u16 taskid,
+ u8 msix_index, u32 rep)
+{
+ struct leapraid_scsiio_rep *scsiio_rep = NULL;
+ struct leapraid_sdev_priv *sdev_priv = NULL;
+ struct scsi_cmnd *scmd = NULL;
+ u32 response_code = 0;
+
+ if (likely(taskid != adapter->driver_cmds.driver_scsiio_cmd.taskid))
+ scmd = leapraid_get_scmd_from_taskid(adapter, taskid);
+ else
+ scmd = adapter->driver_cmds.internal_scmd;
+ if (!scmd)
+ return 1;
+
+ scsiio_rep = leapraid_get_reply_vaddr(adapter, rep);
+ if (!scsiio_rep) {
+ scmd->result = DID_OK << LEAPRAID_SCSI_HOST_SHIFT;
+ goto out;
+ }
+
+ sdev_priv = scmd->device->hostdata;
+ if (!sdev_priv ||
+ !sdev_priv->starget_priv ||
+ sdev_priv->starget_priv->deleted) {
+ scmd->result = DID_NO_CONNECT << LEAPRAID_SCSI_HOST_SHIFT;
+ goto out;
+ }
+
+ if (scsiio_rep->scsi_state & LEAPRAID_SCSI_STATE_RESPONSE_INFO_VALID)
+ response_code = le32_to_cpu(scsiio_rep->resp_info) & 0xFF;
+
+ leapraid_process_sense_data(adapter, scsiio_rep, scmd, taskid);
+ leapraid_scsiio_done_dispatch(adapter, scsiio_rep, sdev_priv, scmd,
+ taskid, response_code);
+
+out:
+ scsi_dma_unmap(scmd);
+ if (unlikely(taskid == adapter->driver_cmds.driver_scsiio_cmd.taskid)) {
+ adapter->driver_cmds.driver_scsiio_cmd.status =
+ LEAPRAID_CMD_DONE;
+ complete(&adapter->driver_cmds.driver_scsiio_cmd.done);
+ return 0;
+ }
+ leapraid_free_taskid(adapter, taskid);
+ scmd->scsi_done(scmd);
+ return 0;
+}
+
+static void leapraid_probe_raid(struct leapraid_adapter *adapter)
+{
+ struct leapraid_raid_volume *raid_volume, *raid_volume_next;
+ int rc;
+
+ list_for_each_entry_safe(raid_volume, raid_volume_next,
+ &adapter->dev_topo.raid_volume_list, list) {
+ if (raid_volume->starget)
+ continue;
+
+ rc = scsi_add_device(adapter->shost, RAID_CHANNEL,
+ raid_volume->id, 0);
+ if (rc)
+ leapraid_raid_volume_remove(adapter, raid_volume);
+ }
+}
+
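+/*
+ * Move a freshly initialized device onto the active sas_dev_list: drop
+ * the reference held by whichever list it is on now, then take a new
+ * one for the active list, so a list always owns exactly one reference.
+ */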
+static void leapraid_sas_dev_make_active(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ if (!list_empty(&sas_dev->list)) {
+ list_del_init(&sas_dev->list);
+ leapraid_sdev_put(sas_dev);
+ }
+
+ leapraid_sdev_get(sas_dev);
+ list_add_tail(&sas_dev->list, &adapter->dev_topo.sas_dev_list);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+}
+
+static void leapraid_probe_sas(struct leapraid_adapter *adapter)
+{
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_sas_port *sas_port;
+
+ for (;;) {
+ sas_dev = leapraid_get_next_sas_dev_from_init_list(adapter);
+ if (!sas_dev)
+ break;
+
+ sas_port = leapraid_transport_port_add(adapter,
+ sas_dev->hdl,
+ sas_dev->parent_sas_addr,
+ sas_dev->card_port);
+
+ if (!sas_port)
+ goto remove_dev;
+
+ if (!sas_dev->starget &&
+ !adapter->scan_dev_desc.driver_loading) {
+ leapraid_transport_port_remove(adapter,
+ sas_dev->sas_addr,
+ sas_dev->parent_sas_addr,
+ sas_dev->card_port);
+ goto remove_dev;
+ }
+
+ leapraid_sas_dev_make_active(adapter, sas_dev);
+ leapraid_sdev_put(sas_dev);
+ continue;
+
+remove_dev:
+ leapraid_sas_dev_remove(adapter, sas_dev);
+ leapraid_sdev_put(sas_dev);
+ }
+}
+
+static bool leapraid_get_boot_dev(struct leapraid_boot_dev *boot_dev,
+ void **pdev, u32 *pchnl)
+{
+ if (boot_dev->dev) {
+ *pdev = boot_dev->dev;
+ *pchnl = boot_dev->chnl;
+ return true;
+ }
+ return false;
+}
+
+static void leapraid_probe_boot_dev(struct leapraid_adapter *adapter)
+{
+ void *dev = NULL;
+ u32 chnl;
+
+ if (leapraid_get_boot_dev(&adapter->boot_devs.requested_boot_dev, &dev,
+ &chnl))
+ goto boot_dev_found;
+
+ if (leapraid_get_boot_dev(&adapter->boot_devs.requested_alt_boot_dev,
+ &dev, &chnl))
+ goto boot_dev_found;
+
+ if (leapraid_get_boot_dev(&adapter->boot_devs.current_boot_dev, &dev,
+ &chnl))
+ goto boot_dev_found;
+
+ return;
+
+boot_dev_found:
+ switch (chnl) {
+ case RAID_CHANNEL:
+ {
+ struct leapraid_raid_volume *raid_volume =
+ (struct leapraid_raid_volume *)dev;
+
+ if (raid_volume->starget)
+ return;
+
+ /* TODO eedp */
+
+ if (scsi_add_device(adapter->shost, RAID_CHANNEL,
+ raid_volume->id, 0))
+ leapraid_raid_volume_remove(adapter, raid_volume);
+ break;
+ }
+ default:
+ {
+ struct leapraid_sas_dev *sas_dev =
+ (struct leapraid_sas_dev *)dev;
+ struct leapraid_sas_port *sas_port;
+ unsigned long flags;
+
+ if (sas_dev->starget)
+ return;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ list_move_tail(&sas_dev->list,
+ &adapter->dev_topo.sas_dev_list);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (!sas_dev->card_port)
+ return;
+
+ sas_port = leapraid_transport_port_add(adapter, sas_dev->hdl,
+ sas_dev->parent_sas_addr,
+ sas_dev->card_port);
+ if (!sas_port)
+ leapraid_sas_dev_remove(adapter, sas_dev);
+ break;
+ }
+ }
+}
+
+static void leapraid_probe_devices(struct leapraid_adapter *adapter)
+{
+ leapraid_probe_boot_dev(adapter);
+
+ if (adapter->adapter_attr.raid_support)
+ leapraid_probe_raid(adapter);
+
+ leapraid_probe_sas(adapter);
+}
+
+void leapraid_scan_dev_done(struct leapraid_adapter *adapter)
+{
+ if (adapter->scan_dev_desc.wait_scan_dev_done) {
+ adapter->scan_dev_desc.wait_scan_dev_done = false;
+ leapraid_probe_devices(adapter);
+ }
+
+ leapraid_check_scheduled_fault_start(adapter);
+ leapraid_fw_log_start(adapter);
+ adapter->scan_dev_desc.driver_loading = false;
+ leapraid_smart_polling_start(adapter);
+}
+
+static void leapraid_ir_shutdown(struct leapraid_adapter *adapter)
+{
+ struct leapraid_raid_act_req *raid_act_req;
+ struct leapraid_raid_act_rep *raid_act_rep;
+ struct leapraid_driver_cmd *raid_action_cmd;
+
+ if (!adapter || !adapter->adapter_attr.raid_support)
+ return;
+
+ if (list_empty(&adapter->dev_topo.raid_volume_list))
+ return;
+
+ if (leapraid_pci_removed(adapter))
+ return;
+
+ raid_action_cmd = &adapter->driver_cmds.raid_action_cmd;
+
+ mutex_lock(&raid_action_cmd->mutex);
+ raid_action_cmd->status = LEAPRAID_CMD_PENDING;
+
+ raid_act_req = leapraid_get_task_desc(adapter,
+ raid_action_cmd->inter_taskid);
+ memset(raid_act_req, 0, sizeof(struct leapraid_raid_act_req));
+ raid_act_req->func = LEAPRAID_FUNC_RAID_ACTION;
+ raid_act_req->act = LEAPRAID_RAID_ACT_SYSTEM_SHUTDOWN_INITIATED;
+
+ dev_info(&adapter->pdev->dev, "ir shutdown start\n");
+ init_completion(&raid_action_cmd->done);
+ leapraid_fire_task(adapter, raid_action_cmd->inter_taskid);
+ wait_for_completion_timeout(&raid_action_cmd->done,
+ LEAPRAID_RAID_ACTION_CMD_TIMEOUT * HZ);
+
+ if (!(raid_action_cmd->status & LEAPRAID_CMD_DONE)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: timeout waiting for ir shutdown\n", __func__);
+ goto out;
+ }
+
+ if (raid_action_cmd->status & LEAPRAID_CMD_REPLY_VALID) {
+ raid_act_rep = (void *)(&raid_action_cmd->reply);
+ dev_info(&adapter->pdev->dev,
+ "ir shutdown done, adapter status=0x%04x\n",
+ le16_to_cpu(raid_act_rep->adapter_status));
+ }
+
+out:
+ raid_action_cmd->status = LEAPRAID_CMD_NOT_USED;
+ mutex_unlock(&raid_action_cmd->mutex);
+}
+
+static const struct pci_device_id leapraid_pci_table[] = {
+ { PCI_DEVICE(LEAPRAID_VENDOR_ID, LEAPRAID_DEVID_HBA) },
+ { PCI_DEVICE(LEAPRAID_VENDOR_ID, LEAPRAID_DEVID_RAID) },
+ { 0, }
+};
+
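+/*
+ * Gate incoming commands on adapter state: nothing is accepted during
+ * PCIe recovery or a thermal alert; while the host is being removed,
+ * only SYNCHRONIZE CACHE and START STOP are let through, presumably so
+ * devices can still be flushed and spun down cleanly.
+ */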
+static inline bool leapraid_is_scmd_permitted(struct leapraid_adapter *adapter,
+ struct scsi_cmnd *scmd)
+{
+ u8 opcode;
+
+ if (adapter->access_ctrl.pcie_recovering ||
+ adapter->access_ctrl.adapter_thermal_alert)
+ return false;
+
+ if (adapter->access_ctrl.host_removing) {
+ if (leapraid_pci_removed(adapter))
+ return false;
+
+ opcode = scmd->cmnd[0];
+ if (opcode == SYNCHRONIZE_CACHE || opcode == START_STOP)
+ return true;
+ else
+ return false;
+ }
+ return true;
+}
+
+static bool leapraid_should_queuecommand(struct leapraid_adapter *adapter,
+ struct leapraid_sdev_priv *sdev_priv,
+ struct scsi_cmnd *scmd, int *rc)
+{
+ struct leapraid_starget_priv *starget_priv;
+
+ if (!sdev_priv || !sdev_priv->starget_priv)
+ goto no_connect;
+
+ if (!leapraid_is_scmd_permitted(adapter, scmd))
+ goto no_connect;
+
+ starget_priv = sdev_priv->starget_priv;
+ if (starget_priv->hdl == LEAPRAID_INVALID_DEV_HANDLE)
+ goto no_connect;
+
+ if (sdev_priv->block &&
+ scmd->device->host->shost_state == SHOST_RECOVERY &&
+ scmd->cmnd[0] == TEST_UNIT_READY) {
+ scsi_build_sense_buffer(0, scmd->sense_buffer, UNIT_ATTENTION,
+ LEAPRAID_SCSI_ASC_POWER_ON_RESET,
+ LEAPRAID_SCSI_ASCQ_POWER_ON_RESET);
+ scmd->result = (DRIVER_SENSE << LEAPRAID_SCSI_DRIVER_SHIFT) |
+ (DID_OK << LEAPRAID_SCSI_HOST_SHIFT) |
+ SAM_STAT_CHECK_CONDITION;
+ goto done_out;
+ }
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->reset_desc.adapter_link_resetting) {
+ *rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ } else if (starget_priv->deleted || sdev_priv->deleted) {
+ goto no_connect;
+ } else if (starget_priv->tm_busy || sdev_priv->block) {
+ *rc = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto out;
+ }
+
+ return true;
+
+no_connect:
+ scmd->result = DID_NO_CONNECT << LEAPRAID_SCSI_HOST_SHIFT;
+done_out:
+ if (likely(scmd != adapter->driver_cmds.internal_scmd))
+ scmd->scsi_done(scmd);
+out:
+ return false;
+}
+
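+/*
+ * Build the SCSIIO control word: DMA direction, simple queueing,
+ * command priority for realtime-class I/O on NCQ-capable devices, and
+ * the additional CDB length field for 32-byte CDBs (the value 4 is
+ * assumed to mean four extra dwords beyond the 16-byte base CDB).
+ */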
+static u32 build_scsiio_req_control(struct scsi_cmnd *scmd,
+ struct leapraid_sdev_priv *sdev_priv)
+{
+ u32 control;
+
+ switch (scmd->sc_data_direction) {
+ case DMA_FROM_DEVICE:
+ control = LEAPRAID_SCSIIO_CTRL_READ;
+ break;
+ case DMA_TO_DEVICE:
+ control = LEAPRAID_SCSIIO_CTRL_WRITE;
+ break;
+ default:
+ control = LEAPRAID_SCSIIO_CTRL_NODATATRANSFER;
+ break;
+ }
+
+ control |= LEAPRAID_SCSIIO_CTRL_SIMPLEQ;
+
+ if (sdev_priv->ncq &&
+ (IOPRIO_PRIO_CLASS(req_get_ioprio(scmd->request)) ==
+ IOPRIO_CLASS_RT))
+ control |= LEAPRAID_SCSIIO_CTRL_CMDPRI;
+ if (scmd->cmd_len == 32)
+ control |= 4 << LEAPRAID_SCSIIO_CTRL_ADDCDBLEN_SHIFT;
+
+ return control;
+}
+
+int leapraid_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
+ struct leapraid_adapter *adapter = shost_priv(scmd->device->host);
+ struct leapraid_sdev_priv *sdev_priv = scmd->device->hostdata;
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_scsiio_req *scsiio_req;
+ u32 control;
+ u16 taskid;
+ u16 hdl;
+ int rc = 0;
+
+ if (!leapraid_should_queuecommand(adapter, sdev_priv, scmd, &rc))
+ goto out;
+
+ starget_priv = sdev_priv->starget_priv;
+ hdl = starget_priv->hdl;
+ control = build_scsiio_req_control(scmd, sdev_priv);
+
+ if (unlikely(scmd == adapter->driver_cmds.internal_scmd))
+ taskid = adapter->driver_cmds.driver_scsiio_cmd.taskid;
+ else
+ taskid = leapraid_alloc_scsiio_taskid(adapter, scmd);
+ scsiio_req = leapraid_get_task_desc(adapter, taskid);
+
+ if (sdev_priv->starget_priv->flg & LEAPRAID_TGT_FLG_RAID_MEMBER)
+ scsiio_req->func = LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH;
+ else
+ scsiio_req->func = LEAPRAID_FUNC_SCSIIO_REQ;
+
+ scsiio_req->dev_hdl = cpu_to_le16(hdl);
+ scsiio_req->data_len = cpu_to_le32(scsi_bufflen(scmd));
+ scsiio_req->ctrl = cpu_to_le32(control);
+ scsiio_req->io_flg = cpu_to_le16(scmd->cmd_len);
+ scsiio_req->msg_flg = 0;
+ scsiio_req->sense_buffer_len = SCSI_SENSE_BUFFERSIZE;
+ scsiio_req->sense_buffer_low_add =
+ leapraid_get_sense_buffer_dma(adapter, taskid);
+ scsiio_req->sgl_offset0 =
+ offsetof(struct leapraid_scsiio_req, sgl) /
+ LEAPRAID_DWORDS_BYTE_SIZE;
+ int_to_scsilun(sdev_priv->lun, (struct scsi_lun *)scsiio_req->lun);
+ memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
+ if (scsiio_req->data_len) {
+ if (leapraid_build_scmd_ieee_sg(adapter, scmd, taskid)) {
+ leapraid_free_taskid(adapter, taskid);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+ } else {
+ leapraid_build_ieee_nodata_sg(adapter, &scsiio_req->sgl);
+ }
+
+ if (likely(scsiio_req->func == LEAPRAID_FUNC_SCSIIO_REQ)) {
+ leapraid_fire_scsi_io(adapter, taskid,
+ le16_to_cpu(scsiio_req->dev_hdl));
+ } else {
+ leapraid_fire_task(adapter, taskid);
+ }
+ dev_dbg(&adapter->pdev->dev,
+ "LEAPRAID_SCSIIO: Send Descriptor taskid %d, req type 0x%x\n",
+ taskid, scsiio_req->func);
+out:
+ return rc;
+}
+
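+/*
+ * Common body of the abort/device-reset/target-reset EH callbacks:
+ * validate adapter and device state, pick the TM target handle (the
+ * owning volume for RAID members), then issue the task management
+ * request via leapraid_issue_locked_tm().
+ */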
+static int leapraid_error_handler(struct scsi_cmnd *scmd,
+ const char *str, u8 type)
+{
+ struct leapraid_adapter *adapter = shost_priv(scmd->device->host);
+ struct scsi_target *starget = scmd->device->sdev_target;
+ struct leapraid_starget_priv *starget_priv = starget->hostdata;
+ struct leapraid_io_req_tracker *io_req_tracker = NULL;
+ struct leapraid_sdev_priv *sdev_priv;
+ struct leapraid_sas_dev *sas_dev = NULL;
+ u16 hdl;
+ int rc;
+
+ dev_info(&adapter->pdev->dev,
+ "EH enter: type=%s, scmd=0x%p, req tag=%d\n", str, scmd,
+ scmd->request->tag);
+ scsi_print_command(scmd);
+
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK) {
+ io_req_tracker = leapraid_get_scmd_priv(scmd);
+ dev_info(&adapter->pdev->dev,
+ "EH ABORT: scmd=0x%p, pending=%u ms, tout=%u ms, req tag=%d\n",
+ scmd,
+ jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
+ (scmd->request->timeout / HZ) * 1000,
+ scmd->request->tag);
+ }
+
+ if (leapraid_pci_removed(adapter) ||
+ adapter->access_ctrl.host_removing) {
+ dev_err(&adapter->pdev->dev,
+ "EH %s failed: %s scmd=0x%p\n", str,
+ (adapter->access_ctrl.host_removing ?
+ "shost removing!" : "pci_dev removed!"), scmd);
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK)
+ if (io_req_tracker && io_req_tracker->taskid)
+ leapraid_free_taskid(adapter,
+ io_req_tracker->taskid);
+ scmd->result = DID_NO_CONNECT << LEAPRAID_SCSI_HOST_SHIFT;
+#ifdef FAST_IO_FAIL
+ rc = FAST_IO_FAIL;
+#else
+ rc = FAILED;
+#endif
+ goto out;
+ }
+
+ sdev_priv = scmd->device->hostdata;
+ if (!sdev_priv || !sdev_priv->starget_priv) {
+ dev_warn(&adapter->pdev->dev,
+ "EH %s: sdev or starget gone, scmd=0x%p\n",
+ str, scmd);
+ scmd->result = DID_NO_CONNECT << LEAPRAID_SCSI_HOST_SHIFT;
+ scmd->scsi_done(scmd);
+ rc = SUCCESS;
+ goto out;
+ }
+
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK) {
+ if (!io_req_tracker) {
+ dev_warn(&adapter->pdev->dev,
+ "EH ABORT: no io tracker, scmd 0x%p\n", scmd);
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ rc = SUCCESS;
+ goto out;
+ }
+
+ if (sdev_priv->starget_priv->flg &
+ LEAPRAID_TGT_FLG_RAID_MEMBER ||
+ sdev_priv->starget_priv->flg & LEAPRAID_TGT_FLG_VOLUME) {
+ dev_err(&adapter->pdev->dev,
+ "EH ABORT: skip RAID/VOLUME target, scmd=0x%p\n",
+ scmd);
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ rc = FAILED;
+ goto out;
+ }
+
+ hdl = sdev_priv->starget_priv->hdl;
+ } else {
+ hdl = 0;
+ if (sdev_priv->starget_priv->flg &
+ LEAPRAID_TGT_FLG_RAID_MEMBER) {
+ sas_dev = leapraid_get_sas_dev_from_tgt(adapter,
+ starget_priv);
+ if (sas_dev)
+ hdl = sas_dev->volume_hdl;
+ } else {
+ hdl = sdev_priv->starget_priv->hdl;
+ }
+
+ if (!hdl) {
+ dev_err(&adapter->pdev->dev,
+ "EH %s failed: target handle is 0, scmd=0x%p\n",
+ str, scmd);
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ rc = FAILED;
+ goto out;
+ }
+ }
+
+ dev_info(&adapter->pdev->dev,
+ "EH issue TM: type=%s, scmd=0x%p, hdl=0x%x\n",
+ str, scmd, hdl);
+
+ rc = leapraid_issue_locked_tm(adapter, hdl, scmd->device->channel,
+ scmd->device->id,
+ (type == LEAPRAID_TM_TASKTYPE_TARGET_RESET ?
+ 0 : scmd->device->lun),
+ type,
+ (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK ?
+ io_req_tracker->taskid : 0),
+ LEAPRAID_TM_MSGFLAGS_LINK_RESET);
+
+out:
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK) {
+ dev_info(&adapter->pdev->dev,
+ "EH ABORT result: %s, scmd=0x%p\n",
+ ((rc == SUCCESS) ? "success" : "failed"), scmd);
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "EH %s result: %s, scmd=0x%p\n",
+ str, ((rc == SUCCESS) ? "success" : "failed"), scmd);
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+ }
+ return rc;
+}
+
+static int leapraid_eh_abort_handler(struct scsi_cmnd *scmd)
+{
+ return leapraid_error_handler(scmd, "ABORT TASK",
+ LEAPRAID_TM_TASKTYPE_ABORT_TASK);
+}
+
+static int leapraid_eh_device_reset_handler(struct scsi_cmnd *scmd)
+{
+ return leapraid_error_handler(scmd, "UNIT RESET",
+ LEAPRAID_TM_TASKTYPE_LOGICAL_UNIT_RESET);
+}
+
+static int leapraid_eh_target_reset_handler(struct scsi_cmnd *scmd)
+{
+ return leapraid_error_handler(scmd, "TARGET RESET",
+ LEAPRAID_TM_TASKTYPE_TARGET_RESET);
+}
+
+static int leapraid_eh_host_reset_handler(struct scsi_cmnd *scmd)
+{
+ struct leapraid_adapter *adapter = shost_priv(scmd->device->host);
+ int rc;
+
+ dev_info(&adapter->pdev->dev,
+ "EH HOST RESET enter: scmd=%p, req tag=%d\n",
+ scmd,
+ scmd->request->tag);
+ scsi_print_command(scmd);
+
+ if (adapter->scan_dev_desc.driver_loading ||
+ adapter->access_ctrl.host_removing) {
+ dev_err(&adapter->pdev->dev,
+ "EH HOST RESET failed: %s scmd=0x%p\n",
+ (adapter->access_ctrl.host_removing ?
+ "shost removing!" : "driver loading!"), scmd);
+ rc = FAILED;
+ goto out;
+ }
+
+ dev_info(&adapter->pdev->dev, "%s:%d issuing hard reset\n",
+ __func__, __LINE__);
+ if (leapraid_hard_reset_handler(adapter, FULL_RESET) < 0)
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+
+out:
+ dev_info(&adapter->pdev->dev, "EH HOST RESET result: %s, scmd=0x%p\n",
+ ((rc == SUCCESS) ? "success" : "failed"), scmd);
+ return rc;
+}
+
+static int leapraid_slave_alloc(struct scsi_device *sdev)
+{
+ struct leapraid_raid_volume *raid_volume;
+ struct leapraid_starget_priv *stgt_priv;
+ struct leapraid_sdev_priv *sdev_priv;
+ struct leapraid_adapter *adapter;
+ struct leapraid_sas_dev *sas_dev;
+ struct scsi_target *tgt;
+ struct Scsi_Host *shost;
+ unsigned long flags;
+
+ sdev_priv = kzalloc(sizeof(*sdev_priv), GFP_KERNEL);
+ if (!sdev_priv)
+ return -ENOMEM;
+
+ sdev_priv->lun = sdev->lun;
+ sdev_priv->flg = LEAPRAID_DEVICE_FLG_INIT;
+ tgt = scsi_target(sdev);
+ stgt_priv = tgt->hostdata;
+ stgt_priv->num_luns++;
+ sdev_priv->starget_priv = stgt_priv;
+ sdev->hostdata = sdev_priv;
+ if ((stgt_priv->flg & LEAPRAID_TGT_FLG_RAID_MEMBER))
+ sdev->no_uld_attach = LEAPRAID_NO_ULD_ATTACH;
+
+ shost = dev_to_shost(&tgt->dev);
+ adapter = shost_priv(shost);
+ if (tgt->channel == RAID_CHANNEL) {
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_id(adapter,
+ tgt->id,
+ tgt->channel);
+ if (raid_volume)
+ raid_volume->sdev = sdev;
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock,
+ flags);
+ }
+
+ if (!(stgt_priv->flg & LEAPRAID_TGT_FLG_VOLUME)) {
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(adapter,
+ stgt_priv->sas_address,
+ stgt_priv->card_port);
+ if (sas_dev && !sas_dev->starget) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: assign starget to sas_dev\n", __func__);
+ sas_dev->starget = tgt;
+ }
+
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ }
+ return 0;
+}
+
+static int leapraid_slave_cfg_volume(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ struct leapraid_raid_volume *raid_volume;
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_sdev_priv *sdev_priv;
+ unsigned long flags;
+ int qd;
+ u16 hdl;
+
+ sdev_priv = sdev->hostdata;
+ starget_priv = sdev_priv->starget_priv;
+ hdl = starget_priv->hdl;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_hdl(adapter, hdl);
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+ if (!raid_volume) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: raid_volume not found, hdl=0x%x\n",
+ __func__, hdl);
+ return 1;
+ }
+
+ if (leapraid_get_volume_cap(adapter, raid_volume)) {
+ sdev_printk(KERN_ERR, sdev,
+ "%s: failed to get volume cap, hdl=0x%x\n",
+ __func__, hdl);
+ return 1;
+ }
+
+ qd = (raid_volume->dev_info & LEAPRAID_DEVTYP_SSP_TGT) ?
+ LEAPRAID_SAS_QUEUE_DEPTH : LEAPRAID_SATA_QUEUE_DEPTH;
+ if (raid_volume->vol_type != LEAPRAID_VOL_TYPE_RAID0)
+ qd = LEAPRAID_RAID_QUEUE_DEPTH;
+
+ sdev_printk(KERN_INFO, sdev,
+ "raid volume: hdl=0x%04x, wwid=0x%016llx\n",
+ raid_volume->hdl, (unsigned long long)raid_volume->wwid);
+
+ if (shost->max_sectors > LEAPRAID_MAX_SECTORS)
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ LEAPRAID_MAX_SECTORS);
+
+ leapraid_adjust_sdev_queue_depth(sdev, qd);
+ return 0;
+}
+
+static int leapraid_slave_configure_extra(struct scsi_device *sdev,
+ struct leapraid_sas_dev **psas_dev,
+ u16 vol_hdl, u64 volume_wwid,
+ bool *is_target_ssp, int *qd)
+{
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_sdev_priv *sdev_priv;
+ struct Scsi_Host *shost = sdev->host;
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ unsigned long flags;
+
+ sdev_priv = sdev->hostdata;
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ *is_target_ssp = false;
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(adapter,
+ sdev_priv->starget_priv->sas_address,
+ sdev_priv->starget_priv->card_port);
+ if (!sas_dev) {
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sas_dev not found, sas=0x%llx\n",
+ __func__, sdev_priv->starget_priv->sas_address);
+ return 1;
+ }
+
+ *psas_dev = sas_dev;
+ sas_dev->volume_hdl = vol_hdl;
+ sas_dev->volume_wwid = volume_wwid;
+ if (sas_dev->dev_info & LEAPRAID_DEVTYP_SSP_TGT) {
+ *qd = (sas_dev->port_type > 1) ?
+ adapter->adapter_attr.wideport_max_queue_depth :
+ adapter->adapter_attr.narrowport_max_queue_depth;
+ *is_target_ssp = true;
+ if (sas_dev->dev_info & LEAPRAID_DEVTYP_SEP)
+ sdev_priv->sep = true;
+ } else {
+ *qd = adapter->adapter_attr.sata_max_queue_depth;
+ }
+
+ sdev_printk(KERN_INFO, sdev,
+ "sdev: dev name=0x%016llx, sas addr=0x%016llx\n",
+ (unsigned long long)sas_dev->dev_name,
+ (unsigned long long)sas_dev->sas_addr);
+ leapraid_sdev_put(sas_dev);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return 0;
+}
+
+static int leapraid_slave_configure(struct scsi_device *sdev)
+{
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_sdev_priv *sdev_priv;
+ struct Scsi_Host *shost = sdev->host;
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_adapter *adapter;
+ u16 hdl, vol_hdl = 0;
+ bool is_target_ssp = false;
+ u64 volume_wwid = 0;
+ int qd = 1;
+
+ adapter = shost_priv(shost);
+ sdev_priv = sdev->hostdata;
+ sdev_priv->flg &= ~LEAPRAID_DEVICE_FLG_INIT;
+ starget_priv = sdev_priv->starget_priv;
+ hdl = starget_priv->hdl;
+ if (starget_priv->flg & LEAPRAID_TGT_FLG_VOLUME)
+ return leapraid_slave_cfg_volume(sdev);
+
+ if (starget_priv->flg & LEAPRAID_TGT_FLG_RAID_MEMBER) {
+ if (leapraid_cfg_get_volume_hdl(adapter, hdl, &vol_hdl)) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: get volume hdl failed, hdl=0x%x\n",
+ __func__, hdl);
+ return 1;
+ }
+
+ if (vol_hdl && leapraid_cfg_get_volume_wwid(adapter, vol_hdl,
+ &volume_wwid)) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: get wwid failed, volume_hdl=0x%x\n",
+ __func__, vol_hdl);
+ return 1;
+ }
+ }
+
+ if (leapraid_slave_configure_extra(sdev, &sas_dev, vol_hdl,
+ volume_wwid, &is_target_ssp, &qd)) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: slave_configure_extra failed\n", __func__);
+ return 1;
+ }
+
+ leapraid_adjust_sdev_queue_depth(sdev, qd);
+ if (is_target_ssp)
+ sas_read_port_mode_page(sdev);
+
+ return 0;
+}
+
+static void leapraid_slave_destroy(struct scsi_device *sdev)
+{
+ struct leapraid_adapter *adapter;
+ struct Scsi_Host *shost;
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_starget_priv *starget_priv;
+ struct scsi_target *stgt;
+ unsigned long flags;
+
+ if (!sdev->hostdata)
+ return;
+
+ stgt = scsi_target(sdev);
+ starget_priv = stgt->hostdata;
+ starget_priv->num_luns--;
+ shost = dev_to_shost(&stgt->dev);
+ adapter = shost_priv(shost);
+ if (!(starget_priv->flg & LEAPRAID_TGT_FLG_VOLUME)) {
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_from_tgt(adapter,
+ starget_priv);
+ if (sas_dev && !starget_priv->num_luns)
+ sas_dev->starget = NULL;
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ }
+
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+static int leapraid_target_alloc_raid(struct scsi_target *tgt)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_raid_volume *raid_volume;
+ struct Scsi_Host *shost = dev_to_shost(&tgt->dev);
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ unsigned long flags;
+
+ starget_priv = (struct leapraid_starget_priv *)tgt->hostdata;
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_id(adapter, tgt->id,
+ tgt->channel);
+ if (raid_volume) {
+ starget_priv->hdl = raid_volume->hdl;
+ starget_priv->sas_address = raid_volume->wwid;
+ starget_priv->flg |= LEAPRAID_TGT_FLG_VOLUME;
+ raid_volume->starget = tgt;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+ return 0;
+}
+
+static int leapraid_target_alloc_sas(struct scsi_target *tgt)
+{
+ struct sas_rphy *rphy;
+ struct Scsi_Host *shost;
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_adapter *adapter;
+ struct leapraid_starget_priv *starget_priv;
+ unsigned long flags;
+
+ shost = dev_to_shost(&tgt->dev);
+ adapter = shost_priv(shost);
+ starget_priv = (struct leapraid_starget_priv *)tgt->hostdata;
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ rphy = dev_to_rphy(tgt->dev.parent);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr_and_rphy(adapter,
+ rphy->identify.sas_address,
+ rphy);
+ if (sas_dev) {
+ starget_priv->sas_dev = sas_dev;
+ starget_priv->card_port = sas_dev->card_port;
+ starget_priv->sas_address = sas_dev->sas_addr;
+ starget_priv->hdl = sas_dev->hdl;
+ sas_dev->channel = tgt->channel;
+ sas_dev->id = tgt->id;
+ sas_dev->starget = tgt;
+ if (test_bit(sas_dev->hdl,
+ (unsigned long *)adapter->dev_topo.pd_hdls))
+ starget_priv->flg |= LEAPRAID_TGT_FLG_RAID_MEMBER;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ return 0;
+}
+
+static int leapraid_target_alloc(struct scsi_target *tgt)
+{
+ struct leapraid_starget_priv *starget_priv;
+
+ starget_priv = kzalloc(sizeof(*starget_priv), GFP_KERNEL);
+ if (!starget_priv)
+ return -ENOMEM;
+
+ tgt->hostdata = starget_priv;
+ starget_priv->starget = tgt;
+ starget_priv->hdl = LEAPRAID_INVALID_DEV_HANDLE;
+ if (tgt->channel == RAID_CHANNEL)
+ return leapraid_target_alloc_raid(tgt);
+
+ return leapraid_target_alloc_sas(tgt);
+}
+
+static void leapraid_target_destroy_raid(struct scsi_target *tgt)
+{
+ struct leapraid_raid_volume *raid_volume;
+ struct Scsi_Host *shost = dev_to_shost(&tgt->dev);
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_id(adapter, tgt->id,
+ tgt->channel);
+ if (raid_volume) {
+ raid_volume->starget = NULL;
+ raid_volume->sdev = NULL;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+}
+
+static void leapraid_target_destroy_sas(struct scsi_target *tgt)
+{
+ struct leapraid_adapter *adapter;
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_starget_priv *starget_priv;
+ struct Scsi_Host *shost;
+ unsigned long flags;
+
+ shost = dev_to_shost(&tgt->dev);
+ adapter = shost_priv(shost);
+ starget_priv = tgt->hostdata;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_from_tgt(adapter,
+ starget_priv);
+ if (sas_dev &&
+ sas_dev->starget == tgt &&
+ sas_dev->id == tgt->id &&
+ sas_dev->channel == tgt->channel)
+ sas_dev->starget = NULL;
+
+ if (sas_dev) {
+ starget_priv->sas_dev = NULL;
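+ /*
+ * Two puts: one for the lookup reference taken above and one
+ * for the long-lived reference stored in starget_priv->sas_dev
+ * by leapraid_target_alloc_sas() (pairing inferred from the
+ * alloc path).
+ */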
+ leapraid_sdev_put(sas_dev);
+ leapraid_sdev_put(sas_dev);
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+}
+
+static void leapraid_target_destroy(struct scsi_target *tgt)
+{
+ struct leapraid_starget_priv *starget_priv;
+
+ starget_priv = tgt->hostdata;
+ if (!starget_priv)
+ return;
+
+ if (tgt->channel == RAID_CHANNEL) {
+ leapraid_target_destroy_raid(tgt);
+ goto out;
+ }
+
+ leapraid_target_destroy_sas(tgt);
+
+out:
+ kfree(starget_priv);
+ tgt->hostdata = NULL;
+}
+
+static bool leapraid_scan_check_status(struct leapraid_adapter *adapter,
+ bool *need_hard_reset)
+{
+ u32 adapter_state;
+
+ if (adapter->scan_dev_desc.scan_start) {
+ adapter_state = leapraid_get_adapter_state(adapter);
+ if (adapter_state == LEAPRAID_DB_FAULT) {
+ *need_hard_reset = true;
+ return true;
+ }
+ return false;
+ }
+
+ if (adapter->driver_cmds.scan_dev_cmd.status & LEAPRAID_CMD_RESET) {
+ dev_err(&adapter->pdev->dev,
+ "device scan: aborted due to reset\n");
+ adapter->driver_cmds.scan_dev_cmd.status =
+ LEAPRAID_CMD_NOT_USED;
+ adapter->scan_dev_desc.driver_loading = false;
+ return true;
+ }
+
+ if (adapter->scan_dev_desc.scan_start_failed) {
+ dev_err(&adapter->pdev->dev,
+ "device scan: failed with adapter_status=0x%08x\n",
+ adapter->scan_dev_desc.scan_start_failed);
+ adapter->scan_dev_desc.driver_loading = false;
+ adapter->scan_dev_desc.wait_scan_dev_done = false;
+ adapter->access_ctrl.host_removing = true;
+ return true;
+ }
+
+ dev_info(&adapter->pdev->dev, "device scan: SUCCESS\n");
+ adapter->driver_cmds.scan_dev_cmd.status = LEAPRAID_CMD_NOT_USED;
+ leapraid_scan_dev_done(adapter);
+ return true;
+}
+
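+/*
+ * scan_finished() host hook: polled by the SCSI midlayer during the async
+ * host scan; a nonzero return means discovery completed, failed, or timed
+ * out.
+ */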
+static int leapraid_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ bool need_hard_reset = false;
+
+ if (time >= (LEAPRAID_SCAN_DEV_CMD_TIMEOUT * HZ)) {
+ adapter->driver_cmds.scan_dev_cmd.status =
+ LEAPRAID_CMD_NOT_USED;
+ dev_err(&adapter->pdev->dev,
+			"device scan: failed with timeout %ds\n",
+			LEAPRAID_SCAN_DEV_CMD_TIMEOUT);
+ adapter->scan_dev_desc.driver_loading = false;
+ return 1;
+ }
+
+ if (!leapraid_scan_check_status(adapter, &need_hard_reset))
+ return 0;
+
+ if (need_hard_reset) {
+ adapter->driver_cmds.scan_dev_cmd.status =
+ LEAPRAID_CMD_NOT_USED;
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ if (leapraid_hard_reset_handler(adapter, PART_RESET))
+ adapter->scan_dev_desc.driver_loading = false;
+ }
+
+ return 1;
+}
+
+static void leapraid_scan_start(struct Scsi_Host *shost)
+{
+ struct leapraid_adapter *adapter = shost_priv(shost);
+
+ adapter->scan_dev_desc.scan_start = true;
+ leapraid_scan_dev(adapter, true);
+}
+
+static int leapraid_calc_max_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ struct Scsi_Host *shost;
+ int max_depth;
+
+ shost = sdev->host;
+ max_depth = shost->can_queue;
+
+ if (!sdev->tagged_supported)
+ max_depth = 1;
+
+ if (qdepth > max_depth)
+ qdepth = max_depth;
+
+ return qdepth;
+}
+
+static int leapraid_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ qdepth = leapraid_calc_max_queue_depth(sdev, qdepth);
+ scsi_change_queue_depth(sdev, qdepth);
+ return sdev->queue_depth;
+}
+
+void leapraid_adjust_sdev_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ leapraid_change_queue_depth(sdev, qdepth);
+}
+
+static int leapraid_bios_param(struct scsi_device *sdev,
+ struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ int heads = 0;
+ int sectors = 0;
+ sector_t cylinders;
+
+ if (scsi_partsize(bdev, capacity, geom))
+ return 0;
+
+ if ((ulong)capacity >= LEAPRAID_LARGE_DISK_THRESHOLD) {
+ heads = LEAPRAID_LARGE_DISK_HEADS;
+ sectors = LEAPRAID_LARGE_DISK_SECTORS;
+ } else {
+ heads = LEAPRAID_SMALL_DISK_HEADS;
+ sectors = LEAPRAID_SMALL_DISK_SECTORS;
+ }
+
+ cylinders = capacity;
+ sector_div(cylinders, heads * sectors);
+
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+ return 0;
+}
+
+static ssize_t fw_queue_depth_show(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct leapraid_adapter *adapter = shost_priv(shost);
+
+ return scnprintf(buf, PAGE_SIZE, "%02d\n",
+ adapter->adapter_attr.features.req_slot);
+}
+
+static ssize_t host_sas_address_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct leapraid_adapter *adapter = shost_priv(shost);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%016llx\n",
+ (unsigned long long)adapter->dev_topo.card.sas_address);
+}
+
+static DEVICE_ATTR_RO(fw_queue_depth);
+static DEVICE_ATTR_RO(host_sas_address);
+
+static struct device_attribute *leapraid_shost_attrs[] = {
+ &dev_attr_fw_queue_depth,
+ &dev_attr_host_sas_address,
+ NULL,
+};
+
+static ssize_t sas_address_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct leapraid_sdev_priv *sas_device_priv_data = sdev->hostdata;
+
+ return scnprintf(buf, PAGE_SIZE, "0x%016llx\n",
+ (unsigned long long)sas_device_priv_data->starget_priv->sas_address);
+}
+
+static ssize_t sas_device_handle_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct leapraid_sdev_priv *sas_device_priv_data = sdev->hostdata;
+
+ return scnprintf(buf, PAGE_SIZE, "0x%04x\n",
+ sas_device_priv_data->starget_priv->hdl);
+}
+
+static ssize_t sas_ncq_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct leapraid_sdev_priv *sas_device_priv_data = sdev->hostdata;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sas_device_priv_data->ncq);
+}
+
+static ssize_t sas_ncq_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct leapraid_sdev_priv *sas_device_priv_data = sdev->hostdata;
+ unsigned char *vpd_pg89;
+ int ncq_op = 0;
+ bool ncq_supported = false;
+
+ if (kstrtoint(buf, 0, &ncq_op))
+ goto out;
+
+ vpd_pg89 = kmalloc(LEAPRAID_VPD_PG89_MAX_LEN, GFP_KERNEL);
+ if (!vpd_pg89)
+ goto out;
+
+ if (!scsi_device_supports_vpd(sdev) ||
+ scsi_get_vpd_page(sdev, LEAPRAID_VPD_PAGE_ATA_INFO,
+ vpd_pg89, LEAPRAID_VPD_PG89_MAX_LEN)) {
+ kfree(vpd_pg89);
+ goto out;
+ }
+
+ ncq_supported = (vpd_pg89[LEAPRAID_VPD_PG89_NCQ_BYTE_IDX] >>
+ LEAPRAID_VPD_PG89_NCQ_BIT_SHIFT) &
+ LEAPRAID_VPD_PG89_NCQ_BIT_MASK;
+ kfree(vpd_pg89);
+ if (ncq_supported)
+ sas_device_priv_data->ncq = ncq_op;
+	return count;
+out:
+ return -EINVAL;
+}
+
+static DEVICE_ATTR_RO(sas_address);
+static DEVICE_ATTR_RO(sas_device_handle);
+
+static DEVICE_ATTR_RW(sas_ncq);
+
+static struct device_attribute *leapraid_sdev_attrs[] = {
+ &dev_attr_sas_address,
+ &dev_attr_sas_device_handle,
+ &dev_attr_sas_ncq,
+ NULL,
+};
+
+static struct scsi_host_template leapraid_driver_template = {
+ .module = THIS_MODULE,
+ .name = "LEAPIO RAID Host",
+ .proc_name = LEAPRAID_DRIVER_NAME,
+ .queuecommand = leapraid_queuecommand,
+ .eh_abort_handler = leapraid_eh_abort_handler,
+ .eh_device_reset_handler = leapraid_eh_device_reset_handler,
+ .eh_target_reset_handler = leapraid_eh_target_reset_handler,
+ .eh_host_reset_handler = leapraid_eh_host_reset_handler,
+ .slave_alloc = leapraid_slave_alloc,
+ .slave_destroy = leapraid_slave_destroy,
+ .slave_configure = leapraid_slave_configure,
+ .target_alloc = leapraid_target_alloc,
+ .target_destroy = leapraid_target_destroy,
+ .scan_finished = leapraid_scan_finished,
+ .scan_start = leapraid_scan_start,
+ .change_queue_depth = leapraid_change_queue_depth,
+ .bios_param = leapraid_bios_param,
+ .can_queue = LEAPRAID_CAN_QUEUE_MIN,
+ .this_id = LEAPRAID_THIS_ID_NONE,
+ .sg_tablesize = LEAPRAID_SG_DEPTH,
+ .max_sectors = LEAPRAID_DEF_MAX_SECTORS,
+ .max_segment_size = LEAPRAID_MAX_SEGMENT_SIZE,
+ .cmd_per_lun = LEAPRAID_CMD_PER_LUN,
+ .shost_attrs = leapraid_shost_attrs,
+ .sdev_attrs = leapraid_sdev_attrs,
+ .track_queue_depth = 1,
+};
+
+static void leapraid_lock_init(struct leapraid_adapter *adapter)
+{
+ mutex_init(&adapter->reset_desc.adapter_reset_mutex);
+ mutex_init(&adapter->reset_desc.host_diag_mutex);
+ mutex_init(&adapter->access_ctrl.pci_access_lock);
+
+ spin_lock_init(&adapter->reset_desc.adapter_reset_lock);
+ spin_lock_init(&adapter->dynamic_task_desc.task_lock);
+ spin_lock_init(&adapter->dev_topo.sas_dev_lock);
+ spin_lock_init(&adapter->dev_topo.topo_node_lock);
+ spin_lock_init(&adapter->fw_evt_s.fw_evt_lock);
+ spin_lock_init(&adapter->dev_topo.raid_volume_lock);
+}
+
+static void leapraid_list_init(struct leapraid_adapter *adapter)
+{
+ INIT_LIST_HEAD(&adapter->dev_topo.sas_dev_list);
+ INIT_LIST_HEAD(&adapter->dev_topo.card_port_list);
+ INIT_LIST_HEAD(&adapter->dev_topo.sas_dev_init_list);
+ INIT_LIST_HEAD(&adapter->dev_topo.exp_list);
+ INIT_LIST_HEAD(&adapter->dev_topo.enc_list);
+ INIT_LIST_HEAD(&adapter->fw_evt_s.fw_evt_list);
+ INIT_LIST_HEAD(&adapter->dev_topo.raid_volume_list);
+ INIT_LIST_HEAD(&adapter->dev_topo.card.sas_port_list);
+}
+
+static int leapraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct leapraid_adapter *adapter = NULL;
+ struct Scsi_Host *shost = NULL;
+ int rc;
+
+ shost = scsi_host_alloc(&leapraid_driver_template,
+ sizeof(struct leapraid_adapter));
+ if (!shost)
+ return -ENODEV;
+
+ adapter = shost_priv(shost);
+ memset(adapter, 0, sizeof(struct leapraid_adapter));
+ adapter->adapter_attr.id = leapraid_ids++;
+
+ adapter->adapter_attr.enable_mp = enable_mp;
+
+	INIT_LIST_HEAD(&adapter->list);
+ spin_lock(&leapraid_adapter_lock);
+ list_add_tail(&adapter->list, &leapraid_adapter_list);
+ spin_unlock(&leapraid_adapter_lock);
+
+ adapter->shost = shost;
+ adapter->pdev = pdev;
+ adapter->fw_log_desc.open_pcie_trace = open_pcie_trace;
+ leapraid_lock_init(adapter);
+ leapraid_list_init(adapter);
+ sprintf(adapter->adapter_attr.name, "%s%d",
+ LEAPRAID_DRIVER_NAME, adapter->adapter_attr.id);
+
+ shost->max_cmd_len = LEAPRAID_MAX_CDB_LEN;
+ shost->max_lun = LEAPRAID_MAX_LUNS;
+ shost->transportt = leapraid_transport_template;
+ shost->unique_id = adapter->adapter_attr.id;
+
+ snprintf(adapter->fw_evt_s.fw_evt_name,
+ sizeof(adapter->fw_evt_s.fw_evt_name),
+ "fw_event_%s%d", LEAPRAID_DRIVER_NAME,
+ adapter->adapter_attr.id);
+ adapter->fw_evt_s.fw_evt_thread =
+ alloc_ordered_workqueue(adapter->fw_evt_s.fw_evt_name, 0);
+ if (!adapter->fw_evt_s.fw_evt_thread) {
+ rc = -ENODEV;
+ goto evt_wq_fail;
+ }
+
+ adapter->scan_dev_desc.driver_loading = true;
+	if (leapraid_ctrl_init(adapter)) {
+		rc = -ENODEV;
+		goto ctrl_init_fail;
+	}
+
+ rc = scsi_add_host(shost, &pdev->dev);
+ if (rc) {
+ spin_lock(&leapraid_adapter_lock);
+ list_del(&adapter->list);
+ spin_unlock(&leapraid_adapter_lock);
+ goto scsi_add_shost_fail;
+ }
+
+ scsi_scan_host(shost);
+ return 0;
+
+scsi_add_shost_fail:
+ leapraid_remove_ctrl(adapter);
+ctrl_init_fail:
+ destroy_workqueue(adapter->fw_evt_s.fw_evt_thread);
+evt_wq_fail:
+ spin_lock(&leapraid_adapter_lock);
+ list_del(&adapter->list);
+ spin_unlock(&leapraid_adapter_lock);
+ scsi_host_put(shost);
+ return rc;
+}
+
+static void leapraid_cleanup_lists(struct leapraid_adapter *adapter)
+{
+ struct leapraid_raid_volume *raid_volume, *next_raid_volume;
+ struct leapraid_starget_priv *starget_priv_data;
+ struct leapraid_sas_port *leapraid_port, *next_port;
+ struct leapraid_card_port *port, *port_next;
+ struct leapraid_vphy *vphy, *vphy_next;
+
+ list_for_each_entry_safe(raid_volume, next_raid_volume,
+ &adapter->dev_topo.raid_volume_list, list) {
+ if (raid_volume->starget) {
+ starget_priv_data = raid_volume->starget->hostdata;
+ starget_priv_data->deleted = true;
+ scsi_remove_target(&raid_volume->starget->dev);
+ }
+ pr_info("removing hdl=0x%04x, wwid=0x%016llx\n",
+ raid_volume->hdl,
+ (unsigned long long)raid_volume->wwid);
+ leapraid_raid_volume_remove(adapter, raid_volume);
+ }
+
+ list_for_each_entry_safe(leapraid_port, next_port,
+ &adapter->dev_topo.card.sas_port_list,
+ port_list) {
+ if (leapraid_port->remote_identify.device_type ==
+ SAS_END_DEVICE)
+ leapraid_sas_dev_remove_by_sas_address(adapter,
+ leapraid_port->remote_identify.sas_address,
+ leapraid_port->card_port);
+ else if (leapraid_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ leapraid_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ leapraid_exp_rm(adapter,
+ leapraid_port->remote_identify.sas_address,
+ leapraid_port->card_port);
+ }
+
+ list_for_each_entry_safe(port, port_next,
+ &adapter->dev_topo.card_port_list, list) {
+ if (port->vphys_mask)
+ list_for_each_entry_safe(vphy, vphy_next,
+ &port->vphys_list, list) {
+ list_del(&vphy->list);
+ kfree(vphy);
+ }
+ list_del(&port->list);
+ kfree(port);
+ }
+
+ if (adapter->dev_topo.card.phys_num) {
+ kfree(adapter->dev_topo.card.card_phy);
+ adapter->dev_topo.card.card_phy = NULL;
+ adapter->dev_topo.card.phys_num = 0;
+ }
+}
+
+static void leapraid_remove(struct pci_dev *pdev)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev, "unable to remove!\n");
+ return;
+ }
+
+ while (adapter->scan_dev_desc.driver_loading)
+ ssleep(1);
+
+ while (adapter->access_ctrl.shost_recovering)
+ ssleep(1);
+
+ adapter->access_ctrl.host_removing = true;
+
+ leapraid_wait_cmds_done(adapter);
+
+ leapraid_smart_polling_stop(adapter);
+ leapraid_free_internal_scsi_cmd(adapter);
+
+ if (leapraid_pci_removed(adapter)) {
+ leapraid_mq_polling_pause(adapter);
+ leapraid_clean_active_scsi_cmds(adapter);
+ }
+ leapraid_clean_active_fw_evt(adapter);
+
+ spin_lock_irqsave(&adapter->fw_evt_s.fw_evt_lock, flags);
+ wq = adapter->fw_evt_s.fw_evt_thread;
+ adapter->fw_evt_s.fw_evt_thread = NULL;
+ spin_unlock_irqrestore(&adapter->fw_evt_s.fw_evt_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ leapraid_ir_shutdown(adapter);
+ sas_remove_host(shost);
+ leapraid_cleanup_lists(adapter);
+ leapraid_remove_ctrl(adapter);
+ spin_lock(&leapraid_adapter_lock);
+ list_del(&adapter->list);
+ spin_unlock(&leapraid_adapter_lock);
+ scsi_host_put(shost);
+}
+
+static void leapraid_shutdown(struct pci_dev *pdev)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev, "unable to shutdown!\n");
+ return;
+ }
+
+ adapter->access_ctrl.host_removing = true;
+ leapraid_wait_cmds_done(adapter);
+ leapraid_clean_active_fw_evt(adapter);
+
+ spin_lock_irqsave(&adapter->fw_evt_s.fw_evt_lock, flags);
+ wq = adapter->fw_evt_s.fw_evt_thread;
+ adapter->fw_evt_s.fw_evt_thread = NULL;
+ spin_unlock_irqrestore(&adapter->fw_evt_s.fw_evt_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ leapraid_ir_shutdown(adapter);
+ leapraid_disable_controller(adapter);
+}
+
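+/*
+ * PCI AER recovery flow: error_detected() runs first; a frozen channel
+ * quiesces I/O and requests a slot reset, after which slot_reset()
+ * re-initializes the controller and resume() restarts I/O. A permanent
+ * failure flushes all outstanding commands instead.
+ */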
+static pci_ers_result_t leapraid_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev, "failed to error detected for device\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pr_err("%s: pci error detected, state=%d\n",
+ adapter->adapter_attr.name, state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ adapter->access_ctrl.pcie_recovering = true;
+ scsi_block_requests(adapter->shost);
+ leapraid_smart_polling_stop(adapter);
+ leapraid_check_scheduled_fault_stop(adapter);
+ leapraid_fw_log_stop(adapter);
+ leapraid_disable_controller(adapter);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ adapter->access_ctrl.pcie_recovering = true;
+ leapraid_smart_polling_stop(adapter);
+ leapraid_check_scheduled_fault_stop(adapter);
+ leapraid_fw_log_stop(adapter);
+ leapraid_mq_polling_pause(adapter);
+ leapraid_clean_active_scsi_cmds(adapter);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t leapraid_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev,
+ "failed to enable mmio for device\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ dev_info(&pdev->dev, "%s: pci error mmio enabled\n",
+ adapter->adapter_attr.name);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t leapraid_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+ int rc;
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev,
+ "failed to slot reset for device\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ dev_err(&pdev->dev, "%s pci error slot reset\n",
+ adapter->adapter_attr.name);
+
+ adapter->access_ctrl.pcie_recovering = false;
+ adapter->pdev = pdev;
+ pci_restore_state(pdev);
+ if (leapraid_set_pcie_and_notification(adapter))
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ dev_info(&pdev->dev, "%s: hard reset triggered by pci slot reset\n",
+ adapter->adapter_attr.name);
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ rc = leapraid_hard_reset_handler(adapter, FULL_RESET);
+ dev_info(&pdev->dev, "%s hard reset: %s\n",
+ adapter->adapter_attr.name, (rc == 0) ? "success" : "failed");
+
+ return (rc == 0) ? PCI_ERS_RESULT_RECOVERED :
+ PCI_ERS_RESULT_DISCONNECT;
+}
+
+static void leapraid_pci_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev, "failed to resume\n");
+ return;
+ }
+
+	dev_info(&pdev->dev, "PCI error resume\n");
+ pci_aer_clear_nonfatal_status(pdev);
+ leapraid_check_scheduled_fault_start(adapter);
+ leapraid_fw_log_start(adapter);
+ scsi_unblock_requests(adapter->shost);
+ leapraid_smart_polling_start(adapter);
+}
+
+MODULE_DEVICE_TABLE(pci, leapraid_pci_table);
+static struct pci_error_handlers leapraid_err_handler = {
+ .error_detected = leapraid_pci_error_detected,
+ .mmio_enabled = leapraid_pci_mmio_enabled,
+ .slot_reset = leapraid_pci_slot_reset,
+ .resume = leapraid_pci_resume,
+};
+
+#ifdef CONFIG_PM
+static int leapraid_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+ pci_power_t device_state;
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev,
+ "suspend failed, invalid host or adapter\n");
+ return -ENXIO;
+ }
+
+ leapraid_smart_polling_stop(adapter);
+ leapraid_check_scheduled_fault_stop(adapter);
+ leapraid_fw_log_stop(adapter);
+ flush_scheduled_work();
+ scsi_block_requests(shost);
+ device_state = pci_choose_state(pdev, state);
+ leapraid_ir_shutdown(adapter);
+
+ dev_info(&pdev->dev, "entering PCI power state D%d, (slot=%s)\n",
+ device_state, pci_name(pdev));
+
+ pci_save_state(pdev);
+ leapraid_disable_controller(adapter);
+ pci_set_power_state(pdev, device_state);
+ return 0;
+}
+
+static int leapraid_resume(struct pci_dev *pdev)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+ pci_power_t device_state = pdev->current_state;
+ int rc;
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev,
+ "resume failed, invalid host or adapter\n");
+ return -ENXIO;
+ }
+
+ dev_info(&pdev->dev,
+ "resuming device %s, previous state D%d\n",
+ pci_name(pdev), device_state);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ adapter->pdev = pdev;
+ rc = leapraid_set_pcie_and_notification(adapter);
+ if (rc)
+ return rc;
+
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, PART_RESET);
+ scsi_unblock_requests(shost);
+ leapraid_check_scheduled_fault_start(adapter);
+ leapraid_fw_log_start(adapter);
+ leapraid_smart_polling_start(adapter);
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct pci_driver leapraid_driver = {
+ .name = LEAPRAID_DRIVER_NAME,
+ .id_table = leapraid_pci_table,
+ .probe = leapraid_probe,
+ .remove = leapraid_remove,
+ .shutdown = leapraid_shutdown,
+ .err_handler = &leapraid_err_handler,
+#ifdef CONFIG_PM
+ .suspend = leapraid_suspend,
+ .resume = leapraid_resume,
+#endif /* CONFIG_PM */
+};
+
+static int __init leapraid_init(void)
+{
+ int error;
+
+ pr_info("%s version %s loaded\n", LEAPRAID_DRIVER_NAME,
+ LEAPRAID_DRIVER_VERSION);
+
+ leapraid_transport_template =
+ sas_attach_transport(&leapraid_transport_functions);
+ if (!leapraid_transport_template)
+ return -ENODEV;
+
+ leapraid_ids = 0;
+
+ leapraid_ctl_init();
+
+ error = pci_register_driver(&leapraid_driver);
+ if (error)
+ sas_release_transport(leapraid_transport_template);
+
+ return error;
+}
+
+static void __exit leapraid_exit(void)
+{
+ pr_info("leapraid version %s unloading\n",
+ LEAPRAID_DRIVER_VERSION);
+
+ leapraid_ctl_exit();
+ pci_unregister_driver(&leapraid_driver);
+ sas_release_transport(leapraid_transport_template);
+}
+
+module_init(leapraid_init);
+module_exit(leapraid_exit);
diff --git a/drivers/scsi/leapraid/leapraid_transport.c b/drivers/scsi/leapraid/leapraid_transport.c
new file mode 100644
index 000000000000..d224449732a3
--- /dev/null
+++ b/drivers/scsi/leapraid/leapraid_transport.c
@@ -0,0 +1,1257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 LeapIO Tech Inc.
+ *
+ * LeapRAID Storage and RAID Controller driver.
+ */
+
+#include <scsi/scsi_host.h>
+
+#include "leapraid_func.h"
+
+static struct leapraid_topo_node *leapraid_transport_topo_node_by_sas_addr(
+ struct leapraid_adapter *adapter,
+ u64 sas_addr,
+ struct leapraid_card_port *card_port)
+{
+ if (adapter->dev_topo.card.sas_address == sas_addr)
+ return &adapter->dev_topo.card;
+ else
+ return leapraid_exp_find_by_sas_address(adapter,
+ sas_addr,
+ card_port);
+}
+
+static u8 leapraid_get_port_id_by_expander(struct leapraid_adapter *adapter,
+ struct sas_rphy *rphy)
+{
+ struct leapraid_topo_node *topo_node_exp;
+ unsigned long flags;
+ u8 port_id = 0xFF;
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ list_for_each_entry(topo_node_exp, &adapter->dev_topo.exp_list, list) {
+ if (topo_node_exp->rphy == rphy) {
+ port_id = topo_node_exp->card_port->port_id;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ return port_id;
+}
+
+static u8 leapraid_get_port_id_by_end_dev(struct leapraid_adapter *adapter,
+ struct sas_rphy *rphy)
+{
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ u8 port_id = 0xFF;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr_and_rphy(adapter,
+ rphy->identify.sas_address,
+ rphy);
+ if (sas_dev) {
+ port_id = sas_dev->card_port->port_id;
+ leapraid_sdev_put(sas_dev);
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ return port_id;
+}
+
+static u8 leapraid_transport_get_port_id_by_rphy(
+ struct leapraid_adapter *adapter,
+ struct sas_rphy *rphy)
+{
+ if (!rphy)
+ return 0xFF;
+
+ switch (rphy->identify.device_type) {
+ case SAS_EDGE_EXPANDER_DEVICE:
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ return leapraid_get_port_id_by_expander(adapter, rphy);
+ case SAS_END_DEVICE:
+ return leapraid_get_port_id_by_end_dev(adapter, rphy);
+ default:
+ return 0xFF;
+ }
+}
+
+static enum sas_linkrate leapraid_transport_convert_phy_link_rate(u8 link_rate)
+{
+ unsigned int i;
+
+ const struct linkrate_map {
+ u8 in;
+ enum sas_linkrate out;
+ } linkrate_table[] = {
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_1_5,
+ SAS_LINK_RATE_1_5_GBPS
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_3_0,
+ SAS_LINK_RATE_3_0_GBPS
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_6_0,
+ SAS_LINK_RATE_6_0_GBPS
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_12_0,
+			SAS_LINK_RATE_12_0_GBPS
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_PHY_DISABLED,
+ SAS_PHY_DISABLED
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED,
+ SAS_LINK_RATE_FAILED
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_PORT_SELECTOR,
+ SAS_SATA_PORT_SELECTOR
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_SMP_RESETTING,
+ SAS_LINK_RATE_UNKNOWN
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE,
+ SAS_LINK_RATE_UNKNOWN
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE,
+ SAS_LINK_RATE_UNKNOWN
+ },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(linkrate_table); i++) {
+ if (linkrate_table[i].in == link_rate)
+ return linkrate_table[i].out;
+ }
+
+ return SAS_LINK_RATE_UNKNOWN;
+}
+
+static void leapraid_set_identify_protocol_flags(u32 dev_info,
+ struct sas_identify *identify)
+{
+ unsigned int i;
+
+ const struct protocol_mapping {
+ u32 mask;
+ u32 *target;
+ u32 protocol;
+ } mappings[] = {
+ {
+ LEAPRAID_DEVTYP_SSP_INIT,
+ &identify->initiator_port_protocols,
+ SAS_PROTOCOL_SSP
+ },
+ {
+ LEAPRAID_DEVTYP_STP_INIT,
+ &identify->initiator_port_protocols,
+ SAS_PROTOCOL_STP
+ },
+ {
+ LEAPRAID_DEVTYP_SMP_INIT,
+ &identify->initiator_port_protocols,
+ SAS_PROTOCOL_SMP
+ },
+ {
+ LEAPRAID_DEVTYP_SATA_HOST,
+ &identify->initiator_port_protocols,
+ SAS_PROTOCOL_SATA
+ },
+ {
+ LEAPRAID_DEVTYP_SSP_TGT,
+ &identify->target_port_protocols,
+ SAS_PROTOCOL_SSP
+ },
+ {
+ LEAPRAID_DEVTYP_STP_TGT,
+ &identify->target_port_protocols,
+ SAS_PROTOCOL_STP
+ },
+ {
+ LEAPRAID_DEVTYP_SMP_TGT,
+ &identify->target_port_protocols,
+ SAS_PROTOCOL_SMP
+ },
+ {
+ LEAPRAID_DEVTYP_SATA_DEV,
+ &identify->target_port_protocols,
+ SAS_PROTOCOL_SATA
+ },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(mappings); i++)
+ if ((dev_info & mappings[i].mask) && mappings[i].target)
+ *mappings[i].target |= mappings[i].protocol;
+}
+
+static int leapraid_transport_set_identify(struct leapraid_adapter *adapter,
+ u16 hdl,
+ struct sas_identify *identify)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_dev_p0 sas_dev_pg0;
+ u32 dev_info;
+
+ if ((adapter->access_ctrl.shost_recovering &&
+ !adapter->scan_dev_desc.driver_loading) ||
+ adapter->access_ctrl.pcie_recovering)
+ return -EFAULT;
+
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = hdl;
+	if (leapraid_op_config_page(adapter, &sas_dev_pg0, cfgp1,
+				    cfgp2, GET_SAS_DEVICE_PG0))
+ return -ENXIO;
+
+ memset(identify, 0, sizeof(struct sas_identify));
+ dev_info = le32_to_cpu(sas_dev_pg0.dev_info);
+ identify->sas_address = le64_to_cpu(sas_dev_pg0.sas_address);
+ identify->phy_identifier = sas_dev_pg0.phy_num;
+
+ switch (dev_info & LEAPRAID_DEVTYP_MASK_DEV_TYPE) {
+ case LEAPRAID_DEVTYP_NO_DEV:
+ identify->device_type = SAS_PHY_UNUSED;
+ break;
+ case LEAPRAID_DEVTYP_END_DEV:
+ identify->device_type = SAS_END_DEVICE;
+ break;
+ case LEAPRAID_DEVTYP_EDGE_EXPANDER:
+ identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
+ break;
+ case LEAPRAID_DEVTYP_FANOUT_EXPANDER:
+ identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
+ break;
+ }
+
+ leapraid_set_identify_protocol_flags(dev_info, identify);
+
+ return 0;
+}
+
+static void leapraid_transport_exp_set_edev(struct leapraid_adapter *adapter,
+ void *data_out,
+ struct sas_expander_device *edev)
+{
+ struct leapraid_smp_passthrough_rep *smp_passthrough_rep;
+ struct leapraid_rep_manu_reply *rep_manu_reply;
+ u8 *component_id;
+ ssize_t __maybe_unused ret;
+
+ smp_passthrough_rep =
+ (void *)(&adapter->driver_cmds.transport_cmd.reply);
+ if (le16_to_cpu(smp_passthrough_rep->resp_data_len) !=
+ sizeof(struct leapraid_rep_manu_reply))
+ return;
+
+ rep_manu_reply = data_out + sizeof(struct leapraid_rep_manu_request);
+ ret = strscpy(edev->vendor_id, rep_manu_reply->vendor_identification,
+ SAS_EXPANDER_VENDOR_ID_LEN);
+ ret = strscpy(edev->product_id, rep_manu_reply->product_identification,
+ SAS_EXPANDER_PRODUCT_ID_LEN);
+ ret = strscpy(edev->product_rev,
+ rep_manu_reply->product_revision_level,
+ SAS_EXPANDER_PRODUCT_REV_LEN);
+ edev->level = rep_manu_reply->sas_format & 1;
+ if (edev->level) {
+ ret = strscpy(edev->component_vendor_id,
+ rep_manu_reply->component_vendor_identification,
+ SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
+
+ component_id = (u8 *)&rep_manu_reply->component_id;
+ edev->component_id = component_id[0] << 8 | component_id[1];
+ edev->component_revision_id =
+ rep_manu_reply->component_revision_level;
+ }
+}
+
+static int leapraid_transport_exp_report_manu(struct leapraid_adapter *adapter,
+ u64 sas_address,
+ struct sas_expander_device *edev,
+ u8 port_id)
+{
+ struct leapraid_smp_passthrough_req *smp_passthrough_req;
+ struct leapraid_rep_manu_request *rep_manu_request;
+ dma_addr_t h2c_dma_addr;
+ dma_addr_t c2h_dma_addr;
+ bool issue_reset = false;
+ void *data_out = NULL;
+ size_t c2h_size;
+ size_t h2c_size;
+ void *psge;
+ int rc = 0;
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering) {
+ return -EFAULT;
+ }
+
+ mutex_lock(&adapter->driver_cmds.transport_cmd.mutex);
+ adapter->driver_cmds.transport_cmd.status = LEAPRAID_CMD_PENDING;
+ rc = leapraid_check_adapter_is_op(adapter);
+ if (rc)
+ goto out;
+
+ h2c_size = sizeof(struct leapraid_rep_manu_request);
+ c2h_size = sizeof(struct leapraid_rep_manu_reply);
+ data_out = dma_alloc_coherent(&adapter->pdev->dev,
+ h2c_size + c2h_size,
+ &h2c_dma_addr,
+ GFP_ATOMIC);
+ if (!data_out) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rep_manu_request = data_out;
+ rep_manu_request->smp_frame_type =
+ SMP_REPORT_MANUFACTURER_INFORMATION_FRAME_TYPE;
+ rep_manu_request->function = SMP_REPORT_MANUFACTURER_INFORMATION_FUNC;
+ rep_manu_request->allocated_response_length = 0;
+ rep_manu_request->request_length = 0;
+
+ smp_passthrough_req =
+ leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.transport_cmd.inter_taskid);
+ memset(smp_passthrough_req, 0,
+ sizeof(struct leapraid_smp_passthrough_req));
+ smp_passthrough_req->func = LEAPRAID_FUNC_SMP_PASSTHROUGH;
+ smp_passthrough_req->physical_port = port_id;
+ smp_passthrough_req->sas_address = cpu_to_le64(sas_address);
+ smp_passthrough_req->req_data_len = cpu_to_le16(h2c_size);
+ psge = &smp_passthrough_req->sgl;
+ c2h_dma_addr = h2c_dma_addr + sizeof(struct leapraid_rep_manu_request);
+ leapraid_build_ieee_sg(adapter, psge, h2c_dma_addr, h2c_size,
+ c2h_dma_addr, c2h_size);
+
+ init_completion(&adapter->driver_cmds.transport_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.transport_cmd.inter_taskid);
+ wait_for_completion_timeout(&adapter->driver_cmds.transport_cmd.done,
+ LEAPRAID_TRANSPORT_CMD_TIMEOUT * HZ);
+ if (!(adapter->driver_cmds.transport_cmd.status & LEAPRAID_CMD_DONE)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: smp passthrough to exp timeout\n",
+ __func__);
+ if (!(adapter->driver_cmds.transport_cmd.status &
+ LEAPRAID_CMD_RESET))
+ issue_reset = true;
+
+ goto hard_reset;
+ }
+
+ if (adapter->driver_cmds.transport_cmd.status &
+ LEAPRAID_CMD_REPLY_VALID)
+ leapraid_transport_exp_set_edev(adapter, data_out, edev);
+
+hard_reset:
+ if (issue_reset) {
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, FULL_RESET);
+ }
+out:
+ adapter->driver_cmds.transport_cmd.status = LEAPRAID_CMD_NOT_USED;
+ if (data_out)
+ dma_free_coherent(&adapter->pdev->dev, h2c_size + c2h_size,
+ data_out, h2c_dma_addr);
+
+ mutex_unlock(&adapter->driver_cmds.transport_cmd.mutex);
+ return rc;
+}
+
+static void leapraid_transport_del_port(struct leapraid_adapter *adapter,
+ struct leapraid_sas_port *sas_port)
+{
+ dev_info(&sas_port->port->dev,
+ "remove port: sas addr=0x%016llx\n",
+ (unsigned long long)sas_port->remote_identify.sas_address);
+ switch (sas_port->remote_identify.device_type) {
+ case SAS_END_DEVICE:
+ leapraid_sas_dev_remove_by_sas_address(adapter,
+ sas_port->remote_identify.sas_address,
+ sas_port->card_port);
+ break;
+ case SAS_EDGE_EXPANDER_DEVICE:
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ leapraid_exp_rm(adapter, sas_port->remote_identify.sas_address,
+ sas_port->card_port);
+ break;
+ default:
+ break;
+ }
+}
+
+static void leapraid_transport_del_phy(struct leapraid_adapter *adapter,
+ struct leapraid_sas_port *sas_port,
+ struct leapraid_card_phy *card_phy)
+{
+ dev_info(&card_phy->phy->dev,
+ "remove phy: sas addr=0x%016llx, phy=%d\n",
+ (unsigned long long)sas_port->remote_identify.sas_address,
+ card_phy->phy_id);
+ list_del(&card_phy->port_siblings);
+ sas_port->phys_num--;
+ sas_port_delete_phy(sas_port->port, card_phy->phy);
+ card_phy->phy_is_assigned = false;
+}
+
+static void leapraid_transport_add_phy(struct leapraid_adapter *adapter,
+ struct leapraid_sas_port *sas_port,
+ struct leapraid_card_phy *card_phy)
+{
+ dev_info(&card_phy->phy->dev,
+ "add phy: sas addr=0x%016llx, phy=%d\n",
+ (unsigned long long)sas_port->remote_identify.sas_address,
+ card_phy->phy_id);
+ list_add_tail(&card_phy->port_siblings, &sas_port->phy_list);
+ sas_port->phys_num++;
+ sas_port_add_phy(sas_port->port, card_phy->phy);
+ card_phy->phy_is_assigned = true;
+}
+
+void leapraid_transport_attach_phy_to_port(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node,
+ struct leapraid_card_phy *card_phy,
+ u64 sas_address,
+ struct leapraid_card_port *card_port)
+{
+ struct leapraid_sas_port *sas_port;
+ struct leapraid_card_phy *card_phy_srch;
+
+ if (card_phy->phy_is_assigned)
+ return;
+
+ if (!card_port)
+ return;
+
+ list_for_each_entry(sas_port, &topo_node->sas_port_list, port_list) {
+ if (sas_port->remote_identify.sas_address != sas_address)
+ continue;
+
+ if (sas_port->card_port != card_port)
+ continue;
+
+ list_for_each_entry(card_phy_srch, &sas_port->phy_list,
+ port_siblings) {
+ if (card_phy_srch == card_phy)
+ return;
+ }
+ leapraid_transport_add_phy(adapter, sas_port, card_phy);
+ return;
+ }
+}
+
+void leapraid_transport_detach_phy_to_port(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node,
+ struct leapraid_card_phy *target_card_phy)
+{
+ struct leapraid_sas_port *sas_port, *sas_port_next;
+ struct leapraid_card_phy *cur_card_phy;
+
+ if (!target_card_phy->phy_is_assigned)
+ return;
+
+ list_for_each_entry_safe(sas_port, sas_port_next,
+ &topo_node->sas_port_list, port_list) {
+ list_for_each_entry(cur_card_phy, &sas_port->phy_list,
+ port_siblings) {
+ if (cur_card_phy != target_card_phy)
+ continue;
+
+ if (sas_port->phys_num == 1 &&
+ !adapter->access_ctrl.shost_recovering)
+ leapraid_transport_del_port(adapter, sas_port);
+ else
+ leapraid_transport_del_phy(adapter, sas_port,
+ target_card_phy);
+ return;
+ }
+ }
+}
+
+static void leapraid_detach_phy_from_old_port(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node,
+ u64 sas_address,
+ struct leapraid_card_port *card_port)
+{
+ int i;
+
+ for (i = 0; i < topo_node->phys_num; i++) {
+ if (topo_node->card_phy[i].remote_identify.sas_address !=
+ sas_address ||
+ topo_node->card_phy[i].card_port != card_port)
+ continue;
+ if (topo_node->card_phy[i].phy_is_assigned)
+ leapraid_transport_detach_phy_to_port(adapter,
+ topo_node,
+ &topo_node->card_phy[i]);
+ }
+}
+
+static struct leapraid_sas_port *leapraid_prepare_sas_port(
+ struct leapraid_adapter *adapter,
+ u16 handle, u64 sas_address,
+ struct leapraid_card_port *card_port,
+ struct leapraid_topo_node **out_topo_node)
+{
+ struct leapraid_topo_node *topo_node;
+ struct leapraid_sas_port *sas_port;
+ unsigned long flags;
+
+ sas_port = kzalloc(sizeof(*sas_port), GFP_KERNEL);
+ if (!sas_port)
+ return NULL;
+
+ INIT_LIST_HEAD(&sas_port->port_list);
+ INIT_LIST_HEAD(&sas_port->phy_list);
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ topo_node = leapraid_transport_topo_node_by_sas_addr(adapter,
+ sas_address,
+ card_port);
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ if (!topo_node) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to find parent node for sas addr 0x%016llx!\n",
+ __func__, sas_address);
+ kfree(sas_port);
+ return NULL;
+ }
+
+ if (leapraid_transport_set_identify(adapter, handle,
+ &sas_port->remote_identify)) {
+ kfree(sas_port);
+ return NULL;
+ }
+
+ if (sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
+ kfree(sas_port);
+ return NULL;
+ }
+
+ sas_port->card_port = card_port;
+ *out_topo_node = topo_node;
+
+ return sas_port;
+}
+
+static int leapraid_bind_phys_and_vphy(struct leapraid_adapter *adapter,
+ struct leapraid_sas_port *sas_port,
+ struct leapraid_topo_node *topo_node,
+ struct leapraid_card_port *card_port,
+ struct leapraid_vphy **out_vphy)
+{
+ struct leapraid_vphy *vphy = NULL;
+ int i;
+
+ for (i = 0; i < topo_node->phys_num; i++) {
+ if (topo_node->card_phy[i].remote_identify.sas_address !=
+ sas_port->remote_identify.sas_address ||
+ topo_node->card_phy[i].card_port != card_port)
+ continue;
+
+ list_add_tail(&topo_node->card_phy[i].port_siblings,
+ &sas_port->phy_list);
+ sas_port->phys_num++;
+
+ if (topo_node->hdl <= adapter->dev_topo.card.phys_num) {
+ if (!topo_node->card_phy[i].vphy) {
+ card_port->phy_mask |= BIT(i);
+ continue;
+ }
+
+ vphy = leapraid_get_vphy_by_phy(card_port, i);
+ if (!vphy)
+ return -1;
+ }
+ }
+
+ *out_vphy = vphy;
+ return sas_port->phys_num ? 0 : -1;
+}
+
+static struct sas_rphy *leapraid_create_and_register_rphy(
+ struct leapraid_adapter *adapter,
+ struct leapraid_sas_port *sas_port,
+ struct leapraid_topo_node *topo_node,
+ struct leapraid_card_port *card_port,
+ struct leapraid_vphy *vphy)
+{
+ struct leapraid_sas_dev *sas_dev = NULL;
+ struct leapraid_card_phy *card_phy;
+ struct sas_port *port;
+ struct sas_rphy *rphy;
+
+ if (!topo_node->parent_dev)
+ return NULL;
+
+ port = sas_port_alloc_num(topo_node->parent_dev);
+ if (sas_port_add(port))
+ return NULL;
+
+ list_for_each_entry(card_phy, &sas_port->phy_list, port_siblings) {
+ sas_port_add_phy(port, card_phy->phy);
+ card_phy->phy_is_assigned = true;
+ card_phy->card_port = card_port;
+ }
+
+ if (sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ sas_dev = leapraid_get_sas_dev_by_addr(adapter,
+ sas_port->remote_identify.sas_address,
+ card_port);
+ if (!sas_dev)
+ return NULL;
+ sas_dev->pend_sas_rphy_add = 1;
+ rphy = sas_end_device_alloc(port);
+ sas_dev->rphy = rphy;
+
+ if (topo_node->hdl <= adapter->dev_topo.card.phys_num) {
+ if (!vphy)
+ card_port->sas_address = sas_dev->sas_addr;
+ else
+ vphy->sas_address = sas_dev->sas_addr;
+ }
+
+ } else {
+ rphy = sas_expander_alloc(port,
+ sas_port->remote_identify.device_type);
+ if (topo_node->hdl <= adapter->dev_topo.card.phys_num)
+ card_port->sas_address =
+ sas_port->remote_identify.sas_address;
+ }
+
+ rphy->identify = sas_port->remote_identify;
+
+ if (sas_rphy_add(rphy))
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to add rphy\n", __func__);
+
+ if (sas_dev) {
+ sas_dev->pend_sas_rphy_add = 0;
+ leapraid_sdev_put(sas_dev);
+ }
+
+ sas_port->port = port;
+ return rphy;
+}
+
+struct leapraid_sas_port *leapraid_transport_port_add(
+ struct leapraid_adapter *adapter,
+ u16 hdl, u64 sas_address,
+ struct leapraid_card_port *card_port)
+{
+ struct leapraid_card_phy *card_phy, *card_phy_next;
+ struct leapraid_topo_node *topo_node = NULL;
+ struct leapraid_sas_port *sas_port = NULL;
+ struct leapraid_vphy *vphy = NULL;
+ struct sas_rphy *rphy = NULL;
+ unsigned long flags;
+
+ if (!card_port)
+ return NULL;
+
+ sas_port = leapraid_prepare_sas_port(adapter, hdl, sas_address,
+ card_port, &topo_node);
+ if (!sas_port)
+ return NULL;
+
+ leapraid_detach_phy_from_old_port(adapter,
+ topo_node,
+ sas_port->remote_identify.sas_address,
+ card_port);
+
+ if (leapraid_bind_phys_and_vphy(adapter, sas_port, topo_node,
+ card_port, &vphy))
+ goto out_fail;
+
+ rphy = leapraid_create_and_register_rphy(adapter, sas_port, topo_node,
+ card_port, vphy);
+ if (!rphy)
+ goto out_fail;
+
+ dev_info(&rphy->dev,
+ "%s: added dev: hdl=0x%04x, sas addr=0x%016llx\n",
+ __func__, hdl,
+ (unsigned long long)sas_port->remote_identify.sas_address);
+
+ sas_port->rphy = rphy;
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ list_add_tail(&sas_port->port_list, &topo_node->sas_port_list);
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ if (sas_port->remote_identify.device_type ==
+ LEAPRAID_DEVTYP_EDGE_EXPANDER ||
+ sas_port->remote_identify.device_type ==
+ LEAPRAID_DEVTYP_FANOUT_EXPANDER)
+ leapraid_transport_exp_report_manu(adapter,
+ sas_port->remote_identify.sas_address,
+ rphy_to_expander_device(rphy),
+ card_port->port_id);
+
+ return sas_port;
+
+out_fail:
+ list_for_each_entry_safe(card_phy, card_phy_next,
+ &sas_port->phy_list, port_siblings)
+ list_del(&card_phy->port_siblings);
+ kfree(sas_port);
+ return NULL;
+}
+
+static struct leapraid_sas_port *leapraid_find_and_remove_sas_port(
+ struct leapraid_topo_node *topo_node,
+ u64 sas_address,
+ struct leapraid_card_port *remove_card_port,
+ bool *found)
+{
+ struct leapraid_sas_port *sas_port, *sas_port_next;
+
+ list_for_each_entry_safe(sas_port, sas_port_next,
+ &topo_node->sas_port_list, port_list) {
+ if (sas_port->remote_identify.sas_address != sas_address)
+ continue;
+
+ if (sas_port->card_port != remove_card_port)
+ continue;
+
+ *found = true;
+ list_del(&sas_port->port_list);
+ return sas_port;
+ }
+ return NULL;
+}
+
+static void leapraid_cleanup_card_port_and_vphys(
+ struct leapraid_adapter *adapter,
+ u64 sas_address,
+ struct leapraid_card_port *remove_card_port)
+{
+ struct leapraid_card_port *card_port, *card_port_next;
+ struct leapraid_vphy *vphy, *vphy_next;
+
+ if (remove_card_port->vphys_mask) {
+ list_for_each_entry_safe(vphy, vphy_next,
+ &remove_card_port->vphys_list, list) {
+ if (vphy->sas_address != sas_address)
+ continue;
+
+ dev_info(&adapter->pdev->dev,
+ "%s: remove vphy: %p from port: %p, port_id=%d\n",
+ __func__, vphy, remove_card_port,
+ remove_card_port->port_id);
+
+ remove_card_port->vphys_mask &= ~vphy->phy_mask;
+ list_del(&vphy->list);
+ kfree(vphy);
+ }
+
+ if (!remove_card_port->vphys_mask &&
+ !remove_card_port->sas_address) {
+ dev_info(&adapter->pdev->dev,
+ "%s: remove empty hba_port: %p, port_id=%d\n",
+ __func__,
+ remove_card_port,
+ remove_card_port->port_id);
+ list_del(&remove_card_port->list);
+ kfree(remove_card_port);
+ remove_card_port = NULL;
+ }
+ }
+
+ list_for_each_entry_safe(card_port, card_port_next,
+ &adapter->dev_topo.card_port_list, list) {
+ if (card_port != remove_card_port)
+ continue;
+
+ if (card_port->sas_address != sas_address)
+ continue;
+
+ if (!remove_card_port->vphys_mask) {
+ dev_info(&adapter->pdev->dev,
+ "%s: remove hba_port: %p, port_id=%d\n",
+ __func__, card_port, card_port->port_id);
+ list_del(&card_port->list);
+ kfree(card_port);
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: clear sas_address of hba_port: %p, port_id=%d\n",
+ __func__, card_port, card_port->port_id);
+ remove_card_port->sas_address = 0;
+ }
+ break;
+ }
+}
+
+static void leapraid_clear_topo_node_phys(struct leapraid_topo_node *topo_node,
+ u64 sas_address)
+{
+ int i;
+
+ for (i = 0; i < topo_node->phys_num; i++) {
+ if (topo_node->card_phy[i].remote_identify.sas_address ==
+ sas_address) {
+ memset(&topo_node->card_phy[i].remote_identify, 0,
+ sizeof(struct sas_identify));
+ topo_node->card_phy[i].vphy = false;
+ }
+ }
+}
+
+void leapraid_transport_port_remove(struct leapraid_adapter *adapter,
+ u64 sas_address, u64 sas_address_parent,
+ struct leapraid_card_port *remove_card_port)
+{
+ struct leapraid_card_phy *card_phy, *card_phy_next;
+ struct leapraid_sas_port *sas_port = NULL;
+ struct leapraid_topo_node *topo_node;
+ unsigned long flags;
+ bool found = false;
+
+ if (!remove_card_port)
+ return;
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+
+ topo_node = leapraid_transport_topo_node_by_sas_addr(adapter,
+ sas_address_parent,
+ remove_card_port);
+ if (!topo_node) {
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock,
+ flags);
+ return;
+ }
+
+ sas_port = leapraid_find_and_remove_sas_port(topo_node, sas_address,
+ remove_card_port, &found);
+
+ if (!found) {
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock,
+ flags);
+ return;
+ }
+
+ if (topo_node->hdl <= adapter->dev_topo.card.phys_num &&
+ adapter->adapter_attr.enable_mp)
+ leapraid_cleanup_card_port_and_vphys(adapter, sas_address,
+ remove_card_port);
+
+ leapraid_clear_topo_node_phys(topo_node, sas_address);
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ list_for_each_entry_safe(card_phy, card_phy_next,
+ &sas_port->phy_list, port_siblings) {
+ card_phy->phy_is_assigned = false;
+ if (!adapter->access_ctrl.host_removing)
+ sas_port_delete_phy(sas_port->port, card_phy->phy);
+
+ list_del(&card_phy->port_siblings);
+ }
+
+ if (!adapter->access_ctrl.host_removing)
+ sas_port_delete(sas_port->port);
+
+ dev_info(&adapter->pdev->dev,
+ "%s: removed sas_port for sas addr=0x%016llx\n",
+ __func__, (unsigned long long)sas_address);
+
+ kfree(sas_port);
+}
+
+static void leapraid_init_sas_or_exp_phy(struct leapraid_adapter *adapter,
+ struct leapraid_card_phy *card_phy,
+ struct sas_phy *phy,
+ struct leapraid_sas_phy_p0 *phy_pg0,
+ struct leapraid_exp_p1 *exp_pg1)
+{
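+	/* Callers pass exactly one of phy_pg0 (HBA phy) or exp_pg1 (expander phy). */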
+ if (exp_pg1 && phy_pg0)
+ return;
+
+ if (!exp_pg1 && !phy_pg0)
+ return;
+
+ phy->identify = card_phy->identify;
+ phy->identify.phy_identifier = card_phy->phy_id;
+ phy->negotiated_linkrate = phy_pg0 ?
+ leapraid_transport_convert_phy_link_rate(
+ phy_pg0->neg_link_rate &
+ LEAPRAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL) :
+ leapraid_transport_convert_phy_link_rate(
+ exp_pg1->neg_link_rate &
+ LEAPRAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ phy->minimum_linkrate_hw = phy_pg0 ?
+ leapraid_transport_convert_phy_link_rate(
+ phy_pg0->hw_link_rate &
+ LEAPRAID_SAS_HWRATE_MIN_RATE_MASK) :
+ leapraid_transport_convert_phy_link_rate(
+ exp_pg1->hw_link_rate &
+ LEAPRAID_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = phy_pg0 ?
+ leapraid_transport_convert_phy_link_rate(
+ phy_pg0->hw_link_rate >> 4) :
+ leapraid_transport_convert_phy_link_rate(
+ exp_pg1->hw_link_rate >> 4);
+ phy->minimum_linkrate = phy_pg0 ?
+ leapraid_transport_convert_phy_link_rate(
+ phy_pg0->p_link_rate &
+ LEAPRAID_SAS_PRATE_MIN_RATE_MASK) :
+ leapraid_transport_convert_phy_link_rate(
+ exp_pg1->p_link_rate &
+ LEAPRAID_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = phy_pg0 ?
+ leapraid_transport_convert_phy_link_rate(
+ phy_pg0->p_link_rate >> 4) :
+ leapraid_transport_convert_phy_link_rate(
+ exp_pg1->p_link_rate >> 4);
+ phy->hostdata = card_phy->card_port;
+}
+
+void leapraid_transport_add_card_phy(struct leapraid_adapter *adapter,
+ struct leapraid_card_phy *card_phy,
+ struct leapraid_sas_phy_p0 *phy_pg0,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+
+ INIT_LIST_HEAD(&card_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, card_phy->phy_id);
+ if (!phy) {
+ dev_err(&adapter->pdev->dev,
+ "%s sas_phy_alloc failed!\n", __func__);
+ return;
+ }
+
+	if (leapraid_transport_set_identify(adapter, card_phy->hdl,
+					    &card_phy->identify)) {
+ dev_err(&adapter->pdev->dev,
+ "%s set phy handle identify failed!\n", __func__);
+ sas_phy_free(phy);
+ return;
+ }
+
+ card_phy->attached_hdl = le16_to_cpu(phy_pg0->attached_dev_hdl);
+ if (card_phy->attached_hdl) {
+ if (leapraid_transport_set_identify(adapter,
+ card_phy->attached_hdl,
+ &card_phy->remote_identify)) {
+ dev_err(&adapter->pdev->dev,
+ "%s set phy attached handle identify failed!\n",
+ __func__);
+ sas_phy_free(phy);
+ return;
+ }
+ }
+
+ leapraid_init_sas_or_exp_phy(adapter, card_phy, phy, phy_pg0, NULL);
+
+	if (sas_phy_add(phy)) {
+ sas_phy_free(phy);
+ return;
+ }
+
+ card_phy->phy = phy;
+}
+
+int leapraid_transport_add_exp_phy(struct leapraid_adapter *adapter,
+ struct leapraid_card_phy *card_phy,
+ struct leapraid_exp_p1 *exp_pg1,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+
+ INIT_LIST_HEAD(&card_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, card_phy->phy_id);
+ if (!phy) {
+ dev_err(&adapter->pdev->dev,
+ "%s sas_phy_alloc failed!\n", __func__);
+ return -EFAULT;
+ }
+
+	if (leapraid_transport_set_identify(adapter, card_phy->hdl,
+					    &card_phy->identify)) {
+ dev_err(&adapter->pdev->dev,
+ "%s set phy hdl identify failed!\n", __func__);
+ sas_phy_free(phy);
+ return -EFAULT;
+ }
+
+ card_phy->attached_hdl = le16_to_cpu(exp_pg1->attached_dev_hdl);
+ if (card_phy->attached_hdl) {
+ if (leapraid_transport_set_identify(adapter,
+ card_phy->attached_hdl,
+ &card_phy->remote_identify)) {
+ dev_err(&adapter->pdev->dev,
+ "%s set phy attached hdl identify failed!\n",
+ __func__);
+			sas_phy_free(phy);
+			return -EFAULT;
+		}
+ }
+
+ leapraid_init_sas_or_exp_phy(adapter, card_phy, phy, NULL, exp_pg1);
+
+	if (sas_phy_add(phy)) {
+ sas_phy_free(phy);
+ return -EFAULT;
+ }
+
+ card_phy->phy = phy;
+ return 0;
+}
+
+void leapraid_transport_update_links(struct leapraid_adapter *adapter,
+ u64 sas_address, u16 hdl, u8 phy_index,
+ u8 link_rate, struct leapraid_card_port *target_card_port)
+{
+ struct leapraid_topo_node *topo_node;
+ struct leapraid_card_phy *card_phy;
+ struct leapraid_card_port *card_port = NULL;
+ unsigned long flags;
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering)
+ return;
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ topo_node = leapraid_transport_topo_node_by_sas_addr(adapter,
+ sas_address,
+ target_card_port);
+ if (!topo_node) {
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock,
+ flags);
+ return;
+ }
+
+ card_phy = &topo_node->card_phy[phy_index];
+ card_phy->attached_hdl = hdl;
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ if (hdl && link_rate >= LEAPRAID_SAS_NEG_LINK_RATE_1_5) {
+ leapraid_transport_set_identify(adapter, hdl,
+ &card_phy->remote_identify);
+ if (topo_node->hdl <= adapter->dev_topo.card.phys_num &&
+ adapter->adapter_attr.enable_mp) {
+ list_for_each_entry(card_port,
+ &adapter->dev_topo.card_port_list,
+ list) {
+ if (card_port->sas_address == sas_address &&
+ card_port == target_card_port)
+ card_port->phy_mask |=
+ BIT(card_phy->phy_id);
+ }
+ }
+ leapraid_transport_attach_phy_to_port(adapter, topo_node,
+ card_phy,
+ card_phy->remote_identify.sas_address,
+ target_card_port);
+ } else {
+ memset(&card_phy->remote_identify, 0,
+ sizeof(struct sas_identify));
+ }
+
+ if (card_phy->phy)
+ card_phy->phy->negotiated_linkrate =
+ leapraid_transport_convert_phy_link_rate(link_rate);
+}
+
+static int leapraid_dma_map_buffer(struct device *dev, struct bsg_buffer *buf,
+ dma_addr_t *dma_addr,
+ size_t *dma_len, void **p)
+{
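+	/* Bounce multi-entry S/G lists through one coherent buffer. */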
+ if (buf->sg_cnt > 1) {
+ *p = dma_alloc_coherent(dev, buf->payload_len, dma_addr,
+ GFP_KERNEL);
+ if (!*p)
+ return -ENOMEM;
+
+ *dma_len = buf->payload_len;
+ } else {
+ if (!dma_map_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL))
+ return -ENOMEM;
+
+ *dma_addr = sg_dma_address(buf->sg_list);
+ *dma_len = sg_dma_len(buf->sg_list);
+ *p = NULL;
+ }
+ return 0;
+}
+
+static void leapraid_dma_unmap_buffer(struct device *dev,
+ struct bsg_buffer *buf,
+ dma_addr_t dma_addr,
+ void *p)
+{
+ if (p)
+ dma_free_coherent(dev, buf->payload_len, p, dma_addr);
+ else
+ dma_unmap_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL);
+}
+
+static void leapraid_build_smp_task(struct leapraid_adapter *adapter,
+ struct sas_rphy *rphy,
+ dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size)
+{
+ struct leapraid_smp_passthrough_req *smp_passthrough_req;
+ void *psge;
+
+ smp_passthrough_req =
+ leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.transport_cmd.inter_taskid);
+ memset(smp_passthrough_req, 0, sizeof(*smp_passthrough_req));
+
+ smp_passthrough_req->func = LEAPRAID_FUNC_SMP_PASSTHROUGH;
+ smp_passthrough_req->physical_port =
+ leapraid_transport_get_port_id_by_rphy(adapter, rphy);
+ smp_passthrough_req->sas_address = (rphy) ?
+ cpu_to_le64(rphy->identify.sas_address) :
+ cpu_to_le64(adapter->dev_topo.card.sas_address);
+ smp_passthrough_req->req_data_len =
+ cpu_to_le16(h2c_size - LEAPRAID_SMP_FRAME_HEADER_SIZE);
+ psge = &smp_passthrough_req->sgl;
+ leapraid_build_ieee_sg(adapter, psge, h2c_dma_addr,
+ h2c_size - LEAPRAID_SMP_FRAME_HEADER_SIZE,
+ c2h_dma_addr,
+ c2h_size - LEAPRAID_SMP_FRAME_HEADER_SIZE);
+}
+
+static int leapraid_send_smp_req(struct leapraid_adapter *adapter)
+{
+ dev_info(&adapter->pdev->dev,
+ "%s: sending smp request\n", __func__);
+ init_completion(&adapter->driver_cmds.transport_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.transport_cmd.inter_taskid);
+ wait_for_completion_timeout(&adapter->driver_cmds.transport_cmd.done,
+ LEAPRAID_TRANSPORT_CMD_TIMEOUT * HZ);
+ if (!(adapter->driver_cmds.transport_cmd.status & LEAPRAID_CMD_DONE)) {
+ dev_err(&adapter->pdev->dev, "%s: timeout\n", __func__);
+ if (!(adapter->driver_cmds.transport_cmd.status &
+ LEAPRAID_CMD_RESET)) {
+ dev_info(&adapter->pdev->dev,
+ "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, FULL_RESET);
+ return -ETIMEDOUT;
+ }
+ }
+
+ dev_info(&adapter->pdev->dev, "%s: smp request complete\n", __func__);
+ if (!(adapter->driver_cmds.transport_cmd.status &
+ LEAPRAID_CMD_REPLY_VALID)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: smp request no reply\n", __func__);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static void leapraid_handle_smp_rep(struct leapraid_adapter *adapter,
+ struct bsg_job *job, void *addr_in,
+ unsigned int *reslen)
+{
+ struct leapraid_smp_passthrough_rep *smp_passthrough_rep;
+
+ smp_passthrough_rep =
+ (void *)(&adapter->driver_cmds.transport_cmd.reply);
+
+ dev_info(&adapter->pdev->dev, "%s: response data len=%d\n",
+ __func__, le16_to_cpu(smp_passthrough_rep->resp_data_len));
+
+ memcpy(job->reply, smp_passthrough_rep, sizeof(*smp_passthrough_rep));
+ job->reply_len = sizeof(*smp_passthrough_rep);
+ *reslen = le16_to_cpu(smp_passthrough_rep->resp_data_len);
+
+ if (addr_in)
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, addr_in,
+ job->reply_payload.payload_len);
+}
+
+static void leapraid_transport_smp_handler(struct bsg_job *job,
+ struct Scsi_Host *shost,
+ struct sas_rphy *rphy)
+{
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ dma_addr_t c2h_dma_addr;
+ dma_addr_t h2c_dma_addr;
+ void *addr_in = NULL;
+ void *addr_out = NULL;
+ size_t c2h_size;
+ size_t h2c_size;
+ int rc;
+ unsigned int reslen = 0;
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering) {
+ rc = -EFAULT;
+ goto done;
+ }
+
+ rc = mutex_lock_interruptible(&adapter->driver_cmds.transport_cmd.mutex);
+ if (rc)
+ goto done;
+
+ adapter->driver_cmds.transport_cmd.status = LEAPRAID_CMD_PENDING;
+ rc = leapraid_dma_map_buffer(&adapter->pdev->dev,
+ &job->request_payload,
+ &h2c_dma_addr, &h2c_size, &addr_out);
+ if (rc)
+ goto release_lock;
+
+ if (addr_out)
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, addr_out,
+ job->request_payload.payload_len);
+
+ rc = leapraid_dma_map_buffer(&adapter->pdev->dev, &job->reply_payload,
+ &c2h_dma_addr, &c2h_size, &addr_in);
+ if (rc)
+ goto free_req_buf;
+
+ rc = leapraid_check_adapter_is_op(adapter);
+ if (rc)
+ goto free_rep_buf;
+
+ leapraid_build_smp_task(adapter, rphy, h2c_dma_addr,
+ h2c_size, c2h_dma_addr, c2h_size);
+
+ rc = leapraid_send_smp_req(adapter);
+ if (rc)
+ goto free_rep_buf;
+
+ leapraid_handle_smp_rep(adapter, job, addr_in, &reslen);
+
+free_rep_buf:
+ leapraid_dma_unmap_buffer(&adapter->pdev->dev, &job->reply_payload,
+ c2h_dma_addr, addr_in);
+free_req_buf:
+ leapraid_dma_unmap_buffer(&adapter->pdev->dev, &job->request_payload,
+ h2c_dma_addr, addr_out);
+release_lock:
+ adapter->driver_cmds.transport_cmd.status = LEAPRAID_CMD_NOT_USED;
+ mutex_unlock(&adapter->driver_cmds.transport_cmd.mutex);
+done:
+ bsg_job_done(job, rc, reslen);
+}
+
+struct sas_function_template leapraid_transport_functions = {
+ .smp_handler = leapraid_transport_smp_handler,
+};
+
+struct scsi_transport_template *leapraid_transport_template;
--
2.25.1
[PATCH OLK-6.6] crypto: af_alg - zero initialize memory allocated via sock_kmalloc
by Yi Yang 22 Jan '26
From: Shivani Agarwal <shivani.agarwal(a)broadcom.com>
stable inclusion
from stable-v6.6.120
commit 84238876e3b3b262cf62d5f4d1338e983fb27010
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13428
CVE: CVE-2025-71113
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 6f6e309328d53a10c0fe1f77dec2db73373179b6 upstream.
Several crypto user API contexts and requests allocated with
sock_kmalloc() were left uninitialized, relying on callers to
set fields explicitly. This resulted in the use of uninitialized
data in certain error paths or when new fields are added in the
future.
The ACVP patches also contain two user-space interface files:
algif_kpp.c and algif_akcipher.c. These too rely on proper
initialization of their context structures.
A particular issue has been observed with the newly added
'inflight' variable introduced in af_alg_ctx by commit:
67b164a871af ("crypto: af_alg - Disallow multiple in-flight AIO requests")
Because the context is not memset to zero after allocation,
the inflight variable has contained garbage values. As a result,
af_alg_alloc_areq() has incorrectly returned -EBUSY randomly when
the garbage value was interpreted as true:
https://github.com/gregkh/linux/blame/master/crypto/af_alg.c#L1209
The check directly tests ctx->inflight without explicitly
comparing against true/false. Since inflight is only ever set to
true or false later, an uninitialized value has triggered
-EBUSY failures. Zero-initializing memory allocated with
sock_kmalloc() ensures inflight and other fields start in a known
state, removing random issues caused by uninitialized data.
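The pattern the fix applies is the same in each hunk below; as a sketch
(ctx and len stand in for the per-algorithm context type and its size):

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);	/* all fields, present and future, start zeroed */
	ctx->len = len;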
Fixes: fe869cdb89c9 ("crypto: algif_hash - User-space interface for hash operations")
Fixes: 5afdfd22e6ba ("crypto: algif_rng - add random number generator support")
Fixes: 2d97591ef43d ("crypto: af_alg - consolidation of duplicate code")
Fixes: 67b164a871af ("crypto: af_alg - Disallow multiple in-flight AIO requests")
Cc: stable(a)vger.kernel.org
Signed-off-by: Shivani Agarwal <shivani.agarwal(a)broadcom.com>
Signed-off-by: Herbert Xu <herbert(a)gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Yi Yang <yiyang13(a)huawei.com>
---
crypto/af_alg.c | 5 ++---
crypto/algif_hash.c | 3 +--
crypto/algif_rng.c | 3 +--
3 files changed, 4 insertions(+), 7 deletions(-)
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 886eccb97b04..3d0b7542f771 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -1211,15 +1211,14 @@ struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
if (unlikely(!areq))
return ERR_PTR(-ENOMEM);
+ memset(areq, 0, areqlen);
+
ctx->inflight = true;
areq->areqlen = areqlen;
areq->sk = sk;
areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl;
- areq->last_rsgl = NULL;
INIT_LIST_HEAD(&areq->rsgl_list);
- areq->tsgl = NULL;
- areq->tsgl_entries = 0;
return areq;
}
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 5ab7441734b8..265cabcb95e0 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -416,9 +416,8 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
if (!ctx)
return -ENOMEM;
- ctx->result = NULL;
+ memset(ctx, 0, len);
ctx->len = len;
- ctx->more = false;
crypto_init_wait(&ctx->wait);
ask->private = ctx;
diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c
index 10c41adac3b1..1a86e40c8372 100644
--- a/crypto/algif_rng.c
+++ b/crypto/algif_rng.c
@@ -248,9 +248,8 @@ static int rng_accept_parent(void *private, struct sock *sk)
if (!ctx)
return -ENOMEM;
+ memset(ctx, 0, len);
ctx->len = len;
- ctx->addtl = NULL;
- ctx->addtl_len = 0;
/*
* No seeding done at that point -- if multiple accepts are
--
2.25.1
2
1
Add new oenetcls features
Yue Haibing (9):
net/oenetcls: Balancing softirq to improve performance
net/oenetcls: Add mode 2 for rps numa affinity
net/oenetcls: Prioritize oenetcls hooks over rps
net/oenetcls: Fix possible hash collision issue
net/oenetcls: Add local flow NUMA-aware rps
net/oenetcls: Supports rxq multiplexing
net/oenetcls: Support ipv6 for ntuple mode
net/oenetcls: Make OENETCLS default as module
net/oenetcls: Add rps policy switch for phy NIC
include/linux/oenetcls.h | 66 +++++--
include/linux/skbuff.h | 4 +
include/net/sock.h | 4 +
net/core/dev.c | 32 ++--
net/core/sock.c | 3 +
net/ipv4/tcp.c | 5 +-
net/oenetcls/Kconfig | 10 +-
net/oenetcls/oenetcls.h | 30 +++-
net/oenetcls/oenetcls_flow.c | 305 +++++++++++++++++++++++++++------
net/oenetcls/oenetcls_main.c | 193 ++++++++++++++++-----
net/oenetcls/oenetcls_ntuple.c | 255 ++++++++++++++++-----------
11 files changed, 689 insertions(+), 218 deletions(-)
--
2.34.1
2
10
Use the is_leap_year() helper from rtc.h instead of
writing it by hand
Signed-off-by: Jinjie Ruan <ruanjinjie(a)huawei.com>
---
kernel/time/time_test.c | 11 ++---------
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/kernel/time/time_test.c b/kernel/time/time_test.c
index 2889763165e5..7c2fb5f775eb 100644
--- a/kernel/time/time_test.c
+++ b/kernel/time/time_test.c
@@ -2,14 +2,7 @@
#include <kunit/test.h>
#include <linux/time.h>
-
-/*
- * Traditional implementation of leap year evaluation.
- */
-static bool is_leap(long year)
-{
- return year % 4 == 0 && (year % 100 != 0 || year % 400 == 0);
-}
+#include <linux/rtc.h>
/*
* Gets the last day of a month.
@@ -17,7 +10,7 @@ static bool is_leap(long year)
static int last_day_of_month(long year, int month)
{
if (month == 2)
- return 28 + is_leap(year);
+ return 28 + is_leap_year(year);
if (month == 4 || month == 6 || month == 9 || month == 11)
return 30;
return 31;
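For reference, the rtc.h helper being switched to reads roughly as follows
in recent kernels (quoted from memory, so treat it as a sketch). Note it
takes an unsigned int rather than the long of the removed local helper,
which makes no difference for the positive years this test exercises:

	static inline bool is_leap_year(unsigned int year)
	{
		return (!(year % 4) && (year % 100)) || !(year % 400);
	}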
--
2.34.1
1
0
[PATCH OLK-6.6] sched/fair: Track idle balance interval with idle_stamp in balance_fair
by Chen Jinghuang 22 Jan '26
by Chen Jinghuang 22 Jan '26
22 Jan '26
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8PIYZ
--------------------------------
Commit 70769fe636ef ("sched/fair: Hoist idle_stamp up from
idle_balance") forgot to add the complete idle_stamp start/end tracking
interval around newidle_balance() in balance_fair(). Fix this by
wrapping the newidle_balance() call with rq_idle_stamp_update() and
rq_idle_stamp_clear().
Fixes: 70769fe636ef ("sched/fair: Hoist idle_stamp up from idle_balance")
Signed-off-by: Chen Jinghuang <chenjinghuang2(a)huawei.com>
---
kernel/sched/fair.c | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b21c1ba1ded1..e60f19cb0fee 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9467,10 +9467,19 @@ static void task_dead_fair(struct task_struct *p)
static int
balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
+ int new_tasks;
+
if (rq->nr_running)
return 1;
- return newidle_balance(rq, rf) != 0;
+ rq_idle_stamp_update(rq);
+
+ new_tasks = newidle_balance(rq, rf);
+
+ if (new_tasks)
+ rq_idle_stamp_clear(rq);
+
+ return new_tasks != 0;
}
#endif /* CONFIG_SMP */
--
2.34.1
2
1
*** fix CVE-2025-37800 ***
Dmitry Torokhov (1):
[Backport] driver core: fix potential NULL pointer dereference in
dev_uevent()
Lin Ruifeng (1):
[Huawei] Revert "Revert "Revert "driver core: Fix uevent_show() vs
driver detach race"""
drivers/base/base.h | 13 ++++++++++++-
drivers/base/bus.c | 2 +-
drivers/base/core.c | 42 ++++++++++++++++++++++++++++++++++--------
drivers/base/module.c | 4 ----
4 files changed, 47 insertions(+), 14 deletions(-)
--
2.43.0
2
3
[PATCH OLK-5.10] [Backport] drm/amd/pm/powerplay/hwmgr/smu7_thermal: Prevent division by zero
by Lin Ruifeng 22 Jan '26
by Lin Ruifeng 22 Jan '26
22 Jan '26
From: Denis Arefev <arefev(a)swemel.ru>
stable inclusion
from stable-v5.10.237
commit 3cdd02cb70682d7d205ca6dc02a4d1eb76758d24
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/10440
CVE: CVE-2025-37768
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 7c246a05df51c52fe0852ce56ba10c41e6ed1f39 upstream.
The user can set any speed value.
If speed is greater than UINT_MAX/8, division by zero is possible.
Found by Linux Verification Center (linuxtesting.org) with SVACE.
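A self-contained userspace demonstration of the overflow; the factor of 8
matches the new bound and is assumed here to come from the driver's
RPM-to-tach-period conversion:

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int speed = UINT_MAX / 8 + 1;	/* 536870912, user-controlled */
		unsigned int divisor = speed * 8;	/* 2^32 wraps to exactly 0 */

		printf("speed=%u divisor=%u\n", speed, divisor);
		return 0;	/* dividing by 'divisor' here would trap */
	}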
Fixes: c52dcf49195d ("drm/amd/pp: Avoid divide-by-zero in fan_ctrl_set_fan_speed_rpm")
Signed-off-by: Denis Arefev <arefev(a)swemel.ru>
Signed-off-by: Alex Deucher <alexander.deucher(a)amd.com>
Cc: stable(a)vger.kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Lin Ruifeng <linruifeng4(a)huawei.com>
---
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
index 0b30f73649a8..49f97d612421 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
@@ -261,10 +261,10 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
(hwmgr->thermal_controller.fanInfo.
ucTachometerPulsesPerRevolution == 0) ||
- speed == 0 ||
+ (!speed || speed > UINT_MAX/8) ||
(speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
- return 0;
+ return -EINVAL;
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
--
2.43.0
2
1
[PATCH OLK-5.10] [Backport] scsi: qla2xxx: Fix premature hw access after PCI error
by Lin Ruifeng 22 Jan '26
by Lin Ruifeng 22 Jan '26
22 Jan '26
From: Quinn Tran <qutran(a)marvell.com>
stable inclusion
from stable-v5.15.33
commit 5ef6319f9882699613d5182fbd7929b017e8c5ab
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/296
CVE: CVE-2022-49157
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit e35920ab7874d5e2faeb4f958a74bfa793f1ce5a upstream.
After a recoverable PCI error has been detected and recovered, the qla
driver needs to check whether the error condition still persists and/or
wait for the OS to give the resume signal.
Sep 8 22:26:03 localhost kernel: WARNING: CPU: 9 PID: 124606 at qla_tmpl.c:440
qla27xx_fwdt_entry_t266+0x55/0x60 [qla2xxx]
Sep 8 22:26:03 localhost kernel: RIP: 0010:qla27xx_fwdt_entry_t266+0x55/0x60
[qla2xxx]
Sep 8 22:26:03 localhost kernel: Call Trace:
Sep 8 22:26:03 localhost kernel: ? qla27xx_walk_template+0xb1/0x1b0 [qla2xxx]
Sep 8 22:26:03 localhost kernel: ? qla27xx_execute_fwdt_template+0x12a/0x160
[qla2xxx]
Sep 8 22:26:03 localhost kernel: ? qla27xx_fwdump+0xa0/0x1c0 [qla2xxx]
Sep 8 22:26:03 localhost kernel: ? qla2xxx_pci_mmio_enabled+0xfb/0x120
[qla2xxx]
Sep 8 22:26:03 localhost kernel: ? report_mmio_enabled+0x44/0x80
Sep 8 22:26:03 localhost kernel: ? report_slot_reset+0x80/0x80
Sep 8 22:26:03 localhost kernel: ? pci_walk_bus+0x70/0x90
Sep 8 22:26:03 localhost kernel: ? aer_dev_correctable_show+0xc0/0xc0
Sep 8 22:26:03 localhost kernel: ? pcie_do_recovery+0x1bb/0x240
Sep 8 22:26:03 localhost kernel: ? aer_recover_work_func+0xaa/0xd0
Sep 8 22:26:03 localhost kernel: ? process_one_work+0x1a7/0x360
..
Sep 8 22:26:03 localhost kernel: qla2xxx [0000:42:00.2]-8041:22: detected PCI
disconnect.
Sep 8 22:26:03 localhost kernel: qla2xxx [0000:42:00.2]-107ff:22:
qla27xx_fwdt_entry_t262: dump ram MB failed. Area 5h start 198013h end 198013h
Sep 8 22:26:03 localhost kernel: qla2xxx [0000:42:00.2]-107ff:22: Unable to
capture FW dump
Sep 8 22:26:03 localhost kernel: qla2xxx [0000:42:00.2]-1015:22: cmd=0x0,
waited 5221 msecs
Sep 8 22:26:03 localhost kernel: qla2xxx [0000:42:00.2]-680d:22: mmio
enabled returning.
Sep 8 22:26:03 localhost kernel: qla2xxx [0000:42:00.2]-d04c:22: MBX
Command timeout for cmd 0, iocontrol=ffffffff jiffies=10140f2e5
mb[0-3]=[0xffff 0xffff 0xffff 0xffff]
Link: https://lore.kernel.org/r/20220110050218.3958-6-njavali@marvell.com
Cc: stable(a)vger.kernel.org
Reviewed-by: Himanshu Madhani <himanshu.madhani(a)oracle.com>
Signed-off-by: Quinn Tran <qutran(a)marvell.com>
Signed-off-by: Nilesh Javali <njavali(a)marvell.com>
Signed-off-by: Martin K. Petersen <martin.petersen(a)oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Lin Ruifeng <linruifeng4(a)huawei.com>
---
drivers/scsi/qla2xxx/qla_os.c | 10 +++++++++-
drivers/scsi/qla2xxx/qla_tmpl.c | 9 +++++++--
2 files changed, 16 insertions(+), 3 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2775262a57c5..50d96055fa9b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -7479,7 +7479,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
switch (state) {
case pci_channel_io_normal:
- ha->flags.eeh_busy = 0;
+ qla_pci_set_eeh_busy(vha);
if (ql2xmqsupport || ql2xnvmeenable) {
set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
@@ -7520,9 +7520,16 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
"mmio enabled\n");
ha->pci_error_state = QLA_PCI_MMIO_ENABLED;
+
if (IS_QLA82XX(ha))
return PCI_ERS_RESULT_RECOVERED;
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, base_vha, 0x803f,
+ "During mmio enabled, PCI/Register disconnect still detected.\n");
+ goto out;
+ }
+
spin_lock_irqsave(&ha->hardware_lock, flags);
if (IS_QLA2100(ha) || IS_QLA2200(ha)){
stat = rd_reg_word(®->hccr);
@@ -7544,6 +7551,7 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
"RISC paused -- mmio_enabled, Dumping firmware.\n");
qla2xxx_dump_fw(base_vha);
}
+out:
/* set PCI_ERS_RESULT_NEED_RESET to trigger call to qla2xxx_pci_slot_reset */
ql_dbg(ql_dbg_aer, base_vha, 0x600d,
"mmio enabled returning.\n");
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 26c13a953b97..b0a74b036cf4 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -435,8 +435,13 @@ qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
{
ql_dbg(ql_dbg_misc, vha, 0xd20a,
"%s: reset risc [%lx]\n", __func__, *len);
- if (buf)
- WARN_ON_ONCE(qla24xx_soft_reset(vha->hw) != QLA_SUCCESS);
+ if (buf) {
+ if (qla24xx_soft_reset(vha->hw) != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_async, vha, 0x5001,
+ "%s: unable to soft reset\n", __func__);
+ return INVALID_ENTRY;
+ }
+ }
return qla27xx_next_entry(ent);
}
--
2.43.0
2
1
[PATCH OLK-6.6] [Backport] ASoC: SOF: Intel: hda-dai: Ensure DAI widget is valid during params
by Lin Ruifeng 22 Jan '26
by Lin Ruifeng 22 Jan '26
22 Jan '26
From: Bard Liao <yung-chuan.liao(a)linux.intel.com>
stable inclusion
from stable-v6.12.14
commit e012a77e4d7632cf615ba9625b1600ed8985c3b5
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/30
CVE: CVE-2024-58012
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 569922b82ca660f8b24e705f6cf674e6b1f99cc7 ]
Each CPU DAI should be associated with a widget. However, the topology
might not create the right number of DAI widgets for aggregated amps,
which will cause a NULL pointer dereference.
Check that the DAI widget associated with the CPU DAI is valid to prevent
a NULL pointer dereference due to missing DAI widgets in topologies with
aggregated amps.
Signed-off-by: Bard Liao <yung-chuan.liao(a)linux.intel.com>
Reviewed-by: Ranjani Sridharan <ranjani.sridharan(a)linux.intel.com>
Reviewed-by: Péter Ujfalusi <peter.ujfalusi(a)linux.intel.com>
Reviewed-by: Liam Girdwood <liam.r.girdwood(a)intel.com>
Link: https://patch.msgid.link/20241203104853.56956-1-yung-chuan.liao@linux.intel…
Signed-off-by: Mark Brown <broonie(a)kernel.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Conflicts:
sound/soc/sof/intel/hda-dai.c
sound/soc/sof/intel/hda.c
Signed-off-by: Lin Ruifeng <linruifeng4(a)huawei.com>
---
sound/soc/sof/intel/hda-dai.c | 6 ++++++
sound/soc/sof/intel/hda.c | 5 +++++
2 files changed, 11 insertions(+)
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
index 19ec1a45737e..cebd9db94215 100644
--- a/sound/soc/sof/intel/hda-dai.c
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -439,6 +439,12 @@ int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_sof_dev *sdev;
int ret;
+ if (!w) {
+ dev_err(cpu_dai->dev, "%s widget not found, check amp link num in the topology\n",
+ cpu_dai->name);
+ return -EINVAL;
+ }
+
ret = non_hda_dai_hw_params(substream, params, cpu_dai);
if (ret < 0) {
dev_err(cpu_dai->dev, "%s: non_hda_dai_hw_params failed %d\n", __func__, ret);
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index 15e6779efaa3..aa3f9c961173 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -102,6 +102,11 @@ static int sdw_params_stream(struct device *dev,
struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(d, params_data->substream->stream);
struct snd_sof_dai_config_data data = { 0 };
+ if (!w) {
+ dev_err(dev, "%s widget not found, check amp link num in the topology\n",
+ d->name);
+ return -EINVAL;
+ }
data.dai_index = (params_data->link_id << 8) | d->id;
data.dai_data = params_data->alh_stream_id;
--
2.43.0
2
1
22 Jan '26
From: Junhao He <hejunhao3(a)h-partners.com>
driver inclusion
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/8377
---------------------------------------------------------------------------
The do_sea() function defaults to using firmware-first mode, if supported.
It invokes ghes_notify_sea() in acpi/apei/ghes to report and handle the
SEA error. GHES uses a buffer to cache the most recent 4 kinds of SEA
errors. If the same kind of SEA error occurs again, GHES will skip
reporting it and will not add it to the "ghes_estatus_llist" list until
the cache entry times out after 10 seconds, at which point the SEA error
will be reprocessed.
GHES invokes ghes_proc_in_irq() to handle the SEA error, which ultimately
calls memory_failure() to process the page with hardware memory
corruption. If the same SEA error appears multiple times consecutively,
it indicates that the previous handling was incomplete or unable to
resolve the fault. In such cases, it is more appropriate to return a
failure when encountering the same error again, and then proceed to
arm64_do_kernel_sea() for further processing.
When hardware memory corruption occurs, a memory error interrupt is
triggered. If the kernel accesses the erroneous data, it will trigger
the SEA error exception handler. All such handlers call memory_failure()
to handle the faulty page.
If a memory error interrupt occurs first, followed by an SEA error
interrupt, the faulty page is first marked as poisoned by the memory
error interrupt handling, and then the SEA error interrupt handling will
send a SIGBUS signal to the process accessing the poisoned page.
However, if the SEA interrupt is reported first, the following exceptional
scenario occurs:
When a user process directly requests and accesses a page with hardware
memory corruption via mmap (such as with devmem), the page containing this
address may still be in a free buddy state in the kernel. At this point,
the page is marked as "poisoned" when the SEA handler claims it via
memory_failure(). However, since the process does not map the page through
the kernel's MMU, the kernel cannot send a SIGBUS signal to it, and the
memory error interrupt handling path does not support sending SIGBUS
either. As a result, the process continues to access the faulty page,
causing repeated entries into the SEA exception handler, ultimately
leading to an SEA error interrupt storm.
The following error logs illustrate this using the devmem process:
NOTICE: SEA Handle
NOTICE: SpsrEl3 = 0x60001000, ELR_EL3 = 0xffffc6ab42671400
NOTICE: skt[0x0]die[0x0]cluster[0x0]core[0x1]
NOTICE: EsrEl3 = 0x92000410
NOTICE: PA is valid: 0x1000093c00
NOTICE: Hest Set GenericError Data
[ 1419.542401][ C1] {57}[Hardware Error]: Hardware error from APEI Generic Hardware Error Source: 9
[ 1419.551435][ C1] {57}[Hardware Error]: event severity: recoverable
[ 1419.557865][ C1] {57}[Hardware Error]: Error 0, type: recoverable
[ 1419.564295][ C1] {57}[Hardware Error]: section_type: ARM processor error
[ 1419.571421][ C1] {57}[Hardware Error]: MIDR: 0x0000000000000000
[ 1419.571434][ C1] {57}[Hardware Error]: Multiprocessor Affinity Register (MPIDR): 0x0000000081000100
[ 1419.586813][ C1] {57}[Hardware Error]: error affinity level: 0
[ 1419.586821][ C1] {57}[Hardware Error]: running state: 0x1
[ 1419.602714][ C1] {57}[Hardware Error]: Power State Coordination Interface state: 0
[ 1419.602724][ C1] {57}[Hardware Error]: Error info structure 0:
[ 1419.614797][ C1] {57}[Hardware Error]: num errors: 1
[ 1419.614804][ C1] {57}[Hardware Error]: error_type: 0, cache error
[ 1419.629226][ C1] {57}[Hardware Error]: error_info: 0x0000000020400014
[ 1419.629234][ C1] {57}[Hardware Error]: cache level: 1
[ 1419.642006][ C1] {57}[Hardware Error]: the error has not been corrected
[ 1419.642013][ C1] {57}[Hardware Error]: physical fault address: 0x0000001000093c00
[ 1419.654001][ C1] {57}[Hardware Error]: Vendor specific error info has 48 bytes:
[ 1419.654014][ C1] {57}[Hardware Error]: 00000000: 00000000 00000000 00000000 00000000 ................
[ 1419.670685][ C1] {57}[Hardware Error]: 00000010: 00000000 00000000 00000000 00000000 ................
[ 1419.670692][ C1] {57}[Hardware Error]: 00000020: 00000000 00000000 00000000 00000000 ................
[ 1419.783606][T54990] Memory failure: 0x1000093: recovery action for free buddy page: Recovered
[ 1419.919580][ T9955] EDAC MC0: 1 UE Multi-bit ECC on unknown memory (node:0 card:1 module:71 bank:7 row:0 col:0 page:0x1000093 offset:0xc00 grain:1 - APEI location: node:0 card:257 module:71 bank:7 row:0 col:0)
NOTICE: SEA Handle
NOTICE: SpsrEl3 = 0x60001000, ELR_EL3 = 0xffffc6ab42671400
NOTICE: skt[0x0]die[0x0]cluster[0x0]core[0x1]
NOTICE: EsrEl3 = 0x92000410
NOTICE: PA is valid: 0x1000093c00
NOTICE: Hest Set GenericError Data
NOTICE: SEA Handle
NOTICE: SpsrEl3 = 0x60001000, ELR_EL3 = 0xffffc6ab42671400
NOTICE: skt[0x0]die[0x0]cluster[0x0]core[0x1]
NOTICE: EsrEl3 = 0x92000410
NOTICE: PA is valid: 0x1000093c00
NOTICE: Hest Set GenericError Data
... ---> SEA error interrupt storm happens here
NOTICE: SEA Handle
NOTICE: SpsrEl3 = 0x60001000, ELR_EL3 = 0xffffc6ab42671400
NOTICE: skt[0x0]die[0x0]cluster[0x0]core[0x1]
NOTICE: EsrEl3 = 0x92000410
NOTICE: PA is valid: 0x1000093c00
NOTICE: Hest Set GenericError Data
[ 1429.818080][ T9955] Memory failure: 0x1000093: already hardware poisoned
[ 1429.825760][ C1] ghes_print_estatus: 1 callbacks suppressed
[ 1429.825763][ C1] {59}[Hardware Error]: Hardware error from APEI Generic Hardware Error Source: 9
[ 1429.843731][ C1] {59}[Hardware Error]: event severity: recoverable
[ 1429.861800][ C1] {59}[Hardware Error]: Error 0, type: recoverable
[ 1429.874658][ C1] {59}[Hardware Error]: section_type: ARM processor error
[ 1429.887516][ C1] {59}[Hardware Error]: MIDR: 0x0000000000000000
[ 1429.901159][ C1] {59}[Hardware Error]: Multiprocessor Affinity Register (MPIDR): 0x0000000081000100
[ 1429.901166][ C1] {59}[Hardware Error]: error affinity level: 0
[ 1429.914896][ C1] {59}[Hardware Error]: running state: 0x1
[ 1429.914903][ C1] {59}[Hardware Error]: Power State Coordination Interface state: 0
[ 1429.933319][ C1] {59}[Hardware Error]: Error info structure 0:
[ 1429.946261][ C1] {59}[Hardware Error]: num errors: 1
[ 1429.946269][ C1] {59}[Hardware Error]: error_type: 0, cache error
[ 1429.970847][ C1] {59}[Hardware Error]: error_info: 0x0000000020400014
[ 1429.970854][ C1] {59}[Hardware Error]: cache level: 1
[ 1429.988406][ C1] {59}[Hardware Error]: the error has not been corrected
[ 1430.013419][ C1] {59}[Hardware Error]: physical fault address: 0x0000001000093c00
[ 1430.013425][ C1] {59}[Hardware Error]: Vendor specific error info has 48 bytes:
[ 1430.025424][ C1] {59}[Hardware Error]: 00000000: 00000000 00000000 00000000 00000000 ................
[ 1430.053736][ C1] {59}[Hardware Error]: 00000010: 00000000 00000000 00000000 00000000 ................
[ 1430.066341][ C1] {59}[Hardware Error]: 00000020: 00000000 00000000 00000000 00000000 ................
[ 1430.294255][T54990] Memory failure: 0x1000093: already hardware poisoned
[ 1430.305518][T54990] 0x1000093: Sending SIGBUS to devmem:54990 due to hardware memory corruption
Signed-off-by: Junhao He <hejunhao3(a)h-partners.com>
---
drivers/acpi/apei/ghes.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index e638fa4b7426..5b585e43a4cc 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1129,8 +1129,10 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx);
/* This error has been reported before, don't process it again. */
- if (ghes_estatus_cached(estatus))
+ if (ghes_estatus_cached(estatus)) {
+ rc = -ECANCELED;
goto no_work;
+ }
llist_add(&estatus_node->llnode, &ghes_estatus_llist);
--
2.33.0
2
1
[PATCH OLK-5.10] [Backport] scsi: qla2xxx: Fix premature hw access after PCI error
by Lin Ruifeng 22 Jan '26
by Lin Ruifeng 22 Jan '26
22 Jan '26
From: Quinn Tran <qutran(a)marvell.com>
stable inclusion
from stable-v5.15.33
commit 5ef6319f9882699613d5182fbd7929b017e8c5ab
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/296
CVE: CVE-2022-49157
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit e35920ab7874d5e2faeb4f958a74bfa793f1ce5a upstream.
After a recoverable PCI error has been detected and recovered, the qla
driver needs to check whether the error condition still persists and/or
wait for the OS to give the resume signal.
Sep 8 22:26:03 localhost kernel: WARNING: CPU: 9 PID: 124606 at qla_tmpl.c:440
qla27xx_fwdt_entry_t266+0x55/0x60 [qla2xxx]
Sep 8 22:26:03 localhost kernel: RIP: 0010:qla27xx_fwdt_entry_t266+0x55/0x60
[qla2xxx]
Sep 8 22:26:03 localhost kernel: Call Trace:
Sep 8 22:26:03 localhost kernel: ? qla27xx_walk_template+0xb1/0x1b0 [qla2xxx]
Sep 8 22:26:03 localhost kernel: ? qla27xx_execute_fwdt_template+0x12a/0x160
[qla2xxx]
Sep 8 22:26:03 localhost kernel: ? qla27xx_fwdump+0xa0/0x1c0 [qla2xxx]
Sep 8 22:26:03 localhost kernel: ? qla2xxx_pci_mmio_enabled+0xfb/0x120
[qla2xxx]
Sep 8 22:26:03 localhost kernel: ? report_mmio_enabled+0x44/0x80
Sep 8 22:26:03 localhost kernel: ? report_slot_reset+0x80/0x80
Sep 8 22:26:03 localhost kernel: ? pci_walk_bus+0x70/0x90
Sep 8 22:26:03 localhost kernel: ? aer_dev_correctable_show+0xc0/0xc0
Sep 8 22:26:03 localhost kernel: ? pcie_do_recovery+0x1bb/0x240
Sep 8 22:26:03 localhost kernel: ? aer_recover_work_func+0xaa/0xd0
Sep 8 22:26:03 localhost kernel: ? process_one_work+0x1a7/0x360
..
Sep 8 22:26:03 localhost kernel: qla2xxx [0000:42:00.2]-8041:22: detected PCI
disconnect.
Sep 8 22:26:03 localhost kernel: qla2xxx [0000:42:00.2]-107ff:22:
qla27xx_fwdt_entry_t262: dump ram MB failed. Area 5h start 198013h end 198013h
Sep 8 22:26:03 localhost kernel: qla2xxx [0000:42:00.2]-107ff:22: Unable to
capture FW dump
Sep 8 22:26:03 localhost kernel: qla2xxx [0000:42:00.2]-1015:22: cmd=0x0,
waited 5221 msecs
Sep 8 22:26:03 localhost kernel: qla2xxx [0000:42:00.2]-680d:22: mmio
enabled returning.
Sep 8 22:26:03 localhost kernel: qla2xxx [0000:42:00.2]-d04c:22: MBX
Command timeout for cmd 0, iocontrol=ffffffff jiffies=10140f2e5
mb[0-3]=[0xffff 0xffff 0xffff 0xffff]
Link: https://lore.kernel.org/r/20220110050218.3958-6-njavali@marvell.com
Cc: stable(a)vger.kernel.org
Reviewed-by: Himanshu Madhani <himanshu.madhani(a)oracle.com>
Signed-off-by: Quinn Tran <qutran(a)marvell.com>
Signed-off-by: Nilesh Javali <njavali(a)marvell.com>
Signed-off-by: Martin K. Petersen <martin.petersen(a)oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Lin Ruifeng <linruifeng4(a)huawei.com>
---
drivers/scsi/qla2xxx/qla_os.c | 10 +++++++++-
drivers/scsi/qla2xxx/qla_tmpl.c | 9 +++++++--
2 files changed, 16 insertions(+), 3 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2775262a57c5..50d96055fa9b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -7479,7 +7479,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
switch (state) {
case pci_channel_io_normal:
- ha->flags.eeh_busy = 0;
+ qla_pci_set_eeh_busy(vha);
if (ql2xmqsupport || ql2xnvmeenable) {
set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
@@ -7520,9 +7520,16 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
"mmio enabled\n");
ha->pci_error_state = QLA_PCI_MMIO_ENABLED;
+
if (IS_QLA82XX(ha))
return PCI_ERS_RESULT_RECOVERED;
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, base_vha, 0x803f,
+ "During mmio enabled, PCI/Register disconnect still detected.\n");
+ goto out;
+ }
+
spin_lock_irqsave(&ha->hardware_lock, flags);
if (IS_QLA2100(ha) || IS_QLA2200(ha)){
stat = rd_reg_word(®->hccr);
@@ -7544,6 +7551,7 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
"RISC paused -- mmio_enabled, Dumping firmware.\n");
qla2xxx_dump_fw(base_vha);
}
+out:
/* set PCI_ERS_RESULT_NEED_RESET to trigger call to qla2xxx_pci_slot_reset */
ql_dbg(ql_dbg_aer, base_vha, 0x600d,
"mmio enabled returning.\n");
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 26c13a953b97..b0a74b036cf4 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -435,8 +435,13 @@ qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
{
ql_dbg(ql_dbg_misc, vha, 0xd20a,
"%s: reset risc [%lx]\n", __func__, *len);
- if (buf)
- WARN_ON_ONCE(qla24xx_soft_reset(vha->hw) != QLA_SUCCESS);
+ if (buf) {
+ if (qla24xx_soft_reset(vha->hw) != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_async, vha, 0x5001,
+ "%s: unable to soft reset\n", __func__);
+ return INVALID_ENTRY;
+ }
+ }
return qla27xx_next_entry(ent);
}
--
2.43.0
2
1
[PATCH OLK-6.6] [Backport] ASoC: SOF: Intel: hda-dai: Ensure DAI widget is valid during params
by Lin Ruifeng 22 Jan '26
by Lin Ruifeng 22 Jan '26
22 Jan '26
From: Bard Liao <yung-chuan.liao(a)linux.intel.com>
stable inclusion
from stable-v6.12.14
commit e012a77e4d7632cf615ba9625b1600ed8985c3b5
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/30
CVE: CVE-2024-58012
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 569922b82ca660f8b24e705f6cf674e6b1f99cc7 ]
Each CPU DAI should be associated with a widget. However, the topology
might not create the right number of DAI widgets for aggregated amps,
which will cause a NULL pointer dereference.
Check that the DAI widget associated with the CPU DAI is valid to prevent
a NULL pointer dereference due to missing DAI widgets in topologies with
aggregated amps.
Signed-off-by: Bard Liao <yung-chuan.liao(a)linux.intel.com>
Reviewed-by: Ranjani Sridharan <ranjani.sridharan(a)linux.intel.com>
Reviewed-by: Péter Ujfalusi <peter.ujfalusi(a)linux.intel.com>
Reviewed-by: Liam Girdwood <liam.r.girdwood(a)intel.com>
Link: https://patch.msgid.link/20241203104853.56956-1-yung-chuan.liao@linux.intel…
Signed-off-by: Mark Brown <broonie(a)kernel.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Conflicts:
sound/soc/sof/intel/hda-dai.c
sound/soc/sof/intel/hda.c
Signed-off-by: Lin Ruifeng <linruifeng4(a)huawei.com>
---
sound/soc/sof/intel/hda-dai.c | 6 ++++++
sound/soc/sof/intel/hda.c | 5 +++++
2 files changed, 11 insertions(+)
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
index 19ec1a45737e..cebd9db94215 100644
--- a/sound/soc/sof/intel/hda-dai.c
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -439,6 +439,12 @@ int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_sof_dev *sdev;
int ret;
+ if (!w) {
+ dev_err(cpu_dai->dev, "%s widget not found, check amp link num in the topology\n",
+ cpu_dai->name);
+ return -EINVAL;
+ }
+
ret = non_hda_dai_hw_params(substream, params, cpu_dai);
if (ret < 0) {
dev_err(cpu_dai->dev, "%s: non_hda_dai_hw_params failed %d\n", __func__, ret);
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index 15e6779efaa3..aa3f9c961173 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -102,6 +102,11 @@ static int sdw_params_stream(struct device *dev,
struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(d, params_data->substream->stream);
struct snd_sof_dai_config_data data = { 0 };
+ if (!w) {
+ dev_err(dev, "%s widget not found, check amp link num in the topology\n",
+ d->name);
+ return -EINVAL;
+ }
data.dai_index = (params_data->link_id << 8) | d->id;
data.dai_data = params_data->alh_stream_id;
--
2.43.0
2
1
[PATCH OLK-5.10] [Backport] drm/amd/pm/powerplay/hwmgr/smu7_thermal: Prevent division by zero
by Lin Ruifeng 22 Jan '26
by Lin Ruifeng 22 Jan '26
22 Jan '26
From: Denis Arefev <arefev(a)swemel.ru>
stable inclusion
from stable-v5.10.237
commit 3cdd02cb70682d7d205ca6dc02a4d1eb76758d24
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/10440
CVE: CVE-2025-37768
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 7c246a05df51c52fe0852ce56ba10c41e6ed1f39 upstream.
The user can set any speed value.
If speed is greater than UINT_MAX/8, division by zero is possible.
Found by Linux Verification Center (linuxtesting.org) with SVACE.
Fixes: c52dcf49195d ("drm/amd/pp: Avoid divide-by-zero in fan_ctrl_set_fan_speed_rpm")
Signed-off-by: Denis Arefev <arefev(a)swemel.ru>
Signed-off-by: Alex Deucher <alexander.deucher(a)amd.com>
Cc: stable(a)vger.kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Lin Ruifeng <linruifeng4(a)huawei.com>
---
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
index 0b30f73649a8..49f97d612421 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
@@ -261,10 +261,10 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
(hwmgr->thermal_controller.fanInfo.
ucTachometerPulsesPerRevolution == 0) ||
- speed == 0 ||
+ (!speed || speed > UINT_MAX/8) ||
(speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
- return 0;
+ return -EINVAL;
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
--
2.43.0
2
1
In __devm_rtc_register_device(), the callee rtc_initialize_alarm()
already validates the alarm time itself, so there is no need to check it
in advance; remove the redundant check.
Signed-off-by: Jinjie Ruan <ruanjinjie(a)huawei.com>
---
drivers/rtc/class.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index b1a2be1f9e3b..84885ba6135c 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -410,7 +410,7 @@ int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc)
/* Check to see if there is an ALARM already set in hw */
err = __rtc_read_alarm(rtc, &alrm);
- if (!err && !rtc_valid_tm(&alrm.time))
+ if (!err)
rtc_initialize_alarm(rtc, &alrm);
rtc_dev_prepare(rtc);
--
2.34.1
1
0
From: Grzegorz Nitka <grzegorz.nitka(a)intel.com>
mainline inclusion
from mainline-v6.14
commit 23d97f18901ef5e4e264e3b1777fe65c760186b5
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/ICCVOJ
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
Fix an aRFS (accelerated Receive Flow Steering) structure memory leak by
adding a check that verifies whether aRFS memory is already allocated
while configuring the VSI. aRFS objects are allocated in two cases:
- as part of VSI initialization (at probe), and
- as part of reset handling
However, VSI reconfiguration executed during reset allocates the memory
one more time, without first releasing the already allocated resources.
This led to a memory leak with the following signature (a sketch of the
resulting guard is shown after the log):
[root@os-delivery ~]# cat /sys/kernel/debug/kmemleak
unreferenced object 0xff3c1ca7252e6000 (size 8192):
comm "kworker/0:0", pid 8, jiffies 4296833052
hex dump (first 32 bytes):
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
backtrace (crc 0):
[<ffffffff991ec485>] __kmalloc_cache_noprof+0x275/0x340
[<ffffffffc0a6e06a>] ice_init_arfs+0x3a/0xe0 [ice]
[<ffffffffc09f1027>] ice_vsi_cfg_def+0x607/0x850 [ice]
[<ffffffffc09f244b>] ice_vsi_setup+0x5b/0x130 [ice]
[<ffffffffc09c2131>] ice_init+0x1c1/0x460 [ice]
[<ffffffffc09c64af>] ice_probe+0x2af/0x520 [ice]
[<ffffffff994fbcd3>] local_pci_probe+0x43/0xa0
[<ffffffff98f07103>] work_for_cpu_fn+0x13/0x20
[<ffffffff98f0b6d9>] process_one_work+0x179/0x390
[<ffffffff98f0c1e9>] worker_thread+0x239/0x340
[<ffffffff98f14abc>] kthread+0xcc/0x100
[<ffffffff98e45a6d>] ret_from_fork+0x2d/0x50
[<ffffffff98e083ba>] ret_from_fork_asm+0x1a/0x30
...
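The shape of the fix is an idempotency guard at the top of the init path;
a sketch (ice_is_arfs_active() is the helper the patch uses, and the
allocation body is abbreviated):

	void ice_init_arfs(struct ice_vsi *vsi)
	{
		/* skip if probe or a previous reset already allocated state */
		if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi))
			return;

		/* ... allocate arfs_fltr_list and related per-VSI state ... */
	}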
Fixes: 28bf26724fdb ("ice: Implement aRFS")
Reviewed-by: Michal Swiatkowski <michal.swiatkowski(a)linux.intel.com>
Signed-off-by: Grzegorz Nitka <grzegorz.nitka(a)intel.com>
Reviewed-by: Simon Horman <horms(a)kernel.org>
Tested-by: Rinitha S <sx.rinitha(a)intel.com> (A Contingent worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen(a)intel.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
(cherry picked from commit 5d30d256661fc11b6e73fac6c3783a702e1006a3)
Signed-off-by: Wentao Guan <guanwentao(a)uniontech.com>
---
drivers/net/ethernet/intel/ice/ice_arfs.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index cca0e753f38f..d7e0116f6773 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -510,7 +510,7 @@ void ice_init_arfs(struct ice_vsi *vsi)
struct hlist_head *arfs_fltr_list;
unsigned int i;
- if (!vsi || vsi->type != ICE_VSI_PF)
+ if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi))
return;
arfs_fltr_list = kzalloc(sizeof(*arfs_fltr_list) * ICE_MAX_ARFS_LIST,
--
2.43.0
2
1
[PATCH OLK-6.6 v3 0/2] oenetcls: add a switch to enable/disable checking NIC ntuple feature
by Liu Jian 21 Jan '26
by Liu Jian 21 Jan '26
21 Jan '26
oenetcls: add a switch to enable/disable checking NIC ntuple feature
Liu Jian (1):
net/oenetcls: add a switch to enable/disable checking NIC ntuple
feature
Yue Haibing (1):
net/oenetcls: Balancing softirq to improve performance
include/linux/oenetcls.h | 32 +++++++++++----
net/core/dev.c | 19 ++++++++-
net/oenetcls/oenetcls.h | 1 +
net/oenetcls/oenetcls_flow.c | 72 +++++++++++++++++++++++++++-------
net/oenetcls/oenetcls_main.c | 17 +++++++-
net/oenetcls/oenetcls_ntuple.c | 12 +++---
6 files changed, 124 insertions(+), 29 deletions(-)
--
2.34.1
2
3
Fix CVE-2024-53179
Paulo Alcantara (1):
smb: client: fix use-after-free of signing key
Shyam Prasad N (1):
cifs: missed ref-counting smb session in find
fs/cifs/smb2proto.h | 2 --
fs/cifs/smb2transport.c | 60 ++++++++++++++++++++++++++++++-----------
2 files changed, 45 insertions(+), 17 deletions(-)
--
2.39.2
2
3
Hello!
Kernel invites you to attend the WeLink meeting (auto-recorded) to be held at 2026-01-23 14:00.
Meeting subject: openEuler Kernel SIG biweekly meeting
Agenda:
1. Progress updates
2. Topics being collected
(To add a topic, reply to this email or file it directly on the meeting board.)
Meeting link: https://meeting.huaweicloud.com:36443/#/j/982768127
Add topics and minutes at: https://etherpad.openeuler.org/p/Kernel-meetings
More information: https://www.openeuler.org/en/ (English) or https://www.openeuler.org/zh/ (Chinese)
1
0
[PATCH openEuler-1.0-LTS] net: Fix load-tearing on sk->sk_stamp in sock_recv_cmsgs().
by Li Xiasong 21 Jan '26
by Li Xiasong 21 Jan '26
21 Jan '26
From: Kuniyuki Iwashima <kuniyu(a)amazon.com>
stable inclusion
from stable-v4.19.284
commit 564c3150ad357d571a0de7d8b644aa1f7e6e21b7
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13243
CVE: CVE-2023-54218
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit dfd9248c071a3710c24365897459538551cb7167 ]
KCSAN found a data race in sock_recv_cmsgs() where the read access
to sk->sk_stamp needs READ_ONCE().
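For reference, sock_read_timestamp() (the helper the fix switches to)
roughly reads as follows: a plain READ_ONCE() on 64-bit kernels, and a
seqlock-protected read on 32-bit where a 64-bit load cannot be atomic
(quoted from memory, treat as a sketch):

	static inline ktime_t sock_read_timestamp(const struct sock *sk)
	{
	#if BITS_PER_LONG == 32
		unsigned int seq;
		ktime_t kt;

		do {
			seq = read_seqbegin(&sk->sk_stamp_seq);
			kt = sk->sk_stamp;
		} while (read_seqretry(&sk->sk_stamp_seq, seq));

		return kt;
	#else
		return READ_ONCE(sk->sk_stamp);	/* forbids load tearing */
	#endif
	}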
BUG: KCSAN: data-race in packet_recvmsg / packet_recvmsg
write (marked) to 0xffff88803c81f258 of 8 bytes by task 19171 on cpu 0:
sock_write_timestamp include/net/sock.h:2670 [inline]
sock_recv_cmsgs include/net/sock.h:2722 [inline]
packet_recvmsg+0xb97/0xd00 net/packet/af_packet.c:3489
sock_recvmsg_nosec net/socket.c:1019 [inline]
sock_recvmsg+0x11a/0x130 net/socket.c:1040
sock_read_iter+0x176/0x220 net/socket.c:1118
call_read_iter include/linux/fs.h:1845 [inline]
new_sync_read fs/read_write.c:389 [inline]
vfs_read+0x5e0/0x630 fs/read_write.c:470
ksys_read+0x163/0x1a0 fs/read_write.c:613
__do_sys_read fs/read_write.c:623 [inline]
__se_sys_read fs/read_write.c:621 [inline]
__x64_sys_read+0x41/0x50 fs/read_write.c:621
do_syscall_x64 arch/x86/entry/common.c:50 [inline]
do_syscall_64+0x3b/0x90 arch/x86/entry/common.c:80
entry_SYSCALL_64_after_hwframe+0x72/0xdc
read to 0xffff88803c81f258 of 8 bytes by task 19183 on cpu 1:
sock_recv_cmsgs include/net/sock.h:2721 [inline]
packet_recvmsg+0xb64/0xd00 net/packet/af_packet.c:3489
sock_recvmsg_nosec net/socket.c:1019 [inline]
sock_recvmsg+0x11a/0x130 net/socket.c:1040
sock_read_iter+0x176/0x220 net/socket.c:1118
call_read_iter include/linux/fs.h:1845 [inline]
new_sync_read fs/read_write.c:389 [inline]
vfs_read+0x5e0/0x630 fs/read_write.c:470
ksys_read+0x163/0x1a0 fs/read_write.c:613
__do_sys_read fs/read_write.c:623 [inline]
__se_sys_read fs/read_write.c:621 [inline]
__x64_sys_read+0x41/0x50 fs/read_write.c:621
do_syscall_x64 arch/x86/entry/common.c:50 [inline]
do_syscall_64+0x3b/0x90 arch/x86/entry/common.c:80
entry_SYSCALL_64_after_hwframe+0x72/0xdc
value changed: 0xffffffffc4653600 -> 0x0000000000000000
Reported by Kernel Concurrency Sanitizer on:
CPU: 1 PID: 19183 Comm: syz-executor.5 Not tainted 6.3.0-rc7-02330-gca6270c12e20 #2
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014
Fixes: 6c7c98bad488 ("sock: avoid dirtying sk_stamp, if possible")
Reported-by: syzbot <syzkaller(a)googlegroups.com>
Signed-off-by: Kuniyuki Iwashima <kuniyu(a)amazon.com>
Reviewed-by: Eric Dumazet <edumazet(a)google.com>
Link: https://lore.kernel.org/r/20230508175543.55756-1-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Li Xiasong <lixiasong1(a)huawei.com>
---
include/net/sock.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 1840303a5539b..26b537ccb5638 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2494,7 +2494,7 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
__sock_recv_ts_and_drops(msg, sk, skb);
else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
sock_write_timestamp(sk, skb->tstamp);
- else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
+ else if (unlikely(sock_read_timestamp(sk) == SK_DEFAULT_STAMP))
sock_write_timestamp(sk, 0);
}
--
2.34.1
2
1
21 Jan '26
From: Eric Dumazet <edumazet(a)google.com>
stable inclusion
from stable-v4.19.272
commit 78297d513157a31fd629626fe4cbb85a7dcbb94a
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13134
CVE: CVE-2022-50816
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit d89d7ff01235f218dad37de84457717f699dee79 upstream.
Another syzbot report [1] with no reproducer hints
at a bug in ip6_gre tunnel (dev:ip6gretap0)
Since ipv6 mcast code makes sure to read dev->mtu once
and applies a sanity check on it (see commit b9b312a7a451
"ipv6: mcast: better catch silly mtu values"), a remaining
possibility is that a layer is able to set dev->mtu to
an underflowed value (high order bit set).
This could happen indeed in ip6gre_tnl_link_config_route(),
ip6_tnl_link_config() and ipip6_tunnel_bind_dev()
Make sure to sanitize mtu value in a local variable before
it is written once on dev->mtu, as lockless readers could
catch wrong temporary value.
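A self-contained userspace illustration of the underflow, with
hypothetical MTU and header-length values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int lower_mtu = 40;	/* underlying device MTU */
		unsigned int t_hlen = 48;	/* tunnel header length */
		unsigned int mtu = lower_mtu - t_hlen;	/* wraps past zero */

		/* prints mtu=4294967288 (0xfffffff8): high-order bit set */
		printf("mtu=%u (0x%x)\n", mtu, mtu);
		return 0;
	}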
[1]
skbuff: skb_over_panic: text:ffff80000b7a2f38 len:40 put:40 head:ffff000149dcf200 data:ffff000149dcf2b0 tail:0xd8 end:0xc0 dev:ip6gretap0
------------[ cut here ]------------
kernel BUG at net/core/skbuff.c:120
Internal error: Oops - BUG: 00000000f2000800 [#1] PREEMPT SMP
Modules linked in:
CPU: 1 PID: 10241 Comm: kworker/1:1 Not tainted 6.0.0-rc7-syzkaller-18095-gbbed346d5a96 #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 09/30/2022
Workqueue: mld mld_ifc_work
pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
pc : skb_panic+0x4c/0x50 net/core/skbuff.c:116
lr : skb_panic+0x4c/0x50 net/core/skbuff.c:116
sp : ffff800020dd3b60
x29: ffff800020dd3b70 x28: 0000000000000000 x27: ffff00010df2a800
x26: 00000000000000c0 x25: 00000000000000b0 x24: ffff000149dcf200
x23: 00000000000000c0 x22: 00000000000000d8 x21: ffff80000b7a2f38
x20: ffff00014c2f7800 x19: 0000000000000028 x18: 00000000000001a9
x17: 0000000000000000 x16: ffff80000db49158 x15: ffff000113bf1a80
x14: 0000000000000000 x13: 00000000ffffffff x12: ffff000113bf1a80
x11: ff808000081c0d5c x10: 0000000000000000 x9 : 73f125dc5c63ba00
x8 : 73f125dc5c63ba00 x7 : ffff800008161d1c x6 : 0000000000000000
x5 : 0000000000000080 x4 : 0000000000000001 x3 : 0000000000000000
x2 : ffff0001fefddcd0 x1 : 0000000100000000 x0 : 0000000000000089
Call trace:
skb_panic+0x4c/0x50 net/core/skbuff.c:116
skb_over_panic net/core/skbuff.c:125 [inline]
skb_put+0xd4/0xdc net/core/skbuff.c:2049
ip6_mc_hdr net/ipv6/mcast.c:1714 [inline]
mld_newpack+0x14c/0x270 net/ipv6/mcast.c:1765
add_grhead net/ipv6/mcast.c:1851 [inline]
add_grec+0xa20/0xae0 net/ipv6/mcast.c:1989
mld_send_cr+0x438/0x5a8 net/ipv6/mcast.c:2115
mld_ifc_work+0x38/0x290 net/ipv6/mcast.c:2653
process_one_work+0x2d8/0x504 kernel/workqueue.c:2289
worker_thread+0x340/0x610 kernel/workqueue.c:2436
kthread+0x12c/0x158 kernel/kthread.c:376
ret_from_fork+0x10/0x20 arch/arm64/kernel/entry.S:860
Code: 91011400 aa0803e1 a90027ea 94373093 (d4210000)
Fixes: c12b395a4664 ("gre: Support GRE over IPv6")
Reported-by: syzbot <syzkaller(a)googlegroups.com>
Signed-off-by: Eric Dumazet <edumazet(a)google.com>
Link: https://lore.kernel.org/r/20221024020124.3756833-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
[ta: Backport patch for stable kernels < 5.10.y. Fix conflict in
net/ipv6/ip6_tunnel.c, mtu initialized with:
mtu = rt->dst.dev->mtu - t_hlen;]
Cc: <stable(a)vger.kernel.org> # 4.14.y, 4.19.y, 5.4.y
Signed-off-by: Tudor Ambarus <tudor.ambarus(a)linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Li Xiasong <lixiasong1(a)huawei.com>
---
net/ipv6/ip6_gre.c | 12 +++++++-----
net/ipv6/ip6_tunnel.c | 10 ++++++----
net/ipv6/sit.c | 8 +++++---
3 files changed, 18 insertions(+), 12 deletions(-)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 19c5dbfe095bb..ea880805ddf53 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1156,14 +1156,16 @@ static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
dev->needed_headroom = dst_len;
if (set_mtu) {
- dev->mtu = rt->dst.dev->mtu - t_hlen;
+ int mtu = rt->dst.dev->mtu - t_hlen;
+
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
- dev->mtu -= 8;
+ mtu -= 8;
if (dev->type == ARPHRD_ETHER)
- dev->mtu -= ETH_HLEN;
+ mtu -= ETH_HLEN;
- if (dev->mtu < IPV6_MIN_MTU)
- dev->mtu = IPV6_MIN_MTU;
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+ WRITE_ONCE(dev->mtu, mtu);
}
}
ip6_rt_put(rt);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index cd32d859e4dad..c7f98ca66753b 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1450,6 +1450,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
struct __ip6_tnl_parm *p = &t->parms;
struct flowi6 *fl6 = &t->fl.u.ip6;
int t_hlen;
+ int mtu;
memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
@@ -1492,12 +1493,13 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
dev->hard_header_len = rt->dst.dev->hard_header_len +
t_hlen;
- dev->mtu = rt->dst.dev->mtu - t_hlen;
+ mtu = rt->dst.dev->mtu - t_hlen;
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
- dev->mtu -= 8;
+ mtu -= 8;
- if (dev->mtu < IPV6_MIN_MTU)
- dev->mtu = IPV6_MIN_MTU;
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+ WRITE_ONCE(dev->mtu, mtu);
}
ip6_rt_put(rt);
}
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index f2a2065d640ff..6799691f565ca 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1084,10 +1084,12 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
if (tdev && !netif_is_l3_master(tdev)) {
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+ int mtu;
- dev->mtu = tdev->mtu - t_hlen;
- if (dev->mtu < IPV6_MIN_MTU)
- dev->mtu = IPV6_MIN_MTU;
+ mtu = tdev->mtu - t_hlen;
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+ WRITE_ONCE(dev->mtu, mtu);
}
}
--
2.34.1
2
1
21 Jan '26
From: George Kennedy <george.kennedy(a)oracle.com>
mainline inclusion
from mainline-v6.19-rc1
commit 866cf36bfee4fba6a492d2dcc5133f857e3446b0
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13351
CVE: CVE-2025-68798
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
----------------------------------------------------------------------
On AMD machines cpuc->events[idx] can become NULL in a subtle race
condition with NMI->throttle->x86_pmu_stop().
Check the event for NULL in amd_pmu_enable_all() before enabling it to
avoid a GPF.
This appears to be an AMD-only issue.
Syzkaller reported a GPF in amd_pmu_enable_all.
INFO: NMI handler (perf_event_nmi_handler) took too long to run: 13.143
msecs
Oops: general protection fault, probably for non-canonical address
0xdffffc0000000034: 0000 PREEMPT SMP KASAN NOPTI
KASAN: null-ptr-deref in range [0x00000000000001a0-0x00000000000001a7]
CPU: 0 UID: 0 PID: 328415 Comm: repro_36674776 Not tainted 6.12.0-rc1-syzk
RIP: 0010:x86_pmu_enable_event (arch/x86/events/perf_event.h:1195
arch/x86/events/core.c:1430)
RSP: 0018:ffff888118009d60 EFLAGS: 00010012
RAX: dffffc0000000000 RBX: 0000000000000000 RCX: 0000000000000000
RDX: 0000000000000034 RSI: 0000000000000000 RDI: 00000000000001a0
RBP: 0000000000000001 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000002
R13: ffff88811802a440 R14: ffff88811802a240 R15: ffff8881132d8601
FS: 00007f097dfaa700(0000) GS:ffff888118000000(0000) GS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00000000200001c0 CR3: 0000000103d56000 CR4: 00000000000006f0
Call Trace:
<IRQ>
amd_pmu_enable_all (arch/x86/events/amd/core.c:760 (discriminator 2))
x86_pmu_enable (arch/x86/events/core.c:1360)
event_sched_out (kernel/events/core.c:1191 kernel/events/core.c:1186
kernel/events/core.c:2346)
__perf_remove_from_context (kernel/events/core.c:2435)
event_function (kernel/events/core.c:259)
remote_function (kernel/events/core.c:92 (discriminator 1)
kernel/events/core.c:72 (discriminator 1))
__flush_smp_call_function_queue (./arch/x86/include/asm/jump_label.h:27
./include/linux/jump_label.h:207 ./include/trace/events/csd.h:64
kernel/smp.c:135 kernel/smp.c:540)
__sysvec_call_function_single (./arch/x86/include/asm/jump_label.h:27
./include/linux/jump_label.h:207
./arch/x86/include/asm/trace/irq_vectors.h:99 arch/x86/kernel/smp.c:272)
sysvec_call_function_single (arch/x86/kernel/smp.c:266 (discriminator 47)
arch/x86/kernel/smp.c:266 (discriminator 47))
</IRQ>
Reported-by: syzkaller <syzkaller(a)googlegroups.com>
Signed-off-by: George Kennedy <george.kennedy(a)oracle.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz(a)infradead.org>
Signed-off-by: Luo Gengkun <luogengkun2(a)huawei.com>
---
arch/x86/events/amd/core.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index e7edce877878..f15aa70edefc 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -763,7 +763,12 @@ static void amd_pmu_enable_all(int added)
if (!test_bit(idx, cpuc->active_mask))
continue;
- amd_pmu_enable_event(cpuc->events[idx]);
+ /*
+ * FIXME: cpuc->events[idx] can become NULL in a subtle race
+ * condition with NMI->throttle->x86_pmu_stop().
+ */
+ if (cpuc->events[idx])
+ amd_pmu_enable_event(cpuc->events[idx]);
}
}
--
2.34.1
2
1
[PATCH OLK-5.10] [Backport] HID: usbhid: Eliminate recurrent out-of-bounds bug in usbhid_parse()
by Chen Jinghuang 20 Jan '26
by Chen Jinghuang 20 Jan '26
20 Jan '26
From: Terry Junge <linuxhid(a)cosmicgizmosystems.com>
stable inclusion
from stable-v5.10.239
commit 41827a2dbdd7880df9881506dee13bc88d4230bb
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/9660
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit fe7f7ac8e0c708446ff017453add769ffc15deed upstream.
Update struct hid_descriptor to better reflect the mandatory and
optional parts of the HID Descriptor as per USB HID 1.11 specification.
Note: the kernel currently does not parse any optional HID class
descriptors, only the mandatory report descriptor.
Update all references to member element desc[0] to rpt_desc.
Add a test to verify that the bLength and bNumDescriptors values are valid.
Replace the for loop with direct access to the mandatory HID class
descriptor member for the report descriptor. This eliminates the
possibility of getting an out-of-bounds fault.
Add a warning message if the HID descriptor contains any unsupported
optional HID class descriptors.
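A self-contained check of the size arithmetic behind the new validation;
the packed layouts mirror the include/linux/hid.h hunk below (6-byte
fixed header plus 3 bytes per class descriptor, per USB HID 1.11):

	#include <stdint.h>
	#include <stdio.h>

	struct hid_class_descriptor {
		uint8_t  bDescriptorType;
		uint16_t wDescriptorLength;
	} __attribute__((packed));			/* 3 bytes */

	struct hid_descriptor {
		uint8_t  bLength;
		uint8_t  bDescriptorType;
		uint16_t bcdHID;
		uint8_t  bCountryCode;
		uint8_t  bNumDescriptors;
		struct hid_class_descriptor rpt_desc;	/* mandatory report desc */
		struct hid_class_descriptor opt_descs[];
	} __attribute__((packed));			/* 9-byte fixed part */

	int main(void)
	{
		/* the only bLength usbhid_parse() now accepts for N descriptors */
		for (unsigned int n = 1; n <= 3; n++)
			printf("bNumDescriptors=%u -> bLength=%zu\n", n,
			       sizeof(struct hid_descriptor) +
			       (n - 1) * sizeof(struct hid_class_descriptor));
		return 0;	/* prints 9, 12, 15 */
	}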
Reported-by: syzbot+c52569baf0c843f35495(a)syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=c52569baf0c843f35495
Fixes: f043bfc98c19 ("HID: usbhid: fix out-of-bounds bug")
Cc: stable(a)vger.kernel.org
Signed-off-by: Terry Junge <linuxhid(a)cosmicgizmosystems.com>
Reviewed-by: Michael Kelley <mhklinux(a)outlook.com>
Signed-off-by: Jiri Kosina <jkosina(a)suse.com>
Signed-off-by: Terry Junge <linuxhid(a)cosmicgizmosystems.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Chen Jinghuang <chenjinghuang2(a)huawei.com>
---
drivers/hid/hid-hyperv.c | 5 +++--
drivers/hid/usbhid/hid-core.c | 25 ++++++++++++++-----------
drivers/usb/gadget/function/f_hid.c | 12 ++++++------
include/linux/hid.h | 3 ++-
4 files changed, 25 insertions(+), 20 deletions(-)
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index b7704dd6809d..bf77cfb723d5 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -199,7 +199,8 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
if (!input_device->hid_desc)
goto cleanup;
- input_device->report_desc_size = desc->desc[0].wDescriptorLength;
+ input_device->report_desc_size = le16_to_cpu(
+ desc->rpt_desc.wDescriptorLength);
if (input_device->report_desc_size == 0) {
input_device->dev_info_status = -EINVAL;
goto cleanup;
@@ -217,7 +218,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
memcpy(input_device->report_desc,
((unsigned char *)desc) + desc->bLength,
- desc->desc[0].wDescriptorLength);
+ le16_to_cpu(desc->rpt_desc.wDescriptorLength));
/* Send the ack */
memset(&ack, 0, sizeof(struct mousevsc_prt_msg));
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 009a0469d54f..c3b104c72e49 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -984,12 +984,11 @@ static int usbhid_parse(struct hid_device *hid)
struct usb_host_interface *interface = intf->cur_altsetting;
struct usb_device *dev = interface_to_usbdev (intf);
struct hid_descriptor *hdesc;
+ struct hid_class_descriptor *hcdesc;
u32 quirks = 0;
unsigned int rsize = 0;
char *rdesc;
- int ret, n;
- int num_descriptors;
- size_t offset = offsetof(struct hid_descriptor, desc);
+ int ret;
quirks = hid_lookup_quirk(hid);
@@ -1011,20 +1010,19 @@ static int usbhid_parse(struct hid_device *hid)
return -ENODEV;
}
- if (hdesc->bLength < sizeof(struct hid_descriptor)) {
- dbg_hid("hid descriptor is too short\n");
+ if (!hdesc->bNumDescriptors ||
+ hdesc->bLength != sizeof(*hdesc) +
+ (hdesc->bNumDescriptors - 1) * sizeof(*hcdesc)) {
+ dbg_hid("hid descriptor invalid, bLen=%hhu bNum=%hhu\n",
+ hdesc->bLength, hdesc->bNumDescriptors);
return -EINVAL;
}
hid->version = le16_to_cpu(hdesc->bcdHID);
hid->country = hdesc->bCountryCode;
- num_descriptors = min_t(int, hdesc->bNumDescriptors,
- (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
-
- for (n = 0; n < num_descriptors; n++)
- if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
- rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
+ if (hdesc->rpt_desc.bDescriptorType == HID_DT_REPORT)
+ rsize = le16_to_cpu(hdesc->rpt_desc.wDescriptorLength);
if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
dbg_hid("weird size of report descriptor (%u)\n", rsize);
@@ -1052,6 +1050,11 @@ static int usbhid_parse(struct hid_device *hid)
goto err;
}
+ if (hdesc->bNumDescriptors > 1)
+ hid_warn(intf,
+ "%u unsupported optional hid class descriptors\n",
+ (int)(hdesc->bNumDescriptors - 1));
+
hid->quirks |= quirks;
return 0;
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index ba018aeb21d8..2f30699f0426 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -114,8 +114,8 @@ static struct hid_descriptor hidg_desc = {
.bcdHID = cpu_to_le16(0x0101),
.bCountryCode = 0x00,
.bNumDescriptors = 0x1,
- /*.desc[0].bDescriptorType = DYNAMIC */
- /*.desc[0].wDescriptorLenght = DYNAMIC */
+ /*.rpt_desc.bDescriptorType = DYNAMIC */
+ /*.rpt_desc.wDescriptorLength = DYNAMIC */
};
/* Super-Speed Support */
@@ -724,8 +724,8 @@ static int hidg_setup(struct usb_function *f,
struct hid_descriptor hidg_desc_copy = hidg_desc;
VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n");
- hidg_desc_copy.desc[0].bDescriptorType = HID_DT_REPORT;
- hidg_desc_copy.desc[0].wDescriptorLength =
+ hidg_desc_copy.rpt_desc.bDescriptorType = HID_DT_REPORT;
+ hidg_desc_copy.rpt_desc.wDescriptorLength =
cpu_to_le16(hidg->report_desc_length);
length = min_t(unsigned short, length,
@@ -966,8 +966,8 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
* We can use hidg_desc struct here but we should not relay
* that its content won't change after returning from this function.
*/
- hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT;
- hidg_desc.desc[0].wDescriptorLength =
+ hidg_desc.rpt_desc.bDescriptorType = HID_DT_REPORT;
+ hidg_desc.rpt_desc.wDescriptorLength =
cpu_to_le16(hidg->report_desc_length);
hidg_hs_in_ep_desc.bEndpointAddress =
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 9e306bf9959d..03627c96d814 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -674,8 +674,9 @@ struct hid_descriptor {
__le16 bcdHID;
__u8 bCountryCode;
__u8 bNumDescriptors;
+ struct hid_class_descriptor rpt_desc;
- struct hid_class_descriptor desc[1];
+ struct hid_class_descriptor opt_descs[];
} __attribute__ ((packed));
#define HID_DEVICE(b, g, ven, prod) \
--
2.34.1
2
1
*** BLURB HERE ***
Zhang Xiaoxu (1):
[Backport] RDMA/rxe: Fix NULL-ptr-deref in rxe_qp_do_cleanup() when
socket create failed
Zhu Yanjun (1):
[Backport] RDMA/rxe: Fix the error caused by qp->sk
drivers/infiniband/sw/rxe/rxe_qp.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
--
2.34.1
2
3
20 Jan '26
From: "Nysal Jan K.A" <nysal(a)linux.ibm.com>
stable inclusion
from stable-v6.6.121
commit d790ef0c4819424ee0c2f448c0a8154c5ca369d1
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13434
CVE: CVE-2025-71119
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit c2296a1e42418556efbeb5636c4fa6aa6106713a upstream.
If SMT is disabled or a partial SMT state is enabled, when a new kernel
image is loaded for kexec, on reboot the following warning is observed:
kexec: Waking offline cpu 228.
WARNING: CPU: 0 PID: 9062 at arch/powerpc/kexec/core_64.c:223 kexec_prepare_cpus+0x1b0/0x1bc
[snip]
NIP kexec_prepare_cpus+0x1b0/0x1bc
LR kexec_prepare_cpus+0x1a0/0x1bc
Call Trace:
kexec_prepare_cpus+0x1a0/0x1bc (unreliable)
default_machine_kexec+0x160/0x19c
machine_kexec+0x80/0x88
kernel_kexec+0xd0/0x118
__do_sys_reboot+0x210/0x2c4
system_call_exception+0x124/0x320
system_call_vectored_common+0x15c/0x2ec
This occurs because add_cpu() fails, as cpu_bootable() returns false for
CPUs that fail the cpu_smt_thread_allowed() check, or for non-primary
threads when SMT is disabled.
Fix the issue by enabling SMT and resetting the number of SMT threads to
the number of threads per core, before attempting to wake up all present
CPUs.
Fixes: 38253464bc82 ("cpu/SMT: Create topology_smt_thread_allowed()")
Reported-by: Sachin P Bappalige <sachinpb(a)linux.ibm.com>
Cc: stable(a)vger.kernel.org # v6.6+
Reviewed-by: Srikar Dronamraju <srikar(a)linux.ibm.com>
Signed-off-by: Nysal Jan K.A. <nysal(a)linux.ibm.com>
Tested-by: Samir M <samir(a)linux.ibm.com>
Reviewed-by: Sourabh Jain <sourabhjain(a)linux.ibm.com>
Signed-off-by: Madhavan Srinivasan <maddy(a)linux.ibm.com>
Link: https://patch.msgid.link/20251028105516.26258-1-nysal@linux.ibm.com
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Tengda Wu <wutengda2(a)huawei.com>
---
arch/powerpc/kexec/core_64.c | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
index e465e4487737..ec7e909d7dcb 100644
--- a/arch/powerpc/kexec/core_64.c
+++ b/arch/powerpc/kexec/core_64.c
@@ -200,6 +200,23 @@ static void kexec_prepare_cpus_wait(int wait_state)
mb();
}
+
+/*
+ * The add_cpu() call in wake_offline_cpus() can fail as cpu_bootable()
+ * returns false for CPUs that fail the cpu_smt_thread_allowed() check
+ * or non primary threads if SMT is disabled. Re-enable SMT and set the
+ * number of SMT threads to threads per core.
+ */
+static void kexec_smt_reenable(void)
+{
+#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+ lock_device_hotplug();
+ cpu_smt_num_threads = threads_per_core;
+ cpu_smt_control = CPU_SMT_ENABLED;
+ unlock_device_hotplug();
+#endif
+}
+
/*
* We need to make sure each present CPU is online. The next kernel will scan
* the device tree and assume primary threads are online and query secondary
@@ -214,6 +231,8 @@ static void wake_offline_cpus(void)
{
int cpu = 0;
+ kexec_smt_reenable();
+
for_each_present_cpu(cpu) {
if (!cpu_online(cpu)) {
printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
--
2.34.1
2
1
20 Jan '26
From: Steven Rostedt <rostedt(a)goodmis.org>
mainline inclusion
from mainline-v6.19-rc1
commit 02e7769e38c87c92b82db59923d3b0598d153903
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/8379
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
The trace file will pause tracing if the tracing instance has the
"pause-on-trace" option set. This happens when the file is opened, and
it is unpaused when the file is closed. When this was first added, there
was only one user that paused tracing. On open, the check to pause was:
if (!iter->snapshot && (tr->trace_flags & TRACE_ITER(PAUSE_ON_TRACE)))
If it is not the snapshot tracer and the "pause-on-trace" option is
set, the open increments the trace instance's "stop_count".
On close, the check is:
if (!iter->snapshot && tr->stop_count)
That is, if it is not the snapshot buffer and it was stopped, it will
re-enable tracing.
Now there are more places that stop tracing. This means that if something
else stops tracing, tr->stop_count will be non-zero, so closing the trace
file will decrement the stop_count even though the open never incremented
it. This causes a warning because when the user that stopped tracing
enables it again, the stop_count goes below zero.
Instead of relying on the stop_count being set to know if the close of
the trace file should enable tracing again, add a new flag to the trace
iterator. The trace iterator is unique per open of the trace file, and if
the open stops tracing, set the trace iterator PAUSE flag. On close, if the
PAUSE flag is set, re-enable tracing.
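The imbalance is easy to model outside the kernel. In the toy sketch below,
open_trace()/close_trace() and the global stop_count are illustrative
stand-ins for __tracing_open()/tracing_release() and tr->stop_count; only the
pairing logic is real:
#include <stdbool.h>
#include <stdio.h>

static int stop_count;

static void tracing_stop(void)  { stop_count++; }
static void tracing_start(void) { stop_count--; }

struct iter { bool paused; };

static void open_trace(struct iter *it, bool pause_on_trace)
{
	if (pause_on_trace) {
		it->paused = true;	/* remember that *this open* stopped tracing */
		tracing_stop();
	}
}

static void close_trace(struct iter *it)
{
	if (it->paused)	/* the old code tested stop_count != 0 instead */
		tracing_start();
}

int main(void)
{
	struct iter it = { 0 };

	tracing_stop();		/* some other user stops tracing */
	open_trace(&it, false);	/* pause-on-trace disabled: open stops nothing */
	close_trace(&it);	/* flag-based close does not decrement */
	tracing_start();	/* the other user re-enables tracing */
	printf("stop_count = %d (balanced; the old check would end at -1)\n",
	       stop_count);
	return 0;
}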
Cc: Masami Hiramatsu <mhiramat(a)kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers(a)efficios.com>
Link: https://patch.msgid.link/20251202161751.24abaaf1@gandalf.local.home
Fixes: 06e0a548bad0f ("tracing: Do not disable tracing when reading the trace file")
Reported-by: syzbot+ccdec3bfe0beec58a38d(a)syzkaller.appspotmail.com
Closes: https://lore.kernel.org/all/692f44a5.a70a0220.2ea503.00c8.GAE@google.com/
Signed-off-by: Steven Rostedt (Google) <rostedt(a)goodmis.org>
Conflicts:
kernel/trace/trace.c
[The conflict is because commit bbec8e28ca was not backported.]
Signed-off-by: Tengda Wu <wutengda2(a)huawei.com>
---
include/linux/trace_events.h | 1 +
kernel/trace/trace.c | 6 ++++--
2 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 9be53cc71a34..eb2504615874 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -115,6 +115,7 @@ enum trace_iter_flags {
TRACE_FILE_LAT_FMT = 1,
TRACE_FILE_ANNOTATE = 2,
TRACE_FILE_TIME_IN_NS = 4,
+ TRACE_FILE_PAUSE = 8,
};
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2a70761cbd35..6c5a503987e7 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4428,8 +4428,10 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
* If pause-on-trace is enabled, then stop the trace while
* dumping, unless this is the "snapshot" file
*/
- if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
+ if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE)) {
+ iter->iter_flags |= TRACE_FILE_PAUSE;
tracing_stop_tr(tr);
+ }
if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) {
@@ -4580,7 +4582,7 @@ static int tracing_release(struct inode *inode, struct file *file)
if (iter->trace && iter->trace->close)
iter->trace->close(iter);
- if (!iter->snapshot && tr->stop_count)
+ if (iter->iter_flags & TRACE_FILE_PAUSE)
/* reenable tracing if it was previously enabled */
tracing_start_tr(tr);
--
2.34.1
2
1
From: Steven Rostedt <rostedt(a)goodmis.org>
mainline inclusion
from mainline-v6.19-rc1
commit 02e7769e38c87c92b82db59923d3b0598d153903
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/8379
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
The trace file will pause tracing if the tracing instance has the
"pause-on-trace" option set. This happens when the file is opened, and
it is unpaused when the file is closed. When this was first added, there
was only one user that paused tracing. On open, the check to pause was:
if (!iter->snapshot && (tr->trace_flags & TRACE_ITER(PAUSE_ON_TRACE)))
If it is not the snapshot tracer and the "pause-on-trace" option is
set, the open increments the trace instance's "stop_count".
On close, the check is:
if (!iter->snapshot && tr->stop_count)
That is, if it is not the snapshot buffer and it was stopped, it will
re-enable tracing.
Now there are more places that stop tracing. This means that if something
else stops tracing, tr->stop_count will be non-zero, so closing the trace
file will decrement the stop_count even though the open never incremented
it. This causes a warning because when the user that stopped tracing
enables it again, the stop_count goes below zero.
Instead of relying on the stop_count being set to know if the close of
the trace file should enable tracing again, add a new flag to the trace
iterator. The trace iterator is unique per open of the trace file, and if
the open stops tracing, set the trace iterator PAUSE flag. On close, if the
PAUSE flag is set, re-enable tracing.
Cc: Masami Hiramatsu <mhiramat(a)kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers(a)efficios.com>
Link: https://patch.msgid.link/20251202161751.24abaaf1@gandalf.local.home
Fixes: 06e0a548bad0f ("tracing: Do not disable tracing when reading the trace file")
Reported-by: syzbot+ccdec3bfe0beec58a38d(a)syzkaller.appspotmail.com
Closes: https://lore.kernel.org/all/692f44a5.a70a0220.2ea503.00c8.GAE@google.com/
Signed-off-by: Steven Rostedt (Google) <rostedt(a)goodmis.org>
Conflicts:
kernel/trace/trace.c
[The conflict is because commit bbec8e28ca was not backported.]
Signed-off-by: Tengda Wu <wutengda2(a)huawei.com>
---
include/linux/trace_events.h | 1 +
kernel/trace/trace.c | 6 ++++--
2 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 6d91ba4eb4f5..f7c6515cfa71 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -137,6 +137,7 @@ enum trace_iter_flags {
TRACE_FILE_LAT_FMT = 1,
TRACE_FILE_ANNOTATE = 2,
TRACE_FILE_TIME_IN_NS = 4,
+ TRACE_FILE_PAUSE = 8,
};
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 645b52ede612..1ad1c18a34d5 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4806,8 +4806,10 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
* If pause-on-trace is enabled, then stop the trace while
* dumping, unless this is the "snapshot" file
*/
- if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
+ if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE)) {
+ iter->iter_flags |= TRACE_FILE_PAUSE;
tracing_stop_tr(tr);
+ }
if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) {
@@ -4955,7 +4957,7 @@ static int tracing_release(struct inode *inode, struct file *file)
if (iter->trace && iter->trace->close)
iter->trace->close(iter);
- if (!iter->snapshot && tr->stop_count)
+ if (iter->iter_flags & TRACE_FILE_PAUSE)
/* reenable tracing if it was previously enabled */
tracing_start_tr(tr);
--
2.34.1
2
1
[PATCH OLK-5.10] cifs: fix session state check in reconnect to avoid use-after-free issue
by Wang Zhaolong 20 Jan '26
by Wang Zhaolong 20 Jan '26
20 Jan '26
From: Winston Wen <wentao(a)uniontech.com>
mainline inclusion
from mainline-v6.5-rc1
commit 99f280700b4cc02d5f141b8d15f8e9fad0418f65
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/11378
CVE: CVE-2023-53794
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
Don't collect exiting session in smb2_reconnect_server(), because it
will be released soon.
Note that the exiting session will stay in server->smb_ses_list until
it completes cifs_free_ipc() and logoff() and then deletes itself
from the list.
Signed-off-by: Winston Wen <wentao(a)uniontech.com>
Reviewed-by: Shyam Prasad N <sprasad(a)microsoft.com>
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Conflicts:
fs/cifs/smb2pdu.c
fs/smb/client/smb2pdu.c
[The code has been refactored multiple times.]
Signed-off-by: Wang Zhaolong <wangzhaolong(a)huaweicloud.com>
---
fs/cifs/smb2pdu.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 696c84632617..ffc9ac29f40e 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -3712,10 +3712,17 @@ void smb2_reconnect_server(struct work_struct *work)
INIT_LIST_HEAD(&tmp_list);
cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ spin_lock(&GlobalMid_Lock);
+ if (ses->status == CifsExiting) {
+ spin_unlock(&GlobalMid_Lock);
+ continue;
+ }
+ spin_unlock(&GlobalMid_Lock);
+
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
if (tcon->need_reconnect || tcon->need_reopen_files) {
tcon->tc_count++;
list_add_tail(&tcon->rlist, &tmp_list);
tcon_exist = true;
--
2.34.3
2
1
Edward Adam Davis (1):
Bluetooth: SCO: remove the redundant sco_conn_put
Luiz Augusto von Dentz (3):
Bluetooth: hci_core: Fix not checking skb length on hci_scodata_packet
Bluetooth: SCO: Use kref to track lifetime of sco_conn
Bluetooth: ISO: Use kref to track lifetime of iso_conn
Pauli Virtanen (4):
Bluetooth: SCO: fix sco_conn refcounting on sco_conn_ready
Bluetooth: ISO: free rx_skb if not consumed
Bluetooth: ISO: don't leak skb in ISO_CONT RX
Bluetooth: hci_core: lookup hci_conn on RX path on protocol side
include/net/bluetooth/hci_core.h | 20 ++--
net/bluetooth/hci_core.c | 82 +++++++----------
net/bluetooth/iso.c | 152 +++++++++++++++++++++++--------
net/bluetooth/l2cap_core.c | 23 ++++-
net/bluetooth/sco.c | 147 ++++++++++++++++++++++--------
5 files changed, 287 insertions(+), 137 deletions(-)
--
2.43.0
2
9
[PATCH OLK-6.6 v2 0/2] oenetcls: add a switch to enable/disable checking NIC ntuple
by Liu Jian 20 Jan '26
by Liu Jian 20 Jan '26
20 Jan '26
oenetcls: add a switch to enable/disable checking NIC ntuple
Liu Jian (1):
net/oenetcls: add a switch to enable/disable checking NIC ntuple
feature
Yue Haibing (1):
net/oenetcls: Balancing softirq to improve performance
include/linux/oenetcls.h | 32 +++++++++++----
net/core/dev.c | 19 ++++++++-
net/oenetcls/oenetcls.h | 1 +
net/oenetcls/oenetcls_flow.c | 72 +++++++++++++++++++++++++++-------
net/oenetcls/oenetcls_main.c | 17 +++++++-
net/oenetcls/oenetcls_ntuple.c | 12 +++---
6 files changed, 124 insertions(+), 29 deletions(-)
--
2.34.1
2
3
20 Jan '26
From: Li Nan <linan122(a)huawei.com>
Benjamin Marzinski (2):
dm-raid: Fix WARN_ON_ONCE check for sync_thread in raid_resume
md/raid5: recheck if reshape has finished with device_lock held
Christoph Hellwig (4):
md: add a mddev_trace_remap helper
md: add a mddev_add_trace_msg helper
md: add a mddev_is_dm helper
md/raid0: don't free conf on raid0_run failure
Christophe JAILLET (1):
md-cluster: Constify struct md_cluster_operations
Florian-Ewald Mueller (1):
md: add check for sleepers in md_wakeup_thread()
Heming Zhao (2):
md-cluster: fix hanging issue while a new disk adding
md-cluster: fix no recovery job when adding/re-adding a disk
Joel Granados (1):
raid: Remove now superfluous sentinel element from ctl_table array
Li Lingfeng (1):
md: get rdev->mddev with READ_ONCE()
Li Nan (13):
md: merge the check of capabilities into md_ioctl_valid()
md: changed the switch of RAID_VERSION to if
md: return directly before setting did_set_md_closing
md: factor out a helper to sync mddev
md: sync blockdev before stopping raid or setting readonly
md: clean up openers check in do_md_stop() and md_set_readonly()
md: check mddev->pers before calling md_set_readonly()
md: Fix overflow in is_mddev_idle
md: don't account sync_io if iostats of the disk is disabled
md: Revert "md: Fix overflow in is_mddev_idle"
md: change the return value type of md_write_start to void
md: make md_flush_request() more readable
md: prevent incorrect update of resync/recovery offset
Mateusz Jończyk (1):
md/raid1: set max_sectors during early return from choose_slow_rdev()
Mikulas Patocka (1):
md: fix a suspicious RCU usage warning
Yang Li (1):
md: Remove unneeded semicolon
Yu Kuai (38):
md: remove redundant check of 'mddev->sync_thread'
md: remove redundant md_wakeup_thread()
md: Don't ignore suspended array in md_check_recovery()
md: Don't ignore read-only array in md_check_recovery()
md: Make sure md_do_sync() will set MD_RECOVERY_DONE
md: Don't register sync_thread for reshape directly
md: Don't suspend the array for interrupted reshape
md: add a new helper rdev_has_badblock()
md/raid1: factor out helpers to add rdev to conf
md/raid1: record nonrot rdevs while adding/removing rdevs to conf
md/raid1: fix choose next idle in read_balance()
md/raid1-10: add a helper raid1_check_read_range()
md/raid1-10: factor out a new helper raid1_should_read_first()
md/raid1: factor out read_first_rdev() from read_balance()
md/raid1: factor out choose_slow_rdev() from read_balance()
md/raid1: factor out choose_bb_rdev() from read_balance()
md/raid1: factor out the code to manage sequential IO
md/raid1: factor out helpers to choose the best rdev from
read_balance()
md: don't clear MD_RECOVERY_FROZEN for new dm-raid until resume
md: export helpers to stop sync_thread
md: export helper md_is_rdwr()
md: add a new helper reshape_interrupted()
dm-raid: really frozen sync_thread during suspend
dm-raid: add a new helper prepare_suspend() in md_personality
dm-raid456, md/raid456: fix a deadlock for dm-raid456 while io
concurrent with reshape
md: rearrange recovery_flags
md: add a new enum type sync_action
md: add new helpers for sync_action
md: factor out helper to start reshape from action_store()
md: replace sysfs api sync_action with new helpers
md: remove parameter check_seq for stop_sync_thread()
md: don't fail action_store() if sync_thread is not registered
md: use new helpers in md_do_sync()
md: replace last_sync_action with new enum type
md: factor out helpers for different sync_action in md_do_sync()
md: pass in max_sectors for pers->sync_request()
md/raid5: fix spares errors about rcu usage
md/raid1: Fix data corruption for degraded array with slow disk
drivers/md/md-cluster.h | 2 +
drivers/md/md.h | 204 ++++++++--
drivers/md/raid1.h | 1 +
drivers/md/dm-raid.c | 66 ++-
drivers/md/md-bitmap.c | 9 +-
drivers/md/md-cluster.c | 51 ++-
drivers/md/md.c | 861 +++++++++++++++++++++++-----------------
drivers/md/raid0.c | 24 +-
drivers/md/raid1-10.c | 69 ++++
drivers/md/raid1.c | 601 ++++++++++++++++------------
drivers/md/raid10.c | 120 ++----
drivers/md/raid5.c | 215 +++++-----
12 files changed, 1346 insertions(+), 877 deletions(-)
--
2.39.2
2
67
[PATCH OLK-6.6 0/2] oenetcls: add a switch to enable/disable checking NIC ntuple feature
by Liu Jian 20 Jan '26
by Liu Jian 20 Jan '26
20 Jan '26
oenetcls: add a switch to enable/disable checking NIC ntuple feature
Liu Jian (1):
oenetcls: add a switch to enable/disable checking NIC ntuple feature
Yue Haibing (1):
net/oenetcls: Balancing softirq to improve performance
include/linux/oenetcls.h | 32 +++++++++++----
net/core/dev.c | 19 ++++++++-
net/oenetcls/oenetcls.h | 1 +
net/oenetcls/oenetcls_flow.c | 72 +++++++++++++++++++++++++++-------
net/oenetcls/oenetcls_main.c | 17 +++++++-
net/oenetcls/oenetcls_ntuple.c | 12 +++---
6 files changed, 124 insertions(+), 29 deletions(-)
--
2.34.1
2
3
20 Jan '26
From: Kohei Enju <enjuk(a)amazon.com>
stable inclusion
from stable-v6.6.120
commit 18de0e41d69d97fab10b91fecf10ae78a5e43232
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13398
CVE: CVE-2025-71087
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 6daa2893f323981c7894c68440823326e93a7d61 ]
There are off-by-one bugs when configuring RSS hash key and lookup
table, causing out-of-bounds reads to memory [1] and out-of-bounds
writes to device registers.
Before commit 43a3d9ba34c9 ("i40evf: Allow PF driver to configure RSS"),
the loop upper bounds were:
i <= I40E_VFQF_{HKEY,HLUT}_MAX_INDEX
which is safe since the value is the last valid index.
That commit changed the bounds to:
i <= adapter->rss_{key,lut}_size / 4
where `rss_{key,lut}_size / 4` is the number of dwords, so the last
valid index is `(rss_{key,lut}_size / 4) - 1`. Therefore, using `<=`
accesses one element past the end.
Fix the issues by using `<` instead of `<=`, ensuring we do not exceed
the bounds.
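The off-by-one can be demonstrated with the 52-byte key size from the splat
below (a standalone sketch; nothing here is driver code):
#include <stdio.h>

int main(void)
{
	unsigned int rss_key_size = 52;		/* bytes, as in the KASAN report */
	unsigned int dwords = rss_key_size / 4;	/* 13 dwords: valid indices 0..12 */
	unsigned int i, last = 0;

	for (i = 0; i <= dwords; i++)	/* buggy bound */
		last = i;
	printf("'<=' touches index %u, but valid indices are 0..%u\n",
	       last, dwords - 1);

	for (i = 0; i < dwords; i++)	/* fixed bound */
		last = i;
	printf("'<'  stops at index %u\n", last);
	return 0;
}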
[1] KASAN splat about rss_key_size off-by-one
BUG: KASAN: slab-out-of-bounds in iavf_config_rss+0x619/0x800
Read of size 4 at addr ffff888102c50134 by task kworker/u8:6/63
CPU: 0 UID: 0 PID: 63 Comm: kworker/u8:6 Not tainted 6.18.0-rc2-enjuk-tnguy-00378-g3005f5b77652-dirty #156 PREEMPT(voluntary)
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
Workqueue: iavf iavf_watchdog_task
Call Trace:
<TASK>
dump_stack_lvl+0x6f/0xb0
print_report+0x170/0x4f3
kasan_report+0xe1/0x1a0
iavf_config_rss+0x619/0x800
iavf_watchdog_task+0x2be7/0x3230
process_one_work+0x7fd/0x1420
worker_thread+0x4d1/0xd40
kthread+0x344/0x660
ret_from_fork+0x249/0x320
ret_from_fork_asm+0x1a/0x30
</TASK>
Allocated by task 63:
kasan_save_stack+0x30/0x50
kasan_save_track+0x14/0x30
__kasan_kmalloc+0x7f/0x90
__kmalloc_noprof+0x246/0x6f0
iavf_watchdog_task+0x28fc/0x3230
process_one_work+0x7fd/0x1420
worker_thread+0x4d1/0xd40
kthread+0x344/0x660
ret_from_fork+0x249/0x320
ret_from_fork_asm+0x1a/0x30
The buggy address belongs to the object at ffff888102c50100
which belongs to the cache kmalloc-64 of size 64
The buggy address is located 0 bytes to the right of
allocated 52-byte region [ffff888102c50100, ffff888102c50134)
The buggy address belongs to the physical page:
page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x102c50
flags: 0x200000000000000(node=0|zone=2)
page_type: f5(slab)
raw: 0200000000000000 ffff8881000418c0 dead000000000122 0000000000000000
raw: 0000000000000000 0000000080200020 00000000f5000000 0000000000000000
page dumped because: kasan: bad access detected
Memory state around the buggy address:
ffff888102c50000: 00 00 00 00 00 00 00 fc fc fc fc fc fc fc fc fc
ffff888102c50080: 00 00 00 00 00 00 00 fc fc fc fc fc fc fc fc fc
>ffff888102c50100: 00 00 00 00 00 00 04 fc fc fc fc fc fc fc fc fc
^
ffff888102c50180: 00 00 00 00 00 00 00 00 fc fc fc fc fc fc fc fc
ffff888102c50200: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
Fixes: 43a3d9ba34c9 ("i40evf: Allow PF driver to configure RSS")
Signed-off-by: Kohei Enju <enjuk(a)amazon.com>
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov(a)intel.com>
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel(a)intel.com>
Tested-by: Rafal Romanowski <rafal.romanowski(a)intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen(a)intel.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Pu Lehui <pulehui(a)huawei.com>
---
drivers/net/ethernet/intel/iavf/iavf_main.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index fde8d7b80ca6..f6a748ae1c95 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1750,11 +1750,11 @@ static int iavf_config_rss_reg(struct iavf_adapter *adapter)
u16 i;
dw = (u32 *)adapter->rss_key;
- for (i = 0; i <= adapter->rss_key_size / 4; i++)
+ for (i = 0; i < adapter->rss_key_size / 4; i++)
wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
dw = (u32 *)adapter->rss_lut;
- for (i = 0; i <= adapter->rss_lut_size / 4; i++)
+ for (i = 0; i < adapter->rss_lut_size / 4; i++)
wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
iavf_flush(hw);
--
2.34.1
2
1
From: Guangshuo Li <lgs201920130244(a)gmail.com>
stable inclusion
from stable-v6.6.120
commit 2c4c0c09f9648ba766d399917d420d03e7b3e1f8
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13404
CVE: CVE-2025-71093
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 9c72a5182ed92904d01057f208c390a303f00a0f upstream.
In e1000_tbi_should_accept() we read the last byte of the frame via
'data[length - 1]' to evaluate the TBI workaround. If the descriptor-
reported length is zero or larger than the actual RX buffer size, this
read goes out of bounds and can hit unrelated slab objects. The issue
is observed from the NAPI receive path (e1000_clean_rx_irq):
==================================================================
BUG: KASAN: slab-out-of-bounds in e1000_tbi_should_accept+0x610/0x790
Read of size 1 at addr ffff888014114e54 by task sshd/363
CPU: 0 PID: 363 Comm: sshd Not tainted 5.18.0-rc1 #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014
Call Trace:
<IRQ>
dump_stack_lvl+0x5a/0x74
print_address_description+0x7b/0x440
print_report+0x101/0x200
kasan_report+0xc1/0xf0
e1000_tbi_should_accept+0x610/0x790
e1000_clean_rx_irq+0xa8c/0x1110
e1000_clean+0xde2/0x3c10
__napi_poll+0x98/0x380
net_rx_action+0x491/0xa20
__do_softirq+0x2c9/0x61d
do_softirq+0xd1/0x120
</IRQ>
<TASK>
__local_bh_enable_ip+0xfe/0x130
ip_finish_output2+0x7d5/0xb00
__ip_queue_xmit+0xe24/0x1ab0
__tcp_transmit_skb+0x1bcb/0x3340
tcp_write_xmit+0x175d/0x6bd0
__tcp_push_pending_frames+0x7b/0x280
tcp_sendmsg_locked+0x2e4f/0x32d0
tcp_sendmsg+0x24/0x40
sock_write_iter+0x322/0x430
vfs_write+0x56c/0xa60
ksys_write+0xd1/0x190
do_syscall_64+0x43/0x90
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x7f511b476b10
Code: 73 01 c3 48 8b 0d 88 d3 2b 00 f7 d8 64 89 01 48 83 c8 ff c3 66 0f 1f 44 00 00 83 3d f9 2b 2c 00 00 75 10 b8 01 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 31 c3 48 83 ec 08 e8 8e 9b 01 00 48 89 04 24
RSP: 002b:00007ffc9211d4e8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
RAX: ffffffffffffffda RBX: 0000000000004024 RCX: 00007f511b476b10
RDX: 0000000000004024 RSI: 0000559a9385962c RDI: 0000000000000003
RBP: 0000559a9383a400 R08: fffffffffffffff0 R09: 0000000000004f00
R10: 0000000000000070 R11: 0000000000000246 R12: 0000000000000000
R13: 00007ffc9211d57f R14: 0000559a9347bde7 R15: 0000000000000003
</TASK>
Allocated by task 1:
__kasan_krealloc+0x131/0x1c0
krealloc+0x90/0xc0
add_sysfs_param+0xcb/0x8a0
kernel_add_sysfs_param+0x81/0xd4
param_sysfs_builtin+0x138/0x1a6
param_sysfs_init+0x57/0x5b
do_one_initcall+0x104/0x250
do_initcall_level+0x102/0x132
do_initcalls+0x46/0x74
kernel_init_freeable+0x28f/0x393
kernel_init+0x14/0x1a0
ret_from_fork+0x22/0x30
The buggy address belongs to the object at ffff888014114000
which belongs to the cache kmalloc-2k of size 2048
The buggy address is located 1620 bytes to the right of
2048-byte region [ffff888014114000, ffff888014114800]
The buggy address belongs to the physical page:
page:ffffea0000504400 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x14110
head:ffffea0000504400 order:3 compound_mapcount:0 compound_pincount:0
flags: 0x100000000010200(slab|head|node=0|zone=1)
raw: 0100000000010200 0000000000000000 dead000000000001 ffff888013442000
raw: 0000000000000000 0000000000080008 00000001ffffffff 0000000000000000
page dumped because: kasan: bad access detected
==================================================================
This happens because the TBI check unconditionally dereferences the last
byte without validating the reported length first:
u8 last_byte = *(data + length - 1);
Fix by rejecting the frame early if the length is zero, or if it exceeds
adapter->rx_buffer_len. This preserves the TBI workaround semantics for
valid frames and prevents touching memory beyond the RX buffer.
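The shape of the resulting guard, as a standalone sketch (rx_buffer_len
stands in for adapter->rx_buffer_len; illustrative only, not the driver
code):
#include <stdio.h>

/* Return the last byte of the frame, or -1 if length makes the access
 * unsafe -- the same two bounds the patch checks before data[length - 1]. */
static int tbi_last_byte(const unsigned char *data, unsigned int length,
			 unsigned int rx_buffer_len)
{
	if (length == 0)		/* would read data[-1] */
		return -1;
	if (length > rx_buffer_len)	/* would read past the end of the buffer */
		return -1;
	return data[length - 1];	/* in bounds: safe to inspect */
}

int main(void)
{
	unsigned char buf[2048] = { 0 };

	printf("len=0    -> %d\n", tbi_last_byte(buf, 0, sizeof(buf)));
	printf("len=4096 -> %d\n", tbi_last_byte(buf, 4096, sizeof(buf)));
	printf("len=60   -> %d\n", tbi_last_byte(buf, 60, sizeof(buf)));
	return 0;
}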
Fixes: 2037110c96d5 ("e1000: move tbi workaround code into helper function")
Cc: stable(a)vger.kernel.org
Signed-off-by: Guangshuo Li <lgs201920130244(a)gmail.com>
Reviewed-by: Simon Horman <horms(a)kernel.org>
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov(a)intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen(a)intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Pu Lehui <pulehui(a)huawei.com>
---
drivers/net/ethernet/intel/e1000/e1000_main.c | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index da6e303ad99b..d015a0a85f07 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4091,7 +4091,15 @@ static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
u32 length, const u8 *data)
{
struct e1000_hw *hw = &adapter->hw;
- u8 last_byte = *(data + length - 1);
+ u8 last_byte;
+
+ /* Guard against OOB on data[length - 1] */
+ if (unlikely(!length))
+ return false;
+ /* Upper bound: length must not exceed rx_buffer_len */
+ if (unlikely(length > adapter->rx_buffer_len))
+ return false;
+ last_byte = *(data + length - 1);
if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
unsigned long irq_flags;
--
2.34.1
2
1
Fix CVE-2025-40016
Ricardo Ribalda (2):
media: uvcvideo: Allow extra entities
media: uvcvideo: Use heuristic to find stream entity
Thadeu Lima de Souza Cascardo (1):
media: uvcvideo: Mark invalid entities with id UVC_INVALID_ENTITY_ID
drivers/media/usb/uvc/uvc_driver.c | 81 +++++++++++++++++++++---------
drivers/media/usb/uvc/uvcvideo.h | 9 +++-
2 files changed, 63 insertions(+), 27 deletions(-)
--
2.39.2
2
4
20 Jan '26
From: Steven Rostedt <rostedt(a)goodmis.org>
mainline inclusion
from mainline-v6.19-rc2
commit ef7f38df890f5dcd2ae62f8dbde191d72f3bebae
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13440
CVE: CVE-2025-71125
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
Synthetic events currently do not have a function to register perf events.
This leads to calling the tracepoint register functions with a NULL
function pointer which triggers:
------------[ cut here ]------------
WARNING: kernel/tracepoint.c:175 at tracepoint_add_func+0x357/0x370, CPU#2: perf/2272
Modules linked in: kvm_intel kvm irqbypass
CPU: 2 UID: 0 PID: 2272 Comm: perf Not tainted 6.18.0-ftest-11964-ge022764176fc-dirty #323 PREEMPTLAZY
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.17.0-debian-1.17.0-1 04/01/2014
RIP: 0010:tracepoint_add_func+0x357/0x370
Code: 28 9c e8 4c 0b f5 ff eb 0f 4c 89 f7 48 c7 c6 80 4d 28 9c e8 ab 89 f4 ff 31 c0 5b 41 5c 41 5d 41 5e 41 5f 5d c3 cc cc cc cc cc <0f> 0b 49 c7 c6 ea ff ff ff e9 ee fe ff ff 0f 0b e9 f9 fe ff ff 0f
RSP: 0018:ffffabc0c44d3c40 EFLAGS: 00010246
RAX: 0000000000000001 RBX: ffff9380aa9e4060 RCX: 0000000000000000
RDX: 000000000000000a RSI: ffffffff9e1d4a98 RDI: ffff937fcf5fd6c8
RBP: 0000000000000001 R08: 0000000000000007 R09: ffff937fcf5fc780
R10: 0000000000000003 R11: ffffffff9c193910 R12: 000000000000000a
R13: ffffffff9e1e5888 R14: 0000000000000000 R15: ffffabc0c44d3c78
FS: 00007f6202f5f340(0000) GS:ffff93819f00f000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 000055d3162281a8 CR3: 0000000106a56003 CR4: 0000000000172ef0
Call Trace:
<TASK>
tracepoint_probe_register+0x5d/0x90
synth_event_reg+0x3c/0x60
perf_trace_event_init+0x204/0x340
perf_trace_init+0x85/0xd0
perf_tp_event_init+0x2e/0x50
perf_try_init_event+0x6f/0x230
? perf_event_alloc+0x4bb/0xdc0
perf_event_alloc+0x65a/0xdc0
__se_sys_perf_event_open+0x290/0x9f0
do_syscall_64+0x93/0x7b0
? entry_SYSCALL_64_after_hwframe+0x76/0x7e
? trace_hardirqs_off+0x53/0xc0
entry_SYSCALL_64_after_hwframe+0x76/0x7e
Instead, have the code return -ENODEV, which doesn't warn and has perf
error out with:
# perf record -e synthetic:futex_wait
Error:
The sys_perf_event_open() syscall returned with 19 (No such device) for event (synthetic:futex_wait).
"dmesg | grep -i perf" may provide additional information.
Ideally perf should support synthetic events, but for now just fix the
warning. The support can come later.
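As a toy model of the guard (illustrative, not kernel code; the function
pointer plays the role of call->class->perf_probe):
#include <errno.h>
#include <stdio.h>

struct event_class { void (*perf_probe)(void); };

static int perf_register(struct event_class *cls)
{
	if (!cls->perf_probe)
		return -ENODEV;	/* what the patch returns for synthetic events */
	cls->perf_probe();	/* stand-in for tracepoint_probe_register() */
	return 0;
}

int main(void)
{
	struct event_class synthetic = { .perf_probe = NULL };

	printf("register -> %d (-ENODEV, instead of calling through NULL)\n",
	       perf_register(&synthetic));
	return 0;
}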
Cc: stable(a)vger.kernel.org
Cc: Masami Hiramatsu <mhiramat(a)kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers(a)efficios.com>
Cc: Arnaldo Carvalho de Melo <acme(a)kernel.org>
Cc: Jiri Olsa <jolsa(a)kernel.org>
Cc: Namhyung Kim <namhyung(a)kernel.org>
Link: https://patch.msgid.link/20251216182440.147e4453@gandalf.local.home
Fixes: 4b147936fa509 ("tracing: Add support for 'synthetic' events")
Reported-by: Ian Rogers <irogers(a)google.com>
Signed-off-by: Steven Rostedt (Google) <rostedt(a)goodmis.org>
Signed-off-by: Tengda Wu <wutengda2(a)huawei.com>
---
kernel/trace/trace_events.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 5e9cd400ecb4..4658e8754356 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -300,6 +300,8 @@ int trace_event_reg(struct trace_event_call *call,
#ifdef CONFIG_PERF_EVENTS
case TRACE_REG_PERF_REGISTER:
+ if (!call->class->perf_probe)
+ return -ENODEV;
return tracepoint_probe_register(call->tp,
call->class->perf_probe,
call);
--
2.34.1
2
1
From: Steven Rostedt <rostedt(a)goodmis.org>
stable inclusion
from stable-v6.6.121
commit 65b1971147ec12f0b1cee0811c859a3d7d9b04ce
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13440
CVE: CVE-2025-71125
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit ef7f38df890f5dcd2ae62f8dbde191d72f3bebae upstream.
Synthetic events currently do not have a function to register perf events.
This leads to calling the tracepoint register functions with a NULL
function pointer which triggers:
------------[ cut here ]------------
WARNING: kernel/tracepoint.c:175 at tracepoint_add_func+0x357/0x370, CPU#2: perf/2272
Modules linked in: kvm_intel kvm irqbypass
CPU: 2 UID: 0 PID: 2272 Comm: perf Not tainted 6.18.0-ftest-11964-ge022764176fc-dirty #323 PREEMPTLAZY
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.17.0-debian-1.17.0-1 04/01/2014
RIP: 0010:tracepoint_add_func+0x357/0x370
Code: 28 9c e8 4c 0b f5 ff eb 0f 4c 89 f7 48 c7 c6 80 4d 28 9c e8 ab 89 f4 ff 31 c0 5b 41 5c 41 5d 41 5e 41 5f 5d c3 cc cc cc cc cc <0f> 0b 49 c7 c6 ea ff ff ff e9 ee fe ff ff 0f 0b e9 f9 fe ff ff 0f
RSP: 0018:ffffabc0c44d3c40 EFLAGS: 00010246
RAX: 0000000000000001 RBX: ffff9380aa9e4060 RCX: 0000000000000000
RDX: 000000000000000a RSI: ffffffff9e1d4a98 RDI: ffff937fcf5fd6c8
RBP: 0000000000000001 R08: 0000000000000007 R09: ffff937fcf5fc780
R10: 0000000000000003 R11: ffffffff9c193910 R12: 000000000000000a
R13: ffffffff9e1e5888 R14: 0000000000000000 R15: ffffabc0c44d3c78
FS: 00007f6202f5f340(0000) GS:ffff93819f00f000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 000055d3162281a8 CR3: 0000000106a56003 CR4: 0000000000172ef0
Call Trace:
<TASK>
tracepoint_probe_register+0x5d/0x90
synth_event_reg+0x3c/0x60
perf_trace_event_init+0x204/0x340
perf_trace_init+0x85/0xd0
perf_tp_event_init+0x2e/0x50
perf_try_init_event+0x6f/0x230
? perf_event_alloc+0x4bb/0xdc0
perf_event_alloc+0x65a/0xdc0
__se_sys_perf_event_open+0x290/0x9f0
do_syscall_64+0x93/0x7b0
? entry_SYSCALL_64_after_hwframe+0x76/0x7e
? trace_hardirqs_off+0x53/0xc0
entry_SYSCALL_64_after_hwframe+0x76/0x7e
Instead, have the code return -ENODEV, which doesn't warn and has perf
error out with:
# perf record -e synthetic:futex_wait
Error:
The sys_perf_event_open() syscall returned with 19 (No such device) for event (synthetic:futex_wait).
"dmesg | grep -i perf" may provide additional information.
Ideally perf should support synthetic events, but for now just fix the
warning. The support can come later.
Cc: stable(a)vger.kernel.org
Cc: Masami Hiramatsu <mhiramat(a)kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers(a)efficios.com>
Cc: Arnaldo Carvalho de Melo <acme(a)kernel.org>
Cc: Jiri Olsa <jolsa(a)kernel.org>
Cc: Namhyung Kim <namhyung(a)kernel.org>
Link: https://patch.msgid.link/20251216182440.147e4453@gandalf.local.home
Fixes: 4b147936fa509 ("tracing: Add support for 'synthetic' events")
Reported-by: Ian Rogers <irogers(a)google.com>
Signed-off-by: Steven Rostedt (Google) <rostedt(a)goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Tengda Wu <wutengda2(a)huawei.com>
---
kernel/trace/trace_events.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 564fc0e9adda..068dc9209284 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -689,6 +689,8 @@ int trace_event_reg(struct trace_event_call *call,
#ifdef CONFIG_PERF_EVENTS
case TRACE_REG_PERF_REGISTER:
+ if (!call->class->perf_probe)
+ return -ENODEV;
return tracepoint_probe_register(call->tp,
call->class->perf_probe,
call);
--
2.34.1
2
1
From: Joanne Koong <joannelkoong(a)gmail.com>
stable inclusion
from stable-v6.12.64
commit fbba8b00bbe4e4f958a2b0654cc1219a7e6597f6
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13373
CVE: CVE-2025-68821
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit bd5603eaae0aabf527bfb3ce1bb07e979ce5bd50 upstream.
Commit e26ee4efbc79 ("fuse: allocate ff->release_args only if release is
needed") skips allocating ff->release_args if the server does not
implement open. However in doing so, fuse_prepare_release() now skips
grabbing the reference on the inode, which makes it possible for an
inode to be evicted from the dcache while there are inflight readahead
requests. This causes a deadlock if the server triggers reclaim while
servicing the readahead request and reclaim attempts to evict the inode
of the file being read ahead. Since the folio is locked during
readahead, when reclaim evicts the fuse inode and fuse_evict_inode()
attempts to remove all folios associated with the inode from the page
cache (truncate_inode_pages_range()), reclaim will block forever waiting
for the lock since readahead cannot relinquish the lock because it is
itself blocked in reclaim:
>>> stack_trace(1504735)
folio_wait_bit_common (mm/filemap.c:1308:4)
folio_lock (./include/linux/pagemap.h:1052:3)
truncate_inode_pages_range (mm/truncate.c:336:10)
fuse_evict_inode (fs/fuse/inode.c:161:2)
evict (fs/inode.c:704:3)
dentry_unlink_inode (fs/dcache.c:412:3)
__dentry_kill (fs/dcache.c:615:3)
shrink_kill (fs/dcache.c:1060:12)
shrink_dentry_list (fs/dcache.c:1087:3)
prune_dcache_sb (fs/dcache.c:1168:2)
super_cache_scan (fs/super.c:221:10)
do_shrink_slab (mm/shrinker.c:435:9)
shrink_slab (mm/shrinker.c:626:10)
shrink_node (mm/vmscan.c:5951:2)
shrink_zones (mm/vmscan.c:6195:3)
do_try_to_free_pages (mm/vmscan.c:6257:3)
do_swap_page (mm/memory.c:4136:11)
handle_pte_fault (mm/memory.c:5562:10)
handle_mm_fault (mm/memory.c:5870:9)
do_user_addr_fault (arch/x86/mm/fault.c:1338:10)
handle_page_fault (arch/x86/mm/fault.c:1481:3)
exc_page_fault (arch/x86/mm/fault.c:1539:2)
asm_exc_page_fault+0x22/0x27
Fix this deadlock by allocating ff->release_args and grabbing the
reference on the inode when preparing the file for release even if the
server does not implement open. The inode reference will be dropped when
the last reference on the fuse file is dropped (see fuse_file_put() ->
fuse_release_end()).
Fixes: e26ee4efbc79 ("fuse: allocate ff->release_args only if release is needed")
Cc: stable(a)vger.kernel.org
Signed-off-by: Joanne Koong <joannelkoong(a)gmail.com>
Reported-by: Omar Sandoval <osandov(a)fb.com>
Signed-off-by: Miklos Szeredi <mszeredi(a)redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Zizhi Wo <wozizhi(a)huawei.com>
---
fs/fuse/file.c | 26 +++++++++++++++++++-------
1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 4c817fb79211..abbe1c576bad 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -109,7 +109,9 @@ static void fuse_file_put(struct fuse_file *ff, bool sync)
fuse_file_io_release(ff, ra->inode);
if (!args) {
- /* Do nothing when server does not implement 'open' */
+ /* Do nothing when server does not implement 'opendir' */
+ } else if (args->opcode == FUSE_RELEASE && ff->fm->fc->no_open) {
+ fuse_release_end(ff->fm, args, 0);
} else if (sync) {
fuse_simple_request(ff->fm, args);
fuse_release_end(ff->fm, args, 0);
@@ -130,8 +132,17 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
struct fuse_file *ff;
int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
bool open = isdir ? !fc->no_opendir : !fc->no_open;
+ bool release = !isdir || open;
- ff = fuse_file_alloc(fm, open);
+ /*
+ * ff->args->release_args still needs to be allocated (so we can hold an
+ * inode reference while there are pending inflight file operations when
+ * ->release() is called, see fuse_prepare_release()) even if
+ * fc->no_open is set else it becomes possible for reclaim to deadlock
+ * if while servicing the readahead request the server triggers reclaim
+ * and reclaim evicts the inode of the file being read ahead.
+ */
+ ff = fuse_file_alloc(fm, release);
if (!ff)
return ERR_PTR(-ENOMEM);
@@ -151,13 +162,14 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
fuse_file_free(ff);
return ERR_PTR(err);
} else {
- /* No release needed */
- kfree(ff->args);
- ff->args = NULL;
- if (isdir)
+ if (isdir) {
+ /* No release needed */
+ kfree(ff->args);
+ ff->args = NULL;
fc->no_opendir = 1;
- else
+ } else {
fc->no_open = 1;
+ }
}
}
--
2.39.2
[PATCH OLK-6.6] fuse: fix io-uring list corruption for terminated non-committed requests
by Zizhi Wo 19 Jan '26
19 Jan '26
From: Joanne Koong <joannelkoong(a)gmail.com>
stable inclusion
from stable-v6.18.3
commit a6d1f1ace16d0e777a85f84267160052d3499b6e
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13358
CVE: CVE-2025-68805
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 95c39eef7c2b666026c69ab5b30471da94ea2874 upstream.
When a request is terminated before it has been committed, the request
is not removed from the queue's list. This leaves a dangling list entry
that leads to list corruption and use-after-free issues.
Remove the request from the queue's list for terminated non-committed
requests.
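For clarity, the end-of-request path after the fix reduces to (sketch,
details in the hunk below):

    spin_lock(&queue->lock);
    ent->fuse_req = NULL;
    /* list_del_init() is idempotent: after it runs, req->list points at
     * itself, so unlinking unconditionally here leaves no dangling entry
     * behind for a terminated, non-committed request */
    list_del_init(&req->list);
    /* background accounting elided */
    spin_unlock(&queue->lock);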
Signed-off-by: Joanne Koong <joannelkoong(a)gmail.com>
Fixes: c090c8abae4b ("fuse: Add io-uring sqe commit and fetch support")
Cc: stable(a)vger.kernel.org
Reviewed-by: Bernd Schubert <bschubert(a)ddn.com>
Signed-off-by: Miklos Szeredi <mszeredi(a)redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Zizhi Wo <wozizhi(a)huawei.com>
---
fs/fuse/dev_uring.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
index 653d154c776e..f4ca09ba0694 100644
--- a/fs/fuse/dev_uring.c
+++ b/fs/fuse/dev_uring.c
@@ -86,6 +86,7 @@ static void fuse_uring_req_end(struct fuse_ring_ent *ent, struct fuse_req *req,
lockdep_assert_not_held(&queue->lock);
spin_lock(&queue->lock);
ent->fuse_req = NULL;
+ list_del_init(&req->list);
if (test_bit(FR_BACKGROUND, &req->flags)) {
queue->active_background--;
spin_lock(&fc->bg_lock);
--
2.39.2
[PATCH OLK-6.6] fuse: missing copy_finish in fuse-over-io-uring argument copies
by Zizhi Wo 19 Jan '26
19 Jan '26
From: Cheng Ding <cding(a)ddn.com>
stable inclusion
from stable-v6.18.3
commit b79938863f436960eff209130f025c4bd3026bf8
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13344
CVE: CVE-2025-68791
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 6e0d7f7f4a43ac8868e98c87ecf48805aa8c24dd upstream.
Fix a possible reference count leak of payload pages during
fuse argument copies.
[Joanne: simplified error cleanup]
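The fix applies the usual init/finish pairing: every fuse_copy_init() is now
matched by a fuse_copy_finish(), which unmaps and puts the last pinned page
even when the copy fails. Sketched from the dev_uring.c hunks below:

    fuse_copy_init(&cs, 0, &iter);
    cs.is_uring = 1;
    cs.req = req;

    err = fuse_copy_out_args(&cs, args, ring_in_out.payload_sz);
    fuse_copy_finish(&cs);  /* drop the page reference on success and error */
    return err;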
Fixes: c090c8abae4b ("fuse: Add io-uring sqe commit and fetch support")
Cc: stable(a)vger.kernel.org # v6.14
Signed-off-by: Cheng Ding <cding(a)ddn.com>
Signed-off-by: Bernd Schubert <bschubert(a)ddn.com>
Reviewed-by: Joanne Koong <joannelkoong(a)gmail.com>
Signed-off-by: Miklos Szeredi <mszeredi(a)redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Conflicts:
fs/fuse/dev_uring.c
fs/fuse/fuse_dev_i.h
[Simple context conflicts because commit 03a3617f92c2
("fuse: use boolean bit-fields in struct fuse_copy_state") is not merged;
this does not affect the patch.]
Signed-off-by: Zizhi Wo <wozizhi(a)huawei.com>
---
fs/fuse/dev.c | 2 +-
fs/fuse/dev_uring.c | 5 ++++-
fs/fuse/fuse_dev_i.h | 1 +
3 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 67407e59991c..eabc00e6981a 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -748,7 +748,7 @@ void fuse_copy_init(struct fuse_copy_state *cs, int write,
}
/* Unmap and put previous page of userspace buffer */
-static void fuse_copy_finish(struct fuse_copy_state *cs)
+void fuse_copy_finish(struct fuse_copy_state *cs)
{
if (cs->currbuf) {
struct pipe_buffer *buf = cs->currbuf;
diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
index c14a939c5883..653d154c776e 100644
--- a/fs/fuse/dev_uring.c
+++ b/fs/fuse/dev_uring.c
@@ -555,7 +555,9 @@ static int fuse_uring_copy_from_ring(struct fuse_ring *ring,
cs.is_uring = 1;
cs.req = req;
- return fuse_copy_out_args(&cs, args, ring_in_out.payload_sz);
+ err = fuse_copy_out_args(&cs, args, ring_in_out.payload_sz);
+ fuse_copy_finish(&cs);
+ return err;
}
/*
@@ -606,6 +608,7 @@ static int fuse_uring_args_to_ring(struct fuse_ring *ring, struct fuse_req *req,
/* copy the payload */
err = fuse_copy_args(&cs, num_args, args->in_pages,
(struct fuse_arg *)in_args, 0);
+ fuse_copy_finish(&cs);
if (err) {
pr_info_ratelimited("%s fuse_copy_args failed\n", __func__);
return err;
diff --git a/fs/fuse/fuse_dev_i.h b/fs/fuse/fuse_dev_i.h
index a20cbaca9ceb..a413050ceba6 100644
--- a/fs/fuse/fuse_dev_i.h
+++ b/fs/fuse/fuse_dev_i.h
@@ -55,6 +55,7 @@ void fuse_dev_end_requests(struct list_head *head);
void fuse_copy_init(struct fuse_copy_state *cs, int write,
struct iov_iter *iter);
+void fuse_copy_finish(struct fuse_copy_state *cs);
int fuse_copy_args(struct fuse_copy_state *cs, unsigned int numargs,
unsigned int argpages, struct fuse_arg *args,
int zeroing);
--
2.39.2
[PATCH OLK-6.6] fsnotify: do not generate ACCESS/MODIFY events on child for special files
by Zizhi Wo 19 Jan '26
19 Jan '26
From: Amir Goldstein <amir73il(a)gmail.com>
stable inclusion
from stable-v6.6.120
commit e0643d46759db8b84c0504a676043e5e341b6c81
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13341
CVE: CVE-2025-68788
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 635bc4def026a24e071436f4f356ea08c0eed6ff upstream.
inotify/fanotify do not allow users with no read access to a file to
subscribe to events (e.g. IN_ACCESS/IN_MODIFY), but they do allow the
same user to subscribe for watching events on children when the user
has access to the parent directory (e.g. /dev).
Users with no read access to a file but with read access to its parent
directory can still stat the file and see if it was accessed/modified
via atime/mtime change.
The same is not true for special files (e.g. /dev/null). Users will not
generally observe atime/mtime changes when other users read/write to
special files, only when someone sets atime/mtime via utimensat().
Align fsnotify events with this stat behavior and do not generate
ACCESS/MODIFY events to parent watchers on read/write of special files.
The events are still generated to parent watchers on utimensat(). This
closes some side-channels that could possibly be used for information
exfiltration [1].
[1] https://snee.la/pdf/pubs/file-notification-attacks.pdf
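A hedged user-space sketch of the behavior change (assumes a watchable /dev
and a readable /dev/zero; before the fix the parent watch could report
IN_ACCESS for the child, afterwards no event is queued):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/inotify.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096], c;
            int ifd = inotify_init1(IN_NONBLOCK);
            int wd = inotify_add_watch(ifd, "/dev", IN_ACCESS | IN_MODIFY);
            int fd = open("/dev/zero", O_RDONLY);
            ssize_t n;

            (void)read(fd, &c, 1);          /* read a special file */
            n = read(ifd, buf, sizeof(buf));
            /* with the fix: n == -1 and errno == EAGAIN, no child event */
            printf("inotify read: %zd (errno %d)\n", n, n < 0 ? errno : 0);
            close(fd);
            inotify_rm_watch(ifd, wd);
            close(ifd);
            return 0;
    }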
Reported-by: Sudheendra Raghav Neela <sneela(a)tugraz.at>
CC: stable(a)vger.kernel.org
Signed-off-by: Amir Goldstein <amir73il(a)gmail.com>
Signed-off-by: Jan Kara <jack(a)suse.cz>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Zizhi Wo <wozizhi(a)huawei.com>
---
fs/notify/fsnotify.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 9cc4ebb53504..82602157bcc0 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -224,8 +224,15 @@ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
/*
* Include parent/name in notification either if some notification
* groups require parent info or the parent is interested in this event.
+ * The parent interest in ACCESS/MODIFY events does not apply to special
+ * files, where read/write are not on the filesystem of the parent and
+ * events can provide an undesirable side-channel for information
+ * exfiltration.
*/
- parent_interested = mask & p_mask & ALL_FSNOTIFY_EVENTS;
+ parent_interested = mask & p_mask & ALL_FSNOTIFY_EVENTS &&
+ !(data_type == FSNOTIFY_EVENT_PATH &&
+ d_is_special(dentry) &&
+ (mask & (FS_ACCESS | FS_MODIFY)));
if (parent_needed || parent_interested) {
/* When notifying parent, child should be passed as data */
WARN_ON_ONCE(inode != fsnotify_data_inode(data, data_type));
--
2.39.2
From: Li Nan <linan122(a)huawei.com>
Benjamin Marzinski (2):
dm-raid: Fix WARN_ON_ONCE check for sync_thread in raid_resume
md/raid5: recheck if reshape has finished with device_lock held
Christoph Hellwig (4):
md: add a mddev_trace_remap helper
md: add a mddev_add_trace_msg helper
md: add a mddev_is_dm helper
md/raid0: don't free conf on raid0_run failure
Christophe JAILLET (1):
md-cluster: Constify struct md_cluster_operations
Florian-Ewald Mueller (1):
md: add check for sleepers in md_wakeup_thread()
Heming Zhao (2):
md-cluster: fix hanging issue while a new disk adding
md-cluster: fix no recovery job when adding/re-adding a disk
Joel Granados (1):
raid: Remove now superfluous sentinel element from ctl_table array
Li Lingfeng (1):
md: get rdev->mddev with READ_ONCE()
Li Nan (12):
md: merge the check of capabilities into md_ioctl_valid()
md: changed the switch of RAID_VERSION to if
md: return directly before setting did_set_md_closing
md: factor out a helper to sync mddev
md: sync blockdev before stopping raid or setting readonly
md: clean up openers check in do_md_stop() and md_set_readonly()
md: check mddev->pers before calling md_set_readonly()
md: Fix overflow in is_mddev_idle
md: don't account sync_io if iostats of the disk is disabled
md: Revert "md: Fix overflow in is_mddev_idle"
md: change the return value type of md_write_start to void
md: make md_flush_request() more readable
Mateusz Jończyk (1):
md/raid1: set max_sectors during early return from choose_slow_rdev()
Mikulas Patocka (1):
md: fix a suspicious RCU usage warning
Yang Li (1):
md: Remove unneeded semicolon
Yu Kuai (37):
md: remove redundant check of 'mddev->sync_thread'
md: remove redundant md_wakeup_thread()
md: Don't ignore suspended array in md_check_recovery()
md: Don't ignore read-only array in md_check_recovery()
md: Make sure md_do_sync() will set MD_RECOVERY_DONE
md: Don't register sync_thread for reshape directly
md: Don't suspend the array for interrupted reshape
md: add a new helper rdev_has_badblock()
md/raid1: factor out helpers to add rdev to conf
md/raid1: record nonrot rdevs while adding/removing rdevs to conf
md/raid1: fix choose next idle in read_balance()
md/raid1-10: add a helper raid1_check_read_range()
md/raid1-10: factor out a new helper raid1_should_read_first()
md/raid1: factor out read_first_rdev() from read_balance()
md/raid1: factor out choose_slow_rdev() from read_balance()
md/raid1: factor out choose_bb_rdev() from read_balance()
md/raid1: factor out the code to manage sequential IO
md/raid1: factor out helpers to choose the best rdev from
read_balance()
md: don't clear MD_RECOVERY_FROZEN for new dm-raid until resume
md: export helpers to stop sync_thread
md: export helper md_is_rdwr()
md: add a new helper reshape_interrupted()
dm-raid: really frozen sync_thread during suspend
dm-raid: add a new helper prepare_suspend() in md_personality
dm-raid456, md/raid456: fix a deadlock for dm-raid456 while io
concurrent with reshape
md: rearrange recovery_flags
md: add a new enum type sync_action
md: add new helpers for sync_action
md: factor out helper to start reshape from action_store()
md: replace sysfs api sync_action with new helpers
md: remove parameter check_seq for stop_sync_thread()
md: don't fail action_store() if sync_thread is not registered
md: use new helpers in md_do_sync()
md: replace last_sync_action with new enum type
md: factor out helpers for different sync_action in md_do_sync()
md: pass in max_sectors for pers->sync_request()
md/raid5: fix spares errors about rcu usage
drivers/md/md-cluster.h | 2 +
drivers/md/md.h | 204 ++++++++--
drivers/md/raid1.h | 1 +
drivers/md/dm-raid.c | 66 +++-
drivers/md/md-bitmap.c | 9 +-
drivers/md/md-cluster.c | 51 ++-
drivers/md/md.c | 858 +++++++++++++++++++++++-----------------
drivers/md/raid0.c | 24 +-
drivers/md/raid1-10.c | 69 ++++
drivers/md/raid1.c | 595 ++++++++++++++++------------
drivers/md/raid10.c | 120 ++----
drivers/md/raid5.c | 215 +++++-----
12 files changed, 1336 insertions(+), 878 deletions(-)
--
2.39.2
Fix CVE-2025-68803
Chuck Lever (1):
NFSD: NFSv4 file creation neglects setting ACL
Stephen Smalley (1):
nfsd: set security label during create operations
fs/nfsd/vfs.c | 2 +-
fs/nfsd/vfs.h | 9 +++++++++
2 files changed, 10 insertions(+), 1 deletion(-)
--
2.52.0
19 Jan '26
From: Ido Schimmel <idosch(a)nvidia.com>
stable inclusion
from stable-v6.6.120
commit 4a3c569005f42ab5e5b2ad637132a33bf102cc08
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13354
CVE: CVE-2025-68801
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 8b0e69763ef948fb872a7767df4be665d18f5fd4 ]
We sometimes observe use-after-free when dereferencing a neighbour [1].
The problem seems to be that the driver stores a pointer to the
neighbour, but without holding a reference on it. A reference is only
taken when the neighbour is used by a nexthop.
Fix by simplifying the reference counting scheme. Always take a
reference when storing a neighbour pointer in a neighbour entry. Avoid
taking a reference when the neighbour is used by a nexthop, as the
neighbour entry associated with the nexthop already holds a reference.
Tested by running the test that uncovered the problem over 300 times.
Without this patch the problem was reproduced after a handful of
iterations.
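The scheme after the fix, sketched from the hunks below: the reference lives
with the stored pointer, and nexthops rely on their neigh entry's reference
instead of taking their own.

    /* mlxsw_sp_neigh_entry_alloc(): take a reference for the cached pointer */
    neigh_hold(n);
    neigh_entry->key.n = n;

    /* mlxsw_sp_neigh_entry_free(): drop it when the entry goes away */
    neigh_release(neigh_entry->key.n);
    kfree(neigh_entry);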
[1]
BUG: KASAN: slab-use-after-free in mlxsw_sp_neigh_entry_update+0x2d4/0x310
Read of size 8 at addr ffff88817f8e3420 by task ip/3929
CPU: 3 UID: 0 PID: 3929 Comm: ip Not tainted 6.18.0-rc4-virtme-g36b21a067510 #3 PREEMPT(full)
Hardware name: Nvidia SN5600/VMOD0013, BIOS 5.13 05/31/2023
Call Trace:
<TASK>
dump_stack_lvl+0x6f/0xa0
print_address_description.constprop.0+0x6e/0x300
print_report+0xfc/0x1fb
kasan_report+0xe4/0x110
mlxsw_sp_neigh_entry_update+0x2d4/0x310
mlxsw_sp_router_rif_gone_sync+0x35f/0x510
mlxsw_sp_rif_destroy+0x1ea/0x730
mlxsw_sp_inetaddr_port_vlan_event+0xa1/0x1b0
__mlxsw_sp_inetaddr_lag_event+0xcc/0x130
__mlxsw_sp_inetaddr_event+0xf5/0x3c0
mlxsw_sp_router_netdevice_event+0x1015/0x1580
notifier_call_chain+0xcc/0x150
call_netdevice_notifiers_info+0x7e/0x100
__netdev_upper_dev_unlink+0x10b/0x210
netdev_upper_dev_unlink+0x79/0xa0
vrf_del_slave+0x18/0x50
do_set_master+0x146/0x7d0
do_setlink.isra.0+0x9a0/0x2880
rtnl_newlink+0x637/0xb20
rtnetlink_rcv_msg+0x6fe/0xb90
netlink_rcv_skb+0x123/0x380
netlink_unicast+0x4a3/0x770
netlink_sendmsg+0x75b/0xc90
__sock_sendmsg+0xbe/0x160
____sys_sendmsg+0x5b2/0x7d0
___sys_sendmsg+0xfd/0x180
__sys_sendmsg+0x124/0x1c0
do_syscall_64+0xbb/0xfd0
entry_SYSCALL_64_after_hwframe+0x4b/0x53
[...]
Allocated by task 109:
kasan_save_stack+0x30/0x50
kasan_save_track+0x14/0x30
__kasan_kmalloc+0x7b/0x90
__kmalloc_noprof+0x2c1/0x790
neigh_alloc+0x6af/0x8f0
___neigh_create+0x63/0xe90
mlxsw_sp_nexthop_neigh_init+0x430/0x7e0
mlxsw_sp_nexthop_type_init+0x212/0x960
mlxsw_sp_nexthop6_group_info_init.constprop.0+0x81f/0x1280
mlxsw_sp_nexthop6_group_get+0x392/0x6a0
mlxsw_sp_fib6_entry_create+0x46a/0xfd0
mlxsw_sp_router_fib6_replace+0x1ed/0x5f0
mlxsw_sp_router_fib6_event_work+0x10a/0x2a0
process_one_work+0xd57/0x1390
worker_thread+0x4d6/0xd40
kthread+0x355/0x5b0
ret_from_fork+0x1d4/0x270
ret_from_fork_asm+0x11/0x20
Freed by task 154:
kasan_save_stack+0x30/0x50
kasan_save_track+0x14/0x30
__kasan_save_free_info+0x3b/0x60
__kasan_slab_free+0x43/0x70
kmem_cache_free_bulk.part.0+0x1eb/0x5e0
kvfree_rcu_bulk+0x1f2/0x260
kfree_rcu_work+0x130/0x1b0
process_one_work+0xd57/0x1390
worker_thread+0x4d6/0xd40
kthread+0x355/0x5b0
ret_from_fork+0x1d4/0x270
ret_from_fork_asm+0x11/0x20
Last potentially related work creation:
kasan_save_stack+0x30/0x50
kasan_record_aux_stack+0x8c/0xa0
kvfree_call_rcu+0x93/0x5b0
mlxsw_sp_router_neigh_event_work+0x67d/0x860
process_one_work+0xd57/0x1390
worker_thread+0x4d6/0xd40
kthread+0x355/0x5b0
ret_from_fork+0x1d4/0x270
ret_from_fork_asm+0x11/0x20
Fixes: 6cf3c971dc84 ("mlxsw: spectrum_router: Add private neigh table")
Signed-off-by: Ido Schimmel <idosch(a)nvidia.com>
Reviewed-by: Petr Machata <petrm(a)nvidia.com>
Signed-off-by: Petr Machata <petrm(a)nvidia.com>
Reviewed-by: Simon Horman <horms(a)kernel.org>
Link: https://patch.msgid.link/92d75e21d95d163a41b5cea67a15cd33f547cba6.176469565…
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Cai Xinchen <caixinchen1(a)huawei.com>
---
.../ethernet/mellanox/mlxsw/spectrum_router.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 0534b10e29c5..2bf332838d2e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -2264,6 +2264,7 @@ mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
if (!neigh_entry)
return NULL;
+ neigh_hold(n);
neigh_entry->key.n = n;
neigh_entry->rif = rif;
INIT_LIST_HEAD(&neigh_entry->nexthop_list);
@@ -2273,6 +2274,7 @@ mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
+ neigh_release(neigh_entry->key.n);
kfree(neigh_entry);
}
@@ -4203,6 +4205,8 @@ mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_neigh_entry_insert;
+ neigh_release(old_n);
+
read_lock_bh(&n->lock);
nud_state = n->nud_state;
dead = n->dead;
@@ -4211,14 +4215,10 @@ mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
list_for_each_entry(nh, &neigh_entry->nexthop_list,
neigh_list_node) {
- neigh_release(old_n);
- neigh_clone(n);
__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
}
- neigh_release(n);
-
return 0;
err_neigh_entry_insert:
@@ -4311,6 +4311,11 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
}
}
+ /* Release the reference taken by neigh_lookup() / neigh_create() since
+ * neigh_entry already holds one.
+ */
+ neigh_release(n);
+
/* If that is the first nexthop connected to that neigh, add to
* nexthop_neighs_list
*/
@@ -4337,11 +4342,9 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
- struct neighbour *n;
if (!neigh_entry)
return;
- n = neigh_entry->key.n;
__mlxsw_sp_nexthop_neigh_update(nh, true);
list_del(&nh->neigh_list_node);
@@ -4355,8 +4358,6 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
-
- neigh_release(n);
}
static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
--
2.34.1
[PATCH OLK-6.6] mlxsw: spectrum_mr: Fix use-after-free when updating multicast route stats
by Cai Xinchen 19 Jan '26
19 Jan '26
From: Ido Schimmel <idosch(a)nvidia.com>
stable inclusion
from stable-v6.6.120
commit 5f2831fc593c2b2efbff7dd0dd7441cec76adcd5
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13353
CVE: CVE-2025-68800
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 8ac1dacec458f55f871f7153242ed6ab60373b90 ]
Cited commit added a dedicated mutex (instead of RTNL) to protect the
multicast route list, so that it will not change while the driver
periodically traverses it in order to update the kernel about multicast
route stats that were queried from the device.
One instance of list entry deletion (during route replace) was missed
and it can result in a use-after-free [1].
Fix by acquiring the mutex before deleting the entry from the list and
releasing it afterwards.
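With the fix, the replace path unlinks the route under the same mutex the
stats walker holds while traversing the list (sketch of the resulting code):

    mutex_lock(&mr_table->route_list_lock);
    list_del(&mr_orig_route->node);
    mutex_unlock(&mr_table->route_list_lock);
    mlxsw_sp_mr_route_destroy(mr_table, mr_orig_route);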
[1]
BUG: KASAN: slab-use-after-free in mlxsw_sp_mr_stats_update+0x4a5/0x540 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c:1006 [mlxsw_spectrum]
Read of size 8 at addr ffff8881523c2fa8 by task kworker/2:5/22043
CPU: 2 UID: 0 PID: 22043 Comm: kworker/2:5 Not tainted 6.18.0-rc1-custom-g1a3d6d7cd014 #1 PREEMPT(full)
Hardware name: Mellanox Technologies Ltd. MSN2010/SA002610, BIOS 5.6.5 08/24/2017
Workqueue: mlxsw_core mlxsw_sp_mr_stats_update [mlxsw_spectrum]
Call Trace:
<TASK>
dump_stack_lvl+0xba/0x110
print_report+0x174/0x4f5
kasan_report+0xdf/0x110
mlxsw_sp_mr_stats_update+0x4a5/0x540 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c:1006 [mlxsw_spectrum]
process_one_work+0x9cc/0x18e0
worker_thread+0x5df/0xe40
kthread+0x3b8/0x730
ret_from_fork+0x3e9/0x560
ret_from_fork_asm+0x1a/0x30
</TASK>
Allocated by task 29933:
kasan_save_stack+0x30/0x50
kasan_save_track+0x14/0x30
__kasan_kmalloc+0x8f/0xa0
mlxsw_sp_mr_route_add+0xd8/0x4770 [mlxsw_spectrum]
mlxsw_sp_router_fibmr_event_work+0x371/0xad0 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c:7965 [mlxsw_spectrum]
process_one_work+0x9cc/0x18e0
worker_thread+0x5df/0xe40
kthread+0x3b8/0x730
ret_from_fork+0x3e9/0x560
ret_from_fork_asm+0x1a/0x30
Freed by task 29933:
kasan_save_stack+0x30/0x50
kasan_save_track+0x14/0x30
__kasan_save_free_info+0x3b/0x70
__kasan_slab_free+0x43/0x70
kfree+0x14e/0x700
mlxsw_sp_mr_route_add+0x2dea/0x4770 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c:444 [mlxsw_spectrum]
mlxsw_sp_router_fibmr_event_work+0x371/0xad0 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c:7965 [mlxsw_spectrum]
process_one_work+0x9cc/0x18e0
worker_thread+0x5df/0xe40
kthread+0x3b8/0x730
ret_from_fork+0x3e9/0x560
ret_from_fork_asm+0x1a/0x30
Fixes: f38656d06725 ("mlxsw: spectrum_mr: Protect multicast route list with a lock")
Signed-off-by: Ido Schimmel <idosch(a)nvidia.com>
Reviewed-by: Petr Machata <petrm(a)nvidia.com>
Signed-off-by: Petr Machata <petrm(a)nvidia.com>
Reviewed-by: Simon Horman <horms(a)kernel.org>
Link: https://patch.msgid.link/f996feecfd59fde297964bfc85040b6d83ec6089.176469565…
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Cai Xinchen <caixinchen1(a)huawei.com>
---
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index 5afe6b155ef0..81935f87bfcd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -440,7 +440,9 @@ int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table,
rhashtable_remove_fast(&mr_table->route_ht,
&mr_orig_route->ht_node,
mlxsw_sp_mr_route_ht_params);
+ mutex_lock(&mr_table->route_list_lock);
list_del(&mr_orig_route->node);
+ mutex_unlock(&mr_table->route_list_lock);
mlxsw_sp_mr_route_destroy(mr_table, mr_orig_route);
}
--
2.34.1
*** BLURB HERE ***
Zhang Xiaoxu (1):
[Backport] RDMA/rxe: Fix NULL-ptr-deref in rxe_qp_do_cleanup() when
socket create failed
Zhu Yanjun (1):
[Backport] RDMA/rxe: Fix the error caused by qp->sk
drivers/infiniband/sw/rxe/rxe_qp.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
--
2.34.1
19 Jan '26
From: Dmitry Antipov <dmantipov(a)yandex.ru>
stable inclusion
from stable-v5.10.243
commit a97a9791e455bb0cd5e7a38b5abcb05523d4e21c
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/8428
CVE: CVE-2025-39864
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 26e84445f02ce6b2fe5f3e0e28ff7add77f35e08 ]
Following bss_free() quirk introduced in commit 776b3580178f
("cfg80211: track hidden SSID networks properly"), adjust
cfg80211_update_known_bss() to free the last beacon frame
elements only if they're not shared via the corresponding
'hidden_beacon_bss' pointer.
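Sketch of the corrected ownership check (from the hunk below): beacon
elements are freed only when this entry owns them, not when they are
borrowed from the hidden-SSID entry.

    f = rcu_access_pointer(new->pub.beacon_ies);
    if (!new->pub.hidden_beacon_bss)        /* shared with the hidden BSS? */
            kfree_rcu((struct cfg80211_bss_ies *)f, rcu_head);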
Reported-by: syzbot+30754ca335e6fb7e3092(a)syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=30754ca335e6fb7e3092
Fixes: 3ab8227d3e7d ("cfg80211: refactor cfg80211_bss_update")
Signed-off-by: Dmitry Antipov <dmantipov(a)yandex.ru>
Link: https://patch.msgid.link/20250813135236.799384-1-dmantipov@yandex.ru
Signed-off-by: Johannes Berg <johannes.berg(a)intel.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Zhang Changzhong <zhangchangzhong(a)huawei.com>
---
net/wireless/scan.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 76a27b6..a9bb676f 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1697,7 +1697,8 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
*/
f = rcu_access_pointer(new->pub.beacon_ies);
- kfree_rcu((struct cfg80211_bss_ies *)f, rcu_head);
+ if (!new->pub.hidden_beacon_bss)
+ kfree_rcu((struct cfg80211_bss_ies *)f, rcu_head);
return false;
}
--
2.9.5
[PATCH OLK-6.6] ptr_ring: do not block hard interrupts in ptr_ring_resize_multiple()
by Zhang Changzhong 19 Jan '26
19 Jan '26
From: Eric Dumazet <edumazet(a)google.com>
mainline inclusion
from mainline-v6.14-rc1
commit a126061c80d5efb4baef4bcf346094139cd81df6
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/135
CVE: CVE-2024-57994
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
-------------------------------------------------
Jakub added a lockdep_assert_no_hardirq() check in __page_pool_put_page()
to increase test coverage.
syzbot found a splat caused by hard irq blocking in
ptr_ring_resize_multiple() [1]
As current users of ptr_ring_resize_multiple() do not require
hard irqs to be masked, change it to only block BH.
Rename helpers to better reflect they are safe against BH only.
- ptr_ring_resize_multiple() to ptr_ring_resize_multiple_bh()
- skb_array_resize_multiple() to skb_array_resize_multiple_bh()
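The locking change inside the multi-ring resize then reduces to (sketch,
full hunk in ptr_ring.h below):

    for (i = 0; i < nrings; ++i) {
            spin_lock_bh(&rings[i]->consumer_lock); /* was spin_lock_irqsave() */
            spin_lock(&rings[i]->producer_lock);
            queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
                                              size, gfp, destroy);
            spin_unlock(&rings[i]->producer_lock);
            spin_unlock_bh(&rings[i]->consumer_lock); /* was ..._irqrestore() */
    }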
[1]
WARNING: CPU: 1 PID: 9150 at net/core/page_pool.c:709 __page_pool_put_page net/core/page_pool.c:709 [inline]
WARNING: CPU: 1 PID: 9150 at net/core/page_pool.c:709 page_pool_put_unrefed_netmem+0x157/0xa40 net/core/page_pool.c:780
Modules linked in:
CPU: 1 UID: 0 PID: 9150 Comm: syz.1.1052 Not tainted 6.11.0-rc3-syzkaller-00202-gf8669d7b5f5d #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 08/06/2024
RIP: 0010:__page_pool_put_page net/core/page_pool.c:709 [inline]
RIP: 0010:page_pool_put_unrefed_netmem+0x157/0xa40 net/core/page_pool.c:780
Code: 74 0e e8 7c aa fb f7 eb 43 e8 75 aa fb f7 eb 3c 65 8b 1d 38 a8 6a 76 31 ff 89 de e8 a3 ae fb f7 85 db 74 0b e8 5a aa fb f7 90 <0f> 0b 90 eb 1d 65 8b 1d 15 a8 6a 76 31 ff 89 de e8 84 ae fb f7 85
RSP: 0018:ffffc9000bda6b58 EFLAGS: 00010083
RAX: ffffffff8997e523 RBX: 0000000000000000 RCX: 0000000000040000
RDX: ffffc9000fbd0000 RSI: 0000000000001842 RDI: 0000000000001843
RBP: 0000000000000000 R08: ffffffff8997df2c R09: 1ffffd40003a000d
R10: dffffc0000000000 R11: fffff940003a000e R12: ffffea0001d00040
R13: ffff88802e8a4000 R14: dffffc0000000000 R15: 00000000ffffffff
FS: 00007fb7aaf716c0(0000) GS:ffff8880b9300000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007fa15a0d4b72 CR3: 00000000561b0000 CR4: 00000000003506f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
<TASK>
tun_ptr_free drivers/net/tun.c:617 [inline]
__ptr_ring_swap_queue include/linux/ptr_ring.h:571 [inline]
ptr_ring_resize_multiple_noprof include/linux/ptr_ring.h:643 [inline]
tun_queue_resize drivers/net/tun.c:3694 [inline]
tun_device_event+0xaaf/0x1080 drivers/net/tun.c:3714
notifier_call_chain+0x19f/0x3e0 kernel/notifier.c:93
call_netdevice_notifiers_extack net/core/dev.c:2032 [inline]
call_netdevice_notifiers net/core/dev.c:2046 [inline]
dev_change_tx_queue_len+0x158/0x2a0 net/core/dev.c:9024
do_setlink+0xff6/0x41f0 net/core/rtnetlink.c:2923
rtnl_setlink+0x40d/0x5a0 net/core/rtnetlink.c:3201
rtnetlink_rcv_msg+0x73f/0xcf0 net/core/rtnetlink.c:6647
netlink_rcv_skb+0x1e3/0x430 net/netlink/af_netlink.c:2550
Fixes: ff4e538c8c3e ("page_pool: add a lockdep check for recycling in hardirq")
Reported-by: syzbot+f56a5c5eac2b28439810(a)syzkaller.appspotmail.com
Closes: https://lore.kernel.org/netdev/671e10df.050a0220.2b8c0f.01cf.GAE@google.com…
Signed-off-by: Eric Dumazet <edumazet(a)google.com>
Acked-by: Michael S. Tsirkin <mst(a)redhat.com>
Acked-by: Jason Wang <jasowang(a)redhat.com>
Link: https://patch.msgid.link/20241217135121.326370-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Conflicts:
include/linux/ptr_ring.h
include/linux/skb_array.h
[Did not backport 2c321f3f70bc.]
Signed-off-by: Liu Jian <liujian56(a)huawei.com>
Signed-off-by: Zhang Changzhong <zhangchangzhong(a)huawei.com>
---
drivers/net/tap.c | 6 +++---
drivers/net/tun.c | 6 +++---
include/linux/ptr_ring.h | 17 ++++++++---------
include/linux/skb_array.h | 13 +++++++------
net/sched/sch_generic.c | 4 ++--
5 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index e7212a64..2c4f9d1 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -1330,9 +1330,9 @@ int tap_queue_resize(struct tap_dev *tap)
list_for_each_entry(q, &tap->queue_list, next)
rings[i++] = &q->ring;
- ret = ptr_ring_resize_multiple(rings, n,
- dev->tx_queue_len, GFP_KERNEL,
- __skb_array_destroy_skb);
+ ret = ptr_ring_resize_multiple_bh(rings, n,
+ dev->tx_queue_len, GFP_KERNEL,
+ __skb_array_destroy_skb);
kfree(rings);
return ret;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index c1fdf88..97dbec8d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -3682,9 +3682,9 @@ static int tun_queue_resize(struct tun_struct *tun)
list_for_each_entry(tfile, &tun->disabled, next)
rings[i++] = &tfile->tx_ring;
- ret = ptr_ring_resize_multiple(rings, n,
- dev->tx_queue_len, GFP_KERNEL,
- tun_ptr_free);
+ ret = ptr_ring_resize_multiple_bh(rings, n,
+ dev->tx_queue_len, GFP_KERNEL,
+ tun_ptr_free);
kfree(rings);
return ret;
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 808f9d3..65da2155 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -613,15 +613,14 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
/*
* Note: producer lock is nested within consumer lock, so if you
* resize you must make sure all uses nest correctly.
- * In particular if you consume ring in interrupt or BH context, you must
- * disable interrupts/BH when doing so.
+ * In particular if you consume ring in BH context, you must
+ * disable BH when doing so.
*/
-static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
- unsigned int nrings,
- int size,
- gfp_t gfp, void (*destroy)(void *))
+static inline int ptr_ring_resize_multiple_bh(struct ptr_ring **rings,
+ unsigned int nrings,
+ int size, gfp_t gfp,
+ void (*destroy)(void *))
{
- unsigned long flags;
void ***queues;
int i;
@@ -636,12 +635,12 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
}
for (i = 0; i < nrings; ++i) {
- spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
+ spin_lock_bh(&(rings[i])->consumer_lock);
spin_lock(&(rings[i])->producer_lock);
queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
size, gfp, destroy);
spin_unlock(&(rings[i])->producer_lock);
- spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
+ spin_unlock_bh(&(rings[i])->consumer_lock);
}
for (i = 0; i < nrings; ++i)
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index e2d45b7..4ef109a 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -198,14 +198,15 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
}
-static inline int skb_array_resize_multiple(struct skb_array **rings,
- int nrings, unsigned int size,
- gfp_t gfp)
+static inline int skb_array_resize_multiple_bh(struct skb_array **rings,
+ int nrings,
+ unsigned int size,
+ gfp_t gfp)
{
BUILD_BUG_ON(offsetof(struct skb_array, ring));
- return ptr_ring_resize_multiple((struct ptr_ring **)rings,
- nrings, size, gfp,
- __skb_array_destroy_skb);
+ return ptr_ring_resize_multiple_bh((struct ptr_ring **)rings,
+ nrings, size, gfp,
+ __skb_array_destroy_skb);
}
static inline void skb_array_cleanup(struct skb_array *a)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 9664a63..47cf61e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -907,8 +907,8 @@ static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
bands[prio] = q;
}
- return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
- GFP_KERNEL);
+ return skb_array_resize_multiple_bh(bands, PFIFO_FAST_BANDS, new_len,
+ GFP_KERNEL);
}
struct Qdisc_ops pfifo_fast_ops __read_mostly = {
--
2.9.5
19 Jan '26
*** BLURB HERE ***
Zhang Xiaoxu (1):
[Backport] RDMA/rxe: Fix NULL-ptr-deref in rxe_qp_do_cleanup() when
socket create failed
Zhu Yanjun (1):
[Backport] RDMA/rxe: Fix the error caused by qp->sk
drivers/infiniband/sw/rxe/rxe_qp.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
--
2.34.1
[PATCH OLK-6.6] NFS: Automounted filesystems should inherit ro,noexec,nodev,sync flags
by Wang Zhaolong 19 Jan '26
19 Jan '26
From: Trond Myklebust <trond.myklebust(a)hammerspace.com>
stable inclusion
from stable-v6.6.120
commit dce10c59211e5cd763a62ea01e79b82a629811e3
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13299
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 8675c69816e4276b979ff475ee5fac4688f80125 ]
When a filesystem is being automounted, it needs to preserve the
user-set superblock mount options, such as the "ro" flag.
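The inheritance itself is a three-step mask operation over the parent
superblock's flags (sketch, from the namespace.c hunk below):

    unsigned long s_flags = path->dentry->d_sb->s_flags;

    fc->sb_flags_mask |= NFS_SB_MASK;       /* these bits are ours to set */
    fc->sb_flags &= ~NFS_SB_MASK;           /* drop the defaults */
    fc->sb_flags |= s_flags & NFS_SB_MASK;  /* copy ro,noexec,nodev,sync */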
Reported-by: Li Lingfeng <lilingfeng3(a)huawei.com>
Link: https://lore.kernel.org/all/20240604112636.236517-3-lilingfeng@huaweicloud.…
Fixes: f2aedb713c28 ("NFS: Add fs_context support.")
Signed-off-by: Trond Myklebust <trond.myklebust(a)hammerspace.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Wang Zhaolong <wangzhaolong(a)huaweicloud.com>
---
fs/nfs/namespace.c | 6 ++++++
fs/nfs/super.c | 4 ----
2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index e7494cdd957e..40d7163bca87 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -147,10 +147,11 @@ struct vfsmount *nfs_d_automount(struct path *path)
struct nfs_fs_context *ctx;
struct fs_context *fc;
struct vfsmount *mnt = ERR_PTR(-ENOMEM);
struct nfs_server *server = NFS_SB(path->dentry->d_sb);
struct nfs_client *client = server->nfs_client;
+ unsigned long s_flags = path->dentry->d_sb->s_flags;
int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout);
int ret;
if (IS_ROOT(path->dentry))
return ERR_PTR(-ESTALE);
@@ -172,10 +173,15 @@ struct vfsmount *nfs_d_automount(struct path *path)
if (fc->net_ns != client->cl_net) {
put_net(fc->net_ns);
fc->net_ns = get_net(client->cl_net);
}
+ /* Inherit the flags covered by NFS_SB_MASK */
+ fc->sb_flags_mask |= NFS_SB_MASK;
+ fc->sb_flags &= ~NFS_SB_MASK;
+ fc->sb_flags |= s_flags & NFS_SB_MASK;
+
/* for submounts we want the same server; referrals will reassign */
memcpy(&ctx->nfs_server._address, &client->cl_addr, client->cl_addrlen);
ctx->nfs_server.addrlen = client->cl_addrlen;
ctx->nfs_server.port = server->port;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 1c2969cb907e..9fa3d17981bd 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1318,14 +1318,10 @@ int nfs_get_tree_common(struct fs_context *fc)
/* -o noac implies -o sync */
if (server->flags & NFS_MOUNT_NOAC)
fc->sb_flags |= SB_SYNCHRONOUS;
- if (ctx->clone_data.sb)
- if (ctx->clone_data.sb->s_flags & SB_SYNCHRONOUS)
- fc->sb_flags |= SB_SYNCHRONOUS;
-
/* Get a superblock - note that we may end up sharing one that already exists */
fc->s_fs_info = server;
s = sget_fc(fc, compare_super, nfs_set_super);
fc->s_fs_info = NULL;
if (IS_ERR(s)) {
--
2.34.3
From: 岳智超 <yuezhichao1(a)h-partners.com>
driver inclusion
category: feature
bugzilla: https://atomgit.com/openeuler/kernel/issues/8291
CVE: NA
--------------------------------
Add threaded irq support for io queues
Add stream detection
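The threaded-irq path relies on pci_request_irq() accepting both a hard
handler and a thread function; registration then looks like (sketch of the
hiraid_create_queue() hunk below):

    if (threaded_irq)
            /* hiraid_io_irq() drains up to q_depth/4 CQEs in hard-irq
             * context, then disables the vector and returns
             * IRQ_WAKE_THREAD; hiraid_io_poll() finishes draining in
             * thread context and re-enables the vector */
            ret = pci_request_irq(hdev->pdev, cq_vector, hiraid_io_irq,
                                  hiraid_io_poll, hiraidq, "hiraid%d_q%d",
                                  hdev->instance, qid);
    else
            ret = pci_request_irq(hdev->pdev, cq_vector, hiraid_handle_irq,
                                  NULL, hiraidq, "hiraid%d_q%d",
                                  hdev->instance, qid);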
Signed-off-by: 岳智超 <yuezhichao1(a)h-partners.com>
---
drivers/scsi/hisi_raid/hiraid.h | 34 ++
drivers/scsi/hisi_raid/hiraid_main.c | 586 ++++++++++++++++++++++++++-
2 files changed, 603 insertions(+), 17 deletions(-)
diff --git a/drivers/scsi/hisi_raid/hiraid.h b/drivers/scsi/hisi_raid/hiraid.h
index 1ebc3dd..d107951 100644
--- a/drivers/scsi/hisi_raid/hiraid.h
+++ b/drivers/scsi/hisi_raid/hiraid.h
@@ -683,6 +683,7 @@ struct hiraid_queue {
atomic_t inflight;
void *sense_buffer_virt;
dma_addr_t sense_buffer_phy;
+ s32 pci_irq;
struct dma_pool *prp_small_pool;
};
@@ -756,5 +757,38 @@ struct hiraid_sdev_hostdata {
u16 pend_count;
};
+enum stream_type {
+ TYPE_TOTAL,
+ TYPE_WRITE,
+ TYPE_READ,
+ TYPE_CLEAN,
+ TYPE_BOTTOM
+};
+
+struct HIRAID_STREAM_S {
+ /* recog-window */
+ u64 stream_lba;
+ u32 stream_len;
+ u16 vd_id;
+ u16 type;
+ /* aging ctrl */
+ int aging_credit;
+ int aging_grade;
+ u16 stream_id;
+ u16 using;
+};
+
+struct IO_LIST_S {
+ struct list_head list;
+ struct hiraid_scsi_io_cmd io_cmd;
+ struct hiraid_queue *submit_queue;
+ unsigned int sector_size;
+};
+
+struct spinlock_list_head_s {
+ struct list_head list;
+ spinlock_t lock;
+};
+
#endif
diff --git a/drivers/scsi/hisi_raid/hiraid_main.c b/drivers/scsi/hisi_raid/hiraid_main.c
index f84182f..281fe79 100644
--- a/drivers/scsi/hisi_raid/hiraid_main.c
+++ b/drivers/scsi/hisi_raid/hiraid_main.c
@@ -35,6 +35,10 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_dbg.h>
#include <scsi/sg.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/sched/prio.h>
#include "hiraid.h"
@@ -107,6 +111,13 @@ static u32 log_debug_switch;
module_param(log_debug_switch, uint, 0644);
MODULE_PARM_DESC(log_debug_switch, "set log state, default zero for switch off");
+static bool threaded_irq = false;
+module_param(threaded_irq, bool, 0444);
+MODULE_PARM_DESC(threaded_irq, "use threaded irq for io queue, default off");
+
+static u32 poll_delay_min = 9;
+static u32 poll_delay_max = 19;
+
static int extra_pool_num_set(const char *val, const struct kernel_param *kp)
{
u8 n = 0;
@@ -153,7 +164,7 @@ static struct workqueue_struct *work_queue;
__func__, ##__VA_ARGS__); \
} while (0)
-#define HIRAID_DRV_VERSION "1.1.0.0"
+#define HIRAID_DRV_VERSION "1.1.0.1"
#define ADMIN_TIMEOUT (admin_tmout * HZ)
#define USRCMD_TIMEOUT (180 * HZ)
@@ -170,6 +181,15 @@ static struct workqueue_struct *work_queue;
#define MAX_CAN_QUEUE (4096 - 1)
#define MIN_CAN_QUEUE (1024 - 1)
+#define MAX_DECREASE_GRADE (-8)
+#define MAX_INCREASE_GRADE 8
+#define INC_GRADE 1
+#define MIN_CREDIT 0
+#define MAX_CREDIT 64
+#define CREDIT_THRES 32
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+
enum SENSE_STATE_CODE {
SENSE_STATE_OK = 0,
SENSE_STATE_NEED_CHECK,
@@ -765,6 +785,404 @@ static int hiraid_build_sgl(struct hiraid_dev *hdev,
return 0;
}
+#define MAX_PD_NUM (40 + 1)
+#define MAX_STREAM_NUM 8
+#define PER_MB (1024 * 1024)
+#define MAX_IO_NUM (200 * PER_MB)
+#define STREAM_LEN (4 * PER_MB)
+#define MAX_IO_NUM_ONCE 128
+#define IO_SUBMIT_TIME_OUT 100
+#define MAX_AGING_NUM 130
+
+#define MIN_IO_SEND_TIME 10
+#define MAX_IO_SEND_TIME 50
+
+#define MIN_WAIT_IO_SEND_TIME 10
+#define MAX_WAIT_IO_SEND_TIME 20
+
+enum io_operation_type {
+ TYPE_DELETE_SINGLE_IO = 1,
+ TYPE_DELETE_SINGLE_IO_LIST,
+ TYPE_DELETE_ALL_IO_LIST
+};
+
+struct HIRAID_STREAM_S stream_array[MAX_PD_NUM][MAX_STREAM_NUM] = {0};
+struct spinlock_list_head_s io_heads_per_stream[MAX_PD_NUM * MAX_STREAM_NUM];
+spinlock_t stream_array_lock;
+
+u64 g_io_transport_num[MAX_PD_NUM][MAX_STREAM_NUM] = {0};
+u16 g_io_stream_num[MAX_PD_NUM][TYPE_BOTTOM] = {0};
+u16 g_io_count = 1;
+
+void hiraid_inc_io_transport_num(u16 disk_id, u16 streamd_id, u16 nlb)
+{
+ g_io_transport_num[disk_id][streamd_id] += nlb;
+}
+
+void hiraid_refresh_io_transport_num(u16 disk_id, u16 streamd_id)
+{
+ g_io_transport_num[disk_id][streamd_id] = 0;
+}
+
+void hiraid_inc_stream_num(u16 disk_id)
+{
+ spin_lock(&stream_array_lock);
+ g_io_stream_num[disk_id][TYPE_TOTAL]++;
+ spin_unlock(&stream_array_lock);
+}
+
+void hiraid_dec_stream_num(u16 disk_id)
+{
+ spin_lock(&stream_array_lock);
+ if (g_io_stream_num[disk_id][TYPE_TOTAL] > 0)
+ g_io_stream_num[disk_id][TYPE_TOTAL]--;
+ spin_unlock(&stream_array_lock);
+}
+
+static bool hiraid_io_recog_check_stream_exceed(u16 disk_id)
+{
+ bool exceed_flag;
+
+ spin_lock(&stream_array_lock);
+ exceed_flag = (g_io_stream_num[disk_id][TYPE_TOTAL] >= MAX_STREAM_NUM);
+ spin_unlock(&stream_array_lock);
+ return exceed_flag;
+}
+
+static u16 hiraid_get_stream_num(u16 disk_id)
+{
+ return g_io_stream_num[disk_id][TYPE_TOTAL];
+}
+
+static inline struct HIRAID_STREAM_S *hiraid_get_stream(u16 disk_id,
+ u16 stream_id)
+{
+ return &stream_array[disk_id][stream_id];
+}
+
+static inline struct spinlock_list_head_s *hiraid_get_io_head(u16 disk_id)
+{
+ return &(io_heads_per_stream[disk_id]);
+}
+
+static bool hiraid_recognition_acknowledge(const struct HIRAID_STREAM_S *stream)
+{
+ return (stream->aging_credit >= CREDIT_THRES) ? true : false;
+}
+
+void hiraid_io_recognition_init(void)
+{
+ u16 i;
+
+ spin_lock_init(&stream_array_lock);
+ for (i = 0; i < (MAX_PD_NUM * MAX_STREAM_NUM); i++) {
+ INIT_LIST_HEAD(&hiraid_get_io_head(i)->list);
+ spin_lock_init(&hiraid_get_io_head(i)->lock);
+ }
+}
+
+static void hiraid_io_recognition_iterator(struct HIRAID_STREAM_S *stream,
+ int direction)
+{
+ stream->aging_grade = stream->aging_grade + direction * INC_GRADE;
+ stream->aging_grade = MAX(stream->aging_grade, MAX_DECREASE_GRADE);
+ stream->aging_grade = MIN(stream->aging_grade, MAX_INCREASE_GRADE);
+ stream->aging_credit = stream->aging_credit + stream->aging_grade;
+ stream->aging_credit = MAX(stream->aging_credit, MIN_CREDIT);
+ stream->aging_credit = MIN(stream->aging_credit, MAX_CREDIT);
+}
+
+struct HIRAID_STREAM_S *hiraid_io_pick_stream(
+ struct hiraid_scsi_rw_cmd *req, u16 type)
+{
+ struct HIRAID_STREAM_S *first_hit_stream = NULL;
+ struct HIRAID_STREAM_S *temp_stream = NULL;
+ u16 pick_flag = 0;
+ u8 i;
+
+ for (i = 0; i < MAX_STREAM_NUM; i++) {
+ temp_stream = &stream_array[req->hdid][i];
+ temp_stream->stream_id = i;
+ if (req->slba < temp_stream->stream_lba ||
+ req->slba >= temp_stream->stream_lba +
+ temp_stream->stream_len ||
+ temp_stream->type != type) {
+ continue;
+ }
+ if (!pick_flag) {
+ temp_stream->stream_lba = req->slba;
+ first_hit_stream = temp_stream;
+ pick_flag = 1;
+ continue;
+ }
+ hiraid_dec_stream_num(req->hdid);
+ memset(temp_stream, 0,
+ sizeof(struct HIRAID_STREAM_S)); // clear the shadowed duplicate stream
+ }
+ return first_hit_stream;
+}
+
+static struct HIRAID_STREAM_S *hiraid_init_flow_stream(struct hiraid_scsi_rw_cmd *req,
+ u16 type)
+{
+ int i;
+ struct HIRAID_STREAM_S *stream = NULL;
+
+ for (i = 0; i < MAX_STREAM_NUM; i++) {
+ stream = hiraid_get_stream(req->hdid, i);
+ if (!stream->using) {
+ stream->using = 1;
+ stream->stream_id = i;
+ break;
+ }
+ }
+ stream->stream_lba = req->slba;
+ stream->vd_id = req->hdid;
+ stream->type = type;
+ stream->aging_credit = 0;
+ stream->aging_grade = 0;
+ stream->stream_len = STREAM_LEN;
+ return stream;
+}
+
+static struct HIRAID_STREAM_S *hiraid_stream_detect(struct hiraid_dev *hdev,
+ struct hiraid_scsi_rw_cmd *io_cmd)
+{
+ u16 type = io_cmd->opcode == HIRAID_CMD_WRITE ? TYPE_WRITE : TYPE_READ;
+ struct HIRAID_STREAM_S *stream = hiraid_io_pick_stream(io_cmd, type);
+
+ if (stream != NULL) { /* an existing stream was hit */
+ return stream;
+ }
+
+ if (hiraid_io_recog_check_stream_exceed(io_cmd->hdid))
+ return NULL;
+ stream = hiraid_init_flow_stream(io_cmd, type);
+ hiraid_inc_stream_num(io_cmd->hdid);
+ return stream;
+}
+
+u64 g_io_last_pull_time[MAX_PD_NUM] = {0};
+
+static u16 hiraid_get_submit_io_stream(u16 did, struct hiraid_dev *hdev)
+{
+ u64 temp_num, i;
+ static u16 stream_num[MAX_PD_NUM] = {0};
+
+ if (g_io_last_pull_time[did] == 0)
+ g_io_last_pull_time[did] = jiffies_to_msecs(jiffies);
+
+ for (i = 0; i < MAX_STREAM_NUM; i++) {
+ temp_num = g_io_transport_num[did][i];
+ if (temp_num != 0) {
+ if ((temp_num < MAX_IO_NUM) &&
+ ((jiffies_to_msecs(jiffies) - g_io_last_pull_time[did])
+ < IO_SUBMIT_TIME_OUT)) {
+ stream_num[did] = i;
+ return i;
+ }
+ g_io_last_pull_time[did] = jiffies_to_msecs(jiffies);
+ hiraid_refresh_io_transport_num(did, i);
+ stream_num[did] = ((i+1) % MAX_STREAM_NUM);
+ return ((i+1) % MAX_STREAM_NUM);
+ }
+ }
+ g_io_last_pull_time[did] = jiffies_to_msecs(jiffies);
+ return ((stream_num[did]++) % MAX_STREAM_NUM);
+}
+
+static void hiraid_submit_io_stream(u16 hdid, struct hiraid_dev *hdev)
+{
+ struct spinlock_list_head_s *io_slist = NULL;
+ struct list_head *node = NULL;
+ struct list_head *next_node = NULL;
+ struct hiraid_scsi_io_cmd io_cmd = {0};
+ struct hiraid_queue *submit_queue = NULL;
+ unsigned int sector_size = 0;
+ u16 submit_stream_id = hiraid_get_submit_io_stream(hdid, hdev);
+
+ struct IO_LIST_S *temp_io_stream = NULL;
+ u16 count = 0;
+
+ io_slist = hiraid_get_io_head(hdid * MAX_STREAM_NUM + submit_stream_id);
+ spin_lock(&io_slist->lock);
+ list_for_each_safe(node, next_node, &io_slist->list) {
+ temp_io_stream = list_entry(node, struct IO_LIST_S, list);
+ list_del_init(node);
+ io_cmd = temp_io_stream->io_cmd;
+ submit_queue = temp_io_stream->submit_queue;
+ sector_size = temp_io_stream->sector_size;
+ kfree(temp_io_stream);
+ temp_io_stream = NULL;
+ spin_unlock(&io_slist->lock);
+ hiraid_submit_cmd(submit_queue, &io_cmd);
+ hiraid_inc_io_transport_num(hdid,
+ submit_stream_id, io_cmd.rw.nlb * sector_size);
+ // cap one pass at MAX_IO_NUM_ONCE to avoid submitting for a single disk only
+ if (++count >= MAX_IO_NUM_ONCE) {
+ spin_lock(&io_slist->lock);
+ break;
+ }
+ spin_lock(&io_slist->lock);
+ }
+ spin_unlock(&io_slist->lock);
+}
+
+static u8 hiraid_detect_if_aging(void)
+{
+ if (++g_io_count == MAX_AGING_NUM) {
+ g_io_count = 0;
+ return 1;
+ }
+ return 0;
+}
+
+static void hiraid_aging(struct hiraid_dev *hdev)
+{
+ struct HIRAID_STREAM_S *temp_stream = NULL;
+ int i = 0;
+ int j = 0;
+
+ for (i = 1; i < MAX_PD_NUM; i++) {
+ for (j = 0; j < MAX_STREAM_NUM; j++) {
+ temp_stream = hiraid_get_stream(i, j);
+ if (temp_stream->using) {
+ hiraid_io_recognition_iterator(temp_stream, -1);
+ if (temp_stream->aging_credit <= 0) {
+ hiraid_dec_stream_num(i);
+ memset(temp_stream,
+ 0, sizeof(struct HIRAID_STREAM_S)); // aged out
+ }
+ }
+ }
+ }
+}
+
+static u8 hiraid_io_list_operation(u32 hdid, u16 cid, u16 hwq, u8 operation)
+{
+ int i, j;
+
+ struct spinlock_list_head_s *io_slist = NULL;
+ struct list_head *node = NULL;
+ struct list_head *next_node = NULL;
+ struct hiraid_scsi_io_cmd *io_cmd = NULL;
+ struct hiraid_queue *hiraidq = NULL;
+ struct IO_LIST_S *temp_io_stream = NULL;
+
+ u8 max_hd_num = operation == TYPE_DELETE_ALL_IO_LIST ?
+ MAX_PD_NUM : hdid + 1;
+ for (i = hdid; i < max_hd_num; i++) {
+ for (j = 0; j < MAX_STREAM_NUM; j++) {
+ io_slist = hiraid_get_io_head(i * MAX_STREAM_NUM + j);
+ spin_lock(&io_slist->lock);
+ list_for_each_safe(node, next_node, &io_slist->list) {
+ temp_io_stream = list_entry(node,
+ struct IO_LIST_S, list);
+ io_cmd = &(temp_io_stream->io_cmd);
+ hiraidq = temp_io_stream->submit_queue;
+ if (operation >= TYPE_DELETE_SINGLE_IO_LIST) {
+ list_del_init(node);
+ kfree(temp_io_stream);
+ temp_io_stream = NULL;
+ } else {
+ if ((io_cmd->rw.cmd_id == cid) &&
+ (hiraidq->qid == hwq)) {
+ list_del_init(node);
+ spin_unlock(&io_slist->lock);
+ kfree(temp_io_stream);
+ return 1;
+ }
+ }
+ }
+ spin_unlock(&io_slist->lock);
+ }
+ }
+ return 0;
+}
+
+static u8 hiraid_check_io_list(u32 hdid, u16 cid, u16 hwq)
+{
+ return hiraid_io_list_operation(hdid, cid, hwq, TYPE_DELETE_SINGLE_IO);
+}
+
+static u8 hiraid_delete_single_pd_io_list(u32 hdid)
+{
+ return hiraid_io_list_operation(hdid, 0, 0, TYPE_DELETE_SINGLE_IO_LIST);
+}
+
+static u8 hiraid_delete_all_io_list(void)
+{
+ return hiraid_io_list_operation(0, 0, 0, TYPE_DELETE_ALL_IO_LIST);
+}
+
+static void hiraid_wait_for_io_submit(struct hiraid_dev *hdev)
+{
+ struct spinlock_list_head_s *io_slist = NULL;
+ int i = 0;
+ int io_flush_finished;
+
+ do {
+ io_flush_finished = 1;
+ for (i = 0; i < (MAX_PD_NUM * MAX_STREAM_NUM); i++) {
+ io_slist = hiraid_get_io_head(i);
+ if (!list_empty(&io_slist->list)) {
+ io_flush_finished = 0;
+ break;
+ }
+ }
+ usleep_range(MIN_WAIT_IO_SEND_TIME, MAX_WAIT_IO_SEND_TIME);
+ } while (!io_flush_finished);
+}
+
+static u8 hiraid_add_io_to_list(struct hiraid_queue *submit_queue,
+ struct HIRAID_STREAM_S *tmp_stream, struct hiraid_scsi_io_cmd io_cmd,
+ unsigned int sector_size)
+{
+ struct spinlock_list_head_s *io_slist = NULL;
+ struct IO_LIST_S *new_io_node = NULL;
+
+ new_io_node = kmalloc(sizeof(struct IO_LIST_S), GFP_KERNEL);
+ if (!new_io_node)
+ return 0;
+ new_io_node->io_cmd = io_cmd;
+ new_io_node->submit_queue = submit_queue;
+ new_io_node->sector_size = sector_size;
+ io_slist = hiraid_get_io_head(io_cmd.rw.hdid *
+ MAX_STREAM_NUM + tmp_stream->stream_id);
+ spin_lock(&io_slist->lock);
+ INIT_LIST_HEAD(&(new_io_node->list));
+ list_add_tail(&(new_io_node->list), &io_slist->list);
+ spin_unlock(&io_slist->lock);
+ return 1;
+}
+
+static void hiraid_submit_io_threading(struct hiraid_dev *hdev)
+{
+ int i = 1;
+
+ while (!kthread_should_stop()) {
+ for (i = 1; i < MAX_PD_NUM; i++)
+ hiraid_submit_io_stream(i, hdev);
+ usleep_range(MIN_IO_SEND_TIME, MAX_IO_SEND_TIME);
+ }
+}
+
+static void hiraid_destroy_io_stream_resource(struct hiraid_dev *hdev)
+{
+ u16 i;
+
+ for (i = 0; i < (MAX_PD_NUM * MAX_STREAM_NUM); i++)
+ list_del_init(&hiraid_get_io_head(i)->list);
+}
+
+struct task_struct *g_hiraid_submit_task;
+static void hiraid_init_io_stream(struct hiraid_dev *hdev)
+{
+ hiraid_io_recognition_init();
+ g_hiraid_submit_task = kthread_run((void *)hiraid_submit_io_threading,
+ hdev, "hiraid_submit_thread");
+}
+
#define HIRAID_RW_FUA BIT(14)
static int hiraid_setup_rw_cmd(struct hiraid_dev *hdev,
@@ -866,6 +1284,30 @@ static int hiraid_setup_nonrw_cmd(struct hiraid_dev *hdev,
return 0;
}
+static bool hiraid_disk_is_hdd(u8 attr)
+{
+ switch (HIRAID_DEV_DISK_TYPE(attr)) {
+ case HIRAID_SAS_HDD_VD:
+ case HIRAID_SATA_HDD_VD:
+ case HIRAID_SAS_HDD_PD:
+ case HIRAID_SATA_HDD_PD:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool hiraid_disk_is_hdd_rawdrive(u8 attr)
+{
+ switch (HIRAID_DEV_DISK_TYPE(attr)) {
+ case HIRAID_SAS_HDD_PD:
+ case HIRAID_SATA_HDD_PD:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int hiraid_setup_io_cmd(struct hiraid_dev *hdev,
struct hiraid_scsi_io_cmd *io_cmd,
struct scsi_cmnd *scmd)
@@ -1023,6 +1465,7 @@ static int hiraid_queue_command(struct Scsi_Host *shost,
struct hiraid_sdev_hostdata *hostdata;
struct hiraid_scsi_io_cmd io_cmd;
struct hiraid_queue *ioq;
+ struct HIRAID_STREAM_S *tmp_stm = NULL;
u16 hwq, cid;
int ret;
@@ -1085,6 +1528,23 @@ static int hiraid_queue_command(struct Scsi_Host *shost,
}
WRITE_ONCE(mapbuf->state, CMD_FLIGHT);
+
+ if (hiraid_is_rw_scmd(scmd) &&
+ hiraid_disk_is_hdd_rawdrive(hostdata->attr)) {
+ if (hiraid_detect_if_aging())
+ hiraid_aging(hdev);
+ tmp_stm = hiraid_stream_detect(hdev, &(io_cmd.rw));
+ if (tmp_stm != NULL) {
+ hiraid_io_recognition_iterator(tmp_stm, 1);
+ if (hiraid_recognition_acknowledge(tmp_stm) &&
+ (hiraid_get_stream_num(io_cmd.rw.hdid) > 1)) {
+ if (hiraid_add_io_to_list(ioq,
+ tmp_stm, io_cmd, sdev->sector_size)) {
+ return 0;
+ }
+ }
+ }
+ }
hiraid_submit_cmd(ioq, &io_cmd);
return 0;
@@ -1135,19 +1595,6 @@ static int hiraid_disk_qd(u8 attr)
}
}
-static bool hiraid_disk_is_hdd(u8 attr)
-{
- switch (HIRAID_DEV_DISK_TYPE(attr)) {
- case HIRAID_SAS_HDD_VD:
- case HIRAID_SATA_HDD_VD:
- case HIRAID_SAS_HDD_PD:
- case HIRAID_SATA_HDD_PD:
- return true;
- default:
- return false;
- }
-}
-
static int hiraid_slave_alloc(struct scsi_device *sdev)
{
struct hiraid_sdev_hostdata *hostdata;
@@ -1305,6 +1752,7 @@ static int hiraid_alloc_queue(struct hiraid_dev *hdev, u16 qid, u16 depth)
hiraidq->q_depth = depth;
hiraidq->qid = qid;
hiraidq->cq_vector = -1;
+ hiraidq->pci_irq = -1;
hdev->queue_count++;
return 0;
@@ -1593,12 +2041,38 @@ static inline bool hiraid_process_cq(struct hiraid_queue *hiraidq,
u16 *start, u16 *end, int tag)
{
bool found = false;
+ *start = hiraidq->cq_head;
+ while (!found && hiraid_cqe_pending(hiraidq)) {
+ if (le16_to_cpu(hiraidq->cqes[hiraidq->cq_head].cmd_id) == tag)
+ found = true;
+ hiraid_update_cq_head(hiraidq);
+ }
+ *end = hiraidq->cq_head;
+
+ if (*start != *end)
+ writel(hiraidq->cq_head,
+ hiraidq->q_db + hiraidq->hdev->db_stride);
+
+ return found;
+}
+static inline bool hiraid_process_cq_for_thread(struct hiraid_queue *hiraidq,
+ u16 *start, u16 *end,
+ u8 *wakeup_thread_flag, int tag)
+{
+ bool found = false;
+ u16 max_io_num = hiraidq->q_depth / 4;
+ u16 io_count = 0;
*start = hiraidq->cq_head;
while (!found && hiraid_cqe_pending(hiraidq)) {
if (le16_to_cpu(hiraidq->cqes[hiraidq->cq_head].cmd_id) == tag)
found = true;
hiraid_update_cq_head(hiraidq);
+
+ if (++io_count >= max_io_num) {
+ *wakeup_thread_flag = 1;
+ break;
+ }
}
*end = hiraidq->cq_head;
@@ -1646,6 +2120,55 @@ static irqreturn_t hiraid_handle_irq(int irq, void *data)
return ret;
}
+static irqreturn_t hiraid_io_poll(int irq, void *data)
+{
+ struct hiraid_queue *hiraidq = data;
+ irqreturn_t ret = IRQ_NONE;
+ u16 start, end;
+
+ do {
+ spin_lock(&hiraidq->cq_lock);
+ hiraid_process_cq(hiraidq, &start, &end, -1);
+ hiraidq->last_cq_head = hiraidq->cq_head;
+ spin_unlock(&hiraidq->cq_lock);
+
+ if (start != end) {
+ hiraid_complete_cqes(hiraidq, start, end);
+ ret = IRQ_HANDLED;
+ }
+ usleep_range(poll_delay_min, poll_delay_max);
+ } while (start != end);
+ enable_irq(hiraidq->pci_irq);
+ return ret;
+}
+
+static irqreturn_t hiraid_io_irq(int irq, void *data)
+{
+ struct hiraid_queue *hiraidq = data;
+ irqreturn_t ret = IRQ_NONE;
+ u16 start, end;
+ u8 wakeup_thread_flag = 0;
+
+ spin_lock(&hiraidq->cq_lock);
+ if (hiraidq->cq_head != hiraidq->last_cq_head)
+ ret = IRQ_HANDLED;
+
+ hiraid_process_cq_for_thread(hiraidq, &start,
+ &end, &wakeup_thread_flag, -1);
+ hiraidq->last_cq_head = hiraidq->cq_head;
+ spin_unlock(&hiraidq->cq_lock);
+
+ if (start != end) {
+ hiraid_complete_cqes(hiraidq, start, end);
+ ret = IRQ_HANDLED;
+ }
+ if (wakeup_thread_flag) {
+ disable_irq_nosync(hiraidq->pci_irq);
+ ret = IRQ_WAKE_THREAD;
+ }
+ return ret;
+}
+
static int hiraid_setup_admin_queue(struct hiraid_dev *hdev)
{
struct hiraid_queue *adminq = &hdev->queues[0];
@@ -1681,9 +2204,11 @@ static int hiraid_setup_admin_queue(struct hiraid_dev *hdev)
NULL, adminq, "hiraid%d_q%d", hdev->instance, adminq->qid);
if (ret) {
adminq->cq_vector = -1;
+ adminq->pci_irq = -1;
return ret;
}
+ adminq->pci_irq = pci_irq_vector(hdev->pdev, adminq->cq_vector);
hiraid_init_queue(adminq, 0);
dev_info(hdev->dev, "setup admin queue success, queuecount[%d] online[%d] pagesize[%d]\n",
@@ -1958,14 +2483,23 @@ static int hiraid_create_queue(struct hiraid_queue *hiraidq, u16 qid)
goto delete_cq;
hiraidq->cq_vector = cq_vector;
- ret = pci_request_irq(hdev->pdev, cq_vector, hiraid_handle_irq, NULL,
- hiraidq, "hiraid%d_q%d", hdev->instance, qid);
+ if (threaded_irq)
+ ret = pci_request_irq(hdev->pdev, cq_vector, hiraid_io_irq,
+ hiraid_io_poll, hiraidq, "hiraid%d_q%d",
+ hdev->instance, qid);
+ else
+ ret = pci_request_irq(hdev->pdev, cq_vector, hiraid_handle_irq,
+ NULL, hiraidq, "hiraid%d_q%d",
+ hdev->instance, qid);
+
if (ret) {
hiraidq->cq_vector = -1;
+ hiraidq->pci_irq = -1;
dev_err(hdev->dev, "request queue[%d] irq failed\n", qid);
goto delete_sq;
}
+ hiraidq->pci_irq = pci_irq_vector(hdev->pdev, hiraidq->cq_vector);
hiraid_init_queue(hiraidq, qid);
return 0;
@@ -2122,10 +2656,11 @@ static int hiraid_setup_io_queues(struct hiraid_dev *hdev)
adminq, "hiraid%d_q%d", hdev->instance, adminq->qid);
if (ret) {
dev_err(hdev->dev, "request admin irq failed\n");
+ adminq->pci_irq = -1;
adminq->cq_vector = -1;
return ret;
}
-
+ adminq->pci_irq = pci_irq_vector(hdev->pdev, adminq->cq_vector);
hdev->online_queues++;
for (i = hdev->queue_count; i <= hdev->max_qid; i++) {
@@ -3304,6 +3839,12 @@ static int hiraid_abort(struct scsi_cmnd *scmd)
cid = mapbuf->cid;
hwq = mapbuf->hiraidq->qid;
+ if (hiraid_check_io_list(hostdata->hdid, cid, hwq)) {
+ dev_warn(hdev->dev, "find cid[%d] qid[%d] in host, abort succ\n",
+ cid, hwq);
+ return SUCCESS;
+ }
+
dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, send abort\n", cid, hwq);
ret = hiraid_send_abort_cmd(hdev, hostdata->hdid, hwq, cid);
if (ret != -ETIME) {
@@ -3339,6 +3880,7 @@ static int hiraid_scsi_reset(struct scsi_cmnd *scmd, enum hiraid_rst_type rst)
if ((ret == 0) ||
(ret == FW_EH_DEV_NONE && rst == HIRAID_RESET_TARGET)) {
if (rst == HIRAID_RESET_TARGET) {
+ hiraid_delete_single_pd_io_list(hostdata->hdid);
ret = wait_tgt_reset_io_done(scmd);
if (ret) {
dev_warn(hdev->dev, "sdev[%d:%d] target has %d peding cmd, target reset failed\n",
@@ -3378,6 +3920,7 @@ static int hiraid_host_reset(struct scsi_cmnd *scmd)
dev_warn(hdev->dev, "sdev[%d:%d] send host reset\n",
scmd->device->channel, scmd->device->id);
+ hiraid_delete_all_io_list();
if (hiraid_reset_work_sync(hdev) == -EBUSY)
flush_work(&hdev->reset_work);
@@ -3411,6 +3954,7 @@ static pci_ers_result_t hiraid_pci_error_detected(struct pci_dev *pdev,
scsi_block_requests(hdev->shost);
hiraid_dev_state_trans(hdev, DEV_RESETTING);
+ hiraid_delete_all_io_list();
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
@@ -4044,6 +4588,7 @@ static int hiraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto unregist_bsg;
scsi_scan_host(hdev->shost);
+ hiraid_init_io_stream(hdev);
return 0;
@@ -4075,6 +4620,13 @@ static void hiraid_remove(struct pci_dev *pdev)
dev_info(hdev->dev, "enter hiraid remove\n");
+ if (pci_device_is_present(pdev))
+ hiraid_wait_for_io_submit(hdev);
+
+ kthread_stop(g_hiraid_submit_task);
+ hiraid_delete_all_io_list();
+ hiraid_destroy_io_stream_resource(hdev);
+
hiraid_dev_state_trans(hdev, DEV_DELETING);
flush_work(&hdev->reset_work);
--
2.45.1.windows.1
[PATCH OLK-5.10] [Backport] HID: usbhid: Eliminate recurrent out-of-bounds bug in usbhid_parse()
by Chen Jinghuang 19 Jan '26
From: Terry Junge <linuxhid(a)cosmicgizmosystems.com>
stable inclusion
from stable-v5.10.239
commit 41827a2dbdd7880df9881506dee13bc88d4230bb
category: bugfix
bugzilla: 9660
CVE: CVE-2025-38103
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit fe7f7ac8e0c708446ff017453add769ffc15deed upstream.
Update struct hid_descriptor to better reflect the mandatory and
optional parts of the HID Descriptor as per USB HID 1.11 specification.
Note: the kernel currently does not parse any optional HID class
descriptors, only the mandatory report descriptor.
Update all references to member element desc[0] to rpt_desc.
Add test to verify bLength and bNumDescriptors values are valid.
Replace the for loop with direct access to the mandatory HID class
descriptor member for the report descriptor. This eliminates the
possibility of getting an out-of-bounds fault.
Add a warning message if the HID descriptor contains any unsupported
optional HID class descriptors.
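A minimal userspace sketch of the new length check may help readers follow the arithmetic. The struct layouts below are stand-ins for the real definitions in include/linux/hid.h, not the kernel code itself:

#include <stdbool.h>
#include <stdint.h>

struct class_desc {                        /* stand-in for hid_class_descriptor */
	uint8_t  bDescriptorType;
	uint16_t wDescriptorLength;
} __attribute__((packed));

struct hid_desc {                          /* stand-in for struct hid_descriptor */
	uint8_t  bLength;
	uint8_t  bDescriptorType;
	uint16_t bcdHID;
	uint8_t  bCountryCode;
	uint8_t  bNumDescriptors;
	struct class_desc rpt_desc;            /* mandatory report descriptor */
	struct class_desc opt_descs[];         /* optional class descriptors  */
} __attribute__((packed));

static bool hid_desc_len_ok(const struct hid_desc *h)
{
	/* bNumDescriptors counts rpt_desc too, so bLength must equal the
	 * fixed header plus (bNumDescriptors - 1) optional entries. */
	return h->bNumDescriptors &&
	       h->bLength == sizeof(*h) +
			(h->bNumDescriptors - 1) * sizeof(struct class_desc);
}

Because the report descriptor is now a named mandatory member, the parser reads it directly instead of looping over a bounded array, which removes the out-of-bounds window entirely.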
Reported-by: syzbot+c52569baf0c843f35495(a)syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=c52569baf0c843f35495
Fixes: f043bfc98c19 ("HID: usbhid: fix out-of-bounds bug")
Cc: stable(a)vger.kernel.org
Signed-off-by: Terry Junge <linuxhid(a)cosmicgizmosystems.com>
Reviewed-by: Michael Kelley <mhklinux(a)outlook.com>
Signed-off-by: Jiri Kosina <jkosina(a)suse.com>
Signed-off-by: Terry Junge <linuxhid(a)cosmicgizmosystems.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Chen Jinghuang <chenjinghuang2(a)huawei.com>
---
drivers/hid/hid-hyperv.c | 5 +++--
drivers/hid/usbhid/hid-core.c | 25 ++++++++++++++-----------
drivers/usb/gadget/function/f_hid.c | 12 ++++++------
include/linux/hid.h | 3 ++-
4 files changed, 25 insertions(+), 20 deletions(-)
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index b7704dd6809d..bf77cfb723d5 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -199,7 +199,8 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
if (!input_device->hid_desc)
goto cleanup;
- input_device->report_desc_size = desc->desc[0].wDescriptorLength;
+ input_device->report_desc_size = le16_to_cpu(
+ desc->rpt_desc.wDescriptorLength);
if (input_device->report_desc_size == 0) {
input_device->dev_info_status = -EINVAL;
goto cleanup;
@@ -217,7 +218,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
memcpy(input_device->report_desc,
((unsigned char *)desc) + desc->bLength,
- desc->desc[0].wDescriptorLength);
+ le16_to_cpu(desc->rpt_desc.wDescriptorLength));
/* Send the ack */
memset(&ack, 0, sizeof(struct mousevsc_prt_msg));
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 009a0469d54f..c3b104c72e49 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -984,12 +984,11 @@ static int usbhid_parse(struct hid_device *hid)
struct usb_host_interface *interface = intf->cur_altsetting;
struct usb_device *dev = interface_to_usbdev (intf);
struct hid_descriptor *hdesc;
+ struct hid_class_descriptor *hcdesc;
u32 quirks = 0;
unsigned int rsize = 0;
char *rdesc;
- int ret, n;
- int num_descriptors;
- size_t offset = offsetof(struct hid_descriptor, desc);
+ int ret;
quirks = hid_lookup_quirk(hid);
@@ -1011,20 +1010,19 @@ static int usbhid_parse(struct hid_device *hid)
return -ENODEV;
}
- if (hdesc->bLength < sizeof(struct hid_descriptor)) {
- dbg_hid("hid descriptor is too short\n");
+ if (!hdesc->bNumDescriptors ||
+ hdesc->bLength != sizeof(*hdesc) +
+ (hdesc->bNumDescriptors - 1) * sizeof(*hcdesc)) {
+ dbg_hid("hid descriptor invalid, bLen=%hhu bNum=%hhu\n",
+ hdesc->bLength, hdesc->bNumDescriptors);
return -EINVAL;
}
hid->version = le16_to_cpu(hdesc->bcdHID);
hid->country = hdesc->bCountryCode;
- num_descriptors = min_t(int, hdesc->bNumDescriptors,
- (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
-
- for (n = 0; n < num_descriptors; n++)
- if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
- rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
+ if (hdesc->rpt_desc.bDescriptorType == HID_DT_REPORT)
+ rsize = le16_to_cpu(hdesc->rpt_desc.wDescriptorLength);
if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
dbg_hid("weird size of report descriptor (%u)\n", rsize);
@@ -1052,6 +1050,11 @@ static int usbhid_parse(struct hid_device *hid)
goto err;
}
+ if (hdesc->bNumDescriptors > 1)
+ hid_warn(intf,
+ "%u unsupported optional hid class descriptors\n",
+ (int)(hdesc->bNumDescriptors - 1));
+
hid->quirks |= quirks;
return 0;
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index ba018aeb21d8..2f30699f0426 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -114,8 +114,8 @@ static struct hid_descriptor hidg_desc = {
.bcdHID = cpu_to_le16(0x0101),
.bCountryCode = 0x00,
.bNumDescriptors = 0x1,
- /*.desc[0].bDescriptorType = DYNAMIC */
- /*.desc[0].wDescriptorLenght = DYNAMIC */
+ /*.rpt_desc.bDescriptorType = DYNAMIC */
+ /*.rpt_desc.wDescriptorLength = DYNAMIC */
};
/* Super-Speed Support */
@@ -724,8 +724,8 @@ static int hidg_setup(struct usb_function *f,
struct hid_descriptor hidg_desc_copy = hidg_desc;
VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n");
- hidg_desc_copy.desc[0].bDescriptorType = HID_DT_REPORT;
- hidg_desc_copy.desc[0].wDescriptorLength =
+ hidg_desc_copy.rpt_desc.bDescriptorType = HID_DT_REPORT;
+ hidg_desc_copy.rpt_desc.wDescriptorLength =
cpu_to_le16(hidg->report_desc_length);
length = min_t(unsigned short, length,
@@ -966,8 +966,8 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
* We can use hidg_desc struct here but we should not relay
* that its content won't change after returning from this function.
*/
- hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT;
- hidg_desc.desc[0].wDescriptorLength =
+ hidg_desc.rpt_desc.bDescriptorType = HID_DT_REPORT;
+ hidg_desc.rpt_desc.wDescriptorLength =
cpu_to_le16(hidg->report_desc_length);
hidg_hs_in_ep_desc.bEndpointAddress =
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 9e306bf9959d..03627c96d814 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -674,8 +674,9 @@ struct hid_descriptor {
__le16 bcdHID;
__u8 bCountryCode;
__u8 bNumDescriptors;
+ struct hid_class_descriptor rpt_desc;
- struct hid_class_descriptor desc[1];
+ struct hid_class_descriptor opt_descs[];
} __attribute__ ((packed));
#define HID_DEVICE(b, g, ven, prod) \
--
2.34.1
From: 岳智超 <yuezhichao1(a)h-partners.com>
driver inclusion
category: feature
bugzilla: https://atomgit.com/openeuler/kernel/issues/8290
CVE: NA
--------------------------------
Add threaded irq for io queues
Add stream detection
Signed-off-by: 岳智超 <yuezhichao1(a)h-partners.com>
---
drivers/scsi/hisi_raid/hiraid.h | 34 ++
drivers/scsi/hisi_raid/hiraid_main.c | 586 ++++++++++++++++++++++++++-
2 files changed, 604 insertions(+), 16 deletions(-)
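The threaded-irq half of the series follows the stock kernel hard-handler/thread split. A hedged sketch of that pattern, with hypothetical names (my_queue, my_hard_irq, my_irq_thread) rather than the driver's own:

#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/atomic.h>

struct my_queue {
	atomic_t pending;                     /* completions not yet reaped */
	int irq;
};

static irqreturn_t my_hard_irq(int irq, void *data)
{
	struct my_queue *q = data;

	/* Reap a bounded batch here; if the queue is still busy, mask
	 * the line and hand off to the irq thread, as hiraid_io_irq
	 * does below. */
	if (atomic_read(&q->pending)) {
		disable_irq_nosync(q->irq);
		return IRQ_WAKE_THREAD;
	}
	return IRQ_HANDLED;
}

static irqreturn_t my_irq_thread(int irq, void *data)
{
	struct my_queue *q = data;

	/* Sleepable context: poll until drained, then unmask the line. */
	while (atomic_read(&q->pending))
		usleep_range(10, 20);
	enable_irq(q->irq);
	return IRQ_HANDLED;
}

/* registered with:
 * request_threaded_irq(q->irq, my_hard_irq, my_irq_thread, 0, "myq", q);
 */

Capping the hard handler's batch keeps interrupt latency bounded under completion storms, while the thread absorbs the remaining work without holding off other interrupts.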
diff --git a/drivers/scsi/hisi_raid/hiraid.h b/drivers/scsi/hisi_raid/hiraid.h
index 04b2e25..b7e3c71 100644
--- a/drivers/scsi/hisi_raid/hiraid.h
+++ b/drivers/scsi/hisi_raid/hiraid.h
@@ -686,6 +686,7 @@ struct hiraid_queue {
atomic_t inflight;
void *sense_buffer_virt;
dma_addr_t sense_buffer_phy;
+ s32 pci_irq;
struct dma_pool *prp_small_pool;
};
@@ -760,5 +761,38 @@ struct hiraid_sdev_hostdata {
u16 pend_count;
};
+enum stream_type {
+ TYPE_TOTAL,
+ TYPE_WRITE,
+ TYPE_READ,
+ TYPE_CLEAN,
+ TYPE_BOTTOM
+};
+
+struct HIRAID_STREAM_S {
+ /* recog-window */
+ u64 stream_lba;
+ u32 stream_len;
+ u16 vd_id;
+ u16 type;
+ /* aging ctrl */
+ int aging_credit;
+ int aging_grade;
+ u16 stream_id;
+ u16 using;
+};
+
+struct IO_LIST_S {
+ struct list_head list;
+ struct hiraid_scsi_io_cmd io_cmd;
+ struct hiraid_queue *submit_queue;
+ unsigned int sector_size;
+};
+
+struct spinlock_list_head_s {
+ struct list_head list;
+ spinlock_t lock;
+};
+
#endif
diff --git a/drivers/scsi/hisi_raid/hiraid_main.c b/drivers/scsi/hisi_raid/hiraid_main.c
index 2f33339..a7dc746 100644
--- a/drivers/scsi/hisi_raid/hiraid_main.c
+++ b/drivers/scsi/hisi_raid/hiraid_main.c
@@ -35,6 +35,10 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_dbg.h>
#include <scsi/sg.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/sched/prio.h>
#include "hiraid.h"
@@ -107,6 +111,13 @@ static u32 log_debug_switch;
module_param(log_debug_switch, uint, 0644);
MODULE_PARM_DESC(log_debug_switch, "set log state, default zero for switch off");
+static bool threaded_irq = false;
+module_param(threaded_irq, bool, 0444);
+MODULE_PARM_DESC(threaded_irq, "use threaded irq for io queue, default off");
+
+static u32 poll_delay_min = 9;
+static u32 poll_delay_max = 19;
+
static int extra_pool_num_set(const char *val, const struct kernel_param *kp)
{
u8 n = 0;
@@ -152,7 +163,7 @@ static struct workqueue_struct *work_queue;
__func__, ##__VA_ARGS__); \
} while (0)
-#define HIRAID_DRV_VERSION "1.1.0.1"
+#define HIRAID_DRV_VERSION "1.1.0.2"
#define ADMIN_TIMEOUT (admin_tmout * HZ)
#define USRCMD_TIMEOUT (180 * HZ)
@@ -169,6 +180,15 @@ static struct workqueue_struct *work_queue;
#define MAX_CAN_QUEUE (4096 - 1)
#define MIN_CAN_QUEUE (1024 - 1)
+#define MAX_DECREASE_GRADE (-8)
+#define MAX_INCREASE_GRADE 8
+#define INC_GRADE 1
+#define MIN_CREDIT 0
+#define MAX_CREDIT 64
+#define CREDIT_THRES 32
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+
enum SENSE_STATE_CODE {
SENSE_STATE_OK = 0,
SENSE_STATE_NEED_CHECK,
@@ -749,6 +769,404 @@ static int hiraid_build_sgl(struct hiraid_dev *hdev, struct hiraid_scsi_io_cmd *
return 0;
}
+#define MAX_PD_NUM (40 + 1)
+#define MAX_STREAM_NUM 8
+#define PER_MB (1024 * 1024)
+#define MAX_IO_NUM (200 * PER_MB)
+#define STREAM_LEN (4 * PER_MB)
+#define MAX_IO_NUM_ONCE 128
+#define IO_SUBMIT_TIME_OUT 100
+#define MAX_AGING_NUM 130
+
+#define MIN_IO_SEND_TIME 10
+#define MAX_IO_SEND_TIME 50
+
+#define MIN_WAIT_IO_SEND_TIME 10
+#define MAX_WAIT_IO_SEND_TIME 20
+
+enum io_operation_type {
+ TYPE_DELETE_SINGLE_IO = 1,
+ TYPE_DELETE_SINGLE_IO_LIST,
+ TYPE_DELETE_ALL_IO_LIST
+};
+
+struct HIRAID_STREAM_S stream_array[MAX_PD_NUM][MAX_STREAM_NUM] = {0};
+struct spinlock_list_head_s io_heads_per_stream[MAX_PD_NUM * MAX_STREAM_NUM];
+spinlock_t stream_array_lock;
+
+u64 g_io_transport_num[MAX_PD_NUM][MAX_STREAM_NUM] = {0};
+u16 g_io_stream_num[MAX_PD_NUM][TYPE_BOTTOM] = {0};
+u16 g_io_count = 1;
+
+void hiraid_inc_io_transport_num(u16 disk_id, u16 streamd_id, u16 nlb)
+{
+ g_io_transport_num[disk_id][streamd_id] += nlb;
+}
+
+void hiraid_refresh_io_transport_num(u16 disk_id, u16 streamd_id)
+{
+ g_io_transport_num[disk_id][streamd_id] = 0;
+}
+
+void hiraid_inc_stream_num(u16 disk_id)
+{
+ spin_lock(&stream_array_lock);
+ g_io_stream_num[disk_id][TYPE_TOTAL]++;
+ spin_unlock(&stream_array_lock);
+}
+
+void hiraid_dec_stream_num(u16 disk_id)
+{
+ spin_lock(&stream_array_lock);
+ if (g_io_stream_num[disk_id][TYPE_TOTAL] > 0)
+ g_io_stream_num[disk_id][TYPE_TOTAL]--;
+ spin_unlock(&stream_array_lock);
+}
+
+static bool hiraid_io_recog_check_stream_exceed(u16 disk_id)
+{
+ bool exceed_flag;
+
+ spin_lock(&stream_array_lock);
+ exceed_flag = (g_io_stream_num[disk_id][TYPE_TOTAL] >= MAX_STREAM_NUM);
+ spin_unlock(&stream_array_lock);
+ return exceed_flag;
+}
+
+static u16 hiraid_get_stream_num(u16 disk_id)
+{
+ return g_io_stream_num[disk_id][TYPE_TOTAL];
+}
+
+static inline struct HIRAID_STREAM_S *hiraid_get_stream(u16 disk_id,
+ u16 stream_id)
+{
+ return &stream_array[disk_id][stream_id];
+}
+
+static inline struct spinlock_list_head_s *hiraid_get_io_head(u16 disk_id)
+{
+ return &(io_heads_per_stream[disk_id]);
+}
+
+static bool hiraid_recognition_acknowledge(const struct HIRAID_STREAM_S *stream)
+{
+ return (stream->aging_credit >= CREDIT_THRES) ? true : false;
+}
+
+void hiraid_io_recognition_init(void)
+{
+ u16 i;
+
+ spin_lock_init(&stream_array_lock);
+ for (i = 0; i < (MAX_PD_NUM * MAX_STREAM_NUM); i++) {
+ INIT_LIST_HEAD(&hiraid_get_io_head(i)->list);
+ spin_lock_init(&hiraid_get_io_head(i)->lock);
+ }
+}
+
+static void hiraid_io_recognition_iterator(struct HIRAID_STREAM_S *stream,
+ int direction)
+{
+ stream->aging_grade = stream->aging_grade + direction * INC_GRADE;
+ stream->aging_grade = MAX(stream->aging_grade, MAX_DECREASE_GRADE);
+ stream->aging_grade = MIN(stream->aging_grade, MAX_INCREASE_GRADE);
+ stream->aging_credit = stream->aging_credit + stream->aging_grade;
+ stream->aging_credit = MAX(stream->aging_credit, MIN_CREDIT);
+ stream->aging_credit = MIN(stream->aging_credit, MAX_CREDIT);
+}
+
+struct HIRAID_STREAM_S *hiraid_io_pick_stream(
+ struct hiraid_scsi_rw_cmd *req, u16 type)
+{
+ struct HIRAID_STREAM_S *first_hit_stream = NULL;
+ struct HIRAID_STREAM_S *temp_stream = NULL;
+ u16 pick_flag = 0;
+ u8 i;
+
+ for (i = 0; i < MAX_STREAM_NUM; i++) {
+ temp_stream = &stream_array[req->hdid][i];
+ temp_stream->stream_id = i;
+ if (req->slba < temp_stream->stream_lba ||
+ req->slba >= temp_stream->stream_lba +
+ temp_stream->stream_len ||
+ temp_stream->type != type) {
+ continue;
+ }
+ if (!pick_flag) {
+ temp_stream->stream_lba = req->slba;
+ first_hit_stream = temp_stream;
+ pick_flag = 1;
+ continue;
+ }
+ hiraid_dec_stream_num(req->hdid);
+ memset(temp_stream, 0,
+ sizeof(struct HIRAID_STREAM_S)); // clear duplicate (shadow) stream
+ }
+ return first_hit_stream;
+}
+
+static struct HIRAID_STREAM_S *hiraid_init_flow_stream(struct hiraid_scsi_rw_cmd *req,
+ u16 type)
+{
+ int i;
+ struct HIRAID_STREAM_S *stream = NULL;
+
+ for (i = 0; i < MAX_STREAM_NUM; i++) {
+ stream = hiraid_get_stream(req->hdid, i);
+ if (!stream->using) {
+ stream->using = 1;
+ stream->stream_id = i;
+ break;
+ }
+ }
+ stream->stream_lba = req->slba;
+ stream->vd_id = req->hdid;
+ stream->type = type;
+ stream->aging_credit = 0;
+ stream->aging_grade = 0;
+ stream->stream_len = STREAM_LEN;
+ return stream;
+}
+
+static struct HIRAID_STREAM_S *hiraid_stream_detect(struct hiraid_dev *hdev,
+ struct hiraid_scsi_rw_cmd *io_cmd)
+{
+ u16 type = io_cmd->opcode == HIRAID_CMD_WRITE ? TYPE_WRITE : TYPE_READ;
+ struct HIRAID_STREAM_S *stream = hiraid_io_pick_stream(io_cmd, type);
+
+ if (stream != NULL) { /* an existing stream is hit */
+ return stream;
+ }
+
+ if (hiraid_io_recog_check_stream_exceed(io_cmd->hdid))
+ return NULL;
+ stream = hiraid_init_flow_stream(io_cmd, type);
+ hiraid_inc_stream_num(io_cmd->hdid);
+ return stream;
+}
+
+u64 g_io_last_pull_time[MAX_PD_NUM] = {0};
+
+static u16 hiraid_get_submit_io_stream(u16 did, struct hiraid_dev *hdev)
+{
+ u64 temp_num, i;
+ static u16 stream_num[MAX_PD_NUM] = {0};
+
+ if (g_io_last_pull_time[did] == 0)
+ g_io_last_pull_time[did] = jiffies_to_msecs(jiffies);
+
+ for (i = 0; i < MAX_STREAM_NUM; i++) {
+ temp_num = g_io_transport_num[did][i];
+ if (temp_num != 0) {
+ if ((temp_num < MAX_IO_NUM) &&
+ ((jiffies_to_msecs(jiffies) - g_io_last_pull_time[did])
+ < IO_SUBMIT_TIME_OUT)) {
+ stream_num[did] = i;
+ return i;
+ }
+ g_io_last_pull_time[did] = jiffies_to_msecs(jiffies);
+ hiraid_refresh_io_transport_num(did, i);
+ stream_num[did] = ((i+1) % MAX_STREAM_NUM);
+ return ((i+1) % MAX_STREAM_NUM);
+ }
+ }
+ g_io_last_pull_time[did] = jiffies_to_msecs(jiffies);
+ return ((stream_num[did]++) % MAX_STREAM_NUM);
+}
+
+static void hiraid_submit_io_stream(u16 hdid, struct hiraid_dev *hdev)
+{
+ struct spinlock_list_head_s *io_slist = NULL;
+ struct list_head *node = NULL;
+ struct list_head *next_node = NULL;
+ struct hiraid_scsi_io_cmd io_cmd = {0};
+ struct hiraid_queue *submit_queue = NULL;
+ unsigned int sector_size = 0;
+ u16 submit_stream_id = hiraid_get_submit_io_stream(hdid, hdev);
+
+ struct IO_LIST_S *temp_io_stream = NULL;
+ u16 count = 0;
+
+ io_slist = hiraid_get_io_head(hdid * MAX_STREAM_NUM + submit_stream_id);
+ spin_lock(&io_slist->lock);
+ list_for_each_safe(node, next_node, &io_slist->list) {
+ temp_io_stream = list_entry(node, struct IO_LIST_S, list);
+ list_del_init(node);
+ io_cmd = temp_io_stream->io_cmd;
+ submit_queue = temp_io_stream->submit_queue;
+ sector_size = temp_io_stream->sector_size;
+ kfree(temp_io_stream);
+ temp_io_stream = NULL;
+ spin_unlock(&io_slist->lock);
+ hiraid_submit_cmd(submit_queue, &io_cmd);
+ hiraid_inc_io_transport_num(hdid,
+ submit_stream_id, io_cmd.rw.nlb * sector_size);
+ // cap one pass at MAX_IO_NUM_ONCE to avoid draining a single disk only
+ if (++count >= MAX_IO_NUM_ONCE) {
+ spin_lock(&io_slist->lock);
+ break;
+ }
+ spin_lock(&io_slist->lock);
+ }
+ spin_unlock(&io_slist->lock);
+}
+
+static u8 hiraid_detect_if_aging(void)
+{
+ if (++g_io_count == MAX_AGING_NUM) {
+ g_io_count = 0;
+ return 1;
+ }
+ return 0;
+}
+
+static void hiraid_aging(struct hiraid_dev *hdev)
+{
+ struct HIRAID_STREAM_S *temp_stream = NULL;
+ int i = 0;
+ int j = 0;
+
+ for (i = 1; i < MAX_PD_NUM; i++) {
+ for (j = 0; j < MAX_STREAM_NUM; j++) {
+ temp_stream = hiraid_get_stream(i, j);
+ if (temp_stream->using) {
+ hiraid_io_recognition_iterator(temp_stream, -1);
+ if (temp_stream->aging_credit <= 0) {
+ hiraid_dec_stream_num(i);
+ memset(temp_stream,
+ 0, sizeof(struct HIRAID_STREAM_S)); // aged out
+ }
+ }
+ }
+ }
+}
+
+static u8 hiraid_io_list_operation(u32 hdid, u16 cid, u16 hwq, u8 operation)
+{
+ int i, j;
+
+ struct spinlock_list_head_s *io_slist = NULL;
+ struct list_head *node = NULL;
+ struct list_head *next_node = NULL;
+ struct hiraid_scsi_io_cmd *io_cmd = NULL;
+ struct hiraid_queue *hiraidq = NULL;
+ struct IO_LIST_S *temp_io_stream = NULL;
+
+ u8 max_hd_num = operation == TYPE_DELETE_ALL_IO_LIST ?
+ MAX_PD_NUM : hdid + 1;
+ for (i = hdid; i < max_hd_num; i++) {
+ for (j = 0; j < MAX_STREAM_NUM; j++) {
+ io_slist = hiraid_get_io_head(i * MAX_STREAM_NUM + j);
+ spin_lock(&io_slist->lock);
+ list_for_each_safe(node, next_node, &io_slist->list) {
+ temp_io_stream = list_entry(node,
+ struct IO_LIST_S, list);
+ io_cmd = &(temp_io_stream->io_cmd);
+ hiraidq = temp_io_stream->submit_queue;
+ if (operation >= TYPE_DELETE_SINGLE_IO_LIST) {
+ list_del_init(node);
+ kfree(temp_io_stream);
+ temp_io_stream = NULL;
+ } else {
+ if ((io_cmd->rw.cmd_id == cid) &&
+ (hiraidq->qid == hwq)) {
+ list_del_init(node);
+ spin_unlock(&io_slist->lock);
+ kfree(temp_io_stream);
+ return 1;
+ }
+ }
+ }
+ spin_unlock(&io_slist->lock);
+ }
+ }
+ return 0;
+}
+
+static u8 hiraid_check_io_list(u32 hdid, u16 cid, u16 hwq)
+{
+ return hiraid_io_list_operation(hdid, cid, hwq, TYPE_DELETE_SINGLE_IO);
+}
+
+static u8 hiraid_delete_single_pd_io_list(u32 hdid)
+{
+ return hiraid_io_list_operation(hdid, 0, 0, TYPE_DELETE_SINGLE_IO_LIST);
+}
+
+static u8 hiraid_delete_all_io_list(void)
+{
+ return hiraid_io_list_operation(0, 0, 0, TYPE_DELETE_ALL_IO_LIST);
+}
+
+static void hiraid_wait_for_io_submit(struct hiraid_dev *hdev)
+{
+ struct spinlock_list_head_s *io_slist = NULL;
+ int i = 0;
+ int io_flush_finished;
+
+ do {
+ io_flush_finished = 1;
+ for (i = 0; i < (MAX_PD_NUM * MAX_STREAM_NUM); i++) {
+ io_slist = hiraid_get_io_head(i);
+ if (!list_empty(&io_slist->list)) {
+ io_flush_finished = 0;
+ break;
+ }
+ }
+ usleep_range(MIN_WAIT_IO_SEND_TIME, MAX_WAIT_IO_SEND_TIME);
+ } while (!io_flush_finished);
+}
+
+static u8 hiraid_add_io_to_list(struct hiraid_queue *submit_queue,
+ struct HIRAID_STREAM_S *tmp_stream, struct hiraid_scsi_io_cmd io_cmd,
+ unsigned int sector_size)
+{
+ struct spinlock_list_head_s *io_slist = NULL;
+ struct IO_LIST_S *new_io_node = NULL;
+
+ new_io_node = kmalloc(sizeof(struct IO_LIST_S), GFP_KERNEL);
+ if (!new_io_node)
+ return 0;
+ new_io_node->io_cmd = io_cmd;
+ new_io_node->submit_queue = submit_queue;
+ new_io_node->sector_size = sector_size;
+ io_slist = hiraid_get_io_head(io_cmd.rw.hdid *
+ MAX_STREAM_NUM + tmp_stream->stream_id);
+ spin_lock(&io_slist->lock);
+ INIT_LIST_HEAD(&(new_io_node->list));
+ list_add_tail(&(new_io_node->list), &io_slist->list);
+ spin_unlock(&io_slist->lock);
+ return 1;
+}
+
+static void hiraid_submit_io_threading(struct hiraid_dev *hdev)
+{
+ int i = 1;
+
+ while (!kthread_should_stop()) {
+ for (i = 1; i < MAX_PD_NUM; i++)
+ hiraid_submit_io_stream(i, hdev);
+ usleep_range(MIN_IO_SEND_TIME, MAX_IO_SEND_TIME);
+ }
+}
+
+static void hiraid_destroy_io_stream_resource(struct hiraid_dev *hdev)
+{
+ u16 i;
+
+ for (i = 0; i < (MAX_PD_NUM * MAX_STREAM_NUM); i++)
+ list_del_init(&hiraid_get_io_head(i)->list);
+}
+
+struct task_struct *g_hiraid_submit_task;
+static void hiraid_init_io_stream(struct hiraid_dev *hdev)
+{
+ hiraid_io_recognition_init();
+ g_hiraid_submit_task = kthread_run((void *)hiraid_submit_io_threading,
+ hdev, "hiraid_submit_thread");
+}
+
#define HIRAID_RW_FUA BIT(14)
#define RW_LENGTH_ZERO (67)
@@ -871,6 +1289,30 @@ static int hiraid_setup_nonrw_cmd(struct hiraid_dev *hdev,
return 0;
}
+static bool hiraid_disk_is_hdd(u8 attr)
+{
+ switch (HIRAID_DEV_DISK_TYPE(attr)) {
+ case HIRAID_SAS_HDD_VD:
+ case HIRAID_SATA_HDD_VD:
+ case HIRAID_SAS_HDD_PD:
+ case HIRAID_SATA_HDD_PD:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool hiraid_disk_is_hdd_rawdrive(u8 attr)
+{
+ switch (HIRAID_DEV_DISK_TYPE(attr)) {
+ case HIRAID_SAS_HDD_PD:
+ case HIRAID_SATA_HDD_PD:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int hiraid_setup_io_cmd(struct hiraid_dev *hdev,
struct hiraid_scsi_io_cmd *io_cmd, struct scsi_cmnd *scmd,
struct hiraid_mapmange *mapbuf)
@@ -1025,6 +1467,7 @@ static int hiraid_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
struct hiraid_sdev_hostdata *hostdata;
struct hiraid_scsi_io_cmd io_cmd;
struct hiraid_queue *ioq;
+ struct HIRAID_STREAM_S *tmp_stm = NULL;
u16 hwq, cid;
int ret;
@@ -1092,6 +1535,23 @@ static int hiraid_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
}
WRITE_ONCE(mapbuf->state, CMD_FLIGHT);
+
+ if (hiraid_is_rw_scmd(scmd) &&
+ hiraid_disk_is_hdd_rawdrive(hostdata->attr)) {
+ if (hiraid_detect_if_aging())
+ hiraid_aging(hdev);
+ tmp_stm = hiraid_stream_detect(hdev, &(io_cmd.rw));
+ if (tmp_stm != NULL) {
+ hiraid_io_recognition_iterator(tmp_stm, 1);
+ if (hiraid_recognition_acknowledge(tmp_stm) &&
+ (hiraid_get_stream_num(io_cmd.rw.hdid) > 1)) {
+ if (hiraid_add_io_to_list(ioq,
+ tmp_stm, io_cmd, sdev->sector_size)) {
+ return 0;
+ }
+ }
+ }
+ }
hiraid_submit_cmd(ioq, &io_cmd);
return 0;
@@ -1141,19 +1601,6 @@ static int hiraid_disk_qd(u8 attr)
}
}
-static bool hiraid_disk_is_hdd(u8 attr)
-{
- switch (HIRAID_DEV_DISK_TYPE(attr)) {
- case HIRAID_SAS_HDD_VD:
- case HIRAID_SATA_HDD_VD:
- case HIRAID_SAS_HDD_PD:
- case HIRAID_SATA_HDD_PD:
- return true;
- default:
- return false;
- }
-}
-
static int hiraid_slave_alloc(struct scsi_device *sdev)
{
struct hiraid_sdev_hostdata *hostdata;
@@ -1305,6 +1752,7 @@ static int hiraid_alloc_queue(struct hiraid_dev *hdev, u16 qid, u16 depth)
hiraidq->q_depth = depth;
hiraidq->qid = qid;
hiraidq->cq_vector = -1;
+ hiraidq->pci_irq = -1;
hdev->queue_count++;
return 0;
@@ -1594,6 +2042,33 @@ static inline bool hiraid_process_cq(struct hiraid_queue *hiraidq, u16 *start, u
return found;
}
+static inline bool hiraid_process_cq_for_thread(struct hiraid_queue *hiraidq,
+ u16 *start, u16 *end,
+ u8 *wakeup_thread_flag, int tag)
+{
+ bool found = false;
+ u16 max_io_num = hiraidq->q_depth / 4;
+ u16 io_count = 0;
+ *start = hiraidq->cq_head;
+ while (!found && hiraid_cqe_pending(hiraidq)) {
+ if (le16_to_cpu(hiraidq->cqes[hiraidq->cq_head].cmd_id) == tag)
+ found = true;
+ hiraid_update_cq_head(hiraidq);
+
+ if (++io_count >= max_io_num) {
+ *wakeup_thread_flag = 1;
+ break;
+ }
+ }
+ *end = hiraidq->cq_head;
+
+ if (*start != *end)
+ writel(hiraidq->cq_head,
+ hiraidq->q_db + hiraidq->hdev->db_stride);
+
+ return found;
+}
+
static bool hiraid_poll_cq(struct hiraid_queue *hiraidq, int cid)
{
u16 start, end;
@@ -1631,6 +2106,55 @@ static irqreturn_t hiraid_handle_irq(int irq, void *data)
return ret;
}
+static irqreturn_t hiraid_io_poll(int irq, void *data)
+{
+ struct hiraid_queue *hiraidq = data;
+ irqreturn_t ret = IRQ_NONE;
+ u16 start, end;
+
+ do {
+ spin_lock(&hiraidq->cq_lock);
+ hiraid_process_cq(hiraidq, &start, &end, -1);
+ hiraidq->last_cq_head = hiraidq->cq_head;
+ spin_unlock(&hiraidq->cq_lock);
+
+ if (start != end) {
+ hiraid_complete_cqes(hiraidq, start, end);
+ ret = IRQ_HANDLED;
+ }
+ usleep_range(poll_delay_min, poll_delay_max);
+ } while (start != end);
+ enable_irq(hiraidq->pci_irq);
+ return ret;
+}
+
+static irqreturn_t hiraid_io_irq(int irq, void *data)
+{
+ struct hiraid_queue *hiraidq = data;
+ irqreturn_t ret = IRQ_NONE;
+ u16 start, end;
+ u8 wakeup_thread_flag = 0;
+
+ spin_lock(&hiraidq->cq_lock);
+ if (hiraidq->cq_head != hiraidq->last_cq_head)
+ ret = IRQ_HANDLED;
+
+ hiraid_process_cq_for_thread(hiraidq, &start,
+ &end, &wakeup_thread_flag, -1);
+ hiraidq->last_cq_head = hiraidq->cq_head;
+ spin_unlock(&hiraidq->cq_lock);
+
+ if (start != end) {
+ hiraid_complete_cqes(hiraidq, start, end);
+ ret = IRQ_HANDLED;
+ }
+ if (wakeup_thread_flag) {
+ disable_irq_nosync(hiraidq->pci_irq);
+ ret = IRQ_WAKE_THREAD;
+ }
+ return ret;
+}
+
static int hiraid_setup_admin_queue(struct hiraid_dev *hdev)
{
struct hiraid_queue *adminq = &hdev->queues[0];
@@ -1666,9 +2190,11 @@ static int hiraid_setup_admin_queue(struct hiraid_dev *hdev)
adminq, "hiraid%d_q%d", hdev->instance, adminq->qid);
if (ret) {
adminq->cq_vector = -1;
+ adminq->pci_irq = -1;
return ret;
}
+ adminq->pci_irq = pci_irq_vector(hdev->pdev, adminq->cq_vector);
hiraid_init_queue(adminq, 0);
dev_info(hdev->dev, "setup admin queue success, queuecount[%d] online[%d] pagesize[%d]\n",
@@ -1937,14 +2463,23 @@ static int hiraid_create_queue(struct hiraid_queue *hiraidq, u16 qid)
goto delete_cq;
hiraidq->cq_vector = cq_vector;
- ret = pci_request_irq(hdev->pdev, cq_vector, hiraid_handle_irq, NULL,
- hiraidq, "hiraid%d_q%d", hdev->instance, qid);
+
+ if (threaded_irq)
+ ret = pci_request_irq(hdev->pdev, cq_vector, hiraid_io_irq,
+ hiraid_io_poll, hiraidq, "hiraid%d_q%d",
+ hdev->instance, qid);
+ else
+ ret = pci_request_irq(hdev->pdev, cq_vector, hiraid_handle_irq,
+ NULL, hiraidq, "hiraid%d_q%d",
+ hdev->instance, qid);
if (ret) {
hiraidq->cq_vector = -1;
+ hiraidq->pci_irq = -1;
dev_err(hdev->dev, "request queue[%d] irq failed\n", qid);
goto delete_sq;
}
+ hiraidq->pci_irq = pci_irq_vector(hdev->pdev, hiraidq->cq_vector);
hiraid_init_queue(hiraidq, qid);
return 0;
@@ -2094,10 +2629,12 @@ static int hiraid_setup_io_queues(struct hiraid_dev *hdev)
adminq, "hiraid%d_q%d", hdev->instance, adminq->qid);
if (ret) {
dev_err(hdev->dev, "request admin irq failed\n");
+ adminq->pci_irq = -1;
adminq->cq_vector = -1;
return ret;
}
+ adminq->pci_irq = pci_irq_vector(hdev->pdev, adminq->cq_vector);
hdev->online_queues++;
for (i = hdev->queue_count; i <= hdev->max_qid; i++) {
@@ -3232,6 +3769,12 @@ static int hiraid_abort(struct scsi_cmnd *scmd)
cid = mapbuf->cid;
hwq = mapbuf->hiraidq->qid;
+ if (hiraid_check_io_list(hostdata->hdid, cid, hwq)) {
+ dev_warn(hdev->dev, "find cid[%d] qid[%d] in host, abort succ\n",
+ cid, hwq);
+ return SUCCESS;
+ }
+
dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, send abort\n", cid, hwq);
ret = hiraid_send_abort_cmd(hdev, hostdata->hdid, hwq, cid);
if (ret != -ETIME) {
@@ -3263,6 +3806,7 @@ static int hiraid_scsi_reset(struct scsi_cmnd *scmd, enum hiraid_rst_type rst)
ret = hiraid_send_reset_cmd(hdev, rst, hostdata->hdid);
if ((ret == 0) || (ret == FW_EH_DEV_NONE && rst == HIRAID_RESET_TARGET)) {
if (rst == HIRAID_RESET_TARGET) {
+ hiraid_delete_single_pd_io_list(hostdata->hdid);
ret = wait_tgt_reset_io_done(scmd);
if (ret) {
dev_warn(hdev->dev, "sdev[%d:%d] target has %d peding cmd, target reset failed\n",
@@ -3300,6 +3844,7 @@ static int hiraid_host_reset(struct scsi_cmnd *scmd)
dev_warn(hdev->dev, "sdev[%d:%d] send host reset\n",
scmd->device->channel, scmd->device->id);
+ hiraid_delete_all_io_list();
if (hiraid_reset_work_sync(hdev) == -EBUSY)
flush_work(&hdev->reset_work);
@@ -3333,6 +3878,7 @@ static pci_ers_result_t hiraid_pci_error_detected(struct pci_dev *pdev,
scsi_block_requests(hdev->shost);
hiraid_dev_state_trans(hdev, DEV_RESETTING);
+ hiraid_delete_all_io_list();
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
@@ -3906,6 +4452,7 @@ static int hiraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto unregist_bsg;
scsi_scan_host(hdev->shost);
+ hiraid_init_io_stream(hdev);
return 0;
@@ -3937,6 +4484,13 @@ static void hiraid_remove(struct pci_dev *pdev)
dev_info(hdev->dev, "enter hiraid remove\n");
+ if (pci_device_is_present(pdev))
+ hiraid_wait_for_io_submit(hdev);
+
+ kthread_stop(g_hiraid_submit_task);
+ hiraid_delete_all_io_list();
+ hiraid_destroy_io_stream_resource(hdev);
+
hiraid_dev_state_trans(hdev, DEV_DELETING);
flush_work(&hdev->reset_work);
--
2.45.1.windows.1
Liu Jian (1):
cgroup: make cgroup_bpf_prog_attach work when cgroup2 is not mounted
Pu Lehui (10):
bpf: Only deploy hisock in server
bpf: Add multi port parse to hisock_cmd
bpf: Add target comm parse to hisock_cmd
bpf: Add bpf_xdp_early_demux kfunc
bpf: Add bpf_xdp_set_ingress_dev kfunc
bpf: Add bpf_skb_set_egress_dev kfunc
bpf: Deprecate some unused hisock kfuncs
bpf: Some refactor for hisock
bpf: Add ipv4-mapped ipv6 addr support for hisock
bpf: Add local connect support for hisock
include/linux/cgroup.h | 1 +
kernel/bpf/cgroup.c | 8 +-
kernel/cgroup/cgroup.c | 22 +++
net/Kconfig | 1 +
net/core/dev.c | 5 +-
net/core/filter.c | 83 +++------
samples/bpf/hisock/bpf.c | 315 ++++++++++++++++++++++++--------
samples/bpf/hisock/hisock_cmd.c | 211 ++++++++++++++++-----
8 files changed, 469 insertions(+), 177 deletions(-)
--
2.34.1
[PATCH openEuler-1.0-LTS] KVM: x86: Fix VM hard lockup after prolonged inactivity with periodic HV timer
by Zhang Kunbo 16 Jan '26
From: fuqiang wang <fuqiang.wng(a)gmail.com>
commit 18ab3fc8e880791aa9f7c000261320fc812b5465 upstream.
When advancing the target expiration for the guest's APIC timer in periodic
mode, set the expiration to "now" if the target expiration is in the past
(similar to what is done in update_target_expiration()). Blindly adding
the period to the previous target expiration can result in KVM generating
a practically unbounded number of hrtimer IRQs due to programming an
expired timer over and over. In extreme scenarios, e.g. if userspace
pauses/suspends a VM for an extended duration, this can even cause hard
lockups in the host.
Currently, the bug only affects Intel CPUs when using the hypervisor timer
(HV timer), a.k.a. the VMX preemption timer. Unlike the software timer,
a.k.a. hrtimer, which KVM keeps running even on exits to userspace, the
HV timer only runs while the guest is active. As a result, if the vCPU
does not run for an extended duration, there will be a huge gap between
the target expiration and the current time the vCPU resumes running.
Because the target expiration is incremented by only one period on each
timer expiration, this leads to a series of timer expirations occurring
rapidly after the vCPU/VM resumes.
More critically, when the vCPU first triggers a periodic HV timer
expiration after resuming, advancing the expiration by only one period
will result in a target expiration in the past. As a result, the delta
may be calculated as a negative value. When the delta is converted into
an absolute value (tscdeadline is an unsigned u64), the resulting value
can overflow what the HV timer is capable of programming. I.e. the large
value will exceed the VMX Preemption Timer's maximum bit width of
cpu_preemption_timer_multi + 32, and thus cause KVM to switch from the
HV timer to the software timer (hrtimers).
After switching to the software timer, periodic timer expiration callbacks
may be executed consecutively within a single clock interrupt handler,
because hrtimers honors KVM's request for an expiration in the past and
immediately re-invokes KVM's callback after reprogramming. And because
the interrupt handler runs with IRQs disabled, restarting KVM's hrtimer
over and over until the target expiration is advanced to "now" can result
in a hard lockup.
E.g. the following hard lockup was triggered in the host when running a
Windows VM (only relevant because it used the APIC timer in periodic mode)
after resuming the VM from a long suspend (in the host).
NMI watchdog: Watchdog detected hard LOCKUP on cpu 45
...
RIP: 0010:advance_periodic_target_expiration+0x4d/0x80 [kvm]
...
RSP: 0018:ff4f88f5d98d8ef0 EFLAGS: 00000046
RAX: fff0103f91be678e RBX: fff0103f91be678e RCX: 00843a7d9e127bcc
RDX: 0000000000000002 RSI: 0052ca4003697505 RDI: ff440d5bfbdbd500
RBP: ff440d5956f99200 R08: ff2ff2a42deb6a84 R09: 000000000002a6c0
R10: 0122d794016332b3 R11: 0000000000000000 R12: ff440db1af39cfc0
R13: ff440db1af39cfc0 R14: ffffffffc0d4a560 R15: ff440db1af39d0f8
FS: 00007f04a6ffd700(0000) GS:ff440db1af380000(0000) knlGS:000000e38a3b8000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 000000d5651feff8 CR3: 000000684e038002 CR4: 0000000000773ee0
PKRU: 55555554
Call Trace:
<IRQ>
apic_timer_fn+0x31/0x50 [kvm]
__hrtimer_run_queues+0x100/0x280
hrtimer_interrupt+0x100/0x210
? ttwu_do_wakeup+0x19/0x160
smp_apic_timer_interrupt+0x6a/0x130
apic_timer_interrupt+0xf/0x20
</IRQ>
Moreover, if the suspend duration of the virtual machine is not long enough
to trigger a hard lockup in this scenario, since commit 98c25ead5eda
("KVM: VMX: Move preemption timer <=> hrtimer dance to common x86"), KVM
will continue using the software timer until the guest reprograms the APIC
timer in some way. Since the periodic timer does not require frequent APIC
timer register programming, the guest may continue to use the software
timer in perpetuity.
Fixes: d8f2f498d9ed ("x86/kvm: fix LAPIC timer drift when guest uses periodic mode")
Cc: stable(a)vger.kernel.org
Signed-off-by: fuqiang wang <fuqiang.wng(a)gmail.com>
[sean: massage comments and changelog]
Link: https://patch.msgid.link/20251113205114.1647493-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc(a)google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/kvm/lapic.c | 28 +++++++++++++++++++++++-----
1 file changed, 23 insertions(+), 5 deletions(-)
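For illustration, a small userspace program (hypothetical values, not KVM code) showing how the negative delta described above wraps into an enormous unsigned deadline once cast to u64:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t now_ns    = 1000000000;   /* "now" on the kernel clock   */
	int64_t target_ns =  999000000;   /* expiration 1 ms in the past */
	int64_t delta     = target_ns - now_ns;       /* -1000000        */

	/* tscdeadline is unsigned, so the negative delta wraps into a
	 * huge value that exceeds the VMX preemption timer's range. */
	uint64_t cycles = (uint64_t)delta;

	printf("delta=%lld -> cycles=%llu\n",
	       (long long)delta, (unsigned long long)cycles);
	return 0;
}

Clamping the target expiration to "now" before computing delta, as the patch does, keeps delta non-negative and the deadline programmable.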
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 262e49301cae..6603b7decf4f 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1600,15 +1600,33 @@ static void advance_periodic_target_expiration(struct kvm_lapic *apic)
ktime_t delta;
/*
- * Synchronize both deadlines to the same time source or
- * differences in the periods (caused by differences in the
- * underlying clocks or numerical approximation errors) will
- * cause the two to drift apart over time as the errors
- * accumulate.
+ * Use kernel time as the time source for both the hrtimer deadline and
+ * TSC-based deadline so that they stay synchronized. Computing each
+ * deadline independently will cause the two deadlines to drift apart
+ * over time as differences in the periods accumulate, e.g. due to
+ * differences in the underlying clocks or numerical approximation errors.
*/
apic->lapic_timer.target_expiration =
ktime_add_ns(apic->lapic_timer.target_expiration,
apic->lapic_timer.period);
+
+ /*
+ * If the new expiration is in the past, e.g. because userspace stopped
+ * running the VM for an extended duration, then force the expiration
+ * to "now" and don't try to play catch-up with the missed events. KVM
+ * will only deliver a single interrupt regardless of how many events
+ * are pending, i.e. restarting the timer with an expiration in the
+ * past will do nothing more than waste host cycles, and can even lead
+ * to a hard lockup in extreme cases.
+ */
+ if (ktime_before(apic->lapic_timer.target_expiration, now))
+ apic->lapic_timer.target_expiration = now;
+
+ /*
+ * Note, ensuring the expiration isn't in the past also prevents delta
+ * from going negative, which could cause the TSC deadline to become
+ * excessively large due to it being an unsigned value.
+ */
delta = ktime_sub(apic->lapic_timer.target_expiration, now);
apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
nsec_to_cycles(apic->vcpu, delta);
--
2.34.1
[PATCH OLK-6.6] KVM: x86: Fix VM hard lockup after prolonged inactivity with periodic HV timer
by Zhang Kunbo 16 Jan '26
From: fuqiang wang <fuqiang.wng(a)gmail.com>
stable inclusion
from stable-v6.6.120
commit 7b54ccef865e0aa62e4871d4ada2ba4b9dcb8bed
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13419
CVE: CVE-2025-71104
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 18ab3fc8e880791aa9f7c000261320fc812b5465 upstream.
When advancing the target expiration for the guest's APIC timer in periodic
mode, set the expiration to "now" if the target expiration is in the past
(similar to what is done in update_target_expiration()). Blindly adding
the period to the previous target expiration can result in KVM generating
a practically unbounded number of hrtimer IRQs due to programming an
expired timer over and over. In extreme scenarios, e.g. if userspace
pauses/suspends a VM for an extended duration, this can even cause hard
lockups in the host.
Currently, the bug only affects Intel CPUs when using the hypervisor timer
(HV timer), a.k.a. the VMX preemption timer. Unlike the software timer,
a.k.a. hrtimer, which KVM keeps running even on exits to userspace, the
HV timer only runs while the guest is active. As a result, if the vCPU
does not run for an extended duration, there will be a huge gap between
the target expiration and the current time the vCPU resumes running.
Because the target expiration is incremented by only one period on each
timer expiration, this leads to a series of timer expirations occurring
rapidly after the vCPU/VM resumes.
More critically, when the vCPU first triggers a periodic HV timer
expiration after resuming, advancing the expiration by only one period
will result in a target expiration in the past. As a result, the delta
may be calculated as a negative value. When the delta is converted into
an absolute value (tscdeadline is an unsigned u64), the resulting value
can overflow what the HV timer is capable of programming. I.e. the large
value will exceed the VMX Preemption Timer's maximum bit width of
cpu_preemption_timer_multi + 32, and thus cause KVM to switch from the
HV timer to the software timer (hrtimers).
After switching to the software timer, periodic timer expiration callbacks
may be executed consecutively within a single clock interrupt handler,
because hrtimers honors KVM's request for an expiration in the past and
immediately re-invokes KVM's callback after reprogramming. And because
the interrupt handler runs with IRQs disabled, restarting KVM's hrtimer
over and over until the target expiration is advanced to "now" can result
in a hard lockup.
E.g. the following hard lockup was triggered in the host when running a
Windows VM (only relevant because it used the APIC timer in periodic mode)
after resuming the VM from a long suspend (in the host).
NMI watchdog: Watchdog detected hard LOCKUP on cpu 45
...
RIP: 0010:advance_periodic_target_expiration+0x4d/0x80 [kvm]
...
RSP: 0018:ff4f88f5d98d8ef0 EFLAGS: 00000046
RAX: fff0103f91be678e RBX: fff0103f91be678e RCX: 00843a7d9e127bcc
RDX: 0000000000000002 RSI: 0052ca4003697505 RDI: ff440d5bfbdbd500
RBP: ff440d5956f99200 R08: ff2ff2a42deb6a84 R09: 000000000002a6c0
R10: 0122d794016332b3 R11: 0000000000000000 R12: ff440db1af39cfc0
R13: ff440db1af39cfc0 R14: ffffffffc0d4a560 R15: ff440db1af39d0f8
FS: 00007f04a6ffd700(0000) GS:ff440db1af380000(0000) knlGS:000000e38a3b8000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 000000d5651feff8 CR3: 000000684e038002 CR4: 0000000000773ee0
PKRU: 55555554
Call Trace:
<IRQ>
apic_timer_fn+0x31/0x50 [kvm]
__hrtimer_run_queues+0x100/0x280
hrtimer_interrupt+0x100/0x210
? ttwu_do_wakeup+0x19/0x160
smp_apic_timer_interrupt+0x6a/0x130
apic_timer_interrupt+0xf/0x20
</IRQ>
Moreover, if the suspend duration of the virtual machine is not long enough
to trigger a hard lockup in this scenario, since commit 98c25ead5eda
("KVM: VMX: Move preemption timer <=> hrtimer dance to common x86"), KVM
will continue using the software timer until the guest reprograms the APIC
timer in some way. Since the periodic timer does not require frequent APIC
timer register programming, the guest may continue to use the software
timer in perpetuity.
Fixes: d8f2f498d9ed ("x86/kvm: fix LAPIC timer drift when guest uses periodic mode")
Cc: stable(a)vger.kernel.org
Signed-off-by: fuqiang wang <fuqiang.wng(a)gmail.com>
[sean: massage comments and changelog]
Link: https://patch.msgid.link/20251113205114.1647493-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc(a)google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Zhang Kunbo <zhangkunbo(a)huawei.com>
---
arch/x86/kvm/lapic.c | 28 +++++++++++++++++++++++-----
1 file changed, 23 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index c67610ae5e0b..9f611dc081b5 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2047,15 +2047,33 @@ static void advance_periodic_target_expiration(struct kvm_lapic *apic)
ktime_t delta;
/*
- * Synchronize both deadlines to the same time source or
- * differences in the periods (caused by differences in the
- * underlying clocks or numerical approximation errors) will
- * cause the two to drift apart over time as the errors
- * accumulate.
+ * Use kernel time as the time source for both the hrtimer deadline and
+ * TSC-based deadline so that they stay synchronized. Computing each
+ * deadline independently will cause the two deadlines to drift apart
+ * over time as differences in the periods accumulate, e.g. due to
+ * differences in the underlying clocks or numerical approximation errors.
*/
apic->lapic_timer.target_expiration =
ktime_add_ns(apic->lapic_timer.target_expiration,
apic->lapic_timer.period);
+
+ /*
+ * If the new expiration is in the past, e.g. because userspace stopped
+ * running the VM for an extended duration, then force the expiration
+ * to "now" and don't try to play catch-up with the missed events. KVM
+ * will only deliver a single interrupt regardless of how many events
+ * are pending, i.e. restarting the timer with an expiration in the
+ * past will do nothing more than waste host cycles, and can even lead
+ * to a hard lockup in extreme cases.
+ */
+ if (ktime_before(apic->lapic_timer.target_expiration, now))
+ apic->lapic_timer.target_expiration = now;
+
+ /*
+ * Note, ensuring the expiration isn't in the past also prevents delta
+ * from going negative, which could cause the TSC deadline to become
+ * excessively large due to it being an unsigned value.
+ */
delta = ktime_sub(apic->lapic_timer.target_expiration, now);
apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
nsec_to_cycles(apic->vcpu, delta);
--
2.34.1
[PATCH OLK-5.10] KVM: x86: Fix VM hard lockup after prolonged inactivity with periodic HV timer
by Zhang Kunbo 16 Jan '26
From: fuqiang wang <fuqiang.wng(a)gmail.com>
stable inclusion
from stable-v6.6.120
commit 7b54ccef865e0aa62e4871d4ada2ba4b9dcb8bed
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13419
CVE: CVE-2025-71104
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 18ab3fc8e880791aa9f7c000261320fc812b5465 upstream.
When advancing the target expiration for the guest's APIC timer in periodic
mode, set the expiration to "now" if the target expiration is in the past
(similar to what is done in update_target_expiration()). Blindly adding
the period to the previous target expiration can result in KVM generating
a practically unbounded number of hrtimer IRQs due to programming an
expired timer over and over. In extreme scenarios, e.g. if userspace
pauses/suspends a VM for an extended duration, this can even cause hard
lockups in the host.
Currently, the bug only affects Intel CPUs when using the hypervisor timer
(HV timer), a.k.a. the VMX preemption timer. Unlike the software timer,
a.k.a. hrtimer, which KVM keeps running even on exits to userspace, the
HV timer only runs while the guest is active. As a result, if the vCPU
does not run for an extended duration, there will be a huge gap between
the target expiration and the current time the vCPU resumes running.
Because the target expiration is incremented by only one period on each
timer expiration, this leads to a series of timer expirations occurring
rapidly after the vCPU/VM resumes.
More critically, when the vCPU first triggers a periodic HV timer
expiration after resuming, advancing the expiration by only one period
will result in a target expiration in the past. As a result, the delta
may be calculated as a negative value. When the delta is converted into
an absolute value (tscdeadline is an unsigned u64), the resulting value
can overflow what the HV timer is capable of programming. I.e. the large
value will exceed the VMX Preemption Timer's maximum bit width of
cpu_preemption_timer_multi + 32, and thus cause KVM to switch from the
HV timer to the software timer (hrtimers).
After switching to the software timer, periodic timer expiration callbacks
may be executed consecutively within a single clock interrupt handler,
because hrtimers honors KVM's request for an expiration in the past and
immediately re-invokes KVM's callback after reprogramming. And because
the interrupt handler runs with IRQs disabled, restarting KVM's hrtimer
over and over until the target expiration is advanced to "now" can result
in a hard lockup.
E.g. the following hard lockup was triggered in the host when running a
Windows VM (only relevant because it used the APIC timer in periodic mode)
after resuming the VM from a long suspend (in the host).
NMI watchdog: Watchdog detected hard LOCKUP on cpu 45
...
RIP: 0010:advance_periodic_target_expiration+0x4d/0x80 [kvm]
...
RSP: 0018:ff4f88f5d98d8ef0 EFLAGS: 00000046
RAX: fff0103f91be678e RBX: fff0103f91be678e RCX: 00843a7d9e127bcc
RDX: 0000000000000002 RSI: 0052ca4003697505 RDI: ff440d5bfbdbd500
RBP: ff440d5956f99200 R08: ff2ff2a42deb6a84 R09: 000000000002a6c0
R10: 0122d794016332b3 R11: 0000000000000000 R12: ff440db1af39cfc0
R13: ff440db1af39cfc0 R14: ffffffffc0d4a560 R15: ff440db1af39d0f8
FS: 00007f04a6ffd700(0000) GS:ff440db1af380000(0000) knlGS:000000e38a3b8000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 000000d5651feff8 CR3: 000000684e038002 CR4: 0000000000773ee0
PKRU: 55555554
Call Trace:
<IRQ>
apic_timer_fn+0x31/0x50 [kvm]
__hrtimer_run_queues+0x100/0x280
hrtimer_interrupt+0x100/0x210
? ttwu_do_wakeup+0x19/0x160
smp_apic_timer_interrupt+0x6a/0x130
apic_timer_interrupt+0xf/0x20
</IRQ>
Moreover, if the suspend duration of the virtual machine is not long enough
to trigger a hard lockup in this scenario, since commit 98c25ead5eda
("KVM: VMX: Move preemption timer <=> hrtimer dance to common x86"), KVM
will continue using the software timer until the guest reprograms the APIC
timer in some way. Since the periodic timer does not require frequent APIC
timer register programming, the guest may continue to use the software
timer in perpetuity.
Fixes: d8f2f498d9ed ("x86/kvm: fix LAPIC timer drift when guest uses periodic mode")
Cc: stable(a)vger.kernel.org
Signed-off-by: fuqiang wang <fuqiang.wng(a)gmail.com>
[sean: massage comments and changelog]
Link: https://patch.msgid.link/20251113205114.1647493-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc(a)google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Zhang Kunbo <zhangkunbo(a)huawei.com>
---
arch/x86/kvm/lapic.c | 28 +++++++++++++++++++++++-----
1 file changed, 23 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index d6272f97368f..4386fd13c12b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1793,15 +1793,33 @@ static void advance_periodic_target_expiration(struct kvm_lapic *apic)
ktime_t delta;
/*
- * Synchronize both deadlines to the same time source or
- * differences in the periods (caused by differences in the
- * underlying clocks or numerical approximation errors) will
- * cause the two to drift apart over time as the errors
- * accumulate.
+ * Use kernel time as the time source for both the hrtimer deadline and
+ * TSC-based deadline so that they stay synchronized. Computing each
+ * deadline independently will cause the two deadlines to drift apart
+ * over time as differences in the periods accumulate, e.g. due to
+ * differences in the underlying clocks or numerical approximation errors.
*/
apic->lapic_timer.target_expiration =
ktime_add_ns(apic->lapic_timer.target_expiration,
apic->lapic_timer.period);
+
+ /*
+ * If the new expiration is in the past, e.g. because userspace stopped
+ * running the VM for an extended duration, then force the expiration
+ * to "now" and don't try to play catch-up with the missed events. KVM
+ * will only deliver a single interrupt regardless of how many events
+ * are pending, i.e. restarting the timer with an expiration in the
+ * past will do nothing more than waste host cycles, and can even lead
+ * to a hard lockup in extreme cases.
+ */
+ if (ktime_before(apic->lapic_timer.target_expiration, now))
+ apic->lapic_timer.target_expiration = now;
+
+ /*
+ * Note, ensuring the expiration isn't in the past also prevents delta
+ * from going negative, which could cause the TSC deadline to become
+ * excessively large due to it being an unsigned value.
+ */
delta = ktime_sub(apic->lapic_timer.target_expiration, now);
apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
nsec_to_cycles(apic->vcpu, delta);
--
2.34.1
[PATCH openEuler-1.0-LTS] scsi: libsas: Fix use-after-free bug in smp_execute_task_sg()
by Wupeng Ma 16 Jan '26
From: Duoming Zhou <duoming(a)zju.edu.cn>
stable inclusion
from stable-v5.10.150
commit a9e5176ead6de64f572ad5c87a72825d9d3c82ae
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/8336
CVE: CVE-2022-50422
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 46ba53c30666717cb06c2b3c5d896301cd00d0c0 ]
When executing an SMP task fails, smp_execute_task_sg() calls del_timer()
to delete "slow_task->timer". However, if the timer handler
sas_task_internal_timedout() is already running, the del_timer() in
smp_execute_task_sg() will not stop it and a UAF will happen. The process
is shown below:
(thread 1) | (thread 2)
smp_execute_task_sg() | sas_task_internal_timedout()
... |
del_timer() |
... | ...
sas_free_task(task) |
kfree(task->slow_task) //FREE|
| task->slow_task->... //USE
Fix by calling del_timer_sync() in smp_execute_task_sg(), which makes sure
the timer handler has finished before "task->slow_task" is
deallocated.
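A minimal sketch of the teardown rule the fix applies, with illustrative names rather than the libsas ones: always cancel synchronously before freeing anything the timer callback dereferences.

#include <linux/timer.h>
#include <linux/slab.h>

struct my_task {
	struct timer_list timer;
	/* ... state the timer callback dereferences ... */
};

static void my_task_free(struct my_task *t)
{
	/* del_timer() returns immediately even if the callback is
	 * currently running on another CPU; del_timer_sync() waits
	 * for it, so the kfree() below cannot race with the handler. */
	del_timer_sync(&t->timer);
	kfree(t);
}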
Link: https://lore.kernel.org/r/20220920144213.10536-1-duoming@zju.edu.cn
Fixes: 2908d778ab3e ("[SCSI] aic94xx: new driver")
Reviewed-by: Jason Yan <yanaijie(a)huawei.com>
Signed-off-by: Duoming Zhou <duoming(a)zju.edu.cn>
Signed-off-by: Martin K. Petersen <martin.petersen(a)oracle.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Conflicts:
drivers/scsi/libsas/sas_expander.c
[Wupeng Ma: context conflicts]
Signed-off-by: Wupeng Ma <mawupeng1(a)huawei.com>
---
drivers/scsi/libsas/sas_expander.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 911d0e1e8e3fc..7b46f3b2b7710 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -98,7 +98,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
res = i->dft->lldd_execute_task(task, GFP_KERNEL);
if (res) {
- del_timer(&task->slow_task->timer);
+ del_timer_sync(&task->slow_task->timer);
SAS_DPRINTK("executing SMP task failed:%d\n", res);
break;
}
--
2.43.0
scsi: qla2xxx: Array index may go out of bound
Stable-dep-of: d721b591b95c ("scsi: qla2xxx: Array index may go out of bound")
Nilesh Javali (1):
scsi: qla2xxx: Array index may go out of bound
Ye Bin (1):
scsi: qla2xxx: Fix inconsistent format argument type in qla_os.c
drivers/scsi/qla2xxx/qla_os.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
--
2.25.1
[PATCH OLK-6.6] macintosh/mac_hid: fix race condition in mac_hid_toggle_emumouse
by Yi Yang 16 Jan '26
From: Long Li <leo.lilong(a)huawei.com>
stable inclusion
from stable-v6.6.120
commit 61abf8c3162d155b4fd0fb251f08557093363a0a
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/12749
CVE: CVE-2025-68367
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 1e4b207ffe54cf33a4b7a2912c4110f89c73bf3f ]
The following warning appears when running syzkaller, and this issue also
exists in the mainline code.
------------[ cut here ]------------
list_add double add: new=ffffffffa57eee28, prev=ffffffffa57eee28, next=ffffffffa5e63100.
WARNING: CPU: 0 PID: 1491 at lib/list_debug.c:35 __list_add_valid_or_report+0xf7/0x130
Modules linked in:
CPU: 0 PID: 1491 Comm: syz.1.28 Not tainted 6.6.0+ #3
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014
RIP: 0010:__list_add_valid_or_report+0xf7/0x130
RSP: 0018:ff1100010dfb7b78 EFLAGS: 00010282
RAX: 0000000000000000 RBX: ffffffffa57eee18 RCX: ffffffff97fc9817
RDX: 0000000000040000 RSI: ffa0000002383000 RDI: 0000000000000001
RBP: ffffffffa57eee28 R08: 0000000000000001 R09: ffe21c0021bf6f2c
R10: 0000000000000001 R11: 6464615f7473696c R12: ffffffffa5e63100
R13: ffffffffa57eee28 R14: ffffffffa57eee28 R15: ff1100010dfb7d48
FS: 00007fb14398b640(0000) GS:ff11000119600000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000000000000 CR3: 000000010d096005 CR4: 0000000000773ef0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
PKRU: 80000000
Call Trace:
<TASK>
input_register_handler+0xb3/0x210
mac_hid_start_emulation+0x1c5/0x290
mac_hid_toggle_emumouse+0x20a/0x240
proc_sys_call_handler+0x4c2/0x6e0
new_sync_write+0x1b1/0x2d0
vfs_write+0x709/0x950
ksys_write+0x12a/0x250
do_syscall_64+0x5a/0x110
entry_SYSCALL_64_after_hwframe+0x78/0xe2
The WARNING occurs when two processes concurrently write to the mac-hid
emulation sysctl, causing a race condition in mac_hid_toggle_emumouse().
Both processes read old_val=0, then both try to register the input handler,
leading to a double list_add of the same handler.
CPU0 CPU1
------------------------- -------------------------
vfs_write() //write 1 vfs_write() //write 1
proc_sys_write() proc_sys_write()
mac_hid_toggle_emumouse() mac_hid_toggle_emumouse()
old_val = *valp // old_val=0
old_val = *valp // old_val=0
mutex_lock_killable()
proc_dointvec() // *valp=1
mac_hid_start_emulation()
input_register_handler()
mutex_unlock()
mutex_lock_killable()
proc_dointvec()
mac_hid_start_emulation()
input_register_handler() //Trigger Warning
mutex_unlock()
Fix this by moving the old_val read inside the mutex lock region.
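A hedged sketch of the pattern the fix restores (names are illustrative, not the driver's): sample the old value only after the mutex is held, so the check-then-act sequence is atomic with respect to racing writers.

#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);
static int my_val;

static int my_toggle(int new_val)
{
	int old_val;
	int rc;

	rc = mutex_lock_killable(&my_lock);
	if (rc)
		return rc;
	old_val = my_val;              /* sampled under the lock */
	my_val = new_val;
	if (my_val != old_val) {
		/* the 0 -> 1 transition now runs exactly once */
	}
	mutex_unlock(&my_lock);
	return 0;
}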
Fixes: 99b089c3c38a ("Input: Mac button emulation - implement as an input filter")
Signed-off-by: Long Li <leo.lilong(a)huawei.com>
Signed-off-by: Madhavan Srinivasan <maddy(a)linux.ibm.com>
Link: https://patch.msgid.link/20250819091035.2263329-1-leo.lilong@huaweicloud.com
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Yi Yang <yiyang13(a)huawei.com>
---
drivers/macintosh/mac_hid.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c
index d8c4d5664145..44e332ee99d3 100644
--- a/drivers/macintosh/mac_hid.c
+++ b/drivers/macintosh/mac_hid.c
@@ -186,13 +186,14 @@ static int mac_hid_toggle_emumouse(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
- int old_val = *valp;
+ int old_val;
int rc;
rc = mutex_lock_killable(&mac_hid_emumouse_mutex);
if (rc)
return rc;
+ old_val = *valp;
rc = proc_dointvec(table, write, buffer, lenp, ppos);
if (rc == 0 && write && *valp != old_val) {
--
2.25.1
[PATCH openEuler-1.0-LTS] mmc: via-sdmmc: fix return value check of mmc_add_host()
by Yi Yang 16 Jan '26
From: Yang Yingliang <yangyingliang(a)huawei.com>
stable inclusion
from stable-v4.19.270
commit 95025a8dd0ec015872f6c16473fe04d6264e68ca
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13164
CVE: CVE-2022-50846
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit e4e46fb61e3bb4628170810d3f2b996b709b90d9 ]
mmc_add_host() may return an error. If we ignore its return value,
it leads to two issues:
1. The memory allocated in mmc_alloc_host() is leaked.
2. In the remove() path, mmc_remove_host() will be called to
delete the device, but since it was never added, this causes a kernel
crash because of a null-ptr-deref in device_del().
Fix this by checking the return value and jumping to the error
path, which calls mmc_free_host().
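The probe-time shape looks roughly like this (a hedged sketch with
illustrative structure; the real driver unwinds more resources):

    /* Hedged sketch of the probe error-handling pattern. */
    static int sketch_probe(struct pci_dev *pcidev,
                            const struct pci_device_id *id)
    {
            struct mmc_host *mmc;
            int ret;

            mmc = mmc_alloc_host(sizeof(struct via_crdr_mmc_host),
                                 &pcidev->dev);
            if (!mmc)
                    return -ENOMEM;

            /* ... ioremap, request_irq, controller setup ... */

            ret = mmc_add_host(mmc);
            if (ret)
                    goto unmap;     /* unwind instead of ignoring ret */

            return 0;

    unmap:
            /* ... iounmap, free_irq, ... */
            mmc_free_host(mmc);     /* frees the mmc_alloc_host() memory */
            return ret;
    }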
Fixes: f0bf7f61b840 ("mmc: Add new via-sdmmc host controller driver")
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Link: https://lore.kernel.org/r/20221108130949.1067699-1-yangyingliang@huawei.com
Signed-off-by: Ulf Hansson <ulf.hansson(a)linaro.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Yi Yang <yiyang13(a)huawei.com>
---
drivers/mmc/host/via-sdmmc.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 32c4211506fc..a86e4b3b4060 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1158,7 +1158,9 @@ static int via_sd_probe(struct pci_dev *pcidev,
pcidev->subsystem_device == 0x3891)
sdhost->quirks = VIA_CRDR_QUIRK_300MS_PWRDELAY;
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto unmap;
return 0;
--
2.25.1
[PATCH openEuler-1.0-LTS] fbcon: Fix the issue of uninitialized charcount in the remaining consoles
by Luo Gengkun 15 Jan '26
HULK inclusion
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/12587
CVE: NA
----------------------------------------------------------------------
After commit 054a54161b88 ("fbdev: bitblit: bound-check glyph index in
bit_putcs*") was merged, using alt+ctrl+f1 to switch the tty from tty0 to
tty1 results in a garbled display.
The reason is that vc->vc_font.charcount is 0, clearly an
uninitialized value. The mainline is fine because commit a1ac250a82a5
("fbcon: Avoid using FNTCHARCNT() and hard-coded built-in font charcount")
assigns fvc->vc_font.charcount to vc->vc_font.charcount.
Fixes: f9a6134c4b54 ("fbdev: bitblit: bound-check glyph index in bit_putcs*")
Signed-off-by: Luo Gengkun <luogengkun2(a)huawei.com>
---
drivers/video/fbdev/core/fbcon.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 83a4949e2497..4043b0b19140 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -1032,6 +1032,7 @@ static void fbcon_init(struct vc_data *vc, int init)
fvc->vc_font.data);
vc->vc_font.width = fvc->vc_font.width;
vc->vc_font.height = fvc->vc_font.height;
+ vc->vc_font.charcount = fvc->vc_font.charcount;
p->userfont = t->userfont;
if (p->userfont)
--
2.34.1
[PATCH openEuler-1.0-LTS] fbcon: Fix the issue of uninitialized charcount in the remaining consoles
by Luo Gengkun 15 Jan '26
Offering: HULK
hulk inclusion
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/12587
----------------------------------------------------------------------
After commit 054a54161b88 ("fbdev: bitblit: bound-check glyph index in
bit_putcs*") was merged, using alt+ctrl+f1 to switch the tty from tty0 to
tty1 results in a garbled display.
The reason is that vc->vc_font.charcount is 0, clearly an
uninitialized value. The mainline is fine because commit a1ac250a82a5
("fbcon: Avoid using FNTCHARCNT() and hard-coded built-in font charcount")
assigns fvc->vc_font.charcount to vc->vc_font.charcount.
Fixes: f9a6134c4b54 ("fbdev: bitblit: bound-check glyph index in bit_putcs*")
Signed-off-by: Luo Gengkun <luogengkun2(a)huawei.com>
---
drivers/video/fbdev/core/fbcon.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 83a4949e2497..4043b0b19140 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -1032,6 +1032,7 @@ static void fbcon_init(struct vc_data *vc, int init)
fvc->vc_font.data);
vc->vc_font.width = fvc->vc_font.width;
vc->vc_font.height = fvc->vc_font.height;
+ vc->vc_font.charcount = fvc->vc_font.charcount;
p->userfont = t->userfont;
if (p->userfont)
--
2.34.1
15 Jan '26
From: kuangkai <kuangkai(a)kylinos.cn>
kylin inclusion
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/8354
CVE: NA
--------------------------
Commit daa2c7d2bbd0 ("mfs: Add prefetch demo") uses pthread_create() without declaring it or linking the pthread library,
which makes building tools/mfs with make fail:
# cd tools/mfs && make
mfsd_prefetch.cpp:118:23: error: 'pthread_create' was not declared in this scope
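For reference, the equivalent manual build line after the fix would be
something like `g++ -Wall -Wextra mfsd_prefetch.cpp -o mfsd_prefetch -lpthread`:
`<pthread.h>` provides the declaration and `-lpthread` resolves the symbol
at link time.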
Signed-off-by: kuangkai <kuangkai(a)kylinos.cn>
---
tools/mfs/Makefile | 2 +-
tools/mfs/mfsd_prefetch.cpp | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/tools/mfs/Makefile b/tools/mfs/Makefile
index f99c9cde828f..3fc331c7ebde 100644
--- a/tools/mfs/Makefile
+++ b/tools/mfs/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for mfs demo
-CFLAGS = -Wall -Wextra
+CFLAGS = -Wall -Wextra -lpthread
PROGS := mfsd mfsd_prefetch
diff --git a/tools/mfs/mfsd_prefetch.cpp b/tools/mfs/mfsd_prefetch.cpp
index d35aeb31941a..a26b271b3a00 100644
--- a/tools/mfs/mfsd_prefetch.cpp
+++ b/tools/mfs/mfsd_prefetch.cpp
@@ -23,6 +23,7 @@
#include <sys/statfs.h>
#include <sys/stat.h>
#include <sys/mman.h>
+#include <pthread.h>
#include <map>
#include <string>
--
2.39.2 (Apple Git-143)
[PATCH OLK-6.6 0/1] cpufreq/sched: Explicitly synchronize limits_changed flag handling
by Lifeng Zheng 15 Jan '26
From: Hongye Lin <linhongye(a)h-partners.com>
driver inclusion
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/8343
----------------------------------------------------------------------
Rafael J. Wysocki (1):
cpufreq/sched: Explicitly synchronize limits_changed flag handling
kernel/sched/cpufreq_schedutil.c | 28 ++++++++++++++++++++++++----
1 file changed, 24 insertions(+), 4 deletions(-)
--
2.33.0
This series fixes kmemleak false-positive warnings when reserving
crash kernel memory.
Patch 1 moves kmemleak_ignore_phys() to reserve_crashkernel_high()
where memory is actually tracked by kmemleak.
Patch 2 ensures kmemleak_ignore_phys() is only called when new memory
is allocated, not when crash_base is pre-set via cmdline.
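A minimal sketch of the intended call pattern (illustrative helper name and
allocation bounds; only the branch that actually allocates informs kmemleak):

    /* Hedged sketch: kmemleak only tracks memory that memblock allocated
     * itself, so kmemleak_ignore_phys() belongs on that branch alone. */
    static void sketch_reserve_crashkernel_high(unsigned long long *crash_base,
                                                unsigned long long crash_size)
    {
            if (!*crash_base) {
                    /* kernel picks the range: this allocation is tracked */
                    *crash_base = memblock_phys_alloc_range(crash_size, SZ_2M,
                                                            0, CRASH_ADDR_HIGH_MAX);
                    if (*crash_base)
                            /* mute the false "unreferenced object" report */
                            kmemleak_ignore_phys(*crash_base);
            } else {
                    /* crashkernel=X@Y from the cmdline: reserve only;
                     * kmemleak never tracked it, nothing to ignore */
                    memblock_reserve(*crash_base, crash_size);
            }
    }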
ZhangPeng (2):
arm64: kdump: fix kmemleak unknown object warning
arm64: kdump: fix kmemleak unknown object warning when crash base is
set
kernel/crash_core.c | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
--
2.33.0
15 Jan '26
driver inclusion
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/8310
CVE: NA
-----------------------------------------------
Events in a hw_metric group should share the same PMU.
Check both the event types and the event PMUs for group consistency.
Signed-off-by: Yushan Wang <wangyushan12(a)huawei.com>
Signed-off-by: Ying Jiang <jiangying44(a)h-partners.com>
Signed-off-by: jiangying <jiangying44(a)h-partners.com>
---
drivers/perf/arm_pmuv3.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index 5d750678a70e..4f27c75cfdfa 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -1159,7 +1159,8 @@ static int armv8pmu_check_hw_metric_event(struct pmu_hw_events *cpuc,
if (event == leader)
return 0;
- if (!armv8pmu_event_is_hw_metric(leader))
+ if (leader->pmu != event->pmu ||
+ !armv8pmu_event_is_hw_metric(leader))
return -EINVAL;
for_each_sibling_event(sibling, leader) {
--
2.33.0
Patch #1 introduces the OBJECT_PHYS flag and create_object_phys()
for physical address-allocated kmemleak objects.
Patch #2 adds object_phys_tree_root to store physical address objects
and uses raw physical addresses instead of virtual addresses.
Patch #3 fixes false positives in KFENCE by using kmemleak_ignore_phys()
for early memblock-allocated pools.
Patch #4 fixes similar false positives in percpu by replacing
kmemleak_free() with kmemleak_ignore_phys().
Patrick Wang (3):
mm: kmemleak: add OBJECT_PHYS flag for objects allocated with physical
address
mm: kmemleak: add rbtree and store physical address for objects
allocated with PA
mm: percpu: use kmemleak_ignore_phys() instead of kmemleak_free()
Yee Lee (1):
mm: kfence: apply kmemleak_ignore_phys on early allocated pool
mm/kfence/core.c | 18 ++---
mm/kmemleak.c | 166 +++++++++++++++++++++++++++++++++--------------
mm/percpu.c | 6 +-
3 files changed, 131 insertions(+), 59 deletions(-)
--
2.33.0
hulk inclusion
category: feature
bugzilla: https://atomgit.com/openeuler/kernel/issues/8232
--------------------------------
Add user manual for XSched.
Signed-off-by: Liu Kai <liukai284(a)huawei.com>
---
Documentation/scheduler/xsched.md | 355 ++++++++++++++++++++++++++++++
1 file changed, 355 insertions(+)
create mode 100644 Documentation/scheduler/xsched.md
diff --git a/Documentation/scheduler/xsched.md b/Documentation/scheduler/xsched.md
new file mode 100644
index 000000000000..89ef01d42fc0
--- /dev/null
+++ b/Documentation/scheduler/xsched.md
@@ -0,0 +1,355 @@
+# XSched User Guide
+
+## 1 Environment Setup
+
+### 1.1 Install the native NPU software stack
+
+This feature depends on the native NPU software stack: the NPU driver, firmware, and CANN must be installed.
+
+### 1.2 Install the XSched kernel
+
+#### 1.2.1 Get the kernel source
+
+```bash
+# Download the source
+git clone https://atomgit.com/openeuler/kernel.git -b OLK-6.6 OLK-6.6
+
+# Enter the kernel directory
+cd OLK-6.6
+```
+
+#### 1.2.2 Install the kernel
+
+##### 1.2.2.1 Adjust the kernel configuration
+
+```shell
+# Generate the default configuration
+make openeuler_defconfig
+
+# Edit and save the configuration
+vim .config
+
+CONFIG_XCU_SCHEDULER=y
+CONFIG_XCU_VSTREAM=y
+CONFIG_XSCHED_NR_CUS=8 # set according to the number of NPU cards
+CONFIG_XCU_SCHED_RT=y
+CONFIG_XCU_SCHED_CFS=y
+CONFIG_CGROUP_XCU=y
+```
+
+##### 1.2.2.2 Build and install
+
+Choose **one** of the following methods:
+
+* Build and install from source
+
+ ```bash
+ make clean && make -j$(nproc)
+ make modules_install -j$(nproc) INSTALL_MOD_STRIP=1 && INSTALL_MOD_STRIP=1 make install
+ grub2-set-default 0
+ ```
+
+* Build and install via RPM packages
+
+ ```bash
+ INSTALL_MOD_STRIP=1 make rpm-pkg -j `nproc`
+ # Install both the kernel and devel packages; otherwise the driver cannot be rebuilt
+ rpm -ivh <path_to_rpm> --force
+ grub2-set-default 0
+ ```
+
+##### 1.2.2.3 Update the kernel cmdline
+
+```shell
+# Edit the kernel boot file; adjust for your system
+vim /etc/grub2-efi.cfg
+
+# Add the following cmdline options to the XSched kernel entry to disable driver signature enforcement and enable cgroup-v2
+module.sig_enforce=0 systemd.unified_cgroup_hierarchy=1 cgroup_no_v1=all
+```
+
+After saving the boot file, reboot to switch to the new kernel.
+
+### 1.3 Rebuild the driver
+
+**The native driver must be installed first!** Run `npu-smi info` to verify that it is installed correctly.
+
+#### 1.3.1 Patch the driver
+
+Get the driver source
+
+```shell
+mkdir <new_driver_dir>
+
+./Ascend-hdk-910b-npu-driver_xx.x.x_linux-aarch64.run --tar -xvf -C <new_driver_dir>
+```
+
+Download the driver patch [XSched driver patch](https://gitee.com/openeuler/kernel/commit/8d50448f11b697a177b63d3ccb… to adapt the NPU driver
+
+```shell
+cp 0001-Adapt-910b-npu-driver-for-xsched.patch <new_driver_dir>/driver/kernel/
+cd <new_driver_dir>/driver/kernel/
+
+# Initialize a git repository and apply the patch
+git init
+git add .
+git commit -m "npu init"
+# If there are conflicts, adapt the conflicting code based on the .rej files
+git am 0001-Adapt-910b-npu-driver-for-xsched.patch --reject
+```
+
+#### 1.3.2 Replace the driver
+
+```shell
+# Return to the driver root directory
+cd ../../
+
+# Back up the native driver .ko files
+cp -r <new_driver_dir>/driver/host <new_driver_dir>/driver/host-bak
+rm -f <new_driver_dir>/driver/host/*
+
+cd <new_driver_dir>/driver/script
+# Add --force to force a rebuild; the driver .ko files are generated under <new_driver_dir>/driver/host/
+sh run_driver_ko_rebuild.sh [--force]
+
+# Replace the driver modules
+for line in `ls /lib/modules/$(uname -r)/updates`; do \cp ../host/$line /lib/modules/$(uname -r)/updates; done
+```
+
+#### 1.3.3 Verify the driver
+
+After rebooting, check that the driver was replaced successfully
+
+```shell
+reboot
+npu-smi info
+```
+
+### 1.4 libXSched
+
+libXSched is the XSched user-space interception layer; it intercepts CANN API calls and forwards them to the XSched kernel.
+
+#### 1.4.1 Build
+
+Get the source
+
+```bash
+git clone https://atomgit.com/openeuler/libXSched.git
+```
+
+Build the source
+
+```bash
+# Prepare the header file
+cp OLK-6.6/include/uapi/linux/xsched/xcu_vstream.h /usr/include/linux
+
+# Build; this produces libucc_engine.so
+make clean && make
+```
+
+#### 1.4.2 Usage
+
+Load `libucc_engine.so` via `LD_PRELOAD`
+
+```bash
+LD_PRELOAD=<path_to_libucc> <run_model_script>
+```
+
+### 1.5 Verification
+
+```shell
+# Enable XSched logging
+echo "file kernel/xsched/* +p" > /sys/kernel/debug/dynamic_debug/control
+
+# PyTorch framework
+LD_PRELOAD=<path_to_libucc> python3 -c "import torch;import torch_npu; a = torch.randn(3, 4).npu(); print(a + a);"
+
+# MindSpore framework
+LD_PRELOAD=<path_to_libucc> python3 -c "import mindspore;mindspore.set_device('Ascend');mindspore.run_check()"
+```
+
+Inspect the kernel log with `dmesg` and look for XSCHED messages; if they are present, the XSched environment is deployed successfully. After verification, disable XSched logging with `echo "file kernel/xsched/* -p" > /sys/kernel/debug/dynamic_debug/control`
+
+## 2 Usage Guide
+
+### 2.1 Cgroup interface
+
+#### 2.1.1 Configure XSched policies
+
+Enable the XSched controller
+
+```bash
+echo "+xcu" > /sys/fs/cgroup/cgroup.subtree_control
+```
+
+##### 2.1.1.1 Configurable interfaces
+
+* `xcu.sched_class`: scheduling class, cfs or rt
+
+* `xcu.period_ms`: period of the compute-resource time slice, 100 ms by default (the cfs scheduling class must be set first)
+
+* `xcu.quota_ms`: time slice that may be allocated within each period; for example, with 50 ms configured, 50 ms can be allocated per 100 ms period; -1 leaves compute resources unrestricted (the cfs scheduling class must be set first)
+
+* `xcu.shares`: weight of cfs tasks; a larger weight means higher priority relative to other tasks, 1024 by default (the cfs scheduling class must be set first)
+
+* `xcu.stat`: show statistics; supported only for the cfs scheduling class
+
+##### 2.1.1.2 Host usage
+
+To manage AI tasks with XSched on the host side, create a cgroup manually
+
+```bash
+mkdir -p /sys/fs/cgroup/xsched_group
+
+# Configure XSched policies as needed, e.g. echo cfs > /sys/fs/cgroup/xsched_group/xcu.sched_class
+echo <config_value> > /sys/fs/cgroup/xsched_group/xcu.<config_item>
+```
+
+Add the AI task to the cgroup so that its compute resources are managed
+
+```bash
+# Run the AI task
+LD_PRELOAD=<path_to_libucc> <run_model_script>
+
+# Add the AI task to the cgroup
+echo <pid> > /sys/fs/cgroup/xsched_group/cgroup.procs
+```
+
+##### 2.1.1.3 Docker usage
+
+When a docker container starts, it is automatically mounted under the cgroup directory (`/sys/fs/cgroup/user.slice`) and the PIDs of tasks inside the container are automatically added to cgroup.procs, so there is no need to create a cgroup or add tasks manually.
+
+1. Modify the docker configuration: change the mounted cgroup directory and switch docker's cgroup version to v2 (**docker must be upgraded to a version that supports cgroup-v2**)
+
+ ```bash
+ vim /etc/docker/daemon.json
+
+ # Add the following settings
+ # The cgroup directory to mount under; any name works, docker.slice is used as an example
+ "cgroup-parent": "docker.slice"
+ # Manage docker with cgroup-v2
+ "exec-opts": ["native.cgroupdriver=systemd"]
+
+ # After saving the changes, restart the docker service
+ systemctl restart docker
+ ```
+
+ Because docker's default mount directory is `/sys/fs/cgroup/user.slice` and other user tasks also run under `user.slice`, the XSched configuration of `user.slice` may not be modifiable. Change docker's mount directory to `/sys/fs/cgroup/docker.slice` to make configuration and management easier.
+
+2. Configure docker's xsched scheduling policy
+
+ ```bash
+ # Set the scheduling class to cfs
+ echo cfs > /sys/fs/cgroup/docker.slice/xcu.sched_class
+
+ # Start the container
+ docker start <container_name>
+
+ # Configure the xsched policy of the corresponding container
+ echo <config_value> > /sys/fs/cgroup/docker.slice/docker-<docker_id>/xcu.<config_item>
+
+ # Enter the container
+ docker exec -it <container_name> bash
+
+ # Run the AI task (libucc_engine.so is also available inside the container); the PID is automatically added to /sys/fs/cgroup/docker.slice/docker-<docker_id>/cgroup.procs
+ LD_PRELOAD=<path_to_libucc> <run_model_script>
+ ```
+
+### 2.2 Syscall interface
+
+Two syscalls (`xsched_setattr`/`xsched_getattr`) are provided to set/get the task priority of the rt scheduling class
+
+* `pid`: PID of the AI task; pid=0 means the current process
+
+* `struct xsched_attr`: configurable XSched attributes
+
+ ```c
+ struct xsched_attr {
+ __u32 xsched_class; // scheduling class; currently only RT (xsched_class=0) is supported
+ __u32 xsched_priority; // rt priority, range [1,5]; the larger the number, the higher the priority
+ };
+ ```
+
+Example program invoking the syscall interface:
+
+```c
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/syscall.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+// Define xsched_attr structure
+struct xsched_attr {
+ uint32_t xsched_class; // Scheduling class: XSCHED_TYPE_RT, XSCHED_TYPE_CFS
+ uint32_t xsched_priority; // Priority level: 1~5
+};
+
+// Scheduling class constants (should match kernel header definitions)
+#define XSCHED_TYPE_RT 0 // Real-time scheduling class
+#define XSCHED_TYPE_CFS 1 // Completely Fair Scheduler class
+
+// System call numbers (update these with actual numbers)
+#define __NR_xsched_setattr 467
+#define __NR_xsched_getattr 468
+
+// Wrapper functions for system calls
+static inline long xsched_setattr(pid_t pid, const struct xsched_attr *attr)
+{
+ return syscall(__NR_xsched_setattr, pid, attr);
+}
+
+static inline long xsched_getattr(pid_t pid, struct xsched_attr *attr)
+{
+ return syscall(__NR_xsched_getattr, pid, attr);
+}
+
+int main(void)
+{
+ pid_t pid = getpid();
+ struct xsched_attr attr;
+ long ret;
+
+ // Get current configuration
+ ret = xsched_getattr(pid, &attr);
+ if (ret < 0) {
+ printf("Failed to get configuration!\n");
+ }
+
+ attr.xsched_class = XSCHED_TYPE_RT;
+ attr.xsched_priority = 3;
+
+ ret = xsched_setattr(pid, &attr);
+ if (ret < 0) {
+ printf("Failed to set configuration\n");
+ }
+
+ // Verify the configuration
+ memset(&attr, 0, sizeof(attr));
+ ret = xsched_getattr(pid, &attr);
+ if (ret < 0) {
+ printf("Failed to verify configuration!\n");
+ }
+
+ return 0;
+}
+```
+
+### 2.3 Debug logging
+
+Enable/disable XSched debug logging
+
+```shell
+echo "file kernel/xsched/* +p" > /sys/kernel/debug/dynamic_debug/control
+echo "file kernel/xsched/* -p" > /sys/kernel/debug/dynamic_debug/control
+```
+
+Enable/disable NPU driver debug logging
+
+```shell
+echo "module ascend_trs_core +p" > /sys/kernel/debug/dynamic_debug/control
+echo "module ascend_trs_pm_adapt +p" > /sys/kernel/debug/dynamic_debug/control
+echo "module ascend_trs_core -p" > /sys/kernel/debug/dynamic_debug/control
+echo "module ascend_trs_pm_adapt -p" > /sys/kernel/debug/dynamic_debug/control
+```
--
2.34.1
14 Jan '26
From: David Vernet <void(a)manifault.com>
mainline inclusion
from mainline-v6.7-rc1
commit d6247ecb6c1e17d7a33317090627f5bfe563cbb2
category: feature
bugzilla: https://atomgit.com/openeuler/kernel/issues/8335
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
----------------------------------------------------------------------
BPF supports creating high resolution timers using bpf_timer_* helper
functions. Currently, only the BPF_F_TIMER_ABS flag is supported, which
specifies that the timeout should be interpreted as absolute time. It
would also be useful to be able to pin that timer to a core. For
example, you might want a subset of cores to run without timer
interrupts, with the timer invoked only on a single designated core.
This patch adds support for this with a new BPF_F_TIMER_CPU_PIN flag.
When specified, the HRTIMER_MODE_PINNED flag is passed to
hrtimer_start(). A subsequent patch will update selftests to validate.
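As a hedged usage sketch (not taken from this patch; the map layout and
attach point are illustrative), a BPF program would arm a CPU-pinned timer
like this:

    // Hedged sketch of using BPF_F_TIMER_CPU_PIN from a BPF program.
    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>

    struct elem {
            struct bpf_timer t;
    };

    struct {
            __uint(type, BPF_MAP_TYPE_ARRAY);
            __uint(max_entries, 1);
            __type(key, int);
            __type(value, struct elem);
    } timer_map SEC(".maps");

    static int timer_cb(void *map, int *key, struct elem *val)
    {
            /* with BPF_F_TIMER_CPU_PIN, this runs on the CPU that armed it */
            return 0;
    }

    SEC("tp/syscalls/sys_enter_nanosleep")
    int arm_pinned_timer(void *ctx)
    {
            int key = 0;
            struct elem *e = bpf_map_lookup_elem(&timer_map, &key);

            if (!e)
                    return 0;
            bpf_timer_init(&e->t, &timer_map, CLOCK_MONOTONIC);
            bpf_timer_set_callback(&e->t, timer_cb);
            /* 1 ms relative timeout, pinned to the calling CPU */
            bpf_timer_start(&e->t, 1000000, BPF_F_TIMER_CPU_PIN);
            return 0;
    }

    char _license[] SEC("license") = "GPL";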
Signed-off-by: David Vernet <void(a)manifault.com>
Signed-off-by: Daniel Borkmann <daniel(a)iogearbox.net>
Acked-by: Song Liu <song(a)kernel.org>
Acked-by: Hou Tao <houtao1(a)huawei.com>
Link: https://lore.kernel.org/bpf/20231004162339.200702-2-void@manifault.com
Signed-off-by: Luo Gengkun <luogengkun2(a)huawei.com>
---
include/uapi/linux/bpf.h | 4 ++++
kernel/bpf/helpers.c | 5 ++++-
tools/include/uapi/linux/bpf.h | 4 ++++
3 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 782d56c4ebca..3fa3f9b1a052 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -5122,6 +5122,8 @@ union bpf_attr {
* **BPF_F_TIMER_ABS**
* Start the timer in absolute expire value instead of the
* default relative one.
+ * **BPF_F_TIMER_CPU_PIN**
+ * Timer will be pinned to the CPU of the caller.
*
* Return
* 0 on success.
@@ -7346,9 +7348,11 @@ struct bpf_core_relo {
* Flags to control bpf_timer_start() behaviour.
* - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is
* relative to current time.
+ * - BPF_F_TIMER_CPU_PIN: Timer will be pinned to the CPU of the caller.
*/
enum {
BPF_F_TIMER_ABS = (1ULL << 0),
+ BPF_F_TIMER_CPU_PIN = (1ULL << 1),
};
/* BPF numbers iterator state */
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index f40085f4de31..c889a9dee6c6 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1363,7 +1363,7 @@ BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, fla
if (in_nmi())
return -EOPNOTSUPP;
- if (flags > BPF_F_TIMER_ABS)
+ if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN))
return -EINVAL;
__bpf_spin_lock_irqsave(&timer->lock);
t = timer->timer;
@@ -1377,6 +1377,9 @@ BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, fla
else
mode = HRTIMER_MODE_REL_SOFT;
+ if (flags & BPF_F_TIMER_CPU_PIN)
+ mode |= HRTIMER_MODE_PINNED;
+
hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
out:
__bpf_spin_unlock_irqrestore(&timer->lock);
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index a457d4f82da2..fc0ed3b6abf1 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -5122,6 +5122,8 @@ union bpf_attr {
* **BPF_F_TIMER_ABS**
* Start the timer in absolute expire value instead of the
* default relative one.
+ * **BPF_F_TIMER_CPU_PIN**
+ * Timer will be pinned to the CPU of the caller.
*
* Return
* 0 on success.
@@ -7349,9 +7351,11 @@ struct bpf_core_relo {
* Flags to control bpf_timer_start() behaviour.
* - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is
* relative to current time.
+ * - BPF_F_TIMER_CPU_PIN: Timer will be pinned to the CPU of the caller.
*/
enum {
BPF_F_TIMER_ABS = (1ULL << 0),
+ BPF_F_TIMER_CPU_PIN = (1ULL << 1),
};
/* BPF numbers iterator state */
--
2.34.1
Eugene Korenevsky (1):
cifs: parse_dfs_referrals: prevent oob on malformed input
Paulo Alcantara (1):
smb: client: fix return value of parse_dfs_referrals()
fs/smb/client/misc.c | 21 +++++++++++++++++++--
fs/smb/client/smb2ops.c | 6 +++---
2 files changed, 22 insertions(+), 5 deletions(-)
--
2.34.3
[PATCH OLK-6.6] cifs: client: fix memory leak in smb3_fs_context_parse_param
by Wang Zhaolong 14 Jan '26
From: Edward Adam Davis <eadavis(a)qq.com>
stable inclusion
from stable-v6.6.117
commit 868fc62811d3fabcf5685e14f36377a855d5412d
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/12582
CVE: CVE-2025-40268
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit e8c73eb7db0a498cd4b22d2819e6ab1a6f506bd6 upstream.
The user calls fsconfig twice, but when the program exits, free() only
frees the ctx->source from the second fsconfig call, not the first.
As for fc->source, there is no code in the fs context layer that
reclaims its memory at all.
To fix this memory leak, release the source memory held by ctx
or fc before each parsing pass.
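A hedged reproducer sketch (the share path is illustrative; requires
fsopen(2)/fsconfig(2) support and the cifs module):

    /* Hedged sketch: setting "source" twice re-derives ctx->source and
     * fc->source; before this fix the first pair of strings leaked. */
    #define _GNU_SOURCE
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/mount.h>

    int main(void)
    {
            int fd = syscall(SYS_fsopen, "cifs", 0);

            if (fd < 0)
                    return 1;
            syscall(SYS_fsconfig, fd, FSCONFIG_SET_STRING, "source",
                    "//192.168.0.1/share", 0);
            syscall(SYS_fsconfig, fd, FSCONFIG_SET_STRING, "source",
                    "//192.168.0.1/share", 0);
            close(fd);
            return 0;
    }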
syzbot reported:
BUG: memory leak
unreferenced object 0xffff888128afa360 (size 96):
backtrace (crc 79c9c7ba):
kstrdup+0x3c/0x80 mm/util.c:84
smb3_fs_context_parse_param+0x229b/0x36c0 fs/smb/client/fs_context.c:1444
BUG: memory leak
unreferenced object 0xffff888112c7d900 (size 96):
backtrace (crc 79c9c7ba):
smb3_fs_context_fullpath+0x70/0x1b0 fs/smb/client/fs_context.c:629
smb3_fs_context_parse_param+0x2266/0x36c0 fs/smb/client/fs_context.c:1438
Reported-by: syzbot+72afd4c236e6bc3f4bac(a)syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=72afd4c236e6bc3f4bac
Cc: stable(a)vger.kernel.org
Reviewed-by: Paulo Alcantara (Red Hat) <pc(a)manguebit.org>
Signed-off-by: Edward Adam Davis <eadavis(a)qq.com>
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Wang Zhaolong <wangzhaolong(a)huaweicloud.com>
---
fs/smb/client/fs_context.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index 137d03781d52..cf233cb9c194 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -1359,16 +1359,18 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
goto cifs_parse_mount_err;
default:
cifs_errorf(fc, "Unknown error parsing devname\n");
goto cifs_parse_mount_err;
}
+ kfree(ctx->source);
ctx->source = smb3_fs_context_fullpath(ctx, '/');
if (IS_ERR(ctx->source)) {
ctx->source = NULL;
cifs_errorf(fc, "OOM when copying UNC string\n");
goto cifs_parse_mount_err;
}
+ kfree(fc->source);
fc->source = kstrdup(ctx->source, GFP_KERNEL);
if (fc->source == NULL) {
cifs_errorf(fc, "OOM when copying UNC string\n");
goto cifs_parse_mount_err;
}
--
2.34.3
[PATCH OLK-6.6] smb: client: fix potential cfid UAF in smb2_query_info_compound
by Wang Zhaolong 14 Jan '26
From: Henrique Carvalho <henrique.carvalho(a)suse.com>
stable inclusion
from stable-v6.6.117
commit 939c4e33005e2a56ea8fcedddf0da92df864bd3b
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/11308
CVE: CVE-2025-40320
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 5c76f9961c170552c1d07c830b5e145475151600 upstream.
When smb2_query_info_compound() retries, a previously allocated cfid may
have been freed in the first attempt.
Because cfid wasn't reset on replay, later cleanup could act on a stale
pointer, leading to a potential use-after-free.
Reinitialize cfid to NULL under the replay label.
Example trace (trimmed):
refcount_t: underflow; use-after-free.
WARNING: CPU: 1 PID: 11224 at ../lib/refcount.c:28 refcount_warn_saturate+0x9c/0x110
[...]
RIP: 0010:refcount_warn_saturate+0x9c/0x110
[...]
Call Trace:
<TASK>
smb2_query_info_compound+0x29c/0x5c0 [cifs f90b72658819bd21c94769b6a652029a07a7172f]
? step_into+0x10d/0x690
? __legitimize_path+0x28/0x60
smb2_queryfs+0x6a/0xf0 [cifs f90b72658819bd21c94769b6a652029a07a7172f]
smb311_queryfs+0x12d/0x140 [cifs f90b72658819bd21c94769b6a652029a07a7172f]
? kmem_cache_alloc+0x18a/0x340
? getname_flags+0x46/0x1e0
cifs_statfs+0x9f/0x2b0 [cifs f90b72658819bd21c94769b6a652029a07a7172f]
statfs_by_dentry+0x67/0x90
vfs_statfs+0x16/0xd0
user_statfs+0x54/0xa0
__do_sys_statfs+0x20/0x50
do_syscall_64+0x58/0x80
Cc: stable(a)kernel.org
Fixes: 4f1fffa237692 ("cifs: commands that are retried should have replay flag set")
Reviewed-by: Paulo Alcantara (Red Hat) <pc(a)manguebit.com>
Acked-by: Shyam Prasad N <sprasad(a)microsoft.com>
Reviewed-by: Enzo Matsumiya <ematsumiya(a)suse.de>
Signed-off-by: Henrique Carvalho <henrique.carvalho(a)suse.com>
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Wang Zhaolong <wangzhaolong(a)huaweicloud.com>
---
fs/smb/client/smb2ops.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index c19643a37fa0..43c6d2f861f4 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -2655,15 +2655,16 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct cifs_open_parms oparms;
struct cifs_fid fid;
int rc;
__le16 *utf16_path;
- struct cached_fid *cfid = NULL;
+ struct cached_fid *cfid;
int retries = 0, cur_sleep = 1;
replay_again:
/* reinitialize for possible replay */
+ cfid = NULL;
flags = CIFS_CP_CREATE_CLOSE_OP;
oplock = SMB2_OPLOCK_LEVEL_NONE;
server = cifs_pick_channel(ses);
if (!path)
--
2.34.3
14 Jan '26
From: Edward Adam Davis <eadavis(a)qq.com>
stable inclusion
from stable-v6.6.117
commit 868fc62811d3fabcf5685e14f36377a855d5412d
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/12582
CVE: CVE-2025-40268
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit e8c73eb7db0a498cd4b22d2819e6ab1a6f506bd6 upstream.
The user calls fsconfig twice, but when the program exits, free() only
frees the ctx->source from the second fsconfig call, not the first.
As for fc->source, there is no code in the fs context layer that
reclaims its memory at all.
To fix this memory leak, release the source memory held by ctx
or fc before each parsing pass.
syzbot reported:
BUG: memory leak
unreferenced object 0xffff888128afa360 (size 96):
backtrace (crc 79c9c7ba):
kstrdup+0x3c/0x80 mm/util.c:84
smb3_fs_context_parse_param+0x229b/0x36c0 fs/smb/client/fs_context.c:1444
BUG: memory leak
unreferenced object 0xffff888112c7d900 (size 96):
backtrace (crc 79c9c7ba):
smb3_fs_context_fullpath+0x70/0x1b0 fs/smb/client/fs_context.c:629
smb3_fs_context_parse_param+0x2266/0x36c0 fs/smb/client/fs_context.c:1438
Reported-by: syzbot+72afd4c236e6bc3f4bac(a)syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=72afd4c236e6bc3f4bac
Cc: stable(a)vger.kernel.org
Reviewed-by: Paulo Alcantara (Red Hat) <pc(a)manguebit.org>
Signed-off-by: Edward Adam Davis <eadavis(a)qq.com>
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Wang Zhaolong <wangzhaolong(a)huaweicloud.com>
---
fs/smb/client/fs_context.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index 137d03781d52..cf233cb9c194 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -1359,16 +1359,18 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
goto cifs_parse_mount_err;
default:
cifs_errorf(fc, "Unknown error parsing devname\n");
goto cifs_parse_mount_err;
}
+ kfree(ctx->source);
ctx->source = smb3_fs_context_fullpath(ctx, '/');
if (IS_ERR(ctx->source)) {
ctx->source = NULL;
cifs_errorf(fc, "OOM when copying UNC string\n");
goto cifs_parse_mount_err;
}
+ kfree(fc->source);
fc->source = kstrdup(ctx->source, GFP_KERNEL);
if (fc->source == NULL) {
cifs_errorf(fc, "OOM when copying UNC string\n");
goto cifs_parse_mount_err;
}
--
2.34.3
14 Jan '26
From: Shuhao Fu <sfual(a)cse.ust.hk>
mainline inclusion
from mainline-v6.18-rc2
commit c2b77f42205ef485a647f62082c442c1cd69d3fc
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/7767
CVE: CVE-2025-40103
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
Fix three refcount inconsistency issues related to `cifs_sb_tlink`.
The comments for `cifs_sb_tlink` state that `cifs_put_tlink()` needs to be
called after each successful call to `cifs_sb_tlink()`. Three call sites fail to
drop the refcount accordingly, leading to possible resource leaks.
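The contract looks roughly like this (a hedged sketch modeled on the rename
path; `sketch_rename` is an illustrative name, not the driver's function):

    /* Hedged sketch: after cifs_sb_tlink() succeeds, every exit path,
     * including early errors, must pass through cifs_put_tlink(). */
    static int sketch_rename(const unsigned int xid,
                             struct cifs_sb_info *cifs_sb)
    {
            struct tcon_link *tlink;
            struct cifs_tcon *tcon;
            int rc = 0;

            tlink = cifs_sb_tlink(cifs_sb);
            if (IS_ERR(tlink))
                    return PTR_ERR(tlink);  /* no ref taken, nothing to put */
            tcon = tlink_tcon(tlink);

            if (!tcon->ses->server->ops->rename) {
                    rc = -ENOSYS;
                    goto out_put;   /* an early return here leaked tlink */
            }
            /* ... path-based rename work ... */
    out_put:
            cifs_put_tlink(tlink);  /* balances cifs_sb_tlink() above */
            return rc;
    }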
Fixes: 8ceb98437946 ("CIFS: Move rename to ops struct")
Fixes: 2f1afe25997f ("cifs: Use smb 2 - 3 and cifsacl mount options getacl functions")
Fixes: 366ed846df60 ("cifs: Use smb 2 - 3 and cifsacl mount options setacl function")
Cc: stable(a)vger.kernel.org
Signed-off-by: Shuhao Fu <sfual(a)cse.ust.hk>
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Signed-off-by: Wang Zhaolong <wangzhaolong(a)huaweicloud.com>
---
fs/smb/client/inode.c | 6 ++++--
fs/smb/client/smb2ops.c | 8 ++++----
2 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 84d35a7c4e3b..108f5963e786 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -2305,12 +2305,14 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
if (IS_ERR(tlink))
return PTR_ERR(tlink);
tcon = tlink_tcon(tlink);
server = tcon->ses->server;
- if (!server->ops->rename)
- return -ENOSYS;
+ if (!server->ops->rename) {
+ rc = -ENOSYS;
+ goto do_rename_exit;
+ }
/* try path-based rename first */
rc = server->ops->rename(xid, tcon, from_dentry,
from_path, to_path, cifs_sb);
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index c19643a37fa0..4950fa4550ae 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -3064,12 +3064,11 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
xid = get_xid();
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
if (!utf16_path) {
rc = -ENOMEM;
- free_xid(xid);
- return ERR_PTR(rc);
+ goto put_tlink;
}
oparms = (struct cifs_open_parms) {
.tcon = tcon,
.path = path,
@@ -3097,10 +3096,11 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
fid.volatile_fid, (void **)&pntsd, pacllen,
info);
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
}
+put_tlink:
cifs_put_tlink(tlink);
free_xid(xid);
cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
if (rc)
@@ -3137,12 +3137,11 @@ set_smb2_acl(struct smb_ntsd *pnntsd, __u32 acllen,
access_flags |= WRITE_DAC;
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
if (!utf16_path) {
rc = -ENOMEM;
- free_xid(xid);
- return rc;
+ goto put_tlink;
}
oparms = (struct cifs_open_parms) {
.tcon = tcon,
.desired_access = access_flags,
@@ -3159,10 +3158,11 @@ set_smb2_acl(struct smb_ntsd *pnntsd, __u32 acllen,
rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
fid.volatile_fid, pnntsd, acllen, aclflag);
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
}
+put_tlink:
cifs_put_tlink(tlink);
free_xid(xid);
return rc;
}
--
2.34.3
[PATCH OLK-6.6] ceph: fix race condition validating r_parent before applying state
by Wang Zhaolong 14 Jan '26
From: Alex Markuze <amarkuze(a)redhat.com>
mainline inclusion
from mainline-v6.17-rc6
commit 15f519e9f883b316d86e2bb6b767a023aafd9d83
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/8352
CVE: CVE-2025-39927
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
Add validation to ensure the cached parent directory inode matches the
directory info in MDS replies. This prevents client-side race conditions
where concurrent operations (e.g. rename) cause r_parent to become stale
between request initiation and reply processing, which could lead to
applying state changes to incorrect directory inodes.
[ idryomov: folded a kerneldoc fixup and a follow-up fix from Alex to
move CEPH_CAP_PIN reference when r_parent is updated:
When the parent directory lock is not held, req->r_parent can become
stale and is updated to point to the correct inode. However, the
associated CEPH_CAP_PIN reference was not being adjusted. The
CEPH_CAP_PIN is a reference on an inode that is tracked for
accounting purposes. Moving this pin is important to keep the
accounting balanced. When the pin was not moved from the old parent
to the new one, it created two problems: The reference on the old,
stale parent was never released, causing a reference leak.
A reference for the new parent was never acquired, creating the risk
of a reference underflow later in ceph_mdsc_release_request(). This
patch corrects the logic by releasing the pin from the old parent and
acquiring it for the new parent when r_parent is switched. This
ensures reference accounting stays balanced. ]
Cc: stable(a)vger.kernel.org
Signed-off-by: Alex Markuze <amarkuze(a)redhat.com>
Reviewed-by: Viacheslav Dubeyko <Slava.Dubeyko(a)ibm.com>
Signed-off-by: Ilya Dryomov <idryomov(a)gmail.com>
Conflicts:
fs/ceph/debugfs.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
[The code has undergone multiple refactorings, leading to many context
conflicts.]
Signed-off-by: Wang Zhaolong <wangzhaolong(a)huaweicloud.com>
---
fs/ceph/debugfs.c | 16 ++--
fs/ceph/dir.c | 10 +--
fs/ceph/file.c | 10 +--
fs/ceph/mds_client.c | 174 ++++++++++++++++++++++++++-----------------
fs/ceph/mds_client.h | 18 ++++-
5 files changed, 136 insertions(+), 92 deletions(-)
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 2f1e7498cd74..f00b333676d9 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -53,12 +53,10 @@ static int mdsc_show(struct seq_file *s, void *p)
{
struct ceph_fs_client *fsc = s->private;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
struct rb_node *rp;
- int pathlen = 0;
- u64 pathbase;
char *path;
mutex_lock(&mdsc->mutex);
for (rp = rb_first(&mdsc->request_tree); rp; rp = rb_next(rp)) {
req = rb_entry(rp, struct ceph_mds_request, r_node);
@@ -79,41 +77,43 @@ static int mdsc_show(struct seq_file *s, void *p)
seq_puts(s, "\t");
if (req->r_inode) {
seq_printf(s, " #%llx", ceph_ino(req->r_inode));
} else if (req->r_dentry) {
- path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen,
- &pathbase, 0);
+ struct ceph_path_info path_info;
+
+ path = ceph_mdsc_build_path(mdsc, req->r_dentry, &path_info, 0);
if (IS_ERR(path))
path = NULL;
spin_lock(&req->r_dentry->d_lock);
seq_printf(s, " #%llx/%pd (%s)",
ceph_ino(d_inode(req->r_dentry->d_parent)),
req->r_dentry,
path ? path : "");
spin_unlock(&req->r_dentry->d_lock);
- ceph_mdsc_free_path(path, pathlen);
+ ceph_mdsc_free_path_info(&path_info);
} else if (req->r_path1) {
seq_printf(s, " #%llx/%s", req->r_ino1.ino,
req->r_path1);
} else {
seq_printf(s, " #%llx", req->r_ino1.ino);
}
if (req->r_old_dentry) {
- path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &pathlen,
- &pathbase, 0);
+ struct ceph_path_info path_info;
+
+ path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &path_info, 0);
if (IS_ERR(path))
path = NULL;
spin_lock(&req->r_old_dentry->d_lock);
seq_printf(s, " #%llx/%pd (%s)",
req->r_old_dentry_dir ?
ceph_ino(req->r_old_dentry_dir) : 0,
req->r_old_dentry,
path ? path : "");
spin_unlock(&req->r_old_dentry->d_lock);
- ceph_mdsc_free_path(path, pathlen);
+ ceph_mdsc_free_path_info(&path_info);
} else if (req->r_path2 && req->r_op != CEPH_MDS_OP_SYMLINK) {
if (req->r_ino2.ino)
seq_printf(s, " #%llx/%s", req->r_ino2.ino,
req->r_path2);
else
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 1395b71df5cc..ebe6235bff4e 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1222,14 +1222,12 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
if (result == -EJUKEBOX)
goto out;
/* If op failed, mark everyone involved for errors */
if (result) {
- int pathlen = 0;
- u64 base = 0;
- char *path = ceph_mdsc_build_path(mdsc, dentry, &pathlen,
- &base, 0);
+ struct ceph_path_info path_info = {0};
+ char *path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0);
/* mark error on parent + clear complete */
mapping_set_error(req->r_parent->i_mapping, result);
ceph_dir_clear_complete(req->r_parent);
@@ -1239,12 +1237,12 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
/* mark inode itself for an error (since metadata is bogus) */
mapping_set_error(req->r_old_inode->i_mapping, result);
pr_warn("async unlink failure path=(%llx)%s result=%d!\n",
- base, IS_ERR(path) ? "<<bad>>" : path, result);
- ceph_mdsc_free_path(path, pathlen);
+ path_info.vino.ino, IS_ERR(path) ? "<<bad>>" : path, result);
+ ceph_mdsc_free_path_info(&path_info);
}
out:
iput(req->r_old_inode);
ceph_mdsc_release_dir_caps(req);
}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 15f386a5d24c..eec5b573e6e1 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -571,18 +571,16 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
goto out;
mapping_set_error(req->r_parent->i_mapping, result);
if (result) {
- int pathlen = 0;
- u64 base = 0;
- char *path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen,
- &base, 0);
+ struct ceph_path_info path_info = {0};
+ char *path = ceph_mdsc_build_path(mdsc, req->r_dentry, &path_info, 0);
pr_warn("async create failure path=(%llx)%s result=%d!\n",
- base, IS_ERR(path) ? "<<bad>>" : path, result);
- ceph_mdsc_free_path(path, pathlen);
+ path_info.vino.ino, IS_ERR(path) ? "<<bad>>" : path, result);
+ ceph_mdsc_free_path_info(&path_info);
ceph_dir_clear_complete(req->r_parent);
if (!d_unhashed(dentry))
d_drop(dentry);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index dfa1b3c82b53..af3783ccd665 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2589,12 +2589,11 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
/**
* ceph_mdsc_build_path - build a path string to a given dentry
* @mdsc: mds client
* @dentry: dentry to which path should be built
- * @plen: returned length of string
- * @pbase: returned base inode number
+ * @path_info: output path, length, base ino+snap, and freepath ownership flag
* @for_wire: is this path going to be sent to the MDS?
*
* Build a string that represents the path to the dentry. This is mostly called
* for two different purposes:
*
@@ -2608,11 +2607,11 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
*
* Encode hidden .snap dirs as a double /, i.e.
* foo/.snap/bar -> foo//bar
*/
char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
- int *plen, u64 *pbase, int for_wire)
+ struct ceph_path_info *path_info, int for_wire)
{
struct dentry *cur;
struct inode *inode;
char *path;
int pos;
@@ -2718,92 +2717,115 @@ char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
* possible with Ceph, but Linux cannot use them.
*/
return ERR_PTR(-ENAMETOOLONG);
}
- *pbase = base;
- *plen = PATH_MAX - 1 - pos;
+ /* Initialize the output structure */
+ memset(path_info, 0, sizeof(*path_info));
+
+ path_info->vino.ino = base;
+ path_info->pathlen = PATH_MAX - 1 - pos;
+ path_info->path = path + pos;
+ path_info->freepath = true;
+
+ /* Set snap from dentry if available */
+ if (d_inode(dentry))
+ path_info->vino.snap = ceph_snap(d_inode(dentry));
+ else
+ path_info->vino.snap = CEPH_NOSNAP;
+
dout("build_path on %p %d built %llx '%.*s'\n",
- dentry, d_count(dentry), base, *plen, path + pos);
+ dentry, d_count(dentry), base, PATH_MAX - 1 - pos, path + pos);
return path + pos;
}
static int build_dentry_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
- struct inode *dir, const char **ppath, int *ppathlen,
- u64 *pino, bool *pfreepath, bool parent_locked)
+ struct inode *dir, struct ceph_path_info *path_info,
+ bool parent_locked)
{
char *path;
rcu_read_lock();
if (!dir)
dir = d_inode_rcu(dentry->d_parent);
if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP &&
!IS_ENCRYPTED(dir)) {
- *pino = ceph_ino(dir);
+ path_info->vino.ino = ceph_ino(dir);
+ path_info->vino.snap = ceph_snap(dir);
rcu_read_unlock();
- *ppath = dentry->d_name.name;
- *ppathlen = dentry->d_name.len;
+ path_info->path = dentry->d_name.name;
+ path_info->pathlen = dentry->d_name.len;
+ path_info->freepath = false;
return 0;
}
rcu_read_unlock();
- path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
+ path = ceph_mdsc_build_path(mdsc, dentry, path_info, 1);
if (IS_ERR(path))
return PTR_ERR(path);
- *ppath = path;
- *pfreepath = true;
+ /*
+ * ceph_mdsc_build_path already fills path_info, including snap handling.
+ */
return 0;
}
-static int build_inode_path(struct inode *inode,
- const char **ppath, int *ppathlen, u64 *pino,
- bool *pfreepath)
+static int build_inode_path(struct inode *inode, struct ceph_path_info *path_info)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
struct dentry *dentry;
char *path;
if (ceph_snap(inode) == CEPH_NOSNAP) {
- *pino = ceph_ino(inode);
- *ppathlen = 0;
+ path_info->vino.ino = ceph_ino(inode);
+ path_info->vino.snap = ceph_snap(inode);
+ path_info->pathlen = 0;
+ path_info->freepath = false;
return 0;
}
dentry = d_find_alias(inode);
- path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
+ path = ceph_mdsc_build_path(mdsc, dentry, path_info, 1);
dput(dentry);
if (IS_ERR(path))
return PTR_ERR(path);
- *ppath = path;
- *pfreepath = true;
+ /*
+ * ceph_mdsc_build_path already fills path_info, including snap from dentry.
+ * Override with inode's snap since that's what this function is for.
+ */
+ path_info->vino.snap = ceph_snap(inode);
return 0;
}
/*
* request arguments may be specified via an inode *, a dentry *, or
* an explicit ino+path.
*/
static int set_request_path_attr(struct ceph_mds_client *mdsc, struct inode *rinode,
struct dentry *rdentry, struct inode *rdiri,
- const char *rpath, u64 rino, const char **ppath,
- int *pathlen, u64 *ino, bool *freepath,
+ const char *rpath, u64 rino,
+ struct ceph_path_info *path_info,
bool parent_locked)
{
int r = 0;
+ /* Initialize the output structure */
+ memset(path_info, 0, sizeof(*path_info));
+
if (rinode) {
- r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
+ r = build_inode_path(rinode, path_info);
dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
ceph_snap(rinode));
} else if (rdentry) {
- r = build_dentry_path(mdsc, rdentry, rdiri, ppath, pathlen, ino,
- freepath, parent_locked);
- dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
- *ppath);
+ r = build_dentry_path(mdsc, rdentry, rdiri, path_info, parent_locked);
+ dout(" dentry %p %llx/%.*s\n", rdentry, path_info->vino.ino,
+ path_info->pathlen, path_info->path);
} else if (rpath || rino) {
- *ino = rino;
- *ppath = rpath;
- *pathlen = rpath ? strlen(rpath) : 0;
- dout(" path %.*s\n", *pathlen, rpath);
+ path_info->vino.ino = rino;
+ path_info->vino.snap = CEPH_NOSNAP;
+ path_info->path = rpath;
+ path_info->pathlen = rpath ? strlen(rpath) : 0;
+ path_info->freepath = false;
+
+ dout(" path %.*s\n", path_info->pathlen, rpath);
}
return r;
}
@@ -2864,42 +2886,64 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
{
int mds = session->s_mds;
struct ceph_mds_client *mdsc = session->s_mdsc;
struct ceph_msg *msg;
struct ceph_mds_request_head_legacy *lhead;
- const char *path1 = NULL;
- const char *path2 = NULL;
- u64 ino1 = 0, ino2 = 0;
- int pathlen1 = 0, pathlen2 = 0;
- bool freepath1 = false, freepath2 = false;
+ struct ceph_path_info path_info1 = {0};
+ struct ceph_path_info path_info2 = {0};
struct dentry *old_dentry = NULL;
int len;
u16 releases;
void *p, *end;
int ret;
bool legacy = !(session->s_con.peer_features & CEPH_FEATURE_FS_BTIME);
bool old_version = !test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD,
&session->s_features);
+ bool parent_locked = test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
ret = set_request_path_attr(mdsc, req->r_inode, req->r_dentry,
- req->r_parent, req->r_path1, req->r_ino1.ino,
- &path1, &pathlen1, &ino1, &freepath1,
- test_bit(CEPH_MDS_R_PARENT_LOCKED,
- &req->r_req_flags));
+ req->r_parent, req->r_path1, req->r_ino1.ino,
+ &path_info1, parent_locked);
if (ret < 0) {
msg = ERR_PTR(ret);
goto out;
}
+ /*
+ * When the parent directory's i_rwsem is *not* locked, req->r_parent may
+ * have become stale (e.g. after a concurrent rename) between the time the
+ * dentry was looked up and now. If we detect that the stored r_parent
+ * does not match the inode number we just encoded for the request, switch
+ * to the correct inode so that the MDS receives a valid parent reference.
+ */
+ if (!parent_locked && req->r_parent && path_info1.vino.ino &&
+ ceph_ino(req->r_parent) != path_info1.vino.ino) {
+ struct inode *old_parent = req->r_parent;
+ struct inode *correct_dir = ceph_get_inode(mdsc->fsc->sb, path_info1.vino, NULL);
+
+ if (!IS_ERR(correct_dir)) {
+ WARN_ONCE(1, "ceph: r_parent mismatch (had %llx wanted %llx) - updating\n",
+ ceph_ino(old_parent), path_info1.vino.ino);
+ /*
+ * Transfer CEPH_CAP_PIN from the old parent to the new one.
+ * The pin was taken earlier in ceph_mdsc_submit_request().
+ */
+ ceph_put_cap_refs(ceph_inode(old_parent), CEPH_CAP_PIN);
+ iput(old_parent);
+ req->r_parent = correct_dir;
+ ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
+ }
+ }
+
/* If r_old_dentry is set, then assume that its parent is locked */
if (req->r_old_dentry &&
!(req->r_old_dentry->d_flags & DCACHE_DISCONNECTED))
old_dentry = req->r_old_dentry;
ret = set_request_path_attr(mdsc, NULL, old_dentry,
- req->r_old_dentry_dir,
- req->r_path2, req->r_ino2.ino,
- &path2, &pathlen2, &ino2, &freepath2, true);
+ req->r_old_dentry_dir,
+ req->r_path2, req->r_ino2.ino,
+ &path_info2, true);
if (ret < 0) {
msg = ERR_PTR(ret);
goto out_free1;
}
@@ -2924,21 +2968,21 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
else
len = sizeof(struct ceph_mds_request_head);
/* filepaths */
len += 2 * (1 + sizeof(u32) + sizeof(u64));
- len += pathlen1 + pathlen2;
+ len += path_info1.pathlen + path_info2.pathlen;
/* cap releases */
len += sizeof(struct ceph_mds_request_release) *
(!!req->r_inode_drop + !!req->r_dentry_drop +
!!req->r_old_inode_drop + !!req->r_old_dentry_drop);
if (req->r_dentry_drop)
- len += pathlen1;
+ len += path_info1.pathlen;
if (req->r_old_dentry_drop)
- len += pathlen2;
+ len += path_info2.pathlen;
/* MClientRequest tail */
/* req->r_stamp */
len += sizeof(struct ceph_timespec);
@@ -3000,12 +3044,12 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
lhead->caller_gid = cpu_to_le32(from_kgid(&init_user_ns,
req->r_cred->fsgid));
lhead->ino = cpu_to_le64(req->r_deleg_ino);
lhead->args = req->r_args;
- ceph_encode_filepath(&p, end, ino1, path1);
- ceph_encode_filepath(&p, end, ino2, path2);
+ ceph_encode_filepath(&p, end, path_info1.vino.ino, path_info1.path);
+ ceph_encode_filepath(&p, end, path_info2.vino.ino, path_info2.path);
/* make note of release offset, in case we need to replay */
req->r_request_release_offset = p - msg->front.iov_base;
/* cap releases */
@@ -3064,15 +3108,13 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
}
msg->hdr.data_off = cpu_to_le16(0);
out_free2:
- if (freepath2)
- ceph_mdsc_free_path((char *)path2, pathlen2);
+ ceph_mdsc_free_path_info(&path_info2);
out_free1:
- if (freepath1)
- ceph_mdsc_free_path((char *)path1, pathlen1);
+ ceph_mdsc_free_path_info(&path_info1);
out:
return msg;
out_err:
ceph_msg_put(msg);
msg = ERR_PTR(ret);
@@ -4301,28 +4343,24 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_reconnect_state *recon_state = arg;
struct ceph_pagelist *pagelist = recon_state->pagelist;
struct dentry *dentry;
struct ceph_cap *cap;
- char *path;
- int pathlen = 0, err;
- u64 pathbase;
+ struct ceph_path_info path_info = {0};
+ int err;
u64 snap_follows;
dentry = d_find_primary(inode);
if (dentry) {
/* set pathbase to parent dir when msg_version >= 2 */
- path = ceph_mdsc_build_path(mdsc, dentry, &pathlen, &pathbase,
+ char *path = ceph_mdsc_build_path(mdsc, dentry, &path_info,
recon_state->msg_version >= 2);
dput(dentry);
if (IS_ERR(path)) {
err = PTR_ERR(path);
goto out_err;
}
- } else {
- path = NULL;
- pathbase = 0;
}
spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ci, mds);
if (!cap) {
@@ -4351,22 +4389,22 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
if (recon_state->msg_version >= 2) {
rec.v2.cap_id = cpu_to_le64(cap->cap_id);
rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
rec.v2.issued = cpu_to_le32(cap->issued);
rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
- rec.v2.pathbase = cpu_to_le64(pathbase);
+ rec.v2.pathbase = cpu_to_le64(path_info.vino.ino);
rec.v2.flock_len = (__force __le32)
((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
} else {
rec.v1.cap_id = cpu_to_le64(cap->cap_id);
rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
rec.v1.issued = cpu_to_le32(cap->issued);
rec.v1.size = cpu_to_le64(i_size_read(inode));
ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
- rec.v1.pathbase = cpu_to_le64(pathbase);
+ rec.v1.pathbase = cpu_to_le64(path_info.vino.ino);
}
if (list_empty(&ci->i_cap_snaps)) {
snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0;
} else {
@@ -4424,11 +4462,11 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
struct_len = 2 * sizeof(u32) +
(num_fcntl_locks + num_flock_locks) *
sizeof(struct ceph_filelock);
rec.v2.flock_len = cpu_to_le32(struct_len);
- struct_len += sizeof(u32) + pathlen + sizeof(rec.v2);
+ struct_len += sizeof(u32) + path_info.pathlen + sizeof(rec.v2);
if (struct_v >= 2)
struct_len += sizeof(u64); /* snap_follows */
total_len += struct_len;
@@ -4448,32 +4486,32 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
if (recon_state->msg_version >= 3) {
ceph_pagelist_encode_8(pagelist, struct_v);
ceph_pagelist_encode_8(pagelist, 1);
ceph_pagelist_encode_32(pagelist, struct_len);
}
- ceph_pagelist_encode_string(pagelist, path, pathlen);
+ ceph_pagelist_encode_string(pagelist, (char *)path_info.path, path_info.pathlen);
ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
ceph_locks_to_pagelist(flocks, pagelist,
num_fcntl_locks, num_flock_locks);
if (struct_v >= 2)
ceph_pagelist_encode_64(pagelist, snap_follows);
out_freeflocks:
kfree(flocks);
} else {
err = ceph_pagelist_reserve(pagelist,
sizeof(u64) + sizeof(u32) +
- pathlen + sizeof(rec.v1));
+ path_info.pathlen + sizeof(rec.v1));
if (err)
goto out_err;
ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
- ceph_pagelist_encode_string(pagelist, path, pathlen);
+ ceph_pagelist_encode_string(pagelist, (char *)path_info.path, path_info.pathlen);
ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
}
out_err:
- ceph_mdsc_free_path(path, pathlen);
+ ceph_mdsc_free_path_info(&path_info);
if (!err)
recon_state->nr_caps++;
return err;
}
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index d930eb79dc38..66bc179413f3 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -573,18 +573,28 @@ extern void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr);
extern int ceph_iterate_session_caps(struct ceph_mds_session *session,
int (*cb)(struct inode *, int mds, void *),
void *arg);
extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);
-static inline void ceph_mdsc_free_path(char *path, int len)
+/*
+ * Structure to group path-related output parameters for build_*_path functions
+ */
+struct ceph_path_info {
+ const char *path;
+ int pathlen;
+ struct ceph_vino vino;
+ bool freepath;
+};
+
+static inline void ceph_mdsc_free_path_info(const struct ceph_path_info *path_info)
{
- if (!IS_ERR_OR_NULL(path))
- __putname(path - (PATH_MAX - 1 - len));
+ if (path_info && path_info->freepath && !IS_ERR_OR_NULL(path_info->path))
+ __putname((char *)path_info->path - (PATH_MAX - 1 - path_info->pathlen));
}
extern char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc,
- struct dentry *dentry, int *plen, u64 *base,
+ struct dentry *dentry, struct ceph_path_info *path_info,
int for_wire);
extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
extern void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
struct dentry *dentry, char action,
--
2.34.3
[PATCH OLK-6.6] ceph: fix race condition validating r_parent before applying state
by Wang Zhaolong 14 Jan '26
From: Alex Markuze <amarkuze(a)redhat.com>
mainline inclusion
from mainline-v6.17-rc6
commit 15f519e9f883b316d86e2bb6b767a023aafd9d83
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/ID0R58
CVE: CVE-2025-39927
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
Add validation to ensure the cached parent directory inode matches the
directory info in MDS replies. This prevents client-side race conditions
where concurrent operations (e.g. rename) cause r_parent to become stale
between request initiation and reply processing, which could lead to
applying state changes to incorrect directory inodes.
[ idryomov: folded a kerneldoc fixup and a follow-up fix from Alex to
move CEPH_CAP_PIN reference when r_parent is updated:
When the parent directory lock is not held, req->r_parent can become
stale and is updated to point to the correct inode. However, the
associated CEPH_CAP_PIN reference was not being adjusted. The
CEPH_CAP_PIN is a reference on an inode that is tracked for
accounting purposes. Moving this pin is important to keep the
accounting balanced. When the pin was not moved from the old parent
to the new one, it created two problems: The reference on the old,
stale parent was never released, causing a reference leak.
A reference for the new parent was never acquired, creating the risk
of a reference underflow later in ceph_mdsc_release_request(). This
patch corrects the logic by releasing the pin from the old parent and
acquiring it for the new parent when r_parent is switched. This
ensures reference accounting stays balanced. ]
Cc: stable(a)vger.kernel.org
Signed-off-by: Alex Markuze <amarkuze(a)redhat.com>
Reviewed-by: Viacheslav Dubeyko <Slava.Dubeyko(a)ibm.com>
Signed-off-by: Ilya Dryomov <idryomov(a)gmail.com>
Conflicts:
fs/ceph/debugfs.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
[The code has undergone multiple refactorings, leading to many context
conflicts.]
Signed-off-by: Wang Zhaolong <wangzhaolong(a)huaweicloud.com>
---
fs/ceph/debugfs.c | 16 ++--
fs/ceph/dir.c | 10 +--
fs/ceph/file.c | 10 +--
fs/ceph/mds_client.c | 174 ++++++++++++++++++++++++++-----------------
fs/ceph/mds_client.h | 18 ++++-
5 files changed, 136 insertions(+), 92 deletions(-)
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 2f1e7498cd74..f00b333676d9 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -53,12 +53,10 @@ static int mdsc_show(struct seq_file *s, void *p)
{
struct ceph_fs_client *fsc = s->private;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
struct rb_node *rp;
- int pathlen = 0;
- u64 pathbase;
char *path;
mutex_lock(&mdsc->mutex);
for (rp = rb_first(&mdsc->request_tree); rp; rp = rb_next(rp)) {
req = rb_entry(rp, struct ceph_mds_request, r_node);
@@ -79,41 +77,43 @@ static int mdsc_show(struct seq_file *s, void *p)
seq_puts(s, "\t");
if (req->r_inode) {
seq_printf(s, " #%llx", ceph_ino(req->r_inode));
} else if (req->r_dentry) {
- path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen,
- &pathbase, 0);
+ struct ceph_path_info path_info;
+
+ path = ceph_mdsc_build_path(mdsc, req->r_dentry, &path_info, 0);
if (IS_ERR(path))
path = NULL;
spin_lock(&req->r_dentry->d_lock);
seq_printf(s, " #%llx/%pd (%s)",
ceph_ino(d_inode(req->r_dentry->d_parent)),
req->r_dentry,
path ? path : "");
spin_unlock(&req->r_dentry->d_lock);
- ceph_mdsc_free_path(path, pathlen);
+ ceph_mdsc_free_path_info(&path_info);
} else if (req->r_path1) {
seq_printf(s, " #%llx/%s", req->r_ino1.ino,
req->r_path1);
} else {
seq_printf(s, " #%llx", req->r_ino1.ino);
}
if (req->r_old_dentry) {
- path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &pathlen,
- &pathbase, 0);
+ struct ceph_path_info path_info;
+
+ path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &path_info, 0);
if (IS_ERR(path))
path = NULL;
spin_lock(&req->r_old_dentry->d_lock);
seq_printf(s, " #%llx/%pd (%s)",
req->r_old_dentry_dir ?
ceph_ino(req->r_old_dentry_dir) : 0,
req->r_old_dentry,
path ? path : "");
spin_unlock(&req->r_old_dentry->d_lock);
- ceph_mdsc_free_path(path, pathlen);
+ ceph_mdsc_free_path_info(&path_info);
} else if (req->r_path2 && req->r_op != CEPH_MDS_OP_SYMLINK) {
if (req->r_ino2.ino)
seq_printf(s, " #%llx/%s", req->r_ino2.ino,
req->r_path2);
else
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 1395b71df5cc..ebe6235bff4e 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1222,14 +1222,12 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
if (result == -EJUKEBOX)
goto out;
/* If op failed, mark everyone involved for errors */
if (result) {
- int pathlen = 0;
- u64 base = 0;
- char *path = ceph_mdsc_build_path(mdsc, dentry, &pathlen,
- &base, 0);
+ struct ceph_path_info path_info = {0};
+ char *path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0);
/* mark error on parent + clear complete */
mapping_set_error(req->r_parent->i_mapping, result);
ceph_dir_clear_complete(req->r_parent);
@@ -1239,12 +1237,12 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
/* mark inode itself for an error (since metadata is bogus) */
mapping_set_error(req->r_old_inode->i_mapping, result);
pr_warn("async unlink failure path=(%llx)%s result=%d!\n",
- base, IS_ERR(path) ? "<<bad>>" : path, result);
- ceph_mdsc_free_path(path, pathlen);
+ path_info.vino.ino, IS_ERR(path) ? "<<bad>>" : path, result);
+ ceph_mdsc_free_path_info(&path_info);
}
out:
iput(req->r_old_inode);
ceph_mdsc_release_dir_caps(req);
}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 15f386a5d24c..eec5b573e6e1 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -571,18 +571,16 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
goto out;
mapping_set_error(req->r_parent->i_mapping, result);
if (result) {
- int pathlen = 0;
- u64 base = 0;
- char *path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen,
- &base, 0);
+ struct ceph_path_info path_info = {0};
+ char *path = ceph_mdsc_build_path(mdsc, req->r_dentry, &path_info, 0);
pr_warn("async create failure path=(%llx)%s result=%d!\n",
- base, IS_ERR(path) ? "<<bad>>" : path, result);
- ceph_mdsc_free_path(path, pathlen);
+ path_info.vino.ino, IS_ERR(path) ? "<<bad>>" : path, result);
+ ceph_mdsc_free_path_info(&path_info);
ceph_dir_clear_complete(req->r_parent);
if (!d_unhashed(dentry))
d_drop(dentry);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index dfa1b3c82b53..af3783ccd665 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2589,12 +2589,11 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
/**
* ceph_mdsc_build_path - build a path string to a given dentry
* @mdsc: mds client
* @dentry: dentry to which path should be built
- * @plen: returned length of string
- * @pbase: returned base inode number
+ * @path_info: output path, length, base ino+snap, and freepath ownership flag
* @for_wire: is this path going to be sent to the MDS?
*
* Build a string that represents the path to the dentry. This is mostly called
* for two different purposes:
*
@@ -2608,11 +2607,11 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
*
* Encode hidden .snap dirs as a double /, i.e.
* foo/.snap/bar -> foo//bar
*/
char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
- int *plen, u64 *pbase, int for_wire)
+ struct ceph_path_info *path_info, int for_wire)
{
struct dentry *cur;
struct inode *inode;
char *path;
int pos;
@@ -2718,92 +2717,115 @@ char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
* possible with Ceph, but Linux cannot use them.
*/
return ERR_PTR(-ENAMETOOLONG);
}
- *pbase = base;
- *plen = PATH_MAX - 1 - pos;
+ /* Initialize the output structure */
+ memset(path_info, 0, sizeof(*path_info));
+
+ path_info->vino.ino = base;
+ path_info->pathlen = PATH_MAX - 1 - pos;
+ path_info->path = path + pos;
+ path_info->freepath = true;
+
+ /* Set snap from dentry if available */
+ if (d_inode(dentry))
+ path_info->vino.snap = ceph_snap(d_inode(dentry));
+ else
+ path_info->vino.snap = CEPH_NOSNAP;
+
dout("build_path on %p %d built %llx '%.*s'\n",
- dentry, d_count(dentry), base, *plen, path + pos);
+ dentry, d_count(dentry), base, PATH_MAX - 1 - pos, path + pos);
return path + pos;
}
static int build_dentry_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
- struct inode *dir, const char **ppath, int *ppathlen,
- u64 *pino, bool *pfreepath, bool parent_locked)
+ struct inode *dir, struct ceph_path_info *path_info,
+ bool parent_locked)
{
char *path;
rcu_read_lock();
if (!dir)
dir = d_inode_rcu(dentry->d_parent);
if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP &&
!IS_ENCRYPTED(dir)) {
- *pino = ceph_ino(dir);
+ path_info->vino.ino = ceph_ino(dir);
+ path_info->vino.snap = ceph_snap(dir);
rcu_read_unlock();
- *ppath = dentry->d_name.name;
- *ppathlen = dentry->d_name.len;
+ path_info->path = dentry->d_name.name;
+ path_info->pathlen = dentry->d_name.len;
+ path_info->freepath = false;
return 0;
}
rcu_read_unlock();
- path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
+ path = ceph_mdsc_build_path(mdsc, dentry, path_info, 1);
if (IS_ERR(path))
return PTR_ERR(path);
- *ppath = path;
- *pfreepath = true;
+ /*
+ * ceph_mdsc_build_path already fills path_info, including snap handling.
+ */
return 0;
}
-static int build_inode_path(struct inode *inode,
- const char **ppath, int *ppathlen, u64 *pino,
- bool *pfreepath)
+static int build_inode_path(struct inode *inode, struct ceph_path_info *path_info)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
struct dentry *dentry;
char *path;
if (ceph_snap(inode) == CEPH_NOSNAP) {
- *pino = ceph_ino(inode);
- *ppathlen = 0;
+ path_info->vino.ino = ceph_ino(inode);
+ path_info->vino.snap = ceph_snap(inode);
+ path_info->pathlen = 0;
+ path_info->freepath = false;
return 0;
}
dentry = d_find_alias(inode);
- path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
+ path = ceph_mdsc_build_path(mdsc, dentry, path_info, 1);
dput(dentry);
if (IS_ERR(path))
return PTR_ERR(path);
- *ppath = path;
- *pfreepath = true;
+ /*
+ * ceph_mdsc_build_path already fills path_info, including snap from dentry.
+ * Override with inode's snap since that's what this function is for.
+ */
+ path_info->vino.snap = ceph_snap(inode);
return 0;
}
/*
* request arguments may be specified via an inode *, a dentry *, or
* an explicit ino+path.
*/
static int set_request_path_attr(struct ceph_mds_client *mdsc, struct inode *rinode,
struct dentry *rdentry, struct inode *rdiri,
- const char *rpath, u64 rino, const char **ppath,
- int *pathlen, u64 *ino, bool *freepath,
+ const char *rpath, u64 rino,
+ struct ceph_path_info *path_info,
bool parent_locked)
{
int r = 0;
+ /* Initialize the output structure */
+ memset(path_info, 0, sizeof(*path_info));
+
if (rinode) {
- r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
+ r = build_inode_path(rinode, path_info);
dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
ceph_snap(rinode));
} else if (rdentry) {
- r = build_dentry_path(mdsc, rdentry, rdiri, ppath, pathlen, ino,
- freepath, parent_locked);
- dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
- *ppath);
+ r = build_dentry_path(mdsc, rdentry, rdiri, path_info, parent_locked);
+ dout(" dentry %p %llx/%.*s\n", rdentry, path_info->vino.ino,
+ path_info->pathlen, path_info->path);
} else if (rpath || rino) {
- *ino = rino;
- *ppath = rpath;
- *pathlen = rpath ? strlen(rpath) : 0;
- dout(" path %.*s\n", *pathlen, rpath);
+ path_info->vino.ino = rino;
+ path_info->vino.snap = CEPH_NOSNAP;
+ path_info->path = rpath;
+ path_info->pathlen = rpath ? strlen(rpath) : 0;
+ path_info->freepath = false;
+
+ dout(" path %.*s\n", path_info->pathlen, rpath);
}
return r;
}
@@ -2864,42 +2886,64 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
{
int mds = session->s_mds;
struct ceph_mds_client *mdsc = session->s_mdsc;
struct ceph_msg *msg;
struct ceph_mds_request_head_legacy *lhead;
- const char *path1 = NULL;
- const char *path2 = NULL;
- u64 ino1 = 0, ino2 = 0;
- int pathlen1 = 0, pathlen2 = 0;
- bool freepath1 = false, freepath2 = false;
+ struct ceph_path_info path_info1 = {0};
+ struct ceph_path_info path_info2 = {0};
struct dentry *old_dentry = NULL;
int len;
u16 releases;
void *p, *end;
int ret;
bool legacy = !(session->s_con.peer_features & CEPH_FEATURE_FS_BTIME);
bool old_version = !test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD,
&session->s_features);
+ bool parent_locked = test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
ret = set_request_path_attr(mdsc, req->r_inode, req->r_dentry,
- req->r_parent, req->r_path1, req->r_ino1.ino,
- &path1, &pathlen1, &ino1, &freepath1,
- test_bit(CEPH_MDS_R_PARENT_LOCKED,
- &req->r_req_flags));
+ req->r_parent, req->r_path1, req->r_ino1.ino,
+ &path_info1, parent_locked);
if (ret < 0) {
msg = ERR_PTR(ret);
goto out;
}
+ /*
+ * When the parent directory's i_rwsem is *not* locked, req->r_parent may
+ * have become stale (e.g. after a concurrent rename) between the time the
+ * dentry was looked up and now. If we detect that the stored r_parent
+ * does not match the inode number we just encoded for the request, switch
+ * to the correct inode so that the MDS receives a valid parent reference.
+ */
+ if (!parent_locked && req->r_parent && path_info1.vino.ino &&
+ ceph_ino(req->r_parent) != path_info1.vino.ino) {
+ struct inode *old_parent = req->r_parent;
+ struct inode *correct_dir = ceph_get_inode(mdsc->fsc->sb, path_info1.vino, NULL);
+
+ if (!IS_ERR(correct_dir)) {
+ WARN_ONCE(1, "ceph: r_parent mismatch (had %llx wanted %llx) - updating\n",
+ ceph_ino(old_parent), path_info1.vino.ino);
+ /*
+ * Transfer CEPH_CAP_PIN from the old parent to the new one.
+ * The pin was taken earlier in ceph_mdsc_submit_request().
+ */
+ ceph_put_cap_refs(ceph_inode(old_parent), CEPH_CAP_PIN);
+ iput(old_parent);
+ req->r_parent = correct_dir;
+ ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
+ }
+ }
+
/* If r_old_dentry is set, then assume that its parent is locked */
if (req->r_old_dentry &&
!(req->r_old_dentry->d_flags & DCACHE_DISCONNECTED))
old_dentry = req->r_old_dentry;
ret = set_request_path_attr(mdsc, NULL, old_dentry,
- req->r_old_dentry_dir,
- req->r_path2, req->r_ino2.ino,
- &path2, &pathlen2, &ino2, &freepath2, true);
+ req->r_old_dentry_dir,
+ req->r_path2, req->r_ino2.ino,
+ &path_info2, true);
if (ret < 0) {
msg = ERR_PTR(ret);
goto out_free1;
}
@@ -2924,21 +2968,21 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
else
len = sizeof(struct ceph_mds_request_head);
/* filepaths */
len += 2 * (1 + sizeof(u32) + sizeof(u64));
- len += pathlen1 + pathlen2;
+ len += path_info1.pathlen + path_info2.pathlen;
/* cap releases */
len += sizeof(struct ceph_mds_request_release) *
(!!req->r_inode_drop + !!req->r_dentry_drop +
!!req->r_old_inode_drop + !!req->r_old_dentry_drop);
if (req->r_dentry_drop)
- len += pathlen1;
+ len += path_info1.pathlen;
if (req->r_old_dentry_drop)
- len += pathlen2;
+ len += path_info2.pathlen;
/* MClientRequest tail */
/* req->r_stamp */
len += sizeof(struct ceph_timespec);
@@ -3000,12 +3044,12 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
lhead->caller_gid = cpu_to_le32(from_kgid(&init_user_ns,
req->r_cred->fsgid));
lhead->ino = cpu_to_le64(req->r_deleg_ino);
lhead->args = req->r_args;
- ceph_encode_filepath(&p, end, ino1, path1);
- ceph_encode_filepath(&p, end, ino2, path2);
+ ceph_encode_filepath(&p, end, path_info1.vino.ino, path_info1.path);
+ ceph_encode_filepath(&p, end, path_info2.vino.ino, path_info2.path);
/* make note of release offset, in case we need to replay */
req->r_request_release_offset = p - msg->front.iov_base;
/* cap releases */
@@ -3064,15 +3108,13 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
}
msg->hdr.data_off = cpu_to_le16(0);
out_free2:
- if (freepath2)
- ceph_mdsc_free_path((char *)path2, pathlen2);
+ ceph_mdsc_free_path_info(&path_info2);
out_free1:
- if (freepath1)
- ceph_mdsc_free_path((char *)path1, pathlen1);
+ ceph_mdsc_free_path_info(&path_info1);
out:
return msg;
out_err:
ceph_msg_put(msg);
msg = ERR_PTR(ret);
@@ -4301,28 +4343,24 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_reconnect_state *recon_state = arg;
struct ceph_pagelist *pagelist = recon_state->pagelist;
struct dentry *dentry;
struct ceph_cap *cap;
- char *path;
- int pathlen = 0, err;
- u64 pathbase;
+ struct ceph_path_info path_info = {0};
+ int err;
u64 snap_follows;
dentry = d_find_primary(inode);
if (dentry) {
/* set pathbase to parent dir when msg_version >= 2 */
- path = ceph_mdsc_build_path(mdsc, dentry, &pathlen, &pathbase,
+ char *path = ceph_mdsc_build_path(mdsc, dentry, &path_info,
recon_state->msg_version >= 2);
dput(dentry);
if (IS_ERR(path)) {
err = PTR_ERR(path);
goto out_err;
}
- } else {
- path = NULL;
- pathbase = 0;
}
spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ci, mds);
if (!cap) {
@@ -4351,22 +4389,22 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
if (recon_state->msg_version >= 2) {
rec.v2.cap_id = cpu_to_le64(cap->cap_id);
rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
rec.v2.issued = cpu_to_le32(cap->issued);
rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
- rec.v2.pathbase = cpu_to_le64(pathbase);
+ rec.v2.pathbase = cpu_to_le64(path_info.vino.ino);
rec.v2.flock_len = (__force __le32)
((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
} else {
rec.v1.cap_id = cpu_to_le64(cap->cap_id);
rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
rec.v1.issued = cpu_to_le32(cap->issued);
rec.v1.size = cpu_to_le64(i_size_read(inode));
ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
- rec.v1.pathbase = cpu_to_le64(pathbase);
+ rec.v1.pathbase = cpu_to_le64(path_info.vino.ino);
}
if (list_empty(&ci->i_cap_snaps)) {
snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0;
} else {
@@ -4424,11 +4462,11 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
struct_len = 2 * sizeof(u32) +
(num_fcntl_locks + num_flock_locks) *
sizeof(struct ceph_filelock);
rec.v2.flock_len = cpu_to_le32(struct_len);
- struct_len += sizeof(u32) + pathlen + sizeof(rec.v2);
+ struct_len += sizeof(u32) + path_info.pathlen + sizeof(rec.v2);
if (struct_v >= 2)
struct_len += sizeof(u64); /* snap_follows */
total_len += struct_len;
@@ -4448,32 +4486,32 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
if (recon_state->msg_version >= 3) {
ceph_pagelist_encode_8(pagelist, struct_v);
ceph_pagelist_encode_8(pagelist, 1);
ceph_pagelist_encode_32(pagelist, struct_len);
}
- ceph_pagelist_encode_string(pagelist, path, pathlen);
+ ceph_pagelist_encode_string(pagelist, (char *)path_info.path, path_info.pathlen);
ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
ceph_locks_to_pagelist(flocks, pagelist,
num_fcntl_locks, num_flock_locks);
if (struct_v >= 2)
ceph_pagelist_encode_64(pagelist, snap_follows);
out_freeflocks:
kfree(flocks);
} else {
err = ceph_pagelist_reserve(pagelist,
sizeof(u64) + sizeof(u32) +
- pathlen + sizeof(rec.v1));
+ path_info.pathlen + sizeof(rec.v1));
if (err)
goto out_err;
ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
- ceph_pagelist_encode_string(pagelist, path, pathlen);
+ ceph_pagelist_encode_string(pagelist, (char *)path_info.path, path_info.pathlen);
ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
}
out_err:
- ceph_mdsc_free_path(path, pathlen);
+ ceph_mdsc_free_path_info(&path_info);
if (!err)
recon_state->nr_caps++;
return err;
}
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index d930eb79dc38..66bc179413f3 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -573,18 +573,28 @@ extern void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr);
extern int ceph_iterate_session_caps(struct ceph_mds_session *session,
int (*cb)(struct inode *, int mds, void *),
void *arg);
extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);
-static inline void ceph_mdsc_free_path(char *path, int len)
+/*
+ * Structure to group path-related output parameters for build_*_path functions
+ */
+struct ceph_path_info {
+ const char *path;
+ int pathlen;
+ struct ceph_vino vino;
+ bool freepath;
+};
+
+static inline void ceph_mdsc_free_path_info(const struct ceph_path_info *path_info)
{
- if (!IS_ERR_OR_NULL(path))
- __putname(path - (PATH_MAX - 1 - len));
+ if (path_info && path_info->freepath && !IS_ERR_OR_NULL(path_info->path))
+ __putname((char *)path_info->path - (PATH_MAX - 1 - path_info->pathlen));
}
extern char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc,
- struct dentry *dentry, int *plen, u64 *base,
+ struct dentry *dentry, struct ceph_path_info *path_info,
int for_wire);
extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
extern void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
struct dentry *dentry, char action,
--
2.34.3
[PATCH OLK-6.6] smb: client: fix smbdirect_recv_io leak in smbd_negotiate() error path
by Wang Zhaolong 14 Jan '26
From: Stefan Metzmacher <metze(a)samba.org>
stable inclusion
from stable-v6.6.108
commit e7b7a93879558e77d950f1ff9a6f3daa385b33df
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/8210
CVE: CVE-2025-39929
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit daac51c7032036a0ca5f1aa419ad1b0471d1c6e0 ]
During tests of another unrelated patch I was able to trigger this
error: Objects remaining on __kmem_cache_shutdown()
Cc: Steve French <smfrench(a)gmail.com>
Cc: Tom Talpey <tom(a)talpey.com>
Cc: Long Li <longli(a)microsoft.com>
Cc: Namjae Jeon <linkinjeon(a)kernel.org>
Cc: linux-cifs(a)vger.kernel.org
Cc: samba-technical(a)lists.samba.org
Fixes: f198186aa9bb ("CIFS: SMBD: Establish SMB Direct connection")
Signed-off-by: Stefan Metzmacher <metze(a)samba.org>
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Wang Hai <wanghai38(a)huawei.com>
Signed-off-by: Wang Zhaolong <wangzhaolong(a)huaweicloud.com>
---
fs/smb/client/smbdirect.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index d74e829de51c..02a30b769066 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -1046,12 +1046,14 @@ static int smbd_negotiate(struct smbd_connection *info)
response->type = SMBD_NEGOTIATE_RESP;
rc = smbd_post_recv(info, response);
log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=0x%llx iov.length=%u iov.lkey=0x%x\n",
rc, response->sge.addr,
response->sge.length, response->sge.lkey);
- if (rc)
+ if (rc) {
+ put_receive_buffer(info, response);
return rc;
+ }
init_completion(&info->negotiate_completion);
info->negotiate_done = false;
rc = smbd_post_send_negotiate_req(info);
if (rc)
--
2.34.3
Trond Myklebust (2):
NFS: Fix the setting of capabilities when automounting a new
filesystem
NFSv4: Don't clear capabilities that won't be reset
fs/nfs/client.c | 44 ++++++++++++++++++++++++++++++++++++++++++--
fs/nfs/internal.h | 2 +-
fs/nfs/nfs4client.c | 20 +-------------------
fs/nfs/nfs4proc.c | 1 -
4 files changed, 44 insertions(+), 23 deletions(-)
--
2.34.3
[PATCH OLK-6.6] NFS: Fix filehandle bounds checking in nfs_fh_to_dentry()
by Wang Zhaolong 14 Jan '26
From: Trond Myklebust <trond.myklebust(a)hammerspace.com>
stable inclusion
from stable-v6.6.102
commit 12ad3def2e5e0b120e3d0cb6ce8b7b796819ad40
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/8984
CVE: CVE-2025-39730
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit ef93a685e01a281b5e2a25ce4e3428cf9371a205 ]
The function needs to check the minimal filehandle length before it can
access the embedded filehandle.
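The fix is a two-stage bounds check: prove that the fixed header of the embedded handle fits in what the client sent before reading its size field, and only then prove that the variable part fits too. A compilable, simplified sketch (plain byte offsets instead of the real nfs_fh layout and XDR_QUADLEN arithmetic):

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct fh { uint16_t size; uint8_t data[128]; };   /* stand-in for struct nfs_fh */

/*
 * Return 0 iff an embedded fh starting at byte 'off' of a 'buf_len'-byte
 * buffer is fully contained in it.  The size field may only be read
 * after stage 1 has proven the fixed header is in bounds.
 */
static int check_embedded_fh(const uint8_t *buf, size_t buf_len, size_t off)
{
    uint16_t size;

    /* Stage 1: the fixed part (everything before 'data') must fit. */
    if (buf_len < off + offsetof(struct fh, data))
        return -1;

    /* Only now is it safe to read the embedded size. */
    memcpy(&size, buf + off + offsetof(struct fh, size), sizeof(size));

    /* Stage 2: the variable-length part must fit as well. */
    if (buf_len < off + offsetof(struct fh, data) + size)
        return -1;
    return 0;
}

int main(void)
{
    uint8_t raw[8] = { 0xff, 0xff };   /* claims a huge embedded size */
    printf("%d\n", check_embedded_fh(raw, sizeof(raw), 0));   /* prints -1 */
    return 0;
}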
Reported-by: zhangjian <zhangjian496(a)huawei.com>
Fixes: 20fa19027286 ("nfs: add export operations")
Signed-off-by: Trond Myklebust <trond.myklebust(a)hammerspace.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Wang Zhaolong <wangzhaolong(a)huaweicloud.com>
---
fs/nfs/export.c | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/fs/nfs/export.c b/fs/nfs/export.c
index be686b8e0c54..aeb17adcb2b6 100644
--- a/fs/nfs/export.c
+++ b/fs/nfs/export.c
@@ -64,18 +64,25 @@ static struct dentry *
nfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type)
{
struct nfs_fattr *fattr = NULL;
struct nfs_fh *server_fh = nfs_exp_embedfh(fid->raw);
- size_t fh_size = offsetof(struct nfs_fh, data) + server_fh->size;
+ size_t fh_size = offsetof(struct nfs_fh, data);
const struct nfs_rpc_ops *rpc_ops;
struct dentry *dentry;
struct inode *inode;
- int len = EMBED_FH_OFF + XDR_QUADLEN(fh_size);
+ int len = EMBED_FH_OFF;
u32 *p = fid->raw;
int ret;
+ /* Initial check of bounds */
+ if (fh_len < len + XDR_QUADLEN(fh_size) ||
+ fh_len > XDR_QUADLEN(NFS_MAXFHSIZE))
+ return NULL;
+ /* Calculate embedded filehandle size */
+ fh_size += server_fh->size;
+ len += XDR_QUADLEN(fh_size);
/* NULL translates to ESTALE */
if (fh_len < len || fh_type != len)
return NULL;
fattr = nfs_alloc_fattr_with_label(NFS_SB(sb));
--
2.34.3
14 Jan '26
In get_guest_rtc_ns(), "s->base_rtc" is uint64_t; multiplying it by
NANOSECONDS_PER_SECOND can overflow the uint64_t type, which makes the
QEMU Linux guest's time jump and in turn triggers a kernel soft lockup,
ultimately leading to a crash.
Fix it by no longer adding s->base_rtc inside get_guest_rtc_ns(): every
caller either takes the remainder of the result modulo
NANOSECONDS_PER_SECOND or its quotient, so the base seconds can be added
back after the division without risking the multiplication.
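Concretely, the product wraps once base_rtc exceeds UINT64_MAX / NANOSECONDS_PER_SECOND (about 1.8e10 seconds); one plausible way to land far beyond that is a pre-epoch time_t stored into the unsigned field, which wraps to an enormous value. A standalone demonstration (not QEMU code; unsigned overflow is well-defined wraparound in C):

#include <stdint.h>
#include <stdio.h>

#define NANOSECONDS_PER_SECOND 1000000000ULL

int main(void)
{
    uint64_t limit = UINT64_MAX / NANOSECONDS_PER_SECOND;   /* ~1.8e10 s */

    /* A time of -1 s (one second before the epoch) stored into a
     * uint64_t becomes UINT64_MAX -- far beyond the limit. */
    uint64_t base_rtc = (uint64_t)(int64_t)-1;

    uint64_t ns = base_rtc * NANOSECONDS_PER_SECOND;        /* wraps */
    printf("limit=%llu s, base_rtc=%llu, product=%llu ns\n",
           (unsigned long long)limit,
           (unsigned long long)base_rtc,
           (unsigned long long)ns);
    return 0;
}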
Fixes: 56038ef6234e ("RTC: Update the RTC clock only when reading it")
Signed-off-by: Jinjie Ruan <ruanjinjie(a)huawei.com>
---
v2:
- Add comment for get_guest_rtc_ns().
- Update the commit message.
---
hw/rtc/mc146818rtc.c | 15 +++++++--------
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/hw/rtc/mc146818rtc.c b/hw/rtc/mc146818rtc.c
index 8631386b9f..e418c543c3 100644
--- a/hw/rtc/mc146818rtc.c
+++ b/hw/rtc/mc146818rtc.c
@@ -77,12 +77,13 @@ static inline bool rtc_running(MC146818RtcState *s)
(s->cmos_data[RTC_REG_A] & 0x70) <= 0x20);
}
+/*
+ * Note: get_guest_rtc_ns() does not include the base_rtc seconds value,
+ * so the caller "must" handle it themselves!!!
+ */
static uint64_t get_guest_rtc_ns(MC146818RtcState *s)
{
- uint64_t guest_clock = qemu_clock_get_ns(rtc_clock);
-
- return s->base_rtc * NANOSECONDS_PER_SECOND +
- guest_clock - s->last_update + s->offset;
+ return qemu_clock_get_ns(rtc_clock) - s->last_update + s->offset;
}
static void rtc_coalesced_timer_update(MC146818RtcState *s)
@@ -623,10 +624,8 @@ static void rtc_update_time(MC146818RtcState *s)
{
struct tm ret;
time_t guest_sec;
- int64_t guest_nsec;
- guest_nsec = get_guest_rtc_ns(s);
- guest_sec = guest_nsec / NANOSECONDS_PER_SECOND;
+ guest_sec = s->base_rtc + get_guest_rtc_ns(s) / NANOSECONDS_PER_SECOND;
gmtime_r(&guest_sec, &ret);
/* Is SET flag of Register B disabled? */
@@ -637,7 +636,7 @@ static void rtc_update_time(MC146818RtcState *s)
static int update_in_progress(MC146818RtcState *s)
{
- int64_t guest_nsec;
+ uint64_t guest_nsec;
if (!rtc_running(s)) {
return 0;
--
2.34.1
13 Jan '26
From: Benjamin Coddington <bcodding(a)redhat.com>
mainline inclusion
from mainline-v6.16-rc5
commit c01776287414ca43412d1319d2877cbad65444ac
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/9400
CVE: CVE-2025-38393
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
We found a few different systems hung up in writeback waiting on the same
page lock, and one task waiting on the NFS_LAYOUT_DRAIN bit in
pnfs_update_layout(), even though the pnfs_layout_hdr's plh_outstanding
count was zero.
It seems most likely that this is another race between the waiter and waker
similar to commit ed0172af5d6f ("SUNRPC: Fix a race to wake a sync task").
Fix it up by applying the advised barrier.
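The pairing the barrier restores can be sketched in user space with C11 atomics standing in for clear_bit()/smp_mb__after_atomic()/wake_up_bit(); this illustrates only the ordering, not the kernel implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool drain = true;     /* stands in for NFS_LAYOUT_DRAIN  */
static atomic_bool queued = false;   /* "a waiter is on the wait queue" */

/*
 * Waker (cf. nfs_layoutget_end): clear the flag, full fence, then look
 * for waiters.  Without the fence the waiter-check load can be
 * reordered before the store; a waiter that queues itself and then
 * re-reads a stale 'drain == true' sleeps forever.
 */
static void waker(void)
{
    atomic_store_explicit(&drain, false, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);   /* smp_mb__after_atomic() */
    if (atomic_load_explicit(&queued, memory_order_relaxed))
        puts("wake_up_bit()");
}

/* Waiter (cf. wait_on_bit): queue first, fence, then re-check the flag
 * before actually sleeping. */
static bool waiter_must_sleep(void)
{
    atomic_store_explicit(&queued, true, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);
    return atomic_load_explicit(&drain, memory_order_relaxed);
}

int main(void)
{
    if (waiter_must_sleep())
        puts("sleep");
    waker();
    return 0;
}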
Fixes: 880265c77ac4 ("pNFS: Avoid a live lock condition in pnfs_update_layout()")
Signed-off-by: Benjamin Coddington <bcodding(a)redhat.com>
Signed-off-by: Anna Schumaker <anna.schumaker(a)oracle.com>
Signed-off-by: Wang Zhaolong <wangzhaolong(a)huaweicloud.com>
---
fs/nfs/pnfs.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 91998a68b360..4868b48cb299 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1920,12 +1920,14 @@ static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
}
static void nfs_layoutget_end(struct pnfs_layout_hdr *lo)
{
if (atomic_dec_and_test(&lo->plh_outstanding) &&
- test_and_clear_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags))
+ test_and_clear_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags)) {
+ smp_mb__after_atomic();
wake_up_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN);
+ }
}
static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo)
{
return test_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags);
--
2.34.3
13 Jan '26
From: Steve French <stfrench(a)microsoft.com>
stable inclusion
from stable-v6.6.103
commit a0620e1525663edd8c4594f49fb75fe5be4724b0
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/9094
CVE: CVE-2025-38728
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 7d34ec36abb84fdfb6632a0f2cbda90379ae21fc upstream.
With KASAN enabled, it is possible to get a slab out of bounds
during mount to ksmbd due to missing check in parse_server_interfaces()
(see below):
BUG: KASAN: slab-out-of-bounds in
parse_server_interfaces+0x14ee/0x1880 [cifs]
Read of size 4 at addr ffff8881433dba98 by task mount/9827
CPU: 5 UID: 0 PID: 9827 Comm: mount Tainted: G
OE 6.16.0-rc2-kasan #2 PREEMPT(voluntary)
Tainted: [O]=OOT_MODULE, [E]=UNSIGNED_MODULE
Hardware name: Dell Inc. Precision Tower 3620/0MWYPT,
BIOS 2.13.1 06/14/2019
Call Trace:
<TASK>
dump_stack_lvl+0x9f/0xf0
print_report+0xd1/0x670
__virt_addr_valid+0x22c/0x430
? parse_server_interfaces+0x14ee/0x1880 [cifs]
? kasan_complete_mode_report_info+0x2a/0x1f0
? parse_server_interfaces+0x14ee/0x1880 [cifs]
kasan_report+0xd6/0x110
parse_server_interfaces+0x14ee/0x1880 [cifs]
__asan_report_load_n_noabort+0x13/0x20
parse_server_interfaces+0x14ee/0x1880 [cifs]
? __pfx_parse_server_interfaces+0x10/0x10 [cifs]
? trace_hardirqs_on+0x51/0x60
SMB3_request_interfaces+0x1ad/0x3f0 [cifs]
? __pfx_SMB3_request_interfaces+0x10/0x10 [cifs]
? SMB2_tcon+0x23c/0x15d0 [cifs]
smb3_qfs_tcon+0x173/0x2b0 [cifs]
? __pfx_smb3_qfs_tcon+0x10/0x10 [cifs]
? cifs_get_tcon+0x105d/0x2120 [cifs]
? do_raw_spin_unlock+0x5d/0x200
? cifs_get_tcon+0x105d/0x2120 [cifs]
? __pfx_smb3_qfs_tcon+0x10/0x10 [cifs]
cifs_mount_get_tcon+0x369/0xb90 [cifs]
? dfs_cache_find+0xe7/0x150 [cifs]
dfs_mount_share+0x985/0x2970 [cifs]
? check_path.constprop.0+0x28/0x50
? save_trace+0x54/0x370
? __pfx_dfs_mount_share+0x10/0x10 [cifs]
? __lock_acquire+0xb82/0x2ba0
? __kasan_check_write+0x18/0x20
cifs_mount+0xbc/0x9e0 [cifs]
? __pfx_cifs_mount+0x10/0x10 [cifs]
? do_raw_spin_unlock+0x5d/0x200
? cifs_setup_cifs_sb+0x29d/0x810 [cifs]
cifs_smb3_do_mount+0x263/0x1990 [cifs]
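The bug class here is an offset-chained record walk that trusts the on-the-wire Next field. A generic, compilable sketch of the safe loop shape (struct rec is a stand-in for network_interface_info_ioctl_rsp, not the real layout):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rec { uint32_t next; uint32_t payload; };   /* stand-in record */

/*
 * Walk records chained by a byte offset, refusing any Next that would
 * step past the bytes we actually own -- the check the patch adds.
 */
static int walk(const uint8_t *buf, size_t bytes_left)
{
    const uint8_t *p = buf;
    for (;;) {
        struct rec r;
        if (bytes_left < sizeof(r))
            return -1;                  /* header itself out of bounds */
        memcpy(&r, p, sizeof(r));
        if (!r.next)
            return 0;                   /* last record */
        if (r.next > bytes_left)
            return -1;                  /* Next points beyond the buffer */
        p += r.next;
        bytes_left -= r.next;
    }
}

int main(void)
{
    uint8_t buf[16] = { 0 };
    struct rec bad = { .next = 1024, .payload = 0 };
    memcpy(buf, &bad, sizeof(bad));
    printf("walk -> %d (rejects out-of-range Next)\n", walk(buf, sizeof(buf)));
    return 0;
}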
Reported-by: Namjae Jeon <linkinjeon(a)kernel.org>
Tested-by: Namjae Jeon <linkinjeon(a)kernel.org>
Cc: stable(a)vger.kernel.org
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Wang Zhaolong <wangzhaolong(a)huaweicloud.com>
---
fs/smb/client/smb2ops.c | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index c19643a37fa0..3b083a2bfd08 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -731,10 +731,17 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
next = le32_to_cpu(p->Next);
if (!next) {
bytes_left -= sizeof(*p);
break;
}
+ /* Validate that Next doesn't point beyond the buffer */
+ if (next > bytes_left) {
+ cifs_dbg(VFS, "%s: invalid Next pointer %zu > %zd\n",
+ __func__, next, bytes_left);
+ rc = -EINVAL;
+ goto out;
+ }
p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
bytes_left -= next;
}
if (!nb_iface) {
@@ -742,11 +749,13 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
rc = -EINVAL;
goto out;
}
/* Azure rounds the buffer size up 8, to a 16 byte boundary */
- if ((bytes_left > 8) || p->Next)
+ if ((bytes_left > 8) ||
+ (bytes_left >= offsetof(struct network_interface_info_ioctl_rsp, Next)
+ + sizeof(p->Next) && p->Next))
cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
ses->iface_last_update = jiffies;
out:
--
2.34.3
[PATCH OLK-6.6] smb: client: fix potential deadlock when reconnecting channels
by Wang Zhaolong 13 Jan '26
From: Paulo Alcantara <pc(a)manguebit.org>
stable inclusion
from stable-v6.6.96
commit c82c7041258d96e3286f6790ab700e4edd3cc9e3
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/9552
CVE: CVE-2025-38244
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 711741f94ac3cf9f4e3aa73aa171e76d188c0819 ]
Fix cifs_signal_cifsd_for_reconnect() to take the correct lock order
and prevent the following deadlock from happening
======================================================
WARNING: possible circular locking dependency detected
6.16.0-rc3-build2+ #1301 Tainted: G S W
------------------------------------------------------
cifsd/6055 is trying to acquire lock:
ffff88810ad56038 (&tcp_ses->srv_lock){+.+.}-{3:3}, at: cifs_signal_cifsd_for_reconnect+0x134/0x200
but task is already holding lock:
ffff888119c64330 (&ret_buf->chan_lock){+.+.}-{3:3}, at: cifs_signal_cifsd_for_reconnect+0xcf/0x200
which lock already depends on the new lock.
the existing dependency chain (in reverse order) is:
-> #2 (&ret_buf->chan_lock){+.+.}-{3:3}:
validate_chain+0x1cf/0x270
__lock_acquire+0x60e/0x780
lock_acquire.part.0+0xb4/0x1f0
_raw_spin_lock+0x2f/0x40
cifs_setup_session+0x81/0x4b0
cifs_get_smb_ses+0x771/0x900
cifs_mount_get_session+0x7e/0x170
cifs_mount+0x92/0x2d0
cifs_smb3_do_mount+0x161/0x460
smb3_get_tree+0x55/0x90
vfs_get_tree+0x46/0x180
do_new_mount+0x1b0/0x2e0
path_mount+0x6ee/0x740
do_mount+0x98/0xe0
__do_sys_mount+0x148/0x180
do_syscall_64+0xa4/0x260
entry_SYSCALL_64_after_hwframe+0x76/0x7e
-> #1 (&ret_buf->ses_lock){+.+.}-{3:3}:
validate_chain+0x1cf/0x270
__lock_acquire+0x60e/0x780
lock_acquire.part.0+0xb4/0x1f0
_raw_spin_lock+0x2f/0x40
cifs_match_super+0x101/0x320
sget+0xab/0x270
cifs_smb3_do_mount+0x1e0/0x460
smb3_get_tree+0x55/0x90
vfs_get_tree+0x46/0x180
do_new_mount+0x1b0/0x2e0
path_mount+0x6ee/0x740
do_mount+0x98/0xe0
__do_sys_mount+0x148/0x180
do_syscall_64+0xa4/0x260
entry_SYSCALL_64_after_hwframe+0x76/0x7e
-> #0 (&tcp_ses->srv_lock){+.+.}-{3:3}:
check_noncircular+0x95/0xc0
check_prev_add+0x115/0x2f0
validate_chain+0x1cf/0x270
__lock_acquire+0x60e/0x780
lock_acquire.part.0+0xb4/0x1f0
_raw_spin_lock+0x2f/0x40
cifs_signal_cifsd_for_reconnect+0x134/0x200
__cifs_reconnect+0x8f/0x500
cifs_handle_standard+0x112/0x280
cifs_demultiplex_thread+0x64d/0xbc0
kthread+0x2f7/0x310
ret_from_fork+0x2a/0x230
ret_from_fork_asm+0x1a/0x30
other info that might help us debug this:
Chain exists of:
&tcp_ses->srv_lock --> &ret_buf->ses_lock --> &ret_buf->chan_lock
Possible unsafe locking scenario:
CPU0 CPU1
---- ----
lock(&ret_buf->chan_lock);
lock(&ret_buf->ses_lock);
lock(&ret_buf->chan_lock);
lock(&tcp_ses->srv_lock);
*** DEADLOCK ***
3 locks held by cifsd/6055:
#0: ffffffff857de398 (&cifs_tcp_ses_lock){+.+.}-{3:3}, at: cifs_signal_cifsd_for_reconnect+0x7b/0x200
#1: ffff888119c64060 (&ret_buf->ses_lock){+.+.}-{3:3}, at: cifs_signal_cifsd_for_reconnect+0x9c/0x200
#2: ffff888119c64330 (&ret_buf->chan_lock){+.+.}-{3:3}, at: cifs_signal_cifsd_for_reconnect+0xcf/0x200
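The fix follows the classic two-phase shape: pin the channel servers on a private list while the session locks are held, then drop those locks, and only afterwards take each srv_lock, so the ses/chan -> srv edge never forms. A compilable pthread analogue with illustrative names (not the cifs code):

#include <pthread.h>
#include <stdio.h>

struct server {
    pthread_mutex_t srv_lock;
    int refs;
    int need_reconnect;
    struct server *rnext;        /* private reconnect-list linkage */
};

static pthread_mutex_t ses_lock = PTHREAD_MUTEX_INITIALIZER;

/* Phase 1: under ses_lock, only pin the servers.  No srv_lock is taken
 * here, so the ses_lock -> srv_lock dependency never forms. */
static struct server *collect(struct server **chans, int n)
{
    struct server *reco = NULL;
    pthread_mutex_lock(&ses_lock);
    for (int i = 0; i < n; i++) {
        chans[i]->refs++;        /* pin so the server can't go away */
        chans[i]->rnext = reco;
        reco = chans[i];
    }
    pthread_mutex_unlock(&ses_lock);
    return reco;
}

/* Phase 2: no other lock held, so taking srv_lock is deadlock-free. */
static void signal_reconnect(struct server *reco)
{
    while (reco) {
        struct server *s = reco;
        reco = s->rnext;
        pthread_mutex_lock(&s->srv_lock);
        s->need_reconnect = 1;
        pthread_mutex_unlock(&s->srv_lock);
        s->refs--;               /* drop the pin taken in collect() */
    }
}

int main(void)
{
    struct server s1 = { .srv_lock = PTHREAD_MUTEX_INITIALIZER, .refs = 1 };
    struct server s2 = { .srv_lock = PTHREAD_MUTEX_INITIALIZER, .refs = 1 };
    struct server *chans[] = { &s1, &s2 };
    signal_reconnect(collect(chans, 2));
    printf("s1 reco=%d s2 reco=%d\n", s1.need_reconnect, s2.need_reconnect);
    return 0;
}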
Cc: linux-cifs(a)vger.kernel.org
Reported-by: David Howells <dhowells(a)redhat.com>
Fixes: d7d7a66aacd6 ("cifs: avoid use of global locks for high contention data")
Reviewed-by: David Howells <dhowells(a)redhat.com>
Tested-by: David Howells <dhowells(a)redhat.com>
Signed-off-by: Paulo Alcantara (Red Hat) <pc(a)manguebit.org>
Signed-off-by: David Howells <dhowells(a)redhat.com>
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Wang Zhaolong <wangzhaolong(a)huaweicloud.com>
---
fs/smb/client/cifsglob.h | 1 +
fs/smb/client/connect.c | 58 +++++++++++++++++++++++++---------------
2 files changed, 37 insertions(+), 22 deletions(-)
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 1e6d0c87346c..d52db0669b7a 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -675,10 +675,11 @@ inc_rfc1001_len(void *buf, int count)
}
struct TCP_Server_Info {
struct list_head tcp_ses_list;
struct list_head smb_ses_list;
+ struct list_head rlist; /* reconnect list */
spinlock_t srv_lock; /* protect anything here that is not protected */
__u64 conn_id; /* connection identifier (useful for debugging) */
int srv_count; /* reference counter */
/* 15 character server name + 0x20 16th byte indicating type = srv */
char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 23e0b902239a..c7f15c71f6ac 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -142,10 +142,18 @@ static void smb2_query_server_interfaces(struct work_struct *work)
queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
(SMB_INTERFACE_POLL_INTERVAL * HZ));
}
+#define set_need_reco(server) \
+do { \
+ spin_lock(&server->srv_lock); \
+ if (server->tcpStatus != CifsExiting) \
+ server->tcpStatus = CifsNeedReconnect; \
+ spin_unlock(&server->srv_lock); \
+} while (0)
+
/*
* Update the tcpStatus for the server.
* This is used to signal the cifsd thread to call cifs_reconnect
* ONLY cifsd thread should call cifs_reconnect. For any other
* thread, use this function
@@ -155,43 +163,49 @@ static void smb2_query_server_interfaces(struct work_struct *work)
*/
void
cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
bool all_channels)
{
- struct TCP_Server_Info *pserver;
+ struct TCP_Server_Info *nserver;
struct cifs_ses *ses;
+ LIST_HEAD(reco);
int i;
- /* If server is a channel, select the primary channel */
- pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
-
/* if we need to signal just this channel */
if (!all_channels) {
- spin_lock(&server->srv_lock);
- if (server->tcpStatus != CifsExiting)
- server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&server->srv_lock);
+ set_need_reco(server);
return;
}
- spin_lock(&cifs_tcp_ses_lock);
- list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
- if (cifs_ses_exiting(ses))
- continue;
- spin_lock(&ses->chan_lock);
- for (i = 0; i < ses->chan_count; i++) {
- if (!ses->chans[i].server)
+ if (SERVER_IS_CHAN(server))
+ server = server->primary_server;
+ scoped_guard(spinlock, &cifs_tcp_ses_lock) {
+ set_need_reco(server);
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ spin_lock(&ses->ses_lock);
+ if (ses->ses_status == SES_EXITING) {
+ spin_unlock(&ses->ses_lock);
continue;
-
- spin_lock(&ses->chans[i].server->srv_lock);
- if (ses->chans[i].server->tcpStatus != CifsExiting)
- ses->chans[i].server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&ses->chans[i].server->srv_lock);
+ }
+ spin_lock(&ses->chan_lock);
+ for (i = 1; i < ses->chan_count; i++) {
+ nserver = ses->chans[i].server;
+ if (!nserver)
+ continue;
+ nserver->srv_count++;
+ list_add(&nserver->rlist, &reco);
+ }
+ spin_unlock(&ses->chan_lock);
+ spin_unlock(&ses->ses_lock);
}
- spin_unlock(&ses->chan_lock);
}
- spin_unlock(&cifs_tcp_ses_lock);
+
+ list_for_each_entry_safe(server, nserver, &reco, rlist) {
+ list_del_init(&server->rlist);
+ set_need_reco(server);
+ cifs_put_tcp_session(server, 0);
+ }
}
/*
* Mark all sessions and tcons for reconnect.
* IMPORTANT: make sure that this gets called only from
--
2.34.3
13 Jan '26
From: Ruben Devos <devosruben6(a)gmail.com>
stable inclusion
from stable-v6.6.95
commit 37166d63e42c34846a16001950ecec96229a8d17
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/9602
CVE: CVE-2025-38208
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit f1e7a277a1736e12cc4bd6d93b8a5c439b8ca20c upstream.
page is checked for null in __build_path_from_dentry_optional_prefix
when tcon->origin_fullpath is not set. However, the check is missing when
it is set.
Add a check to prevent a potential NULL pointer dereference.
Signed-off-by: Ruben Devos <devosruben6(a)gmail.com>
Cc: stable(a)vger.kernel.org
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Wang Zhaolong <wangzhaolong(a)huaweicloud.com>
---
fs/smb/client/namespace.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/fs/smb/client/namespace.c b/fs/smb/client/namespace.c
index 830f2a292bb0..ec58c0e50724 100644
--- a/fs/smb/client/namespace.c
+++ b/fs/smb/client/namespace.c
@@ -144,10 +144,13 @@ static char *automount_fullpath(struct dentry *dentry, void *page)
page,
true);
}
spin_unlock(&tcon->tc_lock);
+ if (unlikely(!page))
+ return ERR_PTR(-ENOMEM);
+
s = dentry_path_raw(dentry, page, PATH_MAX);
if (IS_ERR(s))
return s;
/* for root, we want "" */
if (!s[1])
--
2.34.3
13 Jan '26
From: Wang Zhaolong <wangzhaolong1(a)huawei.com>
stable inclusion
from stable-v6.6.93
commit 73cadde98f67f76c5eba00ac0b72c453383cec8b
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/9768
CVE: CVE-2025-38051
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit a7a8fe56e932a36f43e031b398aef92341bf5ea0 upstream.
There is a race condition in the readdir concurrency process, which may
access the rsp buffer after it has been released, triggering the
following KASAN warning.
==================================================================
BUG: KASAN: slab-use-after-free in cifs_fill_dirent+0xb03/0xb60 [cifs]
Read of size 4 at addr ffff8880099b819c by task a.out/342975
CPU: 2 UID: 0 PID: 342975 Comm: a.out Not tainted 6.15.0-rc6+ #240 PREEMPT(full)
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.1-2.fc37 04/01/2014
Call Trace:
<TASK>
dump_stack_lvl+0x53/0x70
print_report+0xce/0x640
kasan_report+0xb8/0xf0
cifs_fill_dirent+0xb03/0xb60 [cifs]
cifs_readdir+0x12cb/0x3190 [cifs]
iterate_dir+0x1a1/0x520
__x64_sys_getdents+0x134/0x220
do_syscall_64+0x4b/0x110
entry_SYSCALL_64_after_hwframe+0x76/0x7e
RIP: 0033:0x7f996f64b9f9
Code: ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 48 89 f8 48 89
f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01
f0 ff ff 0d f7 c3 0c 00 f7 d8 64 89 8
RSP: 002b:00007f996f53de78 EFLAGS: 00000207 ORIG_RAX: 000000000000004e
RAX: ffffffffffffffda RBX: 00007f996f53ecdc RCX: 00007f996f64b9f9
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000003
RBP: 00007f996f53dea0 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000207 R12: ffffffffffffff88
R13: 0000000000000000 R14: 00007ffc8cd9a500 R15: 00007f996f51e000
</TASK>
Allocated by task 408:
kasan_save_stack+0x20/0x40
kasan_save_track+0x14/0x30
__kasan_slab_alloc+0x6e/0x70
kmem_cache_alloc_noprof+0x117/0x3d0
mempool_alloc_noprof+0xf2/0x2c0
cifs_buf_get+0x36/0x80 [cifs]
allocate_buffers+0x1d2/0x330 [cifs]
cifs_demultiplex_thread+0x22b/0x2690 [cifs]
kthread+0x394/0x720
ret_from_fork+0x34/0x70
ret_from_fork_asm+0x1a/0x30
Freed by task 342979:
kasan_save_stack+0x20/0x40
kasan_save_track+0x14/0x30
kasan_save_free_info+0x3b/0x60
__kasan_slab_free+0x37/0x50
kmem_cache_free+0x2b8/0x500
cifs_buf_release+0x3c/0x70 [cifs]
cifs_readdir+0x1c97/0x3190 [cifs]
iterate_dir+0x1a1/0x520
__x64_sys_getdents64+0x134/0x220
do_syscall_64+0x4b/0x110
entry_SYSCALL_64_after_hwframe+0x76/0x7e
The buggy address belongs to the object at ffff8880099b8000
which belongs to the cache cifs_request of size 16588
The buggy address is located 412 bytes inside of
freed 16588-byte region [ffff8880099b8000, ffff8880099bc0cc)
The buggy address belongs to the physical page:
page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x99b8
head: order:3 mapcount:0 entire_mapcount:0 nr_pages_mapped:0 pincount:0
anon flags: 0x80000000000040(head|node=0|zone=1)
page_type: f5(slab)
raw: 0080000000000040 ffff888001e03400 0000000000000000 dead000000000001
raw: 0000000000000000 0000000000010001 00000000f5000000 0000000000000000
head: 0080000000000040 ffff888001e03400 0000000000000000 dead000000000001
head: 0000000000000000 0000000000010001 00000000f5000000 0000000000000000
head: 0080000000000003 ffffea0000266e01 00000000ffffffff 00000000ffffffff
head: ffffffffffffffff 0000000000000000 00000000ffffffff 0000000000000008
page dumped because: kasan: bad access detected
Memory state around the buggy address:
ffff8880099b8080: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff8880099b8100: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
>ffff8880099b8180: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
^
ffff8880099b8200: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff8880099b8280: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
==================================================================
POC is available in the link [1].
The sequence of events that triggers the problem is as follows:
Process 1 Process 2
-----------------------------------------------------------------
cifs_readdir
/* file->private_data == NULL */
initiate_cifs_search
cifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
smb2_query_dir_first ->query_dir_first()
SMB2_query_directory
SMB2_query_directory_init
cifs_send_recv
smb2_parse_query_directory
srch_inf->ntwrk_buf_start = (char *)rsp;
srch_inf->srch_entries_start = (char *)rsp + ...
srch_inf->last_entry = (char *)rsp + ...
srch_inf->smallBuf = true;
find_cifs_entry
/* if (cfile->srch_inf.ntwrk_buf_start) */
cifs_small_buf_release(cfile->srch_inf // free
cifs_readdir ->iterate_shared()
/* file->private_data != NULL */
find_cifs_entry
/* in while (...) loop */
smb2_query_dir_next ->query_dir_next()
SMB2_query_directory
SMB2_query_directory_init
cifs_send_recv
compound_send_recv
smb_send_rqst
__smb_send_rqst
rc = -ERESTARTSYS;
/* if (fatal_signal_pending()) */
goto out;
return rc
/* if (cfile->srch_inf.last_entry) */
cifs_save_resume_key()
cifs_fill_dirent // UAF
/* if (rc) */
return -ENOENT;
Fix this by ensuring the return code is checked before using pointers
from the srch_inf.
Link: https://bugzilla.kernel.org/show_bug.cgi?id=220131 [1]
Fixes: a364bc0b37f1 ("[CIFS] fix saving of resume key before CIFSFindNext")
Cc: stable(a)vger.kernel.org
Reviewed-by: Paulo Alcantara (Red Hat) <pc(a)manguebit.com>
Signed-off-by: Wang Zhaolong <wangzhaolong1(a)huawei.com>
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Wang Zhaolong <wangzhaolong(a)huaweicloud.com>
---
fs/smb/client/readdir.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
index 75929a0a56f9..8a7c34158d8a 100644
--- a/fs/smb/client/readdir.c
+++ b/fs/smb/client/readdir.c
@@ -754,15 +754,15 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
(rc == 0) && !cfile->srch_inf.endOfSearch) {
cifs_dbg(FYI, "calling findnext2\n");
rc = server->ops->query_dir_next(xid, tcon, &cfile->fid,
search_flags,
&cfile->srch_inf);
+ if (rc)
+ return -ENOENT;
/* FindFirst/Next set last_entry to NULL on malformed reply */
if (cfile->srch_inf.last_entry)
cifs_save_resume_key(cfile->srch_inf.last_entry, cfile);
- if (rc)
- return -ENOENT;
}
if (index_to_find < cfile->srch_inf.index_of_last_entry) {
/* we found the buffer that contains the entry */
/* scan and find it */
int i;
--
2.34.3
13 Jan '26
From: Wang Zhaolong <wangzhaolong1(a)huawei.com>
stable inclusion
from stable-v6.6.93
commit 73cadde98f67f76c5eba00ac0b72c453383cec8b
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/9768
CVE: CVE-2025-38051
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit a7a8fe56e932a36f43e031b398aef92341bf5ea0 upstream.
There is a race condition in the readdir concurrency process, which may
access the rsp buffer after it has been released, triggering the
following KASAN warning.
==================================================================
BUG: KASAN: slab-use-after-free in cifs_fill_dirent+0xb03/0xb60 [cifs]
Read of size 4 at addr ffff8880099b819c by task a.out/342975
CPU: 2 UID: 0 PID: 342975 Comm: a.out Not tainted 6.15.0-rc6+ #240 PREEMPT(full)
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.1-2.fc37 04/01/2014
Call Trace:
<TASK>
dump_stack_lvl+0x53/0x70
print_report+0xce/0x640
kasan_report+0xb8/0xf0
cifs_fill_dirent+0xb03/0xb60 [cifs]
cifs_readdir+0x12cb/0x3190 [cifs]
iterate_dir+0x1a1/0x520
__x64_sys_getdents+0x134/0x220
do_syscall_64+0x4b/0x110
entry_SYSCALL_64_after_hwframe+0x76/0x7e
RIP: 0033:0x7f996f64b9f9
Code: ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 48 89 f8 48 89
f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01
f0 ff ff 0d f7 c3 0c 00 f7 d8 64 89 8
RSP: 002b:00007f996f53de78 EFLAGS: 00000207 ORIG_RAX: 000000000000004e
RAX: ffffffffffffffda RBX: 00007f996f53ecdc RCX: 00007f996f64b9f9
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000003
RBP: 00007f996f53dea0 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000207 R12: ffffffffffffff88
R13: 0000000000000000 R14: 00007ffc8cd9a500 R15: 00007f996f51e000
</TASK>
Allocated by task 408:
kasan_save_stack+0x20/0x40
kasan_save_track+0x14/0x30
__kasan_slab_alloc+0x6e/0x70
kmem_cache_alloc_noprof+0x117/0x3d0
mempool_alloc_noprof+0xf2/0x2c0
cifs_buf_get+0x36/0x80 [cifs]
allocate_buffers+0x1d2/0x330 [cifs]
cifs_demultiplex_thread+0x22b/0x2690 [cifs]
kthread+0x394/0x720
ret_from_fork+0x34/0x70
ret_from_fork_asm+0x1a/0x30
Freed by task 342979:
kasan_save_stack+0x20/0x40
kasan_save_track+0x14/0x30
kasan_save_free_info+0x3b/0x60
__kasan_slab_free+0x37/0x50
kmem_cache_free+0x2b8/0x500
cifs_buf_release+0x3c/0x70 [cifs]
cifs_readdir+0x1c97/0x3190 [cifs]
iterate_dir+0x1a1/0x520
__x64_sys_getdents64+0x134/0x220
do_syscall_64+0x4b/0x110
entry_SYSCALL_64_after_hwframe+0x76/0x7e
The buggy address belongs to the object at ffff8880099b8000
which belongs to the cache cifs_request of size 16588
The buggy address is located 412 bytes inside of
freed 16588-byte region [ffff8880099b8000, ffff8880099bc0cc)
The buggy address belongs to the physical page:
page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x99b8
head: order:3 mapcount:0 entire_mapcount:0 nr_pages_mapped:0 pincount:0
anon flags: 0x80000000000040(head|node=0|zone=1)
page_type: f5(slab)
raw: 0080000000000040 ffff888001e03400 0000000000000000 dead000000000001
raw: 0000000000000000 0000000000010001 00000000f5000000 0000000000000000
head: 0080000000000040 ffff888001e03400 0000000000000000 dead000000000001
head: 0000000000000000 0000000000010001 00000000f5000000 0000000000000000
head: 0080000000000003 ffffea0000266e01 00000000ffffffff 00000000ffffffff
head: ffffffffffffffff 0000000000000000 00000000ffffffff 0000000000000008
page dumped because: kasan: bad access detected
Memory state around the buggy address:
ffff8880099b8080: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff8880099b8100: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
>ffff8880099b8180: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
^
ffff8880099b8200: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff8880099b8280: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
==================================================================
POC is available in the link [1].
The sequence of events that triggers the problem is as follows:
Process 1 Process 2
-----------------------------------------------------------------
cifs_readdir
/* file->private_data == NULL */
initiate_cifs_search
cifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
smb2_query_dir_first ->query_dir_first()
SMB2_query_directory
SMB2_query_directory_init
cifs_send_recv
smb2_parse_query_directory
srch_inf->ntwrk_buf_start = (char *)rsp;
srch_inf->srch_entries_start = (char *)rsp + ...
srch_inf->last_entry = (char *)rsp + ...
srch_inf->smallBuf = true;
find_cifs_entry
/* if (cfile->srch_inf.ntwrk_buf_start) */
cifs_small_buf_release(cfile->srch_inf // free
cifs_readdir ->iterate_shared()
/* file->private_data != NULL */
find_cifs_entry
/* in while (...) loop */
smb2_query_dir_next ->query_dir_next()
SMB2_query_directory
SMB2_query_directory_init
cifs_send_recv
compound_send_recv
smb_send_rqst
__smb_send_rqst
rc = -ERESTARTSYS;
/* if (fatal_signal_pending()) */
goto out;
return rc
/* if (cfile->srch_inf.last_entry) */
cifs_save_resume_key()
cifs_fill_dirent // UAF
/* if (rc) */
return -ENOENT;
Fix this by ensuring the return code is checked before using pointers
from the srch_inf.
Link: https://bugzilla.kernel.org/show_bug.cgi?id=220131 [1]
Fixes: a364bc0b37f1 ("[CIFS] fix saving of resume key before CIFSFindNext")
Cc: stable(a)vger.kernel.org
Reviewed-by: Paulo Alcantara (Red Hat) <pc(a)manguebit.com>
Signed-off-by: Wang Zhaolong <wangzhaolong1(a)huawei.com>
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Wang Hai <wanghai38(a)huawei.com>
---
fs/smb/client/readdir.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
index 75929a0a56f9..8a7c34158d8a 100644
--- a/fs/smb/client/readdir.c
+++ b/fs/smb/client/readdir.c
@@ -754,15 +754,15 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
(rc == 0) && !cfile->srch_inf.endOfSearch) {
cifs_dbg(FYI, "calling findnext2\n");
rc = server->ops->query_dir_next(xid, tcon, &cfile->fid,
search_flags,
&cfile->srch_inf);
+ if (rc)
+ return -ENOENT;
/* FindFirst/Next set last_entry to NULL on malformed reply */
if (cfile->srch_inf.last_entry)
cifs_save_resume_key(cfile->srch_inf.last_entry, cfile);
- if (rc)
- return -ENOENT;
}
if (index_to_find < cfile->srch_inf.index_of_last_entry) {
/* we found the buffer that contains the entry */
/* scan and find it */
int i;
--
2.34.3
[PATCH OLK-6.6] smb: client: Avoid race in open_cached_dir with lease breaks
by Wang Zhaolong 13 Jan '26
From: Paul Aurich <paul(a)darkrain42.org>
mainline inclusion
from mainline-v6.15-rc6
commit 3ca02e63edccb78ef3659bebc68579c7224a6ca2
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/10178
CVE: CVE-2025-37954
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
A pre-existing valid cfid returned from find_or_create_cached_dir might
race with a lease break, meaning open_cached_dir doesn't consider it
valid, and thinks it's newly-constructed. This leaks a dentry reference
if the allocation occurs before the queued lease break work runs.
Avoid the race by holding the cfid_list_lock across both
find_or_create_cached_dir and the subsequent check of its result.
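As a locking pattern: the lookup-or-create and the validity check must sit in one critical section, or a lease break can invalidate the entry in between. A compilable pthread analogue with illustrative names (the real code's miss path goes on to open the directory rather than bailing out as this sketch does):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry {
    struct entry *next;
    char key[64];
    int valid;   /* cf. cfid->time && cfid->has_lease */
    int refs;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *entries;

/* Caller holds list_lock, so nothing can invalidate the entry between
 * the lookup/creation here and the validity check in open_cached(). */
static struct entry *find_or_create(const char *key)
{
    for (struct entry *e = entries; e; e = e->next)
        if (!strcmp(e->key, key))
            return e;
    struct entry *e = calloc(1, sizeof(*e));
    if (e) {
        snprintf(e->key, sizeof(e->key), "%s", key);
        e->next = entries;
        entries = e;
    }
    return e;
}

static struct entry *open_cached(const char *key)
{
    pthread_mutex_lock(&list_lock);
    struct entry *e = find_or_create(key);
    if (e && e->valid)
        e->refs++;               /* take the reference while still locked */
    else
        e = NULL;                /* new or being torn down: not usable */
    pthread_mutex_unlock(&list_lock);
    return e;
}

int main(void)
{
    struct entry *e = open_cached("\\share\\dir");
    return e ? 0 : 1;            /* NULL: freshly created, not yet valid */
}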
Cc: stable(a)vger.kernel.org
Reviewed-by: Henrique Carvalho <henrique.carvalho(a)suse.com>
Signed-off-by: Paul Aurich <paul(a)darkrain42.org>
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Signed-off-by: Wang Zhaolong <wangzhaolong1(a)huawei.com>
---
fs/smb/client/cached_dir.c | 10 ++--------
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index 9c0ef4195b58..749794667295 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -27,38 +27,32 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
bool lookup_only,
__u32 max_cached_dirs)
{
struct cached_fid *cfid;
- spin_lock(&cfids->cfid_list_lock);
list_for_each_entry(cfid, &cfids->entries, entry) {
if (!strcmp(cfid->path, path)) {
/*
* If it doesn't have a lease it is either not yet
* fully cached or it may be in the process of
* being deleted due to a lease break.
*/
if (!cfid->time || !cfid->has_lease) {
- spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
kref_get(&cfid->refcount);
- spin_unlock(&cfids->cfid_list_lock);
return cfid;
}
}
if (lookup_only) {
- spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
if (cfids->num_entries >= max_cached_dirs) {
- spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
cfid = init_cached_dir(path);
if (cfid == NULL) {
- spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
cfid->cfids = cfids;
cfids->num_entries++;
list_add(&cfid->entry, &cfids->entries);
@@ -72,11 +66,10 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
* Concurrent processes won't be to use it yet due to @cfid->time being
* zero.
*/
cfid->has_lease = true;
- spin_unlock(&cfids->cfid_list_lock);
return cfid;
}
static struct dentry *
path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
@@ -183,21 +176,22 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
if (!utf16_path)
return -ENOMEM;
+ spin_lock(&cfids->cfid_list_lock);
cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
if (cfid == NULL) {
+ spin_unlock(&cfids->cfid_list_lock);
kfree(utf16_path);
return -ENOENT;
}
/*
* Return cached fid if it is valid (has a lease and has a time).
* Otherwise, it is either a new entry or laundromat worker removed it
* from @cfids->entries. Caller will put last reference if the latter.
*/
- spin_lock(&cfids->cfid_list_lock);
if (cfid->has_lease && cfid->time) {
spin_unlock(&cfids->cfid_list_lock);
*ret_cfid = cfid;
kfree(utf16_path);
return 0;
--
2.34.3
Fix CVE-2025-40016
Ricardo Ribalda (1):
media: uvcvideo: Allow extra entities
Thadeu Lima de Souza Cascardo (1):
media: uvcvideo: Mark invalid entities with id UVC_INVALID_ENTITY_ID
drivers/media/usb/uvc/uvc_driver.c | 66 +++++++++++++++++++-----------
drivers/media/usb/uvc/uvcvideo.h | 9 +++-
2 files changed, 49 insertions(+), 26 deletions(-)
--
2.39.2
13 Jan '26
From: Paulo Alcantara <pc(a)manguebit.org>
mainline inclusion
from mainline-v6.17-rc1
commit d84291fc7453df7881a970716f8256273aca5747
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/8680
CVE: CVE-2025-39825
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
Besides sending the rename request to the server, the rename process
also involves closing any deferred close, waiting for outstanding I/O
to complete as well as marking all existing open handles as deleted to
prevent them from deferring closes, which increases the race window
for potential concurrent opens on the target file.
Fix this by unhashing the dentry in advance to prevent any concurrent
opens on the target.
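The shape of the fix, as a hedged sketch condensed from the diff below (other
error paths elided): unhash the target before issuing the rename, and rehash
it only if no rename attempt succeeded.
bool rehash = false;

/* Hide the target from concurrent lookups before renaming. */
if (d_is_positive(target_dentry) && !d_is_dir(target_dentry) &&
    !d_unhashed(target_dentry)) {
        d_drop(target_dentry);
        rehash = true;
}

rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry, to_name);
if (!rc)
        rehash = false; /* rename succeeded; keep the target dropped */

/* Exit path: make the target visible again only if the rename failed. */
if (rehash)
        d_rehash(target_dentry);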
Signed-off-by: Paulo Alcantara (Red Hat) <pc(a)manguebit.org>
Reviewed-by: David Howells <dhowells(a)redhat.com>
Cc: Al Viro <viro(a)zeniv.linux.org.uk>
Cc: linux-cifs(a)vger.kernel.org
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Conflicts:
fs/cifs/inode.c
fs/smb/client/inode.c
[Code moved to fs/smb/client]
Signed-off-by: Long Li <leo.lilong(a)huawei.com>
---
fs/cifs/inode.c | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index b11a919b9cab..3f33cdd14b1a 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -2069,6 +2069,7 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
struct cifs_tcon *tcon;
+ bool rehash = false;
FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
FILE_UNIX_BASIC_INFO *info_buf_target;
unsigned int xid;
@@ -2078,6 +2079,18 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
return -EINVAL;
cifs_sb = CIFS_SB(source_dir->i_sb);
+
+ /*
+ * Prevent any concurrent opens on the target by unhashing the dentry.
+ * VFS already unhashes the target when renaming directories.
+ */
+ if (d_is_positive(target_dentry) && !d_is_dir(target_dentry)) {
+ if (!d_unhashed(target_dentry)) {
+ d_drop(target_dentry);
+ rehash = true;
+ }
+ }
+
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
@@ -2103,6 +2116,8 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
to_name);
+ if (!rc)
+ rehash = false;
/*
* No-replace is the natural behavior for CIFS, so skip unlink hacks.
@@ -2159,6 +2174,8 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
goto cifs_rename_exit;
rc = cifs_do_rename(xid, source_dentry, from_name,
target_dentry, to_name);
+ if (!rc)
+ rehash = false;
}
/* force revalidate to go get info when needed */
@@ -2168,6 +2185,8 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
target_dir->i_mtime = current_time(source_dir);
cifs_rename_exit:
+ if (rehash)
+ d_rehash(target_dentry);
kfree(info_buf_source);
kfree(from_name);
kfree(to_name);
--
2.39.2
2
1
Fix CVE-2022-50737
Edward Lo (2):
fs/ntfs3: Validate index root when initialize NTFS security
fs/ntfs3: Validate buffer length while parsing index
Konstantin Komarov (1):
fs/ntfs3: Add null pointer checks
fs/ntfs3/fsntfs.c | 18 ++++++++++++------
fs/ntfs3/index.c | 7 +++++++
2 files changed, 19 insertions(+), 6 deletions(-)
--
2.39.2
Fix CVE-2024-53179
Paulo Alcantara (1):
smb: client: fix use-after-free of signing key
Shyam Prasad N (1):
cifs: missed ref-counting smb session in find
fs/cifs/smb2proto.h | 2 --
fs/cifs/smb2transport.c | 58 ++++++++++++++++++++++++++++++-----------
2 files changed, 43 insertions(+), 17 deletions(-)
--
2.39.2
13 Jan '26
Offering: HULK
hulk inclusion
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/8353
--------------------------------
[BUG]
A KASAN out-of-bounds issue was discovered during syzkaller testing:
BUG: unable to handle page fault for address: ffffed101c3f4de7 ...
......
Call Trace:
xfs_dir2_leaf_addname+0xc70/0x1060
xfs_dir_createname+0x3a6/0x4a0
xfs_create+0x6ff/0x8b0
......
[CAUSE]
The root cause is mounting a corrupted XFS image where the physical block
of a data block overlaps with the physical block of a leaf block. The
process is as follows:
1) The user calls getdents, so xfs_readdir() pre-reads the xfs_buf
corresponding to the data block.
2) When a file is then created in the same directory, the leaf block reuses
that xfs_buf because its record carries the same startblock and blockcnt.
3) In xfs_dir2_leaf_addname(), "bestsp" is calculated by subtracting the
ltp->bestcount size from an address. At this point, bestcount is actually
xfs_dir2_data_unused->tag from the data block, which represents the offset
of the current unused space, so it is easy to push bestsp out of bounds.
Subsequent accesses to bestsp then trigger the issue.
[FIX]
The xfs_dir3_leaf_read() function does not perform further validation on
the acquired xfs_buf. Following the logic in xfs_dir3_data_read() and
linux-mainline, apply the validation to leaf/leafn read to detect errors
early and prevent subsequent KASAN issues.
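In essence, both leaf read helpers become a read-then-validate sequence; a
minimal sketch (condensed from the diff below, with "ops" standing in for the
leaf1/leafn buffer ops):
err = xfs_da_read_buf(tp, dp, fbno, 0, bpp, XFS_DATA_FORK, ops);
if (err || !(*bpp))
        return err;

/* Reject buffers whose header is not a leaf block owned by this inode. */
fa = xfs_dir3_leaf_header_check(*bpp, dp->i_ino);
if (fa) {
        __xfs_buf_mark_corrupt(*bpp, fa);
        xfs_trans_brelse(tp, *bpp);
        *bpp = NULL;
        return -EFSCORRUPTED;
}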
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Zizhi Wo <wozizhi(a)huawei.com>
---
fs/xfs/libxfs/xfs_dir2_leaf.c | 51 +++++++++++++++++++++++++++++++++--
1 file changed, 49 insertions(+), 2 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_dir2_leaf.c b/fs/xfs/libxfs/xfs_dir2_leaf.c
index bb18323fd1ed..553ca8526c89 100644
--- a/fs/xfs/libxfs/xfs_dir2_leaf.c
+++ b/fs/xfs/libxfs/xfs_dir2_leaf.c
@@ -203,6 +203,29 @@ xfs_dir3_leaf_verify(
return xfs_dir3_leaf_check_int(mp, &leafhdr, bp->b_addr);
}
+xfs_failaddr_t
+xfs_dir3_leaf_header_check(
+ struct xfs_buf *bp,
+ xfs_ino_t owner)
+{
+ struct xfs_mount *mp = bp->b_mount;
+
+ if (xfs_has_crc(mp)) {
+ struct xfs_dir3_leaf *hdr3 = bp->b_addr;
+
+ if (hdr3->hdr.info.hdr.magic !=
+ cpu_to_be16(XFS_DIR3_LEAF1_MAGIC) &&
+ hdr3->hdr.info.hdr.magic !=
+ cpu_to_be16(XFS_DIR3_LEAFN_MAGIC))
+ return __this_address;
+
+ if (be64_to_cpu(hdr3->hdr.info.owner) != owner)
+ return __this_address;
+ }
+
+ return NULL;
+}
+
static void
xfs_dir3_leaf_read_verify(
struct xfs_buf *bp)
@@ -269,11 +292,23 @@ xfs_dir3_leaf_read(
xfs_dablk_t fbno,
struct xfs_buf **bpp)
{
+ xfs_failaddr_t fa;
int err;
err = xfs_da_read_buf(tp, dp, fbno, 0, bpp, XFS_DATA_FORK,
&xfs_dir3_leaf1_buf_ops);
- if (!err && tp && *bpp)
+ if (err || !(*bpp))
+ return err;
+
+ fa = xfs_dir3_leaf_header_check(*bpp, dp->i_ino);
+ if (fa) {
+ __xfs_buf_mark_corrupt(*bpp, fa);
+ xfs_trans_brelse(tp, *bpp);
+ *bpp = NULL;
+ return -EFSCORRUPTED;
+ }
+
+ if (tp)
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_LEAF1_BUF);
return err;
}
@@ -285,11 +320,23 @@ xfs_dir3_leafn_read(
xfs_dablk_t fbno,
struct xfs_buf **bpp)
{
+ xfs_failaddr_t fa;
int err;
err = xfs_da_read_buf(tp, dp, fbno, 0, bpp, XFS_DATA_FORK,
&xfs_dir3_leafn_buf_ops);
- if (!err && tp && *bpp)
+ if (err || !(*bpp))
+ return err;
+
+ fa = xfs_dir3_leaf_header_check(*bpp, dp->i_ino);
+ if (fa) {
+ __xfs_buf_mark_corrupt(*bpp, fa);
+ xfs_trans_brelse(tp, *bpp);
+ *bpp = NULL;
+ return -EFSCORRUPTED;
+ }
+
+ if (tp)
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_LEAFN_BUF);
return err;
}
--
2.39.2
13 Jan '26
Offering: HULK
hulk inclusion
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/8353
--------------------------------
[BUG]
A KASAN out-of-bounds issue was discovered during syzkaller testing:
BUG: unable to handle page fault for address: ffffed101c3f4de7 ...
......
Call Trace:
xfs_dir2_leaf_addname+0xc70/0x1060
xfs_dir_createname+0x3a6/0x4a0
xfs_create+0x6ff/0x8b0
......
[CAUSE]
The root cause is mounting a corrupted XFS image where the physical block
of a data block overlaps with the physical block of a leaf block. The
process is as follows:
1) The user calls getdents, so xfs_readdir() pre-reads the xfs_buf
corresponding to the data block.
2) When a file is then created in the same directory, the leaf block reuses
that xfs_buf because its record carries the same startblock and blockcnt.
3) In xfs_dir2_leaf_addname(), "bestsp" is calculated by subtracting the
ltp->bestcount size from an address. At this point, bestcount is actually
xfs_dir2_data_unused->tag from the data block, which represents the offset
of the current unused space, so it is easy to push bestsp out of bounds.
Subsequent accesses to bestsp then trigger the issue.
[FIX]
The xfs_dir3_leaf_read() function does not perform further validation on
the acquired xfs_buf. Following the logic in xfs_dir3_data_read() and
linux-mainline, apply the validation to leaf/leafn read to detect errors
early and prevent subsequent KASAN issues.
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Zizhi Wo <wozizhi(a)huawei.com>
---
fs/xfs/libxfs/xfs_dir2_leaf.c | 51 +++++++++++++++++++++++++++++++++--
1 file changed, 49 insertions(+), 2 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_dir2_leaf.c b/fs/xfs/libxfs/xfs_dir2_leaf.c
index cb9e950a911d..57720c2274b6 100644
--- a/fs/xfs/libxfs/xfs_dir2_leaf.c
+++ b/fs/xfs/libxfs/xfs_dir2_leaf.c
@@ -207,6 +207,29 @@ xfs_dir3_leaf_verify(
return xfs_dir3_leaf_check_int(mp, &leafhdr, bp->b_addr, true);
}
+xfs_failaddr_t
+xfs_dir3_leaf_header_check(
+ struct xfs_buf *bp,
+ xfs_ino_t owner)
+{
+ struct xfs_mount *mp = bp->b_mount;
+
+ if (xfs_has_crc(mp)) {
+ struct xfs_dir3_leaf *hdr3 = bp->b_addr;
+
+ if (hdr3->hdr.info.hdr.magic !=
+ cpu_to_be16(XFS_DIR3_LEAF1_MAGIC) &&
+ hdr3->hdr.info.hdr.magic !=
+ cpu_to_be16(XFS_DIR3_LEAFN_MAGIC))
+ return __this_address;
+
+ if (be64_to_cpu(hdr3->hdr.info.owner) != owner)
+ return __this_address;
+ }
+
+ return NULL;
+}
+
static void
xfs_dir3_leaf_read_verify(
struct xfs_buf *bp)
@@ -273,11 +296,23 @@ xfs_dir3_leaf_read(
xfs_dablk_t fbno,
struct xfs_buf **bpp)
{
+ xfs_failaddr_t fa;
int err;
err = xfs_da_read_buf(tp, dp, fbno, 0, bpp, XFS_DATA_FORK,
&xfs_dir3_leaf1_buf_ops);
- if (!err && tp && *bpp)
+ if (err || !(*bpp))
+ return err;
+
+ fa = xfs_dir3_leaf_header_check(*bpp, dp->i_ino);
+ if (fa) {
+ __xfs_buf_mark_corrupt(*bpp, fa);
+ xfs_trans_brelse(tp, *bpp);
+ *bpp = NULL;
+ return -EFSCORRUPTED;
+ }
+
+ if (tp)
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_LEAF1_BUF);
return err;
}
@@ -289,11 +324,23 @@ xfs_dir3_leafn_read(
xfs_dablk_t fbno,
struct xfs_buf **bpp)
{
+ xfs_failaddr_t fa;
int err;
err = xfs_da_read_buf(tp, dp, fbno, 0, bpp, XFS_DATA_FORK,
&xfs_dir3_leafn_buf_ops);
- if (!err && tp && *bpp)
+ if (err || !(*bpp))
+ return err;
+
+ fa = xfs_dir3_leaf_header_check(*bpp, dp->i_ino);
+ if (fa) {
+ __xfs_buf_mark_corrupt(*bpp, fa);
+ xfs_trans_brelse(tp, *bpp);
+ *bpp = NULL;
+ return -EFSCORRUPTED;
+ }
+
+ if (tp)
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_LEAFN_BUF);
return err;
}
--
2.39.2
[PATCH OLK-6.6] net/mdiobus: Fix potential out-of-bounds read/write access
by Zhang Changzhong 12 Jan '26
From: Jakub Raczynski <j.raczynski(a)samsung.com>
stable inclusion
from stable-v6.6.94
commit bab6bca0834cbb5be2a7cfe59ec6ad016ec72608
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/9645
CVE: CVE-2025-38111
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 0e629694126ca388916f059453a1c36adde219c4 ]
When using publicly available tools like 'mdio-tools' to read/write data
from/to a network interface and its PHY via mdiobus, there is no verification
of the parameters passed to the ioctl, and it accepts any mdio address.
The kernel currently supports 32 addresses via the PHY_MAX_ADDR define, but
it is possible to pass a higher value than that via the ioctl. While the
read/write operation itself should generally fail in that case, mdiobus keeps
a per-address stats array, where a wrong address allows an out-of-bounds
read/write.
Fix that by adding address verification before the read/write operation.
While this excludes such accesses from the statistics, it improves the
security of the read/write path.
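The out-of-bounds target is the fixed-size per-address statistics array that
the Fixes commit added to struct mii_bus; a sketch of the relevant shape
(other members elided), which is why the address must be validated even when
the transfer itself would fail:
struct mii_bus {
        /* ... */
        struct mdio_bus_stats stats[PHY_MAX_ADDR];      /* 32 slots */
};

/* The added guard, executed before any stats[addr] access: */
if (addr >= PHY_MAX_ADDR)
        return -ENXIO;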
Fixes: 080bb352fad00 ("net: phy: Maintain MDIO device and bus statistics")
Signed-off-by: Jakub Raczynski <j.raczynski(a)samsung.com>
Reported-by: Wenjing Shan <wenjing.shan(a)samsung.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Zhang Changzhong <zhangchangzhong(a)huawei.com>
---
drivers/net/phy/mdio_bus.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 6f8177a..e02706b 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -850,6 +850,9 @@ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->read)
retval = bus->read(bus, addr, regnum);
else
@@ -879,6 +882,9 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->write)
err = bus->write(bus, addr, regnum, val);
else
--
2.9.5
Andrew Lunn (1):
net: mdio: C22 is now optional, EOPNOTSUPP if not provided
Jakub Raczynski (1):
net/mdiobus: Fix potential out-of-bounds read/write access
drivers/net/phy/mdio_bus.c | 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
--
2.9.5
[PATCH OLK-6.6] net/mdiobus: Fix potential out-of-bounds clause 45 read/write access
by Zhang Changzhong 12 Jan '26
From: Jakub Raczynski <j.raczynski(a)samsung.com>
stable inclusion
from stable-v6.6.94
commit abb0605ca00979a49572a6516f6db22c3dc57223
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/9663
CVE: CVE-2025-38110
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 260388f79e94fb3026c419a208ece8358bb7b555 ]
When using publicly available tools like 'mdio-tools' to read/write data
from/to a network interface and its PHY via the C45 (clause 45) mdiobus,
there is no verification of the parameters passed to the ioctl, and it
accepts any mdio address.
The kernel currently supports 32 addresses via the PHY_MAX_ADDR define, but
it is possible to pass a higher value than that via the ioctl. While the
read/write operation itself should generally fail in that case, mdiobus keeps
a per-address stats array, where a wrong address allows an out-of-bounds
read/write.
Fix that by adding address verification before the C45 read/write operation.
While this excludes such accesses from the statistics, it improves the
security of the read/write path.
Fixes: 4e4aafcddbbf ("net: mdio: Add dedicated C45 API to MDIO bus drivers")
Signed-off-by: Jakub Raczynski <j.raczynski(a)samsung.com>
Reported-by: Wenjing Shan <wenjing.shan(a)samsung.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Zhang Changzhong <zhangchangzhong(a)huawei.com>
---
drivers/net/phy/mdio_bus.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 25dcaa4..6f8177a 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -940,6 +940,9 @@ int __mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum)
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->read_c45)
retval = bus->read_c45(bus, addr, devad, regnum);
else
@@ -971,6 +974,9 @@ int __mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->write_c45)
err = bus->write_c45(bus, addr, devad, regnum, val);
else
--
2.9.5
[PATCH OLK-6.6] net: phy: clear phydev->devlink when the link is deleted
by Zhang Changzhong 12 Jan '26
From: Wei Fang <wei.fang(a)nxp.com>
stable inclusion
from stable-v6.6.94
commit 363fdf2777423ad346d781f09548cca14877f729
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/9635
CVE: CVE-2025-38149
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 0795b05a59b1371b18ffbf09d385296b12e9f5d5 ]
There is a potential crash issue when disabling and re-enabling the
network port. When disabling the network port, phy_detach() calls
device_link_del() to remove the device link, but it does not clear
phydev->devlink, so phydev->devlink is not a NULL pointer. Then the
network port is re-enabled, but if phy_attach_direct() fails before
calling device_link_add(), the code jumps to the "error" label and
calls phy_detach(). Since phydev->devlink retains the old value from
the previous attach/detach cycle, device_link_del() uses the old value,
which accesses a NULL pointer and causes a crash. The simplified crash
log is as follows.
[ 24.702421] Call trace:
[ 24.704856] device_link_put_kref+0x20/0x120
[ 24.709124] device_link_del+0x30/0x48
[ 24.712864] phy_detach+0x24/0x168
[ 24.716261] phy_attach_direct+0x168/0x3a4
[ 24.720352] phylink_fwnode_phy_connect+0xc8/0x14c
[ 24.725140] phylink_of_phy_connect+0x1c/0x34
Therefore, phydev->devlink needs to be cleared when the device link is
deleted.
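The underlying rule, sketched from the diff below: when the object a cached
pointer names is destroyed, clear the pointer in the same step, so a repeated
or error-path teardown sees NULL instead of a dangling value.
if (phydev->devlink) {
        device_link_del(phydev->devlink);
        phydev->devlink = NULL; /* phy_detach() may run again on error paths */
}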
Fixes: bc66fa87d4fd ("net: phy: Add link between phy dev and mac dev")
Signed-off-by: Wei Fang <wei.fang(a)nxp.com>
Reviewed-by: Andrew Lunn <andrew(a)lunn.ch>
Reviewed-by: Florian Fainelli <florian.fainelli(a)broadcom.com>
Link: https://patch.msgid.link/20250523083759.3741168-1-wei.fang@nxp.com
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Zhang Changzhong <zhangchangzhong(a)huawei.com>
---
drivers/net/phy/phy_device.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ee43f4d..14da8fdb 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1833,8 +1833,10 @@ void phy_detach(struct phy_device *phydev)
struct module *ndev_owner = NULL;
struct mii_bus *bus;
- if (phydev->devlink)
+ if (phydev->devlink) {
device_link_del(phydev->devlink);
+ phydev->devlink = NULL;
+ }
if (phydev->sysfs_links) {
if (dev)
--
2.9.5
[PATCH OLK-6.6] netfilter: nft_set_pipapo: prevent overflow in lookup table allocation
by Zhang Changzhong 12 Jan '26
From: Pablo Neira Ayuso <pablo(a)netfilter.org>
mainline inclusion
from mainline-v6.16-rc1
commit 4c5c6aa9967dbe55bd017bb509885928d0f31206
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/9687
CVE: CVE-2025-38162
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
When calculating the lookup table size, ensure the following
multiplication does not overflow:
- desc->field_len[] maximum value is U8_MAX multiplied by
NFT_PIPAPO_GROUPS_PER_BYTE(f) that can be 2, worst case.
- NFT_PIPAPO_BUCKETS(f->bb) is 2^8, worst case.
- sizeof(unsigned long), from sizeof(*f->lt), lt in
struct nft_pipapo_field.
Then, use check_mul_overflow() to multiply by the bucket size, and use
check_add_overflow() for the avx2 alignment headroom (if needed). Finally,
add the lt_calculate_size() helper and use it to consolidate this.
While at it, replace a leftover allocation using GFP_KERNEL with
GFP_KERNEL_ACCOUNT for consistency, in pipapo_resize().
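For reference, the helper's arithmetic with annotations (a sketch of the
function added in the diff below; check_mul_overflow() and
check_add_overflow() are the <linux/overflow.h> macros that store the result
and return true on overflow):
static ssize_t lt_calculate_size(unsigned int groups, unsigned int bb,
                                 unsigned int bsize)
{
        ssize_t ret = groups * NFT_PIPAPO_BUCKETS(bb) * sizeof(long);

        if (check_mul_overflow(ret, bsize, &ret))
                return -1;      /* bucket multiply overflowed */
        if (check_add_overflow(ret, NFT_PIPAPO_ALIGN_HEADROOM, &ret))
                return -1;      /* avx2 alignment headroom overflowed */
        if (ret > INT_MAX)
                return -1;      /* keep within allocator expectations */

        return ret;             /* safe to pass to kvzalloc() */
}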
Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges")
Signed-off-by: Pablo Neira Ayuso <pablo(a)netfilter.org>
Reviewed-by: Stefano Brivio <sbrivio(a)redhat.com>
Signed-off-by: Pablo Neira Ayuso <pablo(a)netfilter.org>
Conflicts:
net/netfilter/nft_set_pipapo.c
[commit 9f439bd6ef4f, aac14d516c2b, 69e687cea79f not merged]
Signed-off-by: Zhang Changzhong <zhangchangzhong(a)huawei.com>
---
net/netfilter/nft_set_pipapo.c | 58 ++++++++++++++++++++++++++++++++----------
1 file changed, 44 insertions(+), 14 deletions(-)
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index ebd0f70..90ade86 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -610,6 +610,30 @@ static void *nft_pipapo_get(const struct net *net, const struct nft_set *set,
nft_genmask_cur(net), get_jiffies_64());
}
+
+/**
+ * lt_calculate_size() - Get storage size for lookup table with overflow check
+ * @groups: Amount of bit groups
+ * @bb: Number of bits grouped together in lookup table buckets
+ * @bsize: Size of each bucket in lookup table, in longs
+ *
+ * Return: allocation size including alignment overhead, negative on overflow
+ */
+static ssize_t lt_calculate_size(unsigned int groups, unsigned int bb,
+ unsigned int bsize)
+{
+ ssize_t ret = groups * NFT_PIPAPO_BUCKETS(bb) * sizeof(long);
+
+ if (check_mul_overflow(ret, bsize, &ret))
+ return -1;
+ if (check_add_overflow(ret, NFT_PIPAPO_ALIGN_HEADROOM, &ret))
+ return -1;
+ if (ret > INT_MAX)
+ return -1;
+
+ return ret;
+}
+
/**
* pipapo_resize() - Resize lookup or mapping table, or both
* @f: Field containing lookup and mapping tables
@@ -628,6 +652,7 @@ static int pipapo_resize(struct nft_pipapo_field *f, int old_rules, int rules)
union nft_pipapo_map_bucket *new_mt, *old_mt = f->mt;
size_t new_bucket_size, copy;
int group, bucket;
+ ssize_t lt_size;
new_bucket_size = DIV_ROUND_UP(rules, BITS_PER_LONG);
#ifdef NFT_PIPAPO_ALIGN
@@ -643,10 +668,11 @@ static int pipapo_resize(struct nft_pipapo_field *f, int old_rules, int rules)
else
copy = new_bucket_size;
- new_lt = kvzalloc(f->groups * NFT_PIPAPO_BUCKETS(f->bb) *
- new_bucket_size * sizeof(*new_lt) +
- NFT_PIPAPO_ALIGN_HEADROOM,
- GFP_KERNEL);
+ lt_size = lt_calculate_size(f->groups, f->bb, new_bucket_size);
+ if (lt_size < 0)
+ return -ENOMEM;
+
+ new_lt = kvzalloc(lt_size, GFP_KERNEL);
if (!new_lt)
return -ENOMEM;
@@ -845,7 +871,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
{
unsigned long *new_lt;
int groups, bb;
- size_t lt_size;
+ ssize_t lt_size;
lt_size = f->groups * NFT_PIPAPO_BUCKETS(f->bb) * f->bsize *
sizeof(*f->lt);
@@ -855,15 +881,17 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
groups = f->groups * 2;
bb = NFT_PIPAPO_GROUP_BITS_LARGE_SET;
- lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
- sizeof(*f->lt);
+ lt_size = lt_calculate_size(groups, bb, f->bsize);
+ if (lt_size < 0)
+ return;
} else if (f->bb == NFT_PIPAPO_GROUP_BITS_LARGE_SET &&
lt_size < NFT_PIPAPO_LT_SIZE_LOW) {
groups = f->groups / 2;
bb = NFT_PIPAPO_GROUP_BITS_SMALL_SET;
- lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
- sizeof(*f->lt);
+ lt_size = lt_calculate_size(groups, bb, f->bsize);
+ if (lt_size < 0)
+ return;
/* Don't increase group width if the resulting lookup table size
* would exceed the upper size threshold for a "small" set.
@@ -874,7 +902,7 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
return;
}
- new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL);
+ new_lt = kvzalloc(lt_size, GFP_KERNEL);
if (!new_lt)
return;
@@ -1347,13 +1375,15 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
for (i = 0; i < old->field_count; i++) {
unsigned long *new_lt;
+ ssize_t lt_size;
memcpy(dst, src, offsetof(struct nft_pipapo_field, lt));
- new_lt = kvzalloc(src->groups * NFT_PIPAPO_BUCKETS(src->bb) *
- src->bsize * sizeof(*dst->lt) +
- NFT_PIPAPO_ALIGN_HEADROOM,
- GFP_KERNEL);
+ lt_size = lt_calculate_size(src->groups, src->bb, src->bsize);
+ if (lt_size < 0)
+ goto out_lt;
+
+ new_lt = kvzalloc(lt_size, GFP_KERNEL);
if (!new_lt)
goto out_lt;
--
2.9.5
Arjan van de Ven (2):
VFIO: Add the SPR_DSA and SPR_IAX devices to the denylist
dmaengine: idxd: add a new security check to deal with a hardware
erratum
Dave Jiang (2):
dmaengine: idxd: add per DSA wq workqueue for processing cr faults
dmaengine: idxd: Fix ->poll() return value
Fenghua Yu (1):
dmaengine: idxd: add idxd_copy_cr() to copy user completion record
during page fault handling
Harshit Mogalapalli (1):
dmaengine: idxd: Fix passing freed memory in idxd_cdev_open()
Nikhil Rao (1):
dmaengine: idxd: add a write() method for applications to submit work
Vinicius Costa Gomes (1):
dmaengine: idxd: Fix allowing write() from different address spaces
drivers/dma/idxd/cdev.c | 205 ++++++++++++++++++++++++++++++++++-
drivers/dma/idxd/idxd.h | 10 ++
drivers/dma/idxd/init.c | 6 +
drivers/dma/idxd/registers.h | 3 -
drivers/dma/idxd/sysfs.c | 28 ++++-
drivers/vfio/pci/vfio_pci.c | 2 +
include/linux/pci_ids.h | 2 +
7 files changed, 245 insertions(+), 11 deletions(-)
--
2.34.3
12 Jan '26
From: Steve French <stfrench(a)microsoft.com>
mainline inclusion
from mainline-v6.17-rc2
commit 7d34ec36abb84fdfb6632a0f2cbda90379ae21fc
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/9094
CVE: CVE-2025-38728
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
With KASAN enabled, it is possible to get a slab out-of-bounds access during
a mount to ksmbd due to a missing check in parse_server_interfaces()
(see below):
BUG: KASAN: slab-out-of-bounds in
parse_server_interfaces+0x14ee/0x1880 [cifs]
Read of size 4 at addr ffff8881433dba98 by task mount/9827
CPU: 5 UID: 0 PID: 9827 Comm: mount Tainted: G
OE 6.16.0-rc2-kasan #2 PREEMPT(voluntary)
Tainted: [O]=OOT_MODULE, [E]=UNSIGNED_MODULE
Hardware name: Dell Inc. Precision Tower 3620/0MWYPT,
BIOS 2.13.1 06/14/2019
Call Trace:
<TASK>
dump_stack_lvl+0x9f/0xf0
print_report+0xd1/0x670
__virt_addr_valid+0x22c/0x430
? parse_server_interfaces+0x14ee/0x1880 [cifs]
? kasan_complete_mode_report_info+0x2a/0x1f0
? parse_server_interfaces+0x14ee/0x1880 [cifs]
kasan_report+0xd6/0x110
parse_server_interfaces+0x14ee/0x1880 [cifs]
__asan_report_load_n_noabort+0x13/0x20
parse_server_interfaces+0x14ee/0x1880 [cifs]
? __pfx_parse_server_interfaces+0x10/0x10 [cifs]
? trace_hardirqs_on+0x51/0x60
SMB3_request_interfaces+0x1ad/0x3f0 [cifs]
? __pfx_SMB3_request_interfaces+0x10/0x10 [cifs]
? SMB2_tcon+0x23c/0x15d0 [cifs]
smb3_qfs_tcon+0x173/0x2b0 [cifs]
? __pfx_smb3_qfs_tcon+0x10/0x10 [cifs]
? cifs_get_tcon+0x105d/0x2120 [cifs]
? do_raw_spin_unlock+0x5d/0x200
? cifs_get_tcon+0x105d/0x2120 [cifs]
? __pfx_smb3_qfs_tcon+0x10/0x10 [cifs]
cifs_mount_get_tcon+0x369/0xb90 [cifs]
? dfs_cache_find+0xe7/0x150 [cifs]
dfs_mount_share+0x985/0x2970 [cifs]
? check_path.constprop.0+0x28/0x50
? save_trace+0x54/0x370
? __pfx_dfs_mount_share+0x10/0x10 [cifs]
? __lock_acquire+0xb82/0x2ba0
? __kasan_check_write+0x18/0x20
cifs_mount+0xbc/0x9e0 [cifs]
? __pfx_cifs_mount+0x10/0x10 [cifs]
? do_raw_spin_unlock+0x5d/0x200
? cifs_setup_cifs_sb+0x29d/0x810 [cifs]
cifs_smb3_do_mount+0x263/0x1990 [cifs]
Reported-by: Namjae Jeon <linkinjeon(a)kernel.org>
Tested-by: Namjae Jeon <linkinjeon(a)kernel.org>
Cc: stable(a)vger.kernel.org
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Conflicts:
fs/cifs/smb2ops.c
fs/smb/client/smb2ops.c
[Conflict with mainline commit aa45dadd34e4 ("cifs: change iface_list
from array to sorted linked list").]
Signed-off-by: Wang Zhaolong <wangzhaolong(a)huaweicloud.com>
---
fs/cifs/smb2ops.c | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 2bc268401d91..e3f96a99a4d5 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -479,10 +479,17 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
next = le32_to_cpu(p->Next);
if (!next) {
bytes_left -= sizeof(*p);
break;
}
+ /* Validate that Next doesn't point beyond the buffer */
+ if (next > bytes_left) {
+ cifs_dbg(VFS, "%s: invalid Next pointer %zu > %zd\n",
+ __func__, next, bytes_left);
+ rc = -EINVAL;
+ goto out;
+ }
p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
bytes_left -= next;
}
if (!nb_iface) {
@@ -490,14 +497,15 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
rc = -EINVAL;
goto out;
}
/* Azure rounds the buffer size up 8, to a 16 byte boundary */
- if ((bytes_left > 8) || p->Next)
+ if ((bytes_left > 8) ||
+ (bytes_left >= offsetof(struct network_interface_info_ioctl_rsp, Next)
+ + sizeof(p->Next) && p->Next))
cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
-
/*
* Second pass: extract info to internal structure
*/
*iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
--
2.34.3
12 Jan '26
From: Wang Zhaolong <wangzhaolong1(a)huawei.com>
stable inclusion
from stable-v5.10.238
commit a24c2f05ac3c5b0aaa539d9d913826d2643dfd0e
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/9768
CVE: CVE-2025-38051
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit a7a8fe56e932a36f43e031b398aef92341bf5ea0 upstream.
There is a race condition in the readdir concurrency process, which may
access the rsp buffer after it has been released, triggering the
following KASAN warning.
==================================================================
BUG: KASAN: slab-use-after-free in cifs_fill_dirent+0xb03/0xb60 [cifs]
Read of size 4 at addr ffff8880099b819c by task a.out/342975
CPU: 2 UID: 0 PID: 342975 Comm: a.out Not tainted 6.15.0-rc6+ #240 PREEMPT(full)
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.1-2.fc37 04/01/2014
Call Trace:
<TASK>
dump_stack_lvl+0x53/0x70
print_report+0xce/0x640
kasan_report+0xb8/0xf0
cifs_fill_dirent+0xb03/0xb60 [cifs]
cifs_readdir+0x12cb/0x3190 [cifs]
iterate_dir+0x1a1/0x520
__x64_sys_getdents+0x134/0x220
do_syscall_64+0x4b/0x110
entry_SYSCALL_64_after_hwframe+0x76/0x7e
RIP: 0033:0x7f996f64b9f9
Code: ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 48 89 f8 48 89
f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01
f0 ff ff 0d f7 c3 0c 00 f7 d8 64 89 8
RSP: 002b:00007f996f53de78 EFLAGS: 00000207 ORIG_RAX: 000000000000004e
RAX: ffffffffffffffda RBX: 00007f996f53ecdc RCX: 00007f996f64b9f9
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000003
RBP: 00007f996f53dea0 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000207 R12: ffffffffffffff88
R13: 0000000000000000 R14: 00007ffc8cd9a500 R15: 00007f996f51e000
</TASK>
Allocated by task 408:
kasan_save_stack+0x20/0x40
kasan_save_track+0x14/0x30
__kasan_slab_alloc+0x6e/0x70
kmem_cache_alloc_noprof+0x117/0x3d0
mempool_alloc_noprof+0xf2/0x2c0
cifs_buf_get+0x36/0x80 [cifs]
allocate_buffers+0x1d2/0x330 [cifs]
cifs_demultiplex_thread+0x22b/0x2690 [cifs]
kthread+0x394/0x720
ret_from_fork+0x34/0x70
ret_from_fork_asm+0x1a/0x30
Freed by task 342979:
kasan_save_stack+0x20/0x40
kasan_save_track+0x14/0x30
kasan_save_free_info+0x3b/0x60
__kasan_slab_free+0x37/0x50
kmem_cache_free+0x2b8/0x500
cifs_buf_release+0x3c/0x70 [cifs]
cifs_readdir+0x1c97/0x3190 [cifs]
iterate_dir+0x1a1/0x520
__x64_sys_getdents64+0x134/0x220
do_syscall_64+0x4b/0x110
entry_SYSCALL_64_after_hwframe+0x76/0x7e
The buggy address belongs to the object at ffff8880099b8000
which belongs to the cache cifs_request of size 16588
The buggy address is located 412 bytes inside of
freed 16588-byte region [ffff8880099b8000, ffff8880099bc0cc)
The buggy address belongs to the physical page:
page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x99b8
head: order:3 mapcount:0 entire_mapcount:0 nr_pages_mapped:0 pincount:0
anon flags: 0x80000000000040(head|node=0|zone=1)
page_type: f5(slab)
raw: 0080000000000040 ffff888001e03400 0000000000000000 dead000000000001
raw: 0000000000000000 0000000000010001 00000000f5000000 0000000000000000
head: 0080000000000040 ffff888001e03400 0000000000000000 dead000000000001
head: 0000000000000000 0000000000010001 00000000f5000000 0000000000000000
head: 0080000000000003 ffffea0000266e01 00000000ffffffff 00000000ffffffff
head: ffffffffffffffff 0000000000000000 00000000ffffffff 0000000000000008
page dumped because: kasan: bad access detected
Memory state around the buggy address:
ffff8880099b8080: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff8880099b8100: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
>ffff8880099b8180: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
^
ffff8880099b8200: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff8880099b8280: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
==================================================================
A PoC is available at link [1].
The problem-triggering process is as follows:
Process 1 Process 2
-----------------------------------------------------------------
cifs_readdir
/* file->private_data == NULL */
initiate_cifs_search
cifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
smb2_query_dir_first ->query_dir_first()
SMB2_query_directory
SMB2_query_directory_init
cifs_send_recv
smb2_parse_query_directory
srch_inf->ntwrk_buf_start = (char *)rsp;
srch_inf->srch_entries_start = (char *)rsp + ...
srch_inf->last_entry = (char *)rsp + ...
srch_inf->smallBuf = true;
find_cifs_entry
/* if (cfile->srch_inf.ntwrk_buf_start) */
cifs_small_buf_release(cfile->srch_inf // free
cifs_readdir ->iterate_shared()
/* file->private_data != NULL */
find_cifs_entry
/* in while (...) loop */
smb2_query_dir_next ->query_dir_next()
SMB2_query_directory
SMB2_query_directory_init
cifs_send_recv
compound_send_recv
smb_send_rqst
__smb_send_rqst
rc = -ERESTARTSYS;
/* if (fatal_signal_pending()) */
goto out;
return rc
/* if (cfile->srch_inf.last_entry) */
cifs_save_resume_key()
cifs_fill_dirent // UAF
/* if (rc) */
return -ENOENT;
Fix this by ensuring the return code is checked before using pointers
from the srch_inf.
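The ordering rule the fix enforces, as a sketch condensed from the diff
below: after a failed query_dir_next() the reply buffer that srch_inf points
into may already be gone, so rc must be checked before any srch_inf pointer
is dereferenced.
rc = server->ops->query_dir_next(xid, tcon, &cfile->fid,
                                 search_flags, &cfile->srch_inf);
if (rc)
        return -ENOENT; /* srch_inf pointers may dangle; do not read them */

if (cfile->srch_inf.last_entry)
        cifs_save_resume_key(cfile->srch_inf.last_entry, cfile);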
Link: https://bugzilla.kernel.org/show_bug.cgi?id=220131 [1]
Fixes: a364bc0b37f1 ("[CIFS] fix saving of resume key before CIFSFindNext")
Cc: stable(a)vger.kernel.org
Reviewed-by: Paulo Alcantara (Red Hat) <pc(a)manguebit.com>
Signed-off-by: Wang Zhaolong <wangzhaolong1(a)huawei.com>
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Signed-off-by: Wang Zhaolong <wangzhaolong1(a)huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Liu Mingrui <liumingrui(a)huawei.com>
Signed-off-by: Wang Zhaolong <wangzhaolong(a)huaweicloud.com>
---
fs/cifs/readdir.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 799be3a5d25e..a7a9391214c1 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -776,15 +776,15 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
(rc == 0) && !cfile->srch_inf.endOfSearch) {
cifs_dbg(FYI, "calling findnext2\n");
rc = server->ops->query_dir_next(xid, tcon, &cfile->fid,
search_flags,
&cfile->srch_inf);
+ if (rc)
+ return -ENOENT;
/* FindFirst/Next set last_entry to NULL on malformed reply */
if (cfile->srch_inf.last_entry)
cifs_save_resume_key(cfile->srch_inf.last_entry, cfile);
- if (rc)
- return -ENOENT;
}
if (index_to_find < cfile->srch_inf.index_of_last_entry) {
/* we found the buffer that contains the entry */
/* scan and find it */
int i;
--
2.34.3
[PATCH OLK-6.6] NFS: Automounted filesystems should inherit ro,noexec,nodev,sync flags
by Wang Zhaolong 12 Jan '26
From: Trond Myklebust <trond.myklebust(a)hammerspace.com>
mainline inclusion
from mainline-v6.19-rc1
commit 8675c69816e4276b979ff475ee5fac4688f80125
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13299
CVE: CVE-2025-68764
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
When a filesystem is being automounted, it needs to preserve the
user-set superblock mount options, such as the "ro" flag.
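The inheritance itself is three bit operations on the child fs_context, shown
here as an annotated sketch of the diff below (NFS_SB_MASK covers the
ro/sync/noexec/nodev-style superblock flags):
fc->sb_flags_mask |= NFS_SB_MASK;       /* mark these bits as explicitly set */
fc->sb_flags &= ~NFS_SB_MASK;           /* drop whatever defaults were there */
fc->sb_flags |= s_flags & NFS_SB_MASK;  /* copy the parent superblock's bits */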
Reported-by: Li Lingfeng <lilingfeng3(a)huawei.com>
Link: https://lore.kernel.org/all/20240604112636.236517-3-lilingfeng@huaweicloud.…
Fixes: f2aedb713c28 ("NFS: Add fs_context support.")
Signed-off-by: Trond Myklebust <trond.myklebust(a)hammerspace.com>
Signed-off-by: Wang Zhaolong <wangzhaolong(a)huaweicloud.com>
---
fs/nfs/namespace.c | 6 ++++++
fs/nfs/super.c | 4 ----
2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index e7494cdd957e..40d7163bca87 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -147,10 +147,11 @@ struct vfsmount *nfs_d_automount(struct path *path)
struct nfs_fs_context *ctx;
struct fs_context *fc;
struct vfsmount *mnt = ERR_PTR(-ENOMEM);
struct nfs_server *server = NFS_SB(path->dentry->d_sb);
struct nfs_client *client = server->nfs_client;
+ unsigned long s_flags = path->dentry->d_sb->s_flags;
int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout);
int ret;
if (IS_ROOT(path->dentry))
return ERR_PTR(-ESTALE);
@@ -172,10 +173,15 @@ struct vfsmount *nfs_d_automount(struct path *path)
if (fc->net_ns != client->cl_net) {
put_net(fc->net_ns);
fc->net_ns = get_net(client->cl_net);
}
+ /* Inherit the flags covered by NFS_SB_MASK */
+ fc->sb_flags_mask |= NFS_SB_MASK;
+ fc->sb_flags &= ~NFS_SB_MASK;
+ fc->sb_flags |= s_flags & NFS_SB_MASK;
+
/* for submounts we want the same server; referrals will reassign */
memcpy(&ctx->nfs_server._address, &client->cl_addr, client->cl_addrlen);
ctx->nfs_server.addrlen = client->cl_addrlen;
ctx->nfs_server.port = server->port;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 1c2969cb907e..9fa3d17981bd 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1318,14 +1318,10 @@ int nfs_get_tree_common(struct fs_context *fc)
/* -o noac implies -o sync */
if (server->flags & NFS_MOUNT_NOAC)
fc->sb_flags |= SB_SYNCHRONOUS;
- if (ctx->clone_data.sb)
- if (ctx->clone_data.sb->s_flags & SB_SYNCHRONOUS)
- fc->sb_flags |= SB_SYNCHRONOUS;
-
/* Get a superblock - note that we may end up sharing one that already exists */
fc->s_fs_info = server;
s = sget_fc(fc, compare_super, nfs_set_super);
fc->s_fs_info = NULL;
if (IS_ERR(s)) {
--
2.34.3
[PATCH OLK-5.10] VMCI: fix race between vmci_host_setup_notify and vmci_ctx_unset_notify
by Chen Ridong 12 Jan '26
From: Wupeng Ma <mawupeng1(a)huawei.com>
stable inclusion
from stable-v5.10.240
commit 6e3af836805ed1d7a699f76ec798626198917aa4
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/9640
CVE: CVE-2025-38102
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 1bd6406fb5f36c2bb1e96e27d4c3e9f4d09edde4 ]
During our test, it is found that a warning can be trigger in try_grab_folio
as follow:
------------[ cut here ]------------
WARNING: CPU: 0 PID: 1678 at mm/gup.c:147 try_grab_folio+0x106/0x130
Modules linked in:
CPU: 0 UID: 0 PID: 1678 Comm: syz.3.31 Not tainted 6.15.0-rc5 #163 PREEMPT(undef)
RIP: 0010:try_grab_folio+0x106/0x130
Call Trace:
<TASK>
follow_huge_pmd+0x240/0x8e0
follow_pmd_mask.constprop.0.isra.0+0x40b/0x5c0
follow_pud_mask.constprop.0.isra.0+0x14a/0x170
follow_page_mask+0x1c2/0x1f0
__get_user_pages+0x176/0x950
__gup_longterm_locked+0x15b/0x1060
? gup_fast+0x120/0x1f0
gup_fast_fallback+0x17e/0x230
get_user_pages_fast+0x5f/0x80
vmci_host_unlocked_ioctl+0x21c/0xf80
RIP: 0033:0x54d2cd
---[ end trace 0000000000000000 ]---
Digging into the source, context->notify_page may be initialized by
get_user_pages_fast() and observed by vmci_ctx_unset_notify(), which will try
to put_page() it. However, get_user_pages_fast() has not finished at that
point, which leads to the try_grab_folio warning above. The race condition is
shown as follows:
cpu0 cpu1
vmci_host_do_set_notify
vmci_host_setup_notify
get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
lockless_pages_from_mm
gup_pgd_range
gup_huge_pmd // update &context->notify_page
vmci_host_do_set_notify
vmci_ctx_unset_notify
notify_page = context->notify_page;
if (notify_page)
put_page(notify_page); // page is freed
__gup_longterm_locked
__get_user_pages
follow_trans_huge_pmd
try_grab_folio // warn here
To solve this, use a local variable page so that notify_page only becomes
visible after get_user_pages_fast() has finished.
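The publication pattern used by the fix, sketched from the diff below: pin
the page into a local variable first and assign the context field in one
store, so a concurrent vmci_ctx_unset_notify() can never observe a page that
is still being pinned.
struct page *page;

retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &page);
if (retval != 1)
        return VMCI_ERROR_GENERIC;

context->notify_page = page;    /* single store publishes the pinned page */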
Fixes: a1d88436d53a ("VMCI: Fix two UVA mapping bugs")
Cc: stable <stable(a)kernel.org>
Closes: https://lore.kernel.org/all/e91da589-ad57-3969-d979-879bbd10dddd@huawei.com/
Signed-off-by: Wupeng Ma <mawupeng1(a)huawei.com>
Link: https://lore.kernel.org/r/20250510033040.901582-1-mawupeng1@huawei.com
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Chen Ridong <chenridong(a)huawei.com>
---
drivers/misc/vmw_vmci/vmci_host.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index cc6da9e5a542..e7965ee6bdba 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -227,6 +227,7 @@ static int drv_cp_harray_to_user(void __user *user_buf_uva,
static int vmci_host_setup_notify(struct vmci_ctx *context,
unsigned long uva)
{
+ struct page *page;
int retval;
if (context->notify_page) {
@@ -243,13 +244,11 @@ static int vmci_host_setup_notify(struct vmci_ctx *context,
/*
* Lock physical page backing a given user VA.
*/
- retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
- if (retval != 1) {
- context->notify_page = NULL;
+ retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &page);
+ if (retval != 1)
return VMCI_ERROR_GENERIC;
- }
- if (context->notify_page == NULL)
- return VMCI_ERROR_UNAVAILABLE;
+
+ context->notify_page = page;
/*
* Map the locked page and set up notify pointer.
--
2.34.1
12 Jan '26
From: Herbert Xu <herbert(a)gondor.apana.org.au>
mainline inclusion
from mainline-v6.17-rc7
commit 9574b2330dbd2b5459b74d3b5e9619d39299fc6f
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/8202
CVE: CVE-2025-39931
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
If an error causes af_alg_sendmsg to abort, ctx->merge may contain
a garbage value from the previous loop. This may then trigger a
crash on the next entry into af_alg_sendmsg when it attempts to do
a merge that can't be done.
Fix this by setting ctx->merge to zero near the start of the loop.
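Sketched against the sendmsg loop, the invariant is that per-iteration merge
state is reinitialized at the top of each pass, so an error exit cannot leak
a stale value into the next sendmsg call:
while (size) {
        ctx->merge = 0; /* reset before any error exit or continue */

        /* ... reserve pages and copy user data; this may fail mid-loop ... */
}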
Fixes: 8ff590903d5 ("crypto: algif_skcipher - User-space interface for skcipher operations")
Reported-by: Muhammad Alifa Ramdhan <ramdhan(a)starlabs.sg>
Reported-by: Bing-Jhong Billy Jheng <billy(a)starlabs.sg>
Signed-off-by: Herbert Xu <herbert(a)gondor.apana.org.au>
Conflicts:
crypto/af_alg.c
[Context conflicts due to commit d3dccb0a487d ("crypto: af_alg - Fix
merging of written data into spliced pages") not merge.]
Signed-off-by: Gu Bowen <gubowen5(a)huawei.com>
---
crypto/af_alg.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 755e6caf18d2..99085c6b9656 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -911,6 +911,8 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
continue;
}
+ ctx->merge = 0;
+
if (!af_alg_writable(sk)) {
err = af_alg_wait_for_wmem(sk, msg->msg_flags);
if (err)
--
2.43.0
12 Jan '26
From: Thomas Zimmermann <tzimmermann(a)suse.de>
stable inclusion
from stable-v6.6.117
commit b61ed8005bd3102510fab5015ac6a275c9c5ea16
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/11637
CVE: CVE-2025-40360
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 14e02ed3876f4ab0ed6d3f41972175f8b8df3d70 ]
The plane state in __drm_gem_reset_shadow_plane() can be NULL. Do not
dereference that pointer, but forward NULL to the other plane-reset helpers.
This clears plane->state to NULL.
v2:
- fix typo in commit description (Javier)
Signed-off-by: Thomas Zimmermann <tzimmermann(a)suse.de>
Fixes: b71565022031 ("drm/gem: Export implementation of shadow-plane helpers")
Reported-by: Dan Carpenter <dan.carpenter(a)linaro.org>
Closes: https://lore.kernel.org/dri-devel/aPIDAsHIUHp_qSW4@stanley.mountain/
Cc: Thomas Zimmermann <tzimmermann(a)suse.de>
Cc: Melissa Wen <melissa.srw(a)gmail.com>
Cc: Maarten Lankhorst <maarten.lankhorst(a)linux.intel.com>
Cc: Maxime Ripard <mripard(a)kernel.org>
Cc: David Airlie <airlied(a)gmail.com>
Cc: Simona Vetter <simona(a)ffwll.ch>
Cc: dri-devel(a)lists.freedesktop.org
Cc: <stable(a)vger.kernel.org> # v5.15+
Reviewed-by: Javier Martinez Canillas <javierm(a)redhat.com>
Link: https://patch.msgid.link/20251017091407.58488-1-tzimmermann@suse.de
[ removed drm_format_conv_state_init() call ]
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Pan Taixi <pantaixi1(a)huawei.com>
---
drivers/gpu/drm/drm_gem_atomic_helper.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index 5d4b9cd077f7..e0ea3c661cb7 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -301,7 +301,11 @@ EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);
void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
struct drm_shadow_plane_state *shadow_plane_state)
{
- __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
+ if (shadow_plane_state) {
+ __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
+ } else {
+ __drm_atomic_helper_plane_reset(plane, NULL);
+ }
}
EXPORT_SYMBOL(__drm_gem_reset_shadow_plane);
--
2.34.1
[PATCH OLK-6.6] binfmt_misc: restore write access before closing files opened by open_exec()
by Pan Taixi 12 Jan '26
From: Zilin Guan <zilin(a)seu.edu.cn>
mainline inclusion
from mainline-v6.18-rc7
commit 90f601b497d76f40fa66795c3ecf625b6aced9fd
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/11533
CVE: CVE-2025-68239
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
bm_register_write() opens an executable file using open_exec(), which
internally calls do_open_execat() and denies write access on the file to
avoid modification while it is being executed.
However, when an error occurs, bm_register_write() closes the file using
filp_close() directly. This does not restore the write permission, which
may cause subsequent write operations on the same file to fail.
Fix this by calling exe_file_allow_write_access() before filp_close() to
restore the write permission properly.
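The pairing rule behind the fix, sketched (allow_write_access() is the helper
available on this branch, per the conflict note below): open_exec() denies
write access to the interpreter, so every path that closes the file must drop
that deny first.
f = open_exec(e->interpreter);  /* takes a write-access deny on the file */
/* ... */
if (err) {
        if (f) {
                allow_write_access(f);  /* undo open_exec()'s deny */
                filp_close(f, NULL);
        }
        kfree(e);
        return err;
}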
Fixes: e7850f4d844e ("binfmt_misc: fix possible deadlock in bm_register_write")
Signed-off-by: Zilin Guan <zilin(a)seu.edu.cn>
Link: https://patch.msgid.link/20251105022923.1813587-1-zilin@seu.edu.cn
Signed-off-by: Christian Brauner <brauner(a)kernel.org>
Conflicts:
fs/binfmt_misc.c
[Context conflicts as exe_file_allow_write_access() is introduced in commit
0357ef03c94e ("fs: don't block write during exec on pre-content watched files"),
which is not merged. Use allow_write_access() instead.]
Signed-off-by: Pan Taixi <pantaixi1(a)huawei.com>
---
fs/binfmt_misc.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index cf5ed5cd4102..a45b5ba12a9c 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -815,8 +815,10 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
inode_unlock(d_inode(root));
if (err) {
- if (f)
+ if (f) {
+ allow_write_access(f);
filp_close(f, NULL);
+ }
kfree(e);
return err;
}
--
2.34.1
[PATCH OLK-5.10] binfmt_misc: restore write access before closing files opened by open_exec()
by Pan Taixi 12 Jan '26
From: Zilin Guan <zilin(a)seu.edu.cn>
mainline inclusion
from mainline-v6.18-rc7
commit 90f601b497d76f40fa66795c3ecf625b6aced9fd
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/11533
CVE: CVE-2025-68239
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
bm_register_write() opens an executable file using open_exec(), which
internally calls do_open_execat() and denies write access on the file to
avoid modification while it is being executed.
However, when an error occurs, bm_register_write() closes the file using
filp_close() directly. This does not restore the write permission, which
may cause subsequent write operations on the same file to fail.
Fix this by calling exe_file_allow_write_access() before filp_close() to
restore the write permission properly.
Fixes: e7850f4d844e ("binfmt_misc: fix possible deadlock in bm_register_write")
Signed-off-by: Zilin Guan <zilin(a)seu.edu.cn>
Link: https://patch.msgid.link/20251105022923.1813587-1-zilin@seu.edu.cn
Signed-off-by: Christian Brauner <brauner(a)kernel.org>
Conflicts:
fs/binfmt_misc.c
[Context conflicts as exe_file_allow_write_access() is introduced in commit
0357ef03c94e ("fs: don't block write during exec on pre-content watched files"),
which is not merged. Use allow_write_access() instead.]
Signed-off-by: Pan Taixi <pantaixi1(a)huawei.com>
---
fs/binfmt_misc.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index ce0047feea72..760a270fd7d5 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -704,8 +704,10 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
inode_unlock(d_inode(root));
if (err) {
- if (f)
+ if (f) {
+ allow_write_access(f);
filp_close(f, NULL);
+ }
kfree(e);
return err;
}
--
2.34.1
[PATCH OLK-6.6] RDMA/rxe: Fix null deref on srq->rq.queue after resize failure
by Ziming Du 12 Jan '26
From: Zhu Yanjun <yanjun.zhu(a)linux.dev>
mainline inclusion
from mainline-v6.19-rc1
commit 503a5e4690ae14c18570141bc0dcf7501a8419b0
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/12742
CVE: CVE-2025-68379
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
A NULL pointer dereference can occur in rxe_srq_chk_attr() when
ibv_modify_srq() is invoked twice in succession under certain error
conditions. The first call may fail in rxe_queue_resize(), which leads
rxe_srq_from_attr() to set srq->rq.queue = NULL. The second call then
triggers a crash (null deref) when accessing
srq->rq.queue->buf->index_mask.
Call Trace:
<TASK>
rxe_modify_srq+0x170/0x480 [rdma_rxe]
? __pfx_rxe_modify_srq+0x10/0x10 [rdma_rxe]
? uverbs_try_lock_object+0x4f/0xa0 [ib_uverbs]
? rdma_lookup_get_uobject+0x1f0/0x380 [ib_uverbs]
ib_uverbs_modify_srq+0x204/0x290 [ib_uverbs]
? __pfx_ib_uverbs_modify_srq+0x10/0x10 [ib_uverbs]
? tryinc_node_nr_active+0xe6/0x150
? uverbs_fill_udata+0xed/0x4f0 [ib_uverbs]
ib_uverbs_handler_UVERBS_METHOD_INVOKE_WRITE+0x2c0/0x470 [ib_uverbs]
? __pfx_ib_uverbs_handler_UVERBS_METHOD_INVOKE_WRITE+0x10/0x10 [ib_uverbs]
? uverbs_fill_udata+0xed/0x4f0 [ib_uverbs]
ib_uverbs_run_method+0x55a/0x6e0 [ib_uverbs]
? __pfx_ib_uverbs_handler_UVERBS_METHOD_INVOKE_WRITE+0x10/0x10 [ib_uverbs]
ib_uverbs_cmd_verbs+0x54d/0x800 [ib_uverbs]
? __pfx_ib_uverbs_cmd_verbs+0x10/0x10 [ib_uverbs]
? __pfx___raw_spin_lock_irqsave+0x10/0x10
? __pfx_do_vfs_ioctl+0x10/0x10
? ioctl_has_perm.constprop.0.isra.0+0x2c7/0x4c0
? __pfx_ioctl_has_perm.constprop.0.isra.0+0x10/0x10
ib_uverbs_ioctl+0x13e/0x220 [ib_uverbs]
? __pfx_ib_uverbs_ioctl+0x10/0x10 [ib_uverbs]
__x64_sys_ioctl+0x138/0x1c0
do_syscall_64+0x82/0x250
? fdget_pos+0x58/0x4c0
? ksys_write+0xf3/0x1c0
? __pfx_ksys_write+0x10/0x10
? do_syscall_64+0xc8/0x250
? __pfx_vm_mmap_pgoff+0x10/0x10
? fget+0x173/0x230
? fput+0x2a/0x80
? ksys_mmap_pgoff+0x224/0x4c0
? do_syscall_64+0xc8/0x250
? do_user_addr_fault+0x37b/0xfe0
? clear_bhb_loop+0x50/0xa0
? clear_bhb_loop+0x50/0xa0
? clear_bhb_loop+0x50/0xa0
entry_SYSCALL_64_after_hwframe+0x76/0x7e
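The fix's rule, sketched with arguments elided: when the resize fails, return
the error and leave the old queue in place, instead of freeing it and
clearing srq->rq.queue, so a later modify_srq still finds a valid queue.
err = rxe_queue_resize(/* queue, limits and locks elided */);
if (err)
        return err;             /* old srq->rq.queue stays valid and usable */

srq->rq.max_wr = attr->max_wr;  /* commit the new limit only on success */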
Fixes: 8700e3e7c485 ("Soft RoCE driver")
Tested-by: Liu Yi <asatsuyu.liu(a)gmail.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu(a)linux.dev>
Link: https://patch.msgid.link/20251027215203.1321-1-yanjun.zhu@linux.dev
Signed-off-by: Leon Romanovsky <leon(a)kernel.org>
Signed-off-by: Ziming Du <duziming2(a)huawei.com>
---
drivers/infiniband/sw/rxe/rxe_srq.c | 7 +------
1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index 3661cb627d28..2a234f26ac10 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -171,7 +171,7 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
udata, mi, &srq->rq.producer_lock,
&srq->rq.consumer_lock);
if (err)
- goto err_free;
+ return err;
srq->rq.max_wr = attr->max_wr;
}
@@ -180,11 +180,6 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
srq->limit = attr->srq_limit;
return 0;
-
-err_free:
- rxe_queue_cleanup(q);
- srq->rq.queue = NULL;
- return err;
}
void rxe_srq_cleanup(struct rxe_pool_elem *elem)
--
2.43.0
[PATCH OLK-6.6] Bluetooth: hci_sock: Prevent race in socket write iter and sock bind
by Ziming Du 12 Jan '26
From: Edward Adam Davis <eadavis(a)qq.com>
stable inclusion
from stable-v6.6.119
commit fe68510fc99bb4b88c9c611f83699749002d515a
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/11646
CVE: CVE-2025-68305
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 89bb613511cc21ed5ba6bddc1c9b9ae9c0dad392 ]
There is a potential race condition between socket bind and socket write
iter: bind may free the cmd via mgmt_pending before write iter sends it, as
syzbot reported in the UAF [1].
Here we use hci_dev_lock to synchronize the two, thereby avoiding the UAF
mentioned in [1].
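The synchronization, sketched from the diff below: take hci_dev_lock() around
mgmt_index_removed() so the pending-command teardown cannot run concurrently
with a write iter that is still using one of those commands.
hci_dev_lock(hdev);
mgmt_index_removed(hdev);       /* frees pending mgmt commands under the lock */
hci_dev_unlock(hdev);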
[1]
syzbot reported:
BUG: KASAN: slab-use-after-free in mgmt_pending_remove+0x3b/0x210 net/bluetooth/mgmt_util.c:316
Read of size 8 at addr ffff888077164818 by task syz.0.17/5989
Call Trace:
mgmt_pending_remove+0x3b/0x210 net/bluetooth/mgmt_util.c:316
set_link_security+0x5c2/0x710 net/bluetooth/mgmt.c:1918
hci_mgmt_cmd+0x9c9/0xef0 net/bluetooth/hci_sock.c:1719
hci_sock_sendmsg+0x6ca/0xef0 net/bluetooth/hci_sock.c:1839
sock_sendmsg_nosec net/socket.c:727 [inline]
__sock_sendmsg+0x21c/0x270 net/socket.c:742
sock_write_iter+0x279/0x360 net/socket.c:1195
Allocated by task 5989:
mgmt_pending_add+0x35/0x140 net/bluetooth/mgmt_util.c:296
set_link_security+0x557/0x710 net/bluetooth/mgmt.c:1910
hci_mgmt_cmd+0x9c9/0xef0 net/bluetooth/hci_sock.c:1719
hci_sock_sendmsg+0x6ca/0xef0 net/bluetooth/hci_sock.c:1839
sock_sendmsg_nosec net/socket.c:727 [inline]
__sock_sendmsg+0x21c/0x270 net/socket.c:742
sock_write_iter+0x279/0x360 net/socket.c:1195
Freed by task 5991:
mgmt_pending_free net/bluetooth/mgmt_util.c:311 [inline]
mgmt_pending_foreach+0x30d/0x380 net/bluetooth/mgmt_util.c:257
mgmt_index_removed+0x112/0x2f0 net/bluetooth/mgmt.c:9477
hci_sock_bind+0xbe9/0x1000 net/bluetooth/hci_sock.c:1314
Fixes: 6fe26f694c82 ("Bluetooth: MGMT: Protect mgmt_pending list with its own lock")
Reported-by: syzbot+9aa47cd4633a3cf92a80(a)syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=9aa47cd4633a3cf92a80
Tested-by: syzbot+9aa47cd4633a3cf92a80(a)syzkaller.appspotmail.com
Signed-off-by: Edward Adam Davis <eadavis(a)qq.com>
Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz(a)intel.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Ziming Du <duziming2(a)huawei.com>
---
net/bluetooth/hci_sock.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 69c2ba1e843e..4df1c91bb847 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1304,7 +1304,9 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
goto done;
}
+ hci_dev_lock(hdev);
mgmt_index_removed(hdev);
+ hci_dev_unlock(hdev);
err = hci_dev_open(hdev->id);
if (err) {
--
2.43.0
[PATCH openEuler-1.0-LTS] tpm: tpm_tis: Add the missed acpi_put_table() to fix memory leak
by Gu Bowen 12 Jan '26
From: Hanjun Guo <guohanjun(a)huawei.com>
stable inclusion
from stable-v4.19.270
commit de667a2704ae799f697fd45cf4317623d8c79fb7
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/13141
CVE: CVE-2022-50824
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
------------------
commit db9622f762104459ff87ecdf885cc42c18053fd9 upstream.
In check_acpi_tpm2(), we get the TPM2 table just to make
sure it is there; the table is not used after init, so
acpi_put_table() should be added to release the ACPI memory.
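The shape of the fix is the usual validate-then-release pattern: record the status in a local, release the resource on every path, return once. A small userspace analogue (illustrative names, with malloc/free standing in for acpi_get_table()/acpi_put_table()):
```c
/* Userspace analogue of the check_acpi_tpm2() fix: the resource is
 * acquired only to validate it, so it must be released on every path,
 * including the -ENODEV one. A single exit point makes that easy.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct table { int start_method; };

static int check_table(void)
{
	struct table *tbl = malloc(sizeof(*tbl)); /* models acpi_get_table() */
	int ret = 0;

	if (!tbl)
		return -EINVAL;

	tbl->start_method = 1;
	if (tbl->start_method != 0)
		ret = -ENODEV;    /* record, but don't return yet */

	free(tbl);                /* models acpi_put_table(): runs on both paths */
	return ret;
}

int main(void)
{
	printf("check_table() = %d\n", check_table());
	return 0;
}
```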
Fixes: 4cb586a188d4 ("tpm_tis: Consolidate the platform and acpi probe flow")
Cc: stable(a)vger.kernel.org
Signed-off-by: Hanjun Guo <guohanjun(a)huawei.com>
Signed-off-by: Jarkko Sakkinen <jarkko(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Gu Bowen <gubowen5(a)huawei.com>
---
drivers/char/tpm/tpm_tis.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index f08949a5f678..5460c35796fd 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -106,6 +106,7 @@ static int check_acpi_tpm2(struct device *dev)
const struct acpi_device_id *aid = acpi_match_device(tpm_acpi_tbl, dev);
struct acpi_table_tpm2 *tbl;
acpi_status st;
+ int ret = 0;
if (!aid || aid->driver_data != DEVICE_IS_TPM2)
return 0;
@@ -113,8 +114,7 @@ static int check_acpi_tpm2(struct device *dev)
/* If the ACPI TPM2 signature is matched then a global ACPI_SIG_TPM2
* table is mandatory
*/
- st =
- acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl);
+ st = acpi_get_table(ACPI_SIG_TPM2, 1, (struct acpi_table_header **)&tbl);
if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) {
dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
return -EINVAL;
@@ -122,9 +122,10 @@ static int check_acpi_tpm2(struct device *dev)
/* The tpm2_crb driver handles this device */
if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED)
- return -ENODEV;
+ ret = -ENODEV;
- return 0;
+ acpi_put_table((struct acpi_table_header *)tbl);
+ return ret;
}
#else
static int check_acpi_tpm2(struct device *dev)
--
2.43.0
Szymon Heidrich (1):
usb: rndis_host: Secure rndis_query check against int overflow
drivers/net/usb/rndis_host.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
--
2.43.0
[PATCH OLK-6.6] ceph: fix crash in process_v2_sparse_read() for encrypted directories
by Fanhua Li 12 Jan '26
From: Viacheslav Dubeyko <Slava.Dubeyko(a)ibm.com>
stable inclusion
from stable-v6.6.119
commit 5a3f3e39b18705bc578fae58abacc8ef93c15194
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/12614
CVE: CVE-2025-68297
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 43962db4a6f593903340c85591056a0cef812dfd upstream.
A crash in process_v2_sparse_read() has been reported for fscrypt-encrypted
directories. The issue occurs with the Ceph msgr2 protocol in secure mode.
It can be reproduced by the following steps:
sudo mount -t ceph :/ /mnt/cephfs/ -o name=admin,fs=cephfs,ms_mode=secure
(1) mkdir /mnt/cephfs/fscrypt-test-3
(2) cp area_decrypted.tar /mnt/cephfs/fscrypt-test-3
(3) fscrypt encrypt --source=raw_key --key=./my.key /mnt/cephfs/fscrypt-test-3
(4) fscrypt lock /mnt/cephfs/fscrypt-test-3
(5) fscrypt unlock --key=my.key /mnt/cephfs/fscrypt-test-3
(6) cat /mnt/cephfs/fscrypt-test-3/area_decrypted.tar
(7) Issue has been triggered
[ 408.072247] ------------[ cut here ]------------
[ 408.072251] WARNING: CPU: 1 PID: 392 at net/ceph/messenger_v2.c:865
ceph_con_v2_try_read+0x4b39/0x72f0
[ 408.072267] Modules linked in: intel_rapl_msr intel_rapl_common
intel_uncore_frequency_common intel_pmc_core pmt_telemetry pmt_discovery
pmt_class intel_pmc_ssram_telemetry intel_vsec kvm_intel joydev kvm irqbypass
polyval_clmulni ghash_clmulni_intel aesni_intel rapl input_leds psmouse
serio_raw i2c_piix4 vga16fb bochs vgastate i2c_smbus floppy mac_hid qemu_fw_cfg
pata_acpi sch_fq_codel rbd msr parport_pc ppdev lp parport efi_pstore
[ 408.072304] CPU: 1 UID: 0 PID: 392 Comm: kworker/1:3 Not tainted 6.17.0-rc7+
[ 408.072307] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
1.17.0-5.fc42 04/01/2014
[ 408.072310] Workqueue: ceph-msgr ceph_con_workfn
[ 408.072314] RIP: 0010:ceph_con_v2_try_read+0x4b39/0x72f0
[ 408.072317] Code: c7 c1 20 f0 d4 ae 50 31 d2 48 c7 c6 60 27 d5 ae 48 c7 c7 f8
8e 6f b0 68 60 38 d5 ae e8 00 47 61 fe 48 83 c4 18 e9 ac fc ff ff <0f> 0b e9 06
fe ff ff 4c 8b 9d 98 fd ff ff 0f 84 64 e7 ff ff 89 85
[ 408.072319] RSP: 0018:ffff88811c3e7a30 EFLAGS: 00010246
[ 408.072322] RAX: ffffed1024874c6f RBX: ffffea00042c2b40 RCX: 0000000000000f38
[ 408.072324] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
[ 408.072325] RBP: ffff88811c3e7ca8 R08: 0000000000000000 R09: 00000000000000c8
[ 408.072326] R10: 00000000000000c8 R11: 0000000000000000 R12: 00000000000000c8
[ 408.072327] R13: dffffc0000000000 R14: ffff8881243a6030 R15: 0000000000003000
[ 408.072329] FS: 0000000000000000(0000) GS:ffff88823eadf000(0000)
knlGS:0000000000000000
[ 408.072331] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 408.072332] CR2: 000000c0003c6000 CR3: 000000010c106005 CR4: 0000000000772ef0
[ 408.072336] PKRU: 55555554
[ 408.072337] Call Trace:
[ 408.072338] <TASK>
[ 408.072340] ? sched_clock_noinstr+0x9/0x10
[ 408.072344] ? __pfx_ceph_con_v2_try_read+0x10/0x10
[ 408.072347] ? _raw_spin_unlock+0xe/0x40
[ 408.072349] ? finish_task_switch.isra.0+0x15d/0x830
[ 408.072353] ? __kasan_check_write+0x14/0x30
[ 408.072357] ? mutex_lock+0x84/0xe0
[ 408.072359] ? __pfx_mutex_lock+0x10/0x10
[ 408.072361] ceph_con_workfn+0x27e/0x10e0
[ 408.072364] ? metric_delayed_work+0x311/0x2c50
[ 408.072367] process_one_work+0x611/0xe20
[ 408.072371] ? __kasan_check_write+0x14/0x30
[ 408.072373] worker_thread+0x7e3/0x1580
[ 408.072375] ? __pfx__raw_spin_lock_irqsave+0x10/0x10
[ 408.072378] ? __pfx_worker_thread+0x10/0x10
[ 408.072381] kthread+0x381/0x7a0
[ 408.072383] ? __pfx__raw_spin_lock_irq+0x10/0x10
[ 408.072385] ? __pfx_kthread+0x10/0x10
[ 408.072387] ? __kasan_check_write+0x14/0x30
[ 408.072389] ? recalc_sigpending+0x160/0x220
[ 408.072392] ? _raw_spin_unlock_irq+0xe/0x50
[ 408.072394] ? calculate_sigpending+0x78/0xb0
[ 408.072395] ? __pfx_kthread+0x10/0x10
[ 408.072397] ret_from_fork+0x2b6/0x380
[ 408.072400] ? __pfx_kthread+0x10/0x10
[ 408.072402] ret_from_fork_asm+0x1a/0x30
[ 408.072406] </TASK>
[ 408.072407] ---[ end trace 0000000000000000 ]---
[ 408.072418] Oops: general protection fault, probably for non-canonical
address 0xdffffc0000000000: 0000 [#1] SMP KASAN NOPTI
[ 408.072984] KASAN: null-ptr-deref in range [0x0000000000000000-
0x0000000000000007]
[ 408.073350] CPU: 1 UID: 0 PID: 392 Comm: kworker/1:3 Tainted: G W
6.17.0-rc7+ #1 PREEMPT(voluntary)
[ 408.073886] Tainted: [W]=WARN
[ 408.074042] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
1.17.0-5.fc42 04/01/2014
[ 408.074468] Workqueue: ceph-msgr ceph_con_workfn
[ 408.074694] RIP: 0010:ceph_msg_data_advance+0x79/0x1a80
[ 408.074976] Code: fc ff df 49 8d 77 08 48 c1 ee 03 80 3c 16 00 0f 85 07 11 00
00 48 ba 00 00 00 00 00 fc ff df 49 8b 5f 08 48 89 de 48 c1 ee 03 <0f> b6 14 16
84 d2 74 09 80 fa 03 0f 8e 0f 0e 00 00 8b 13 83 fa 03
[ 408.075884] RSP: 0018:ffff88811c3e7990 EFLAGS: 00010246
[ 408.076305] RAX: ffff8881243a6388 RBX: 0000000000000000 RCX: 0000000000000000
[ 408.076909] RDX: dffffc0000000000 RSI: 0000000000000000 RDI: ffff8881243a6378
[ 408.077466] RBP: ffff88811c3e7a20 R08: 0000000000000000 R09: 00000000000000c8
[ 408.078034] R10: ffff8881243a6388 R11: 0000000000000000 R12: ffffed1024874c71
[ 408.078575] R13: dffffc0000000000 R14: ffff8881243a6030 R15: ffff8881243a6378
[ 408.079159] FS: 0000000000000000(0000) GS:ffff88823eadf000(0000)
knlGS:0000000000000000
[ 408.079736] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 408.080039] CR2: 000000c0003c6000 CR3: 000000010c106005 CR4: 0000000000772ef0
[ 408.080376] PKRU: 55555554
[ 408.080513] Call Trace:
[ 408.080630] <TASK>
[ 408.080729] ceph_con_v2_try_read+0x49b9/0x72f0
[ 408.081115] ? __pfx_ceph_con_v2_try_read+0x10/0x10
[ 408.081348] ? _raw_spin_unlock+0xe/0x40
[ 408.081538] ? finish_task_switch.isra.0+0x15d/0x830
[ 408.081768] ? __kasan_check_write+0x14/0x30
[ 408.081986] ? mutex_lock+0x84/0xe0
[ 408.082160] ? __pfx_mutex_lock+0x10/0x10
[ 408.082343] ceph_con_workfn+0x27e/0x10e0
[ 408.082529] ? metric_delayed_work+0x311/0x2c50
[ 408.082737] process_one_work+0x611/0xe20
[ 408.082948] ? __kasan_check_write+0x14/0x30
[ 408.083156] worker_thread+0x7e3/0x1580
[ 408.083331] ? __pfx__raw_spin_lock_irqsave+0x10/0x10
[ 408.083557] ? __pfx_worker_thread+0x10/0x10
[ 408.083751] kthread+0x381/0x7a0
[ 408.083922] ? __pfx__raw_spin_lock_irq+0x10/0x10
[ 408.084139] ? __pfx_kthread+0x10/0x10
[ 408.084310] ? __kasan_check_write+0x14/0x30
[ 408.084510] ? recalc_sigpending+0x160/0x220
[ 408.084708] ? _raw_spin_unlock_irq+0xe/0x50
[ 408.084917] ? calculate_sigpending+0x78/0xb0
[ 408.085138] ? __pfx_kthread+0x10/0x10
[ 408.085335] ret_from_fork+0x2b6/0x380
[ 408.085525] ? __pfx_kthread+0x10/0x10
[ 408.085720] ret_from_fork_asm+0x1a/0x30
[ 408.085922] </TASK>
[ 408.086036] Modules linked in: intel_rapl_msr intel_rapl_common
intel_uncore_frequency_common intel_pmc_core pmt_telemetry pmt_discovery
pmt_class intel_pmc_ssram_telemetry intel_vsec kvm_intel joydev kvm irqbypass
polyval_clmulni ghash_clmulni_intel aesni_intel rapl input_leds psmouse
serio_raw i2c_piix4 vga16fb bochs vgastate i2c_smbus floppy mac_hid qemu_fw_cfg
pata_acpi sch_fq_codel rbd msr parport_pc ppdev lp parport efi_pstore
[ 408.087778] ---[ end trace 0000000000000000 ]---
[ 408.088007] RIP: 0010:ceph_msg_data_advance+0x79/0x1a80
[ 408.088260] Code: fc ff df 49 8d 77 08 48 c1 ee 03 80 3c 16 00 0f 85 07 11 00
00 48 ba 00 00 00 00 00 fc ff df 49 8b 5f 08 48 89 de 48 c1 ee 03 <0f> b6 14 16
84 d2 74 09 80 fa 03 0f 8e 0f 0e 00 00 8b 13 83 fa 03
[ 408.089118] RSP: 0018:ffff88811c3e7990 EFLAGS: 00010246
[ 408.089357] RAX: ffff8881243a6388 RBX: 0000000000000000 RCX: 0000000000000000
[ 408.089678] RDX: dffffc0000000000 RSI: 0000000000000000 RDI: ffff8881243a6378
[ 408.090020] RBP: ffff88811c3e7a20 R08: 0000000000000000 R09: 00000000000000c8
[ 408.090360] R10: ffff8881243a6388 R11: 0000000000000000 R12: ffffed1024874c71
[ 408.090687] R13: dffffc0000000000 R14: ffff8881243a6030 R15: ffff8881243a6378
[ 408.091035] FS: 0000000000000000(0000) GS:ffff88823eadf000(0000)
knlGS:0000000000000000
[ 408.091452] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 408.092015] CR2: 000000c0003c6000 CR3: 000000010c106005 CR4: 0000000000772ef0
[ 408.092530] PKRU: 55555554
[ 417.112915]
==================================================================
[ 417.113491] BUG: KASAN: slab-use-after-free in
__mutex_lock.constprop.0+0x1522/0x1610
[ 417.114014] Read of size 4 at addr ffff888124870034 by task kworker/2:0/4951
[ 417.114587] CPU: 2 UID: 0 PID: 4951 Comm: kworker/2:0 Tainted: G D W
6.17.0-rc7+ #1 PREEMPT(voluntary)
[ 417.114592] Tainted: [D]=DIE, [W]=WARN
[ 417.114593] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
1.17.0-5.fc42 04/01/2014
[ 417.114596] Workqueue: events handle_timeout
[ 417.114601] Call Trace:
[ 417.114602] <TASK>
[ 417.114604] dump_stack_lvl+0x5c/0x90
[ 417.114610] print_report+0x171/0x4dc
[ 417.114613] ? __pfx__raw_spin_lock_irqsave+0x10/0x10
[ 417.114617] ? kasan_complete_mode_report_info+0x80/0x220
[ 417.114621] kasan_report+0xbd/0x100
[ 417.114625] ? __mutex_lock.constprop.0+0x1522/0x1610
[ 417.114628] ? __mutex_lock.constprop.0+0x1522/0x1610
[ 417.114630] __asan_report_load4_noabort+0x14/0x30
[ 417.114633] __mutex_lock.constprop.0+0x1522/0x1610
[ 417.114635] ? queue_con_delay+0x8d/0x200
[ 417.114638] ? __pfx___mutex_lock.constprop.0+0x10/0x10
[ 417.114641] ? __send_subscribe+0x529/0xb20
[ 417.114644] __mutex_lock_slowpath+0x13/0x20
[ 417.114646] mutex_lock+0xd4/0xe0
[ 417.114649] ? __pfx_mutex_lock+0x10/0x10
[ 417.114652] ? ceph_monc_renew_subs+0x2a/0x40
[ 417.114654] ceph_con_keepalive+0x22/0x110
[ 417.114656] handle_timeout+0x6b3/0x11d0
[ 417.114659] ? _raw_spin_unlock_irq+0xe/0x50
[ 417.114662] ? __pfx_handle_timeout+0x10/0x10
[ 417.114664] ? queue_delayed_work_on+0x8e/0xa0
[ 417.114669] process_one_work+0x611/0xe20
[ 417.114672] ? __kasan_check_write+0x14/0x30
[ 417.114676] worker_thread+0x7e3/0x1580
[ 417.114678] ? __pfx__raw_spin_lock_irqsave+0x10/0x10
[ 417.114682] ? __pfx_sched_setscheduler_nocheck+0x10/0x10
[ 417.114687] ? __pfx_worker_thread+0x10/0x10
[ 417.114689] kthread+0x381/0x7a0
[ 417.114692] ? __pfx__raw_spin_lock_irq+0x10/0x10
[ 417.114694] ? __pfx_kthread+0x10/0x10
[ 417.114697] ? __kasan_check_write+0x14/0x30
[ 417.114699] ? recalc_sigpending+0x160/0x220
[ 417.114703] ? _raw_spin_unlock_irq+0xe/0x50
[ 417.114705] ? calculate_sigpending+0x78/0xb0
[ 417.114707] ? __pfx_kthread+0x10/0x10
[ 417.114710] ret_from_fork+0x2b6/0x380
[ 417.114713] ? __pfx_kthread+0x10/0x10
[ 417.114715] ret_from_fork_asm+0x1a/0x30
[ 417.114720] </TASK>
[ 417.125171] Allocated by task 2:
[ 417.125333] kasan_save_stack+0x26/0x60
[ 417.125522] kasan_save_track+0x14/0x40
[ 417.125742] kasan_save_alloc_info+0x39/0x60
[ 417.125945] __kasan_slab_alloc+0x8b/0xb0
[ 417.126133] kmem_cache_alloc_node_noprof+0x13b/0x460
[ 417.126381] copy_process+0x320/0x6250
[ 417.126595] kernel_clone+0xb7/0x840
[ 417.126792] kernel_thread+0xd6/0x120
[ 417.126995] kthreadd+0x85c/0xbe0
[ 417.127176] ret_from_fork+0x2b6/0x380
[ 417.127378] ret_from_fork_asm+0x1a/0x30
[ 417.127692] Freed by task 0:
[ 417.127851] kasan_save_stack+0x26/0x60
[ 417.128057] kasan_save_track+0x14/0x40
[ 417.128267] kasan_save_free_info+0x3b/0x60
[ 417.128491] __kasan_slab_free+0x6c/0xa0
[ 417.128708] kmem_cache_free+0x182/0x550
[ 417.128906] free_task+0xeb/0x140
[ 417.129070] __put_task_struct+0x1d2/0x4f0
[ 417.129259] __put_task_struct_rcu_cb+0x15/0x20
[ 417.129480] rcu_do_batch+0x3d3/0xe70
[ 417.129681] rcu_core+0x549/0xb30
[ 417.129839] rcu_core_si+0xe/0x20
[ 417.130005] handle_softirqs+0x160/0x570
[ 417.130190] __irq_exit_rcu+0x189/0x1e0
[ 417.130369] irq_exit_rcu+0xe/0x20
[ 417.130531] sysvec_apic_timer_interrupt+0x9f/0xd0
[ 417.130768] asm_sysvec_apic_timer_interrupt+0x1b/0x20
[ 417.131082] Last potentially related work creation:
[ 417.131305] kasan_save_stack+0x26/0x60
[ 417.131484] kasan_record_aux_stack+0xae/0xd0
[ 417.131695] __call_rcu_common+0xcd/0x14b0
[ 417.131909] call_rcu+0x31/0x50
[ 417.132071] delayed_put_task_struct+0x128/0x190
[ 417.132295] rcu_do_batch+0x3d3/0xe70
[ 417.132478] rcu_core+0x549/0xb30
[ 417.132658] rcu_core_si+0xe/0x20
[ 417.132808] handle_softirqs+0x160/0x570
[ 417.132993] __irq_exit_rcu+0x189/0x1e0
[ 417.133181] irq_exit_rcu+0xe/0x20
[ 417.133353] sysvec_apic_timer_interrupt+0x9f/0xd0
[ 417.133584] asm_sysvec_apic_timer_interrupt+0x1b/0x20
[ 417.133921] Second to last potentially related work creation:
[ 417.134183] kasan_save_stack+0x26/0x60
[ 417.134362] kasan_record_aux_stack+0xae/0xd0
[ 417.134566] __call_rcu_common+0xcd/0x14b0
[ 417.134782] call_rcu+0x31/0x50
[ 417.134929] put_task_struct_rcu_user+0x58/0xb0
[ 417.135143] finish_task_switch.isra.0+0x5d3/0x830
[ 417.135366] __schedule+0xd30/0x5100
[ 417.135534] schedule_idle+0x5a/0x90
[ 417.135712] do_idle+0x25f/0x410
[ 417.135871] cpu_startup_entry+0x53/0x70
[ 417.136053] start_secondary+0x216/0x2c0
[ 417.136233] common_startup_64+0x13e/0x141
[ 417.136894] The buggy address belongs to the object at ffff888124870000
which belongs to the cache task_struct of size 10504
[ 417.138122] The buggy address is located 52 bytes inside of
freed 10504-byte region [ffff888124870000, ffff888124872908)
[ 417.139465] The buggy address belongs to the physical page:
[ 417.140016] page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x0
pfn:0x124870
[ 417.140789] head: order:3 mapcount:0 entire_mapcount:0 nr_pages_mapped:0
pincount:0
[ 417.141519] memcg:ffff88811aa20e01
[ 417.141874] anon flags:
0x17ffffc0000040(head|node=0|zone=2|lastcpupid=0x1fffff)
[ 417.142600] page_type: f5(slab)
[ 417.142922] raw: 0017ffffc0000040 ffff88810094f040 0000000000000000
dead000000000001
[ 417.143554] raw: 0000000000000000 0000000000030003 00000000f5000000
ffff88811aa20e01
[ 417.143954] head: 0017ffffc0000040 ffff88810094f040 0000000000000000
dead000000000001
[ 417.144329] head: 0000000000000000 0000000000030003 00000000f5000000
ffff88811aa20e01
[ 417.144710] head: 0017ffffc0000003 ffffea0004921c01 00000000ffffffff
00000000ffffffff
[ 417.145106] head: ffffffffffffffff 0000000000000000 00000000ffffffff
0000000000000008
[ 417.145485] page dumped because: kasan: bad access detected
[ 417.145859] Memory state around the buggy address:
[ 417.146094] ffff88812486ff00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
fc
[ 417.146439] ffff88812486ff80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
fc
[ 417.146791] >ffff888124870000: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb
fb
[ 417.147145] ^
[ 417.147387] ffff888124870080: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
fb
[ 417.147751] ffff888124870100: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
fb
[ 417.148123]
==================================================================
First, we get a warning in get_bvec_at() because
cursor->total_resid is zero. Then we crash in
ceph_msg_data_advance() because cursor->data is NULL.
In other words, get_bvec_at() receives an uninitialized
ceph_msg_data_cursor: data is NULL and total_resid is zero.
The same issue does not occur with the Ceph msgr1 protocol,
because there ceph_msg_data_cursor_init() is called before
the sparse data is read.
This patch adds a ceph_msg_data_cursor_init() call at the
beginning of process_v2_sparse_read() to guarantee that the
sparse-read logic also works correctly with the msgr2
protocol.
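The invariant the patch restores is simply "initialize the cursor before the first get/advance". A minimal standalone sketch of that contract (illustrative types, not the ceph structures):
```c
/* Minimal model of the cursor contract process_v2_sparse_read() was
 * violating: every field the get/advance helpers read must be set by
 * an explicit init before the loop starts.
 */
#include <stddef.h>
#include <stdio.h>

struct cursor {
	const char *data;     /* models cursor->data */
	size_t total_resid;   /* models cursor->total_resid */
};

static void cursor_init(struct cursor *c, const char *data, size_t len)
{
	c->data = data;
	c->total_resid = len;
}

static void cursor_advance(struct cursor *c, size_t n)
{
	c->data += n;          /* would be a NULL deref without init */
	c->total_resid -= n;
}

int main(void)
{
	struct cursor c;
	const char msg[] = "sparse";

	cursor_init(&c, msg, sizeof(msg) - 1);  /* the added step */
	while (c.total_resid) {
		printf("%c", *c.data);
		cursor_advance(&c, 1);
	}
	printf("\n");
	return 0;
}
```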
Cc: stable(a)vger.kernel.org
Link: https://tracker.ceph.com/issues/73152
Signed-off-by: Viacheslav Dubeyko <Slava.Dubeyko(a)ibm.com>
Reviewed-by: Ilya Dryomov <idryomov(a)gmail.com>
Signed-off-by: Ilya Dryomov <idryomov(a)gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Fanhua Li <lifanhua5(a)huawei.com>
---
net/ceph/messenger_v2.c | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c
index f9ed6bf6c4776..73ac1c1f3394a 100644
--- a/net/ceph/messenger_v2.c
+++ b/net/ceph/messenger_v2.c
@@ -1091,13 +1091,16 @@ static int decrypt_control_remainder(struct ceph_connection *con)
static int process_v2_sparse_read(struct ceph_connection *con,
struct page **pages, int spos)
{
- struct ceph_msg_data_cursor *cursor = &con->v2.in_cursor;
+ struct ceph_msg_data_cursor cursor;
int ret;
+ ceph_msg_data_cursor_init(&cursor, con->in_msg,
+ con->in_msg->sparse_read_total);
+
for (;;) {
char *buf = NULL;
- ret = con->ops->sparse_read(con, cursor, &buf);
+ ret = con->ops->sparse_read(con, &cursor, &buf);
if (ret <= 0)
return ret;
@@ -1115,11 +1118,11 @@ static int process_v2_sparse_read(struct ceph_connection *con,
} else {
struct bio_vec bv;
- get_bvec_at(cursor, &bv);
+ get_bvec_at(&cursor, &bv);
len = min_t(int, len, bv.bv_len);
memcpy_page(bv.bv_page, bv.bv_offset,
spage, soff, len);
- ceph_msg_data_advance(cursor, len);
+ ceph_msg_data_advance(&cursor, len);
}
spos += len;
ret -= len;
--
2.43.0
12 Jan '26
From: Jiefeng Zhang <jiefeng.z.zhang(a)gmail.com>
stable inclusion
from stable-v6.6.119
commit 3be37c3c96b16462394fcb8e15e757c691377038
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/11644
CVE: CVE-2025-68301
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 5ffcb7b890f61541201461580bb6622ace405aec ]
The atlantic driver can receive packets with more than MAX_SKB_FRAGS (17)
fragments when handling large multi-descriptor packets. This causes an
out-of-bounds write in skb_add_rx_frag_netmem() leading to kernel panic.
The issue occurs because the driver doesn't check the total number of
fragments before calling skb_add_rx_frag(). When a packet requires more
than MAX_SKB_FRAGS fragments, the fragment index exceeds the array bounds.
Fix this by assuming there will be an extra frag whenever
buff->len > AQ_CFG_RX_HDR_SIZE, so that all fragments are accounted for,
and by reusing the existing check to stop the overflow earlier in the
code path.
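The accounting the fix introduces can be shown in isolation: start the fragment count at one when the head buffer itself consumes a frag slot, then reject the packet before any out-of-bounds write. A hedged standalone model (the constants are illustrative, not taken from the driver):
```c
/* Standalone model of the fragment accounting: start the count at 1
 * when the first buffer will itself occupy a frag slot, then refuse
 * the packet before writing past a MAX_FRAGS-sized array.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_FRAGS    17   /* models MAX_SKB_FRAGS */
#define RX_HDR_SIZE 256   /* models AQ_CFG_RX_HDR_SIZE (illustrative) */

static bool frags_fit(unsigned int first_len, unsigned int extra_descs)
{
	unsigned int frag_cnt = 0;

	if (first_len > RX_HDR_SIZE)
		frag_cnt++;            /* the extra frag the fix accounts for */
	frag_cnt += extra_descs;

	return frag_cnt <= MAX_FRAGS;  /* reject before the OOB write */
}

int main(void)
{
	printf("%d\n", frags_fit(2048, 17));  /* 0: would overflow */
	printf("%d\n", frags_fit(128, 17));   /* 1: fits */
	return 0;
}
```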
This crash occurred in production with an Aquantia AQC113 10G NIC.
Stack trace from production environment:
```
RIP: 0010:skb_add_rx_frag_netmem+0x29/0xd0
Code: 90 f3 0f 1e fa 0f 1f 44 00 00 48 89 f8 41 89
ca 48 89 d7 48 63 ce 8b 90 c0 00 00 00 48 c1 e1 04 48 01 ca 48 03 90
c8 00 00 00 <48> 89 7a 30 44 89 52 3c 44 89 42 38 40 f6 c7 01 75 74 48
89 fa 83
RSP: 0018:ffffa9bec02a8d50 EFLAGS: 00010287
RAX: ffff925b22e80a00 RBX: ffff925ad38d2700 RCX:
fffffffe0a0c8000
RDX: ffff9258ea95bac0 RSI: ffff925ae0a0c800 RDI:
0000000000037a40
RBP: 0000000000000024 R08: 0000000000000000 R09:
0000000000000021
R10: 0000000000000848 R11: 0000000000000000 R12:
ffffa9bec02a8e24
R13: ffff925ad8615570 R14: 0000000000000000 R15:
ffff925b22e80a00
FS: 0000000000000000(0000)
GS:ffff925e47880000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: ffff9258ea95baf0 CR3: 0000000166022004 CR4:
0000000000f72ef0
PKRU: 55555554
Call Trace:
<IRQ>
aq_ring_rx_clean+0x175/0xe60 [atlantic]
? aq_ring_rx_clean+0x14d/0xe60 [atlantic]
? aq_ring_tx_clean+0xdf/0x190 [atlantic]
? kmem_cache_free+0x348/0x450
? aq_vec_poll+0x81/0x1d0 [atlantic]
? __napi_poll+0x28/0x1c0
? net_rx_action+0x337/0x420
```
Fixes: 6aecbba12b5c ("net: atlantic: add check for MAX_SKB_FRAGS")
Changes in v4:
- Add Fixes: tag to satisfy patch validation requirements.
Changes in v3:
- Fix by assuming there will be an extra frag if buff->len > AQ_CFG_RX_HDR_SIZE,
then all fragments are accounted for.
Signed-off-by: Jiefeng Zhang <jiefeng.z.zhang(a)gmail.com>
Link: https://patch.msgid.link/20251126032249.69358-1-jiefeng.z.zhang@gmail.com
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Fanhua Li <lifanhua5(a)huawei.com>
---
drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index f7433abd65915..3f004d08307fb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -547,6 +547,11 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
if (!buff->is_eop) {
unsigned int frag_cnt = 0U;
+
+ /* There will be an extra fragment */
+ if (buff->len > AQ_CFG_RX_HDR_SIZE)
+ frag_cnt++;
+
buff_ = buff;
do {
bool is_rsc_completed = true;
--
2.43.0
12 Jan '26
From: Cen Zhang <zzzccc427(a)163.com>
stable inclusion
from stable-v6.6.117
commit 932c0a4f77ac13e526fdd5b42914d29c9821d389
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/11309
CVE: CVE-2025-40318
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit 09b0cd1297b4dbfe736aeaa0ceeab2265f47f772 ]
hci_cmd_sync_dequeue_once() looks up and then cancels the
entry in two separate lock sections. Meanwhile,
hci_cmd_sync_work() can delete the same entry, leading to a
double list_del() and a use-after-free.
Fix this by holding cmd_sync_work_lock across both the
lookup and the cancel, so that the entry cannot be removed
concurrently.
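This is check-then-act made atomic: the lookup and the removal must sit in one critical section, or two dequeuers can both find the entry and both unlink it. A small pthreads model (illustrative names, not the kernel code):
```c
/* Model of the dequeue-once fix: lookup and removal of an entry
 * happen inside one critical section, so two concurrent dequeuers
 * cannot both see the entry and both try to remove it.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int entry_present = 1;  /* stands in for the cmd_sync entry */

static bool dequeue_once(void)
{
	bool found;

	pthread_mutex_lock(&list_lock);
	found = entry_present;      /* lookup ... */
	if (found)
		entry_present = 0;  /* ... and removal, atomically */
	pthread_mutex_unlock(&list_lock);
	return found;
}

static void *worker(void *arg)
{
	if (dequeue_once())
		printf("this thread removed the entry\n");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);  /* exactly one thread prints */
	return 0;
}
```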
Fixes: 505ea2b29592 ("Bluetooth: hci_sync: Add helper functions to manipulate cmd_sync queue")
Reported-by: Cen Zhang <zzzccc427(a)163.com>
Signed-off-by: Cen Zhang <zzzccc427(a)163.com>
Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz(a)intel.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Fanhua Li <lifanhua5(a)huawei.com>
---
net/bluetooth/hci_sync.c | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 9d31b7881af5f..16ddaf71fa6f2 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -881,11 +881,17 @@ bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
{
struct hci_cmd_sync_work_entry *entry;
- entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
- if (!entry)
+ mutex_lock(&hdev->cmd_sync_work_lock);
+
+ entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
+ if (!entry) {
+ mutex_unlock(&hdev->cmd_sync_work_lock);
return false;
+ }
- hci_cmd_sync_cancel_entry(hdev, entry);
+ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
+
+ mutex_unlock(&hdev->cmd_sync_work_lock);
return true;
}
--
2.43.0
[PATCH openEuler-1.0-LTS] ipv4: route: Prevent rt_bind_exception() from rebinding stale fnhe
by Li Xiasong 12 Jan '26
From: Chuang Wang <nashuiliang(a)gmail.com>
mainline inclusion
from mainline-v6.18-rc6
commit ac1499fcd40fe06479e9b933347b837ccabc2a40
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/11640
CVE: CVE-2025-68241
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
The sit driver's packet transmission path calls sit_tunnel_xmit() ->
update_or_create_fnhe(), which leads to fnhe_remove_oldest() being called
to delete entries exceeding FNHE_RECLAIM_DEPTH plus a random offset.
The race window is between fnhe_remove_oldest() selecting fnheX for
deletion and the subsequent kfree_rcu(). During this time, the
concurrent path's __mkroute_output() -> find_exception() can fetch the
soon-to-be-deleted fnheX, and rt_bind_exception() then binds it with a
new dst using a dst_hold(). When the original fnheX is freed via RCU,
the dst reference remains permanently leaked.
CPU 0                             CPU 1
                                  __mkroute_output()
                                    find_exception() [fnheX]
update_or_create_fnhe()
  fnhe_remove_oldest() [fnheX]
                                    rt_bind_exception() [bind dst]
RCU callback [fnheX freed, dst leak]
This issue manifests as a device reference count leak and a warning in
dmesg when unregistering the net device:
unregister_netdevice: waiting for sitX to become free. Usage count = N
Ido Schimmel provided the simple test validation method [1].
The fix clears 'oldest->fnhe_daddr' before calling fnhe_flush_routes().
Since rt_bind_exception() checks this field, setting it to zero prevents
the stale fnhe from being reused and bound to a new dst just before it
is freed; a sketch of this pattern follows the reproducer below.
[1]
ip netns add ns1
ip -n ns1 link set dev lo up
ip -n ns1 address add 192.0.2.1/32 dev lo
ip -n ns1 link add name dummy1 up type dummy
ip -n ns1 route add 192.0.2.2/32 dev dummy1
ip -n ns1 link add name gretap1 up arp off type gretap \
local 192.0.2.1 remote 192.0.2.2
ip -n ns1 route add 198.51.0.0/16 dev gretap1
taskset -c 0 ip netns exec ns1 mausezahn gretap1 \
-A 198.51.100.1 -B 198.51.0.0/16 -t udp -p 1000 -c 0 -q &
taskset -c 2 ip netns exec ns1 mausezahn gretap1 \
-A 198.51.100.1 -B 198.51.0.0/16 -t udp -p 1000 -c 0 -q &
sleep 10
ip netns pids ns1 | xargs kill
ip netns del ns1
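A compact model of the guard this relies on (single-threaded and illustrative; the kernel's RCU-deferred free and lockless readers are elided): a reader that already holds a pointer re-checks the key before taking a reference, so a remover that zeroes the key first wins the race.
```c
/* Model of the rt_bind_exception() guard: a reader holding a stale
 * pointer re-checks the lookup key before binding a reference, so a
 * remover that clears the key first prevents the dst leak. Deferred
 * freeing (kfree_rcu in the kernel) is elided; names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct fnhe {
	unsigned int daddr;   /* lookup key; 0 means "being removed" */
	int dst_refs;         /* models references taken via dst_hold() */
};

static bool bind_exception(struct fnhe *e, unsigned int daddr)
{
	if (e->daddr != daddr)   /* key cleared: entry is dying, back off */
		return false;
	e->dst_refs++;           /* models dst_hold() */
	return true;
}

int main(void)
{
	struct fnhe e = { .daddr = 0x0a000001, .dst_refs = 0 };

	/* remover runs first: invalidate the key before the deferred free */
	e.daddr = 0;

	/* a late reader with a stale pointer can no longer pin the entry */
	printf("bound = %d, refs = %d\n",
	       bind_exception(&e, 0x0a000001), e.dst_refs);
	return 0;
}
```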
Cc: stable(a)vger.kernel.org
Fixes: 67d6d681e15b ("ipv4: make exception cache less predictible")
Signed-off-by: Chuang Wang <nashuiliang(a)gmail.com>
Reviewed-by: Ido Schimmel <idosch(a)nvidia.com>
Reviewed-by: Eric Dumazet <edumazet(a)google.com>
Link: https://patch.msgid.link/20251111064328.24440-1-nashuiliang@gmail.com
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Conflicts:
net/ipv4/route.c
[conflicts due to merged f9072e26ea8b ("ipv4: fix uninitialized warnings in fnhe_remove_oldest()")]
Signed-off-by: Li Xiasong <lixiasong1(a)huawei.com>
---
net/ipv4/route.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 3b090c9c24bb..e8b506dae189 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -618,6 +618,10 @@ static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash)
}
}
if (oldest) {
+ /* Clear oldest->fnhe_daddr to prevent this fnhe from being
+ * rebound with new dsts in rt_bind_exception().
+ */
+ oldest->fnhe_daddr = 0;
fnhe_flush_routes(oldest);
*oldest_p = oldest->fnhe_next;
kfree_rcu(oldest, rcu);
--
2.34.1
[PATCH OLK-5.10] fs/buffer: fix use-after-free when call bh_read() helper
by Yongjian Sun 09 Jan '26
From: Ye Bin <yebin10(a)huawei.com>
mainline inclusion
from mainline-v6.17-rc3
commit 7375f22495e7cd1c5b3b5af9dcc4f6dffe34ce49
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/9016
CVE: CVE-2025-39691
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
There's an issue as follows:
BUG: KASAN: stack-out-of-bounds in end_buffer_read_sync+0xe3/0x110
Read of size 8 at addr ffffc9000168f7f8 by task swapper/3/0
CPU: 3 UID: 0 PID: 0 Comm: swapper/3 Not tainted 6.16.0-862.14.0.6.x86_64
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996)
Call Trace:
<IRQ>
dump_stack_lvl+0x55/0x70
print_address_description.constprop.0+0x2c/0x390
print_report+0xb4/0x270
kasan_report+0xb8/0xf0
end_buffer_read_sync+0xe3/0x110
end_bio_bh_io_sync+0x56/0x80
blk_update_request+0x30a/0x720
scsi_end_request+0x51/0x2b0
scsi_io_completion+0xe3/0x480
? scsi_device_unbusy+0x11e/0x160
blk_complete_reqs+0x7b/0x90
handle_softirqs+0xef/0x370
irq_exit_rcu+0xa5/0xd0
sysvec_apic_timer_interrupt+0x6e/0x90
</IRQ>
The above issue happens during an ntfs3 filesystem mount; it may occur
as follows:
mount                                     IRQ
ntfs_fill_super
  read_cache_page
    do_read_cache_folio
      filemap_read_folio
        mpage_read_folio
          do_mpage_readpage
            ntfs_get_block_vbo
              bh_read
                submit_bh
                wait_on_buffer(bh);
                                          blk_complete_reqs
                                            scsi_io_completion
                                              scsi_end_request
                                                blk_update_request
                                                  end_bio_bh_io_sync
                                                    end_buffer_read_sync
                                                      __end_buffer_read_notouch
                                                        unlock_buffer
                wait_on_buffer(bh); --> will return to caller
                                                        put_bh
                                                          --> trigger stack-out-of-bounds
In the mpage_read_folio() function, the stack variable 'map_bh' is
passed to ntfs_get_block_vbo(). Once unlock_buffer() unlocks and
wait_on_buffer() returns to continue processing, the stack variable
is likely to be reclaimed. Consequently, during the end_buffer_read_sync()
process, calling put_bh() may result in stack overrun.
If the bh is not allocated on the stack, it belongs to a folio. Freeing
a buffer head which belongs to a folio is done by drop_buffers() which
will fail to free buffers which are still locked. So it is safe to call
put_bh() before __end_buffer_read_notouch().
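A simplified pthreads model of the reordering (the kernel's buffer lock and lockless waiter are approximated with a mutex/condvar; names are illustrative): the completer's last access to the shared object must precede the wakeup, because after the wakeup the waiter may return and take its on-stack object with it.
```c
/* Model of the end_buffer_read_sync() reordering: once the completer
 * signals (unlock_buffer() in the kernel), the waiter may return and
 * its on-stack buffer head vanishes, so the completer's last touch of
 * the object (put_bh()) must happen before the signal, not after.
 */
#include <pthread.h>
#include <stdio.h>

struct bh {
	pthread_mutex_t m;
	pthread_cond_t c;
	int locked;
	int refcount;
};

static void put_bh(struct bh *b) { b->refcount--; }

static void end_read(struct bh *b)
{
	pthread_mutex_lock(&b->m);
	put_bh(b);          /* last access first ... */
	b->locked = 0;      /* ... then the wakeup (unlock_buffer()) */
	pthread_cond_signal(&b->c);
	pthread_mutex_unlock(&b->m);
}

static void *completer(void *arg)
{
	end_read(arg);
	return NULL;
}

int main(void)
{
	struct bh b = { PTHREAD_MUTEX_INITIALIZER,
			PTHREAD_COND_INITIALIZER, 1, 1 };
	pthread_t t;

	pthread_create(&t, NULL, completer, &b);
	pthread_mutex_lock(&b.m);
	while (b.locked)                   /* models wait_on_buffer() */
		pthread_cond_wait(&b.c, &b.m);
	pthread_mutex_unlock(&b.m);
	pthread_join(t, NULL);             /* b may now leave scope safely */
	printf("refcount = %d\n", b.refcount);
	return 0;
}
```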
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Ye Bin <yebin10(a)huawei.com>
Link: https://lore.kernel.org/20250811141830.343774-1-yebin@huaweicloud.com
Reviewed-by: Matthew Wilcox (Oracle) <willy(a)infradead.org>
Signed-off-by: Christian Brauner <brauner(a)kernel.org>
Signed-off-by: Yongjian Sun <sunyongjian1(a)huawei.com>
---
fs/buffer.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index fb8cb9993fc1..7381f0521971 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -156,8 +156,8 @@ static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
*/
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
- __end_buffer_read_notouch(bh, uptodate);
put_bh(bh);
+ __end_buffer_read_notouch(bh, uptodate);
}
EXPORT_SYMBOL(end_buffer_read_sync);
--
2.39.2
[PATCH openEuler-1.0-LTS] [Huawei] sched/rt: Skip currently executing CPU in rto_next_cpu()
by Chen Jinghuang 09 Jan '26
Offering: HULK
hulk inclusion
category: bugfix
bugzilla: IDC4VN
--------------------------------
CPU0 becomes overloaded when hosting a CPU-bound RT task, a non-CPU-bound
RT task, and a CFS task stuck in kernel space. When other CPUs switch from
RT to non-RT tasks, RT load balancing (LB) is triggered; with
HAVE_RT_PUSH_IPI enabled, they send IPIs to CPU0 to drive the execution
of rto_push_irq_work_func. During push_rt_task on CPU0,
if next_task->prio < rq->donor->prio, resched_curr() sets NEED_RESCHED
and after the push operation completes, CPU0 calls rto_next_cpu().
Since only CPU0 is overloaded in this scenario, rto_next_cpu() should
ideally return -1 (no further IPI needed).
However, multiple CPUs invoking tell_cpu_to_push() during LB increment
rd->rto_loop_next. Even when rd->rto_cpu is set to -1, the mismatch between
rd->rto_loop and rd->rto_loop_next forces rto_next_cpu() to restart its
search from -1. With CPU0 remaining overloaded (satisfying rt_nr_migratory
&& rt_nr_total > 1), it gets reselected, causing CPU0 to queue irq_work to
itself and send self-IPIs repeatedly. As long as CPU0 stays overloaded and
other CPUs run pull_rt_tasks(), it falls into an infinite self-IPI loop,
which triggers a CPU hardlockup due to continuous self-interrupts.
The triggering scenario is as follows:
cpu0                           cpu1                 cpu2
                               pull_rt_task
                               tell_cpu_to_push
  <--------------------------- irq_work_queue_on
rto_push_irq_work_func
  push_rt_task
    resched_curr(rq)                                pull_rt_task
  rto_next_cpu                                      tell_cpu_to_push
  <------------------------------------------------ atomic_inc(rto_loop_next)
    rd->rto_loop != next
    rto_next_cpu
  irq_work_queue_on
rto_push_irq_work_func
Fix redundant self-IPI by filtering the initiating CPU in rto_next_cpu().
This solution has been verified to effectively eliminate spurious self-IPIs
and prevent CPU hardlockup scenarios.
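The shape of the fix in isolation: when scanning for the next IPI target, skip the scanning CPU so it can never queue irq_work to itself. A standalone model with the cpumask reduced to an array (values are illustrative):
```c
/* Model of the rto_next_cpu() change: when scanning the overloaded-CPU
 * mask for the next IPI target, skip the CPU doing the scanning so it
 * never queues irq_work to itself. Mask handling is simplified.
 */
#include <stdio.h>

#define NR_CPUS 4

static int mask[NR_CPUS] = { 1, 0, 0, 0 };  /* only CPU0 overloaded */

static int next_cpu(int prev)    /* models cpumask_next() */
{
	for (int cpu = prev + 1; cpu < NR_CPUS; cpu++)
		if (mask[cpu])
			return cpu;
	return NR_CPUS;          /* models nr_cpu_ids: none left */
}

static int rto_next_cpu(int this_cpu)
{
	int cpu = -1;

	do {
		cpu = next_cpu(cpu);  /* do not send an IPI to self */
	} while (cpu == this_cpu);
	return cpu < NR_CPUS ? cpu : -1;
}

int main(void)
{
	/* CPU0 is overloaded and scanning: with the skip it finds no
	 * target (-1) instead of endlessly re-selecting itself. */
	printf("next = %d\n", rto_next_cpu(0));
	return 0;
}
```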
Fixes: 4bdced5c9a29 ("sched/rt: Simplify the IPI based RT balancing logic")
Suggested-by: Steven Rostedt (Google) <rostedt(a)goodmis.org>
Signed-off-by: Chen Jinghuang <chenjinghuang2(a)huawei.com>
Reviewed-by: Steven Rostedt (Google) <rostedt(a)goodmis.org>
---
kernel/sched/rt.c | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 7f512203d9e7..4f37f97cb908 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1952,6 +1952,7 @@ static void push_rt_tasks(struct rq *rq)
*/
static int rto_next_cpu(struct root_domain *rd)
{
+ int this_cpu = smp_processor_id();
int next;
int cpu;
@@ -1970,10 +1971,13 @@ static int rto_next_cpu(struct root_domain *rd)
*/
for (;;) {
- /* When rto_cpu is -1 this acts like cpumask_first() */
- cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
+ do {
+ /* When rto_cpu is -1 this acts like cpumask_first() */
+ cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
- rd->rto_cpu = cpu;
+ rd->rto_cpu = cpu;
+ /* Do not send IPI to self */
+ } while (cpu == this_cpu);
if (cpu < nr_cpu_ids)
return cpu;
--
2.34.1
hulk inclusion
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/8330
----------------------------------------------------------------------
A bitmap inconsistency issue was observed during stress tests under
mixed huge-page workloads. Ext4 reported multiple e4b bitmap check
failures like:
ext4_mb_complex_scan_group:2508: group 350, 8179 free clusters as
per group info. But got 8192 blocks
Analysis and experimentation confirmed that the issue is caused by a
race condition between page migration and bitmap modification. Although
this timing window is extremely narrow, it is still hit in practice:
folio_lock                        ext4_mb_load_buddy
__migrate_folio
  check ref count
  folio_mc_copy                     __filemap_get_folio
                                      folio_try_get(folio)
                                    ......
                                    mb_mark_used
                                    ext4_mb_unload_buddy
  __folio_migrate_mapping
    folio_ref_freeze
folio_unlock
The root cause of this issue is that the fast path of load_buddy only
increments the folio's reference count, which is insufficient to prevent
concurrent folio migration. We observed that the folio migration process
acquires the folio lock. Therefore, we can determine whether to take the
fast path in load_buddy by checking the lock status. If the folio is
locked, we opt for the slow path (which acquires the lock) to close this
concurrency window.
Additionally, this change addresses the following issue:
When the DOUBLE_CHECK macro is enabled to inspect bitmap-related
issues, the following error may be triggered:
corruption in group 324 at byte 784(6272): f in copy != ff on
disk/prealloc
Analysis reveals that this is a false positive. There is a specific race
window where the bitmap and the group descriptor become momentarily
inconsistent, leading to this error report:
ext4_mb_load_buddy                  ext4_mb_load_buddy
  __filemap_get_folio(create|lock)
  folio_lock
  ext4_mb_init_cache
    folio_mark_uptodate
                                      __filemap_get_folio(no lock)
                                      ......
                                      mb_mark_used
                                        mb_mark_used_double
    mb_cmp_bitmaps
                                          mb_set_bits(e4b->bd_bitmap)
  folio_unlock
The original logic assumed that since mb_cmp_bitmaps is called when the
bitmap is newly loaded from disk, the folio lock would be sufficient to
prevent concurrent access. However, this overlooks a specific race
condition: if another process attempts to load the buddy and finds the
folio already uptodate, it will immediately begin using it without
holding the folio lock.
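A compact model of the new fast-path test (pthreads trylock standing in for PageLocked(); names illustrative): the cached object is used lock-free only when it is both uptodate and unlocked, and a held lock routes the caller onto the slow path that serializes against the holder.
```c
/* Model of the load_buddy fast-path test: the cached page is only used
 * lock-free when it is both uptodate and unlocked; a held lock implies
 * a migration (or initialization) in flight, so fall back to the slow
 * path that waits for the lock. Simplified, illustrative names.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct page {
	pthread_mutex_t lock;
	bool uptodate;
};

static bool page_locked(struct page *p)      /* models PageLocked() */
{
	if (pthread_mutex_trylock(&p->lock))
		return true;                 /* someone holds it */
	pthread_mutex_unlock(&p->lock);
	return false;
}

static const char *load_buddy(struct page *p)
{
	if (!p->uptodate || page_locked(p)) {
		/* slow path: serialize against the lock holder */
		pthread_mutex_lock(&p->lock);
		p->uptodate = true;          /* (re)initialize if needed */
		pthread_mutex_unlock(&p->lock);
		return "slow path";
	}
	return "fast path";                  /* safe lock-free use */
}

int main(void)
{
	struct page p = { PTHREAD_MUTEX_INITIALIZER, true };

	printf("%s\n", load_buddy(&p));      /* fast path */
	pthread_mutex_lock(&p.lock);         /* simulate a migration */
	/* a second caller would now see page_locked() and go slow; we
	 * just report the check here to keep the demo single-threaded */
	printf("locked now? %d\n", page_locked(&p));
	pthread_mutex_unlock(&p.lock);
	return 0;
}
```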
Fixes: 060913999d7a ("mm: migrate: support poisoned recover from migrate folio")
Signed-off-by: Yongjian Sun <sunyongjian1(a)huawei.com>
---
fs/ext4/mballoc.c | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index a196903ed109..ba5a8f2c7bff 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1697,16 +1697,17 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
/* we could use find_or_create_page(), but it locks page
* what we'd like to avoid in fast path ... */
page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
- if (page == NULL || !PageUptodate(page)) {
+ if (page == NULL || !PageUptodate(page) || PageLocked(page)) {
+ /*
+ * PageLocked is employed to detect ongoing page
+ * migrations, since concurrent migrations can lead to
+ * bitmap inconsistency. And if we are not uptodate that
+ * implies somebody just created the page but is yet to
+ * initialize it. We can drop the page reference and
+ * try to get the page with lock in both cases to avoid
+ * concurrency.
+ */
if (page)
- /*
- * drop the page reference and try
- * to get the page with lock. If we
- * are not uptodate that implies
- * somebody just created the page but
- * is yet to initialize the same. So
- * wait for it to initialize.
- */
put_page(page);
page = find_or_create_page(inode->i_mapping, pnum, gfp);
if (page) {
@@ -1747,7 +1748,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
poff = block % blocks_per_page;
page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
- if (page == NULL || !PageUptodate(page)) {
+ if (page == NULL || !PageUptodate(page) || PageLocked(page)) {
if (page)
put_page(page);
page = find_or_create_page(inode->i_mapping, pnum, gfp);
--
2.39.2
09 Jan '26
From: Namjae Jeon <linkinjeon(a)kernel.org>
mainline inclusion
from mainline-v6.18-rc1
commit c20988c21751ef67df4191e262675e231610e9ab
category: bugfix
bugzilla: https://gitcode.com/openeuler/kernel/issues/8348
Reference: https://web.git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/comm…
-------------------------------
cifs.ko can request a copy of an overlapping range within the same file.
ksmbd uses vfs_copy_file_range() for this, but vfs_copy_file_range() does
not allow overlapping copies within the same file.
This patch uses do_splice_direct() when the source and destination ranges
overlap.
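The overlap test the patch adds is plain interval arithmetic: two ranges of the same length overlap exactly when each starts before the other ends. A standalone version with a couple of checks:
```c
/* Standalone version of the overlap test: [src_off, src_off+len) and
 * [dst_off, dst_off+len) overlap exactly when each range starts before
 * the other one ends.
 */
#include <stdbool.h>
#include <stdio.h>

static bool ranges_overlap(unsigned long long src_off,
			   unsigned long long dst_off,
			   unsigned long long len)
{
	return dst_off + len > src_off && dst_off < src_off + len;
}

int main(void)
{
	printf("%d\n", ranges_overlap(0, 512, 1024));   /* 1: overlaps */
	printf("%d\n", ranges_overlap(0, 4096, 1024));  /* 0: disjoint */
	return 0;
}
```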
Signed-off-by: Namjae Jeon <linkinjeon(a)kernel.org>
Signed-off-by: Steve French <stfrench(a)microsoft.com>
Signed-off-by: Wang Zhaolong <wangzhaolong(a)huaweicloud.com>
---
fs/smb/server/vfs.c | 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index fa5b7e63eb83..26f84021b2b5 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -17,10 +17,11 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched/xacct.h>
#include <linux/crc32c.h>
#include <linux/namei.h>
+#include <linux/splice.h>
#include "glob.h"
#include "oplock.h"
#include "connection.h"
#include "vfs.h"
@@ -1830,12 +1831,23 @@ int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work,
len = le32_to_cpu(chunks[i].Length);
if (src_off + len > src_file_size)
return -E2BIG;
- ret = vfs_copy_file_range(src_fp->filp, src_off,
- dst_fp->filp, dst_off, len, 0);
+ /*
+ * vfs_copy_file_range does not allow overlapped copying
+ * within the same file.
+ */
+ if (file_inode(src_fp->filp) == file_inode(dst_fp->filp) &&
+ dst_off + len > src_off &&
+ dst_off < src_off + len)
+ ret = do_splice_direct(src_fp->filp, &src_off,
+ dst_fp->filp, &dst_off,
+ min_t(size_t, len, MAX_RW_COUNT), 0);
+ else
+ ret = vfs_copy_file_range(src_fp->filp, src_off,
+ dst_fp->filp, dst_off, len, 0);
if (ret == -EOPNOTSUPP || ret == -EXDEV)
ret = vfs_copy_file_range(src_fp->filp, src_off,
dst_fp->filp, dst_off, len,
COPY_FILE_SPLICE);
if (ret < 0)
--
2.34.3
[PATCH OLK-6.6] kernfs: Don't re-lock kernfs_root::kernfs_rwsem in kernfs_fop_readdir().
by Zizhi Wo 09 Jan '26
From: Sebastian Andrzej Siewior <bigeasy(a)linutronix.de>
mainline inclusion
from mainline-v6.15-rc1
commit 9aab10a0249eab4ec77c6a5e4f66442610c12a09
category: bugfix
bugzilla: https://atomgit.com/openeuler/kernel/issues/8346
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
The readdir operation iterates over all entries and invokes dir_emit()
for every entry, passing kernfs_node::name as argument.
Since the name can change and become invalid, the
kernfs_root::kernfs_rwsem lock should not be dropped, so that renames
are excluded during the operation.
The lock drop around dir_emit() was originally introduced in commit
1e5289c97bba2 ("sysfs: Cache the last sysfs_dirent to improve readdir scalability v2")
to avoid holding a global lock during a page fault. The lock drop has
been wrong since renames became supported, and keeping the lock is no
longer a big burden since it is no longer global.
Don't drop and re-acquire kernfs_root::kernfs_rwsem while copying the
name to the userspace buffer.
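A small rwlock model of the rule the patch enforces (illustrative names; pthreads instead of the kernel rwsem): the read lock is held across the whole emit, so a rename under the write lock can never invalidate the name mid-copy.
```c
/* Model of the readdir rule: keep the rwsem read-held for the whole
 * dir_emit() call, because the emitted name points into the node and
 * a rename under the write lock could free or change it.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_rwlock_t rwsem = PTHREAD_RWLOCK_INITIALIZER;
static char name[32] = "old_name";

static void readdir_emit(void)
{
	char buf[32];

	pthread_rwlock_rdlock(&rwsem);
	/* dir_emit(): copy while the name cannot change under us */
	strcpy(buf, name);
	pthread_rwlock_unlock(&rwsem);
	printf("emitted: %s\n", buf);
}

static void *rename_node(void *arg)
{
	pthread_rwlock_wrlock(&rwsem);
	strcpy(name, "new_name");   /* excluded while any reader emits */
	pthread_rwlock_unlock(&rwsem);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, rename_node, NULL);
	readdir_emit();             /* sees either old or new, never torn */
	pthread_join(t, NULL);
	return 0;
}
```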
Acked-by: Tejun Heo <tj(a)kernel.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy(a)linutronix.de>
Link: https://lore.kernel.org/r/20250213145023.2820193-5-bigeasy@linutronix.de
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Zizhi Wo <wozizhi(a)huawei.com>
---
fs/kernfs/dir.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 09328586d60a..521e2b09ed1b 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -1869,10 +1869,10 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
file->private_data = pos;
kernfs_get(pos);
- up_read(&root->kernfs_rwsem);
- if (!dir_emit(ctx, name, len, ino, type))
+ if (!dir_emit(ctx, name, len, ino, type)) {
+ up_read(&root->kernfs_rwsem);
return 0;
- down_read(&root->kernfs_rwsem);
+ }
}
up_read(&root->kernfs_rwsem);
file->private_data = NULL;
--
2.39.2
Fix UAF issue.
Imran Khan (1):
kernfs: remove redundant kernfs_rwsem declaration.
Minchan Kim (3):
kernfs: switch global kernfs_rwsem lock to per-fs lock
kernfs: prevent early freeing of root node
kernfs: fix NULL dereferencing in kernfs_remove
Sebastian Andrzej Siewior (1):
kernfs: Don't re-lock kernfs_root::kernfs_rwsem in
kernfs_fop_readdir().
Yushan Zhou (1):
kernfs: fix potential NULL dereference in __kernfs_remove
Zizhi Wo (1):
kernfs: Fix kabi broken in struct kernfs_root
fs/kernfs/dir.c | 132 +++++++++++++++++++++++-------------
fs/kernfs/file.c | 6 +-
fs/kernfs/inode.c | 22 +++---
fs/kernfs/kernfs-internal.h | 1 -
fs/kernfs/mount.c | 15 ++--
fs/kernfs/symlink.c | 5 +-
include/linux/kernfs.h | 2 +
7 files changed, 115 insertions(+), 68 deletions(-)
--
2.39.2
Fix CVE-2025-40242.
Andreas Gruenbacher (2):
gfs2: Add proper lockspace locking
gfs2: Fix unlikely race in gdlm_put_lock
fs/gfs2/file.c | 23 ++++++++++++-------
fs/gfs2/glock.c | 5 ++---
fs/gfs2/incore.h | 2 ++
fs/gfs2/lock_dlm.c | 56 ++++++++++++++++++++++++++++++++++------------
4 files changed, 61 insertions(+), 25 deletions(-)
--
2.39.2
08 Jan '26
From: Eric Sandeen <sandeen(a)redhat.com>
stable inclusion
from stable-v6.6.103
commit d3cc7476b89fb45b7e00874f4f56f6b928467c60
category: bugfix
bugzilla: https://atomgit.com/src-openeuler/kernel/issues/8667
CVE: CVE-2025-39835
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=…
--------------------------------
commit ae668cd567a6a7622bc813ee0bb61c42bed61ba7 upstream.
ENODATA (aka ENOATTR) has a very specific meaning in the xfs xattr code;
namely, that the requested attribute name could not be found.
However, a medium error from disk may also return ENODATA. At best,
this medium error may escape to userspace as "attribute not found"
when in fact it's an IO (disk) error.
At worst, we may oops in xfs_attr_leaf_get() when we do:
error = xfs_attr_leaf_hasname(args, &bp);
if (error == -ENOATTR) {
xfs_trans_brelse(args->trans, bp);
return error;
}
because an ENODATA/ENOATTR error from disk leaves us with a null bp,
and the xfs_trans_brelse will then null-deref it.
As discussed on the list, we really need to modify the lower level
IO functions to trap all disk errors and ensure that we don't let
unique errors like this leak up into higher xfs functions - many
like this should be remapped to EIO.
However, this patch directly addresses a reported bug in the xattr
code, and should be safe to backport to stable kernels. A larger-scope
patch to handle more unique errors at lower levels can follow later.
(Note, prior to 07120f1abdff we did not oops, but we did return the
wrong error code to userspace.)
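The remap itself is a one-line boundary rule: translate the ambiguous errno where the raw block read returns, before it can reach a layer that assigns it a different meaning. A standalone sketch (illustrative stand-ins for the buffer-read path):
```c
/* Model of the disambiguation: at the boundary where a raw block read
 * returns, remap ENODATA (a media error here) to EIO so it cannot be
 * confused with the xattr layer's "attribute not found" meaning.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int read_block(void)          /* stand-in for the buffer read */
{
	return -ENODATA;             /* simulated disk medium error */
}

static int attr_get(void)
{
	int error = read_block();

	if (error == -ENODATA)       /* the patch's remap */
		error = -EIO;
	return error;
}

int main(void)
{
	int err = attr_get();

	printf("attr_get: %d (%s)\n", err, strerror(-err));
	return 0;
}
```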
Signed-off-by: Eric Sandeen <sandeen(a)redhat.com>
Fixes: 07120f1abdff ("xfs: Add xfs_has_attr and subroutines")
Cc: stable(a)vger.kernel.org # v5.9+
Reviewed-by: Darrick J. Wong <djwong(a)kernel.org>
Signed-off-by: Carlos Maiolino <cem(a)kernel.org>
[ Adjust context: removed metadata health tracking calls ]
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Long Li <leo.lilong(a)huawei.com>
---
fs/xfs/libxfs/xfs_attr_remote.c | 7 +++++++
fs/xfs/libxfs/xfs_da_btree.c | 6 ++++++
2 files changed, 13 insertions(+)
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index 54de405cbab5..4d369876487b 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -418,6 +418,13 @@ xfs_attr_rmtval_get(
dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
error = xfs_buf_read(mp->m_ddev_targp, dblkno, dblkcnt,
0, &bp, &xfs_attr3_rmt_buf_ops);
+ /*
+ * ENODATA from disk implies a disk medium failure;
+ * ENODATA for xattrs means attribute not found, so
+ * disambiguate that here.
+ */
+ if (error == -ENODATA)
+ error = -EIO;
if (error)
return error;
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 6b5abbcb61c6..6daf13898f33 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -2676,6 +2676,12 @@ xfs_da_read_buf(
error = xfs_trans_read_buf_map(mp, tp, mp->m_ddev_targp, mapp, nmap, 0,
&bp, ops);
+ /*
+ * ENODATA from disk implies a disk medium failure; ENODATA for
+ * xattrs means attribute not found, so disambiguate that here.
+ */
+ if (error == -ENODATA && whichfork == XFS_ATTR_FORK)
+ error = -EIO;
if (error)
goto out_free;
--
2.39.2
From: Hao Dongdong <doubled(a)leap-io-kernel.com>
LeapIO inclusion
category: feature
bugzilla: https://atomgit.com/openeuler/kernel/issues/8340
------------------------------------------
The LeapRAID driver provides support for LeapRAID PCIe RAID controllers,
enabling communication between the host operating system, firmware, and
hardware for efficient storage management.
The main source files are organized as follows:
leapraid_os.c:
Implements the scsi_host_template functions, PCIe device probing, and
initialization routines, integrating the driver with the Linux SCSI
subsystem.
leapraid_func.c:
Provides the core functional routines that handle low-level interactions
with the controller firmware and hardware, including interrupt handling,
topology management, reset sequence processing, and other related
operations.
leapraid_app.c:
Implements the ioctl interface, providing user-space tools access to device
management and diagnostic operations.
leapraid_transport.c:
Interacts with the Linux SCSI transport layer to add SAS phys and ports.
leapraid_func.h:
Declares common data structures, constants, and function prototypes shared
across the driver.
leapraid.h:
Provides global constants, register mappings, and interface definitions
that facilitate communication between the driver and the controller
firmware.
The leapraid_probe function is called when the driver detects a supported
LeapRAID PCIe device. It allocates and initializes the Scsi_Host structure,
configures hardware and firmware interfaces, and registers the host adapter
with the Linux SCSI mid-layer.
After registration, the driver invokes scsi_scan_host() to initiate device
discovery. The firmware then reports discovered logical and physical
devices to the host through interrupt-driven events and synchronizes their
operational states.
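For readers unfamiliar with the flow, here is a schematic of that probe sequence using only the generic PCI/SCSI mid-layer calls; everything LeapRAID-specific (the real host template, device IDs, adapter setup, interrupt wiring) is elided, and every name below is a placeholder, not the driver's code:
```c
// SPDX-License-Identifier: GPL-2.0
/* Schematic of a PCI SCSI host probe: enable the function, allocate
 * and register the Scsi_Host, then kick off scanning. Placeholder
 * names and IDs throughout.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>

static const struct scsi_host_template demo_sht = {
	.module  = THIS_MODULE,
	.name    = "demo_raid",
	.this_id = -1,
};

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *shost;
	int rc;

	rc = pci_enable_device(pdev);           /* bring up the function */
	if (rc)
		return rc;

	shost = scsi_host_alloc(&demo_sht, 0);  /* per-adapter private size */
	if (!shost) {
		rc = -ENOMEM;
		goto out_disable;
	}

	rc = scsi_add_host(shost, &pdev->dev);  /* register with mid-layer */
	if (rc)
		goto out_put;

	pci_set_drvdata(pdev, shost);
	scsi_scan_host(shost);                  /* start device discovery */
	return 0;

out_put:
	scsi_host_put(shost);
out_disable:
	pci_disable_device(pdev);
	return rc;
}

static void demo_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);

	scsi_remove_host(shost);
	scsi_host_put(shost);
	pci_disable_device(pdev);
}

static const struct pci_device_id demo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },         /* placeholder IDs */
	{ }
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static struct pci_driver demo_driver = {
	.name     = "demo_raid",
	.id_table = demo_ids,
	.probe    = demo_probe,
	.remove   = demo_remove,
};
module_pci_driver(demo_driver);

MODULE_DESCRIPTION("Schematic SCSI host probe example");
MODULE_LICENSE("GPL");
```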
leapraid_adapter is the core data structure that encapsulates all resources
and runtime state information maintained during driver operation, described
as follows:
/**
* struct leapraid_adapter - Main LeapRaid adapter structure
* @list: List head for adapter management
* @shost: SCSI host structure
* @pdev: PCI device structure
* @iomem_base: I/O memory mapped base address
* @rep_msg_host_idx: Host index for reply messages
* @mask_int: Interrupt masking flag
* @timestamp_sync_cnt: Timestamp synchronization counter
* @adapter_attr: Adapter attributes
* @mem_desc: Memory descriptor
* @driver_cmds: Driver commands
* @dynamic_task_desc: Dynamic task descriptor
* @fw_evt_s: Firmware event structure
* @notification_desc: Notification descriptor
* @reset_desc: Reset descriptor
* @scan_dev_desc: Device scan descriptor
* @access_ctrl: Access control
* @fw_log_desc: Firmware log descriptor
* @dev_topo: Device topology
* @boot_devs: Boot devices
* @smart_poll_desc: SMART polling descriptor
*/
struct leapraid_adapter {
struct list_head list;
struct Scsi_Host *shost;
struct pci_dev *pdev;
struct leapraid_reg_base __iomem *iomem_base;
u32 rep_msg_host_idx;
bool mask_int;
u32 timestamp_sync_cnt;
struct leapraid_adapter_attr adapter_attr;
struct leapraid_mem_desc mem_desc;
struct leapraid_driver_cmds driver_cmds;
struct leapraid_dynamic_task_desc dynamic_task_desc;
struct leapraid_fw_evt_struct fw_evt_s;
struct leapraid_notification_desc notification_desc;
struct leapraid_reset_desc reset_desc;
struct leapraid_scan_dev_desc scan_dev_desc;
struct leapraid_access_ctrl access_ctrl;
struct leapraid_fw_log_desc fw_log_desc;
struct leapraid_dev_topo dev_topo;
struct leapraid_boot_devs boot_devs;
struct leapraid_smart_poll_desc smart_poll_desc;
};
Signed-off-by: Hao Dongdong <doubled(a)leap-io-kernel.com>
---
arch/arm64/configs/openeuler_defconfig | 1 +
arch/x86/configs/openeuler_defconfig | 1 +
drivers/scsi/Kconfig | 1 +
drivers/scsi/Makefile | 1 +
drivers/scsi/leapraid/Kconfig | 14 +
drivers/scsi/leapraid/Makefile | 10 +
drivers/scsi/leapraid/leapraid.h | 2070 +++++
drivers/scsi/leapraid/leapraid_app.c | 675 ++
drivers/scsi/leapraid/leapraid_func.c | 8264 ++++++++++++++++++++
drivers/scsi/leapraid/leapraid_func.h | 1423 ++++
drivers/scsi/leapraid/leapraid_os.c | 2271 ++++++
drivers/scsi/leapraid/leapraid_transport.c | 1256 +++
12 files changed, 15987 insertions(+)
create mode 100644 drivers/scsi/leapraid/Kconfig
create mode 100644 drivers/scsi/leapraid/Makefile
create mode 100644 drivers/scsi/leapraid/leapraid.h
create mode 100644 drivers/scsi/leapraid/leapraid_app.c
create mode 100644 drivers/scsi/leapraid/leapraid_func.c
create mode 100644 drivers/scsi/leapraid/leapraid_func.h
create mode 100644 drivers/scsi/leapraid/leapraid_os.c
create mode 100644 drivers/scsi/leapraid/leapraid_transport.c
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index d190cc0cb030..12a48e4f54c3 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -2465,6 +2465,7 @@ CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE=y
# CONFIG_MEGARAID_LEGACY is not set
CONFIG_MEGARAID_SAS=m
CONFIG_SCSI_3SNIC_SSSRAID=m
+CONFIG_SCSI_LEAPRAID=m
CONFIG_SCSI_MPT3SAS=m
CONFIG_SCSI_MPT2SAS_MAX_SGE=128
CONFIG_SCSI_MPT3SAS_MAX_SGE=128
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index fdd8d59bad01..2ef8a9d6dcbb 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -2393,6 +2393,7 @@ CONFIG_SCSI_AACRAID=m
# CONFIG_MEGARAID_LEGACY is not set
CONFIG_MEGARAID_SAS=m
CONFIG_SCSI_3SNIC_SSSRAID=m
+CONFIG_SCSI_LEAPRAID=m
CONFIG_SCSI_MPT3SAS=m
CONFIG_SCSI_MPT2SAS_MAX_SGE=128
CONFIG_SCSI_MPT3SAS_MAX_SGE=128
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index edec9aa0993e..528a62318a48 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -432,6 +432,7 @@ source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
source "drivers/scsi/aic94xx/Kconfig"
source "drivers/scsi/hisi_sas/Kconfig"
source "drivers/scsi/mvsas/Kconfig"
+source "drivers/scsi/leapraid/Kconfig"
config SCSI_MVUMI
tristate "Marvell UMI driver"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index b27758db0c02..04864ff0db84 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -156,6 +156,7 @@ obj-$(CONFIG_CHR_DEV_SCH) += ch.o
obj-$(CONFIG_SCSI_ENCLOSURE) += ses.o
obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas/
+obj-$(CONFIG_SCSI_LEAPRAID) += leapraid/
# This goes last, so that "real" scsi devices probe earlier
obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
diff --git a/drivers/scsi/leapraid/Kconfig b/drivers/scsi/leapraid/Kconfig
new file mode 100644
index 000000000000..b539183b24a7
--- /dev/null
+++ b/drivers/scsi/leapraid/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config SCSI_LEAPRAID
+ tristate "LeapIO RAID Adapter"
+ depends on PCI && SCSI
+ select SCSI_SAS_ATTRS
+ help
+ This driver supports LeapIO PCIe-based Storage
+ and RAID controllers.
+
+ <http://www.leap-io.com>
+
+ To compile this driver as a module, choose M here: the
+ resulting kernel module will be named leapraid.
diff --git a/drivers/scsi/leapraid/Makefile b/drivers/scsi/leapraid/Makefile
new file mode 100644
index 000000000000..bdafc036cd00
--- /dev/null
+++ b/drivers/scsi/leapraid/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the LEAPRAID drivers.
+#
+
+obj-$(CONFIG_SCSI_LEAPRAID) += leapraid.o
+leapraid-objs += leapraid_func.o \
+ leapraid_os.o \
+ leapraid_transport.o \
+ leapraid_app.o
diff --git a/drivers/scsi/leapraid/leapraid.h b/drivers/scsi/leapraid/leapraid.h
new file mode 100644
index 000000000000..842810d41542
--- /dev/null
+++ b/drivers/scsi/leapraid/leapraid.h
@@ -0,0 +1,2070 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 LeapIO Tech Inc.
+ *
+ * LeapRAID Storage and RAID Controller driver.
+ */
+#ifndef LEAPRAID_H
+#define LEAPRAID_H
+
+/* doorbell register definitions */
+#define LEAPRAID_DB_RESET 0x00000000
+#define LEAPRAID_DB_READY 0x10000000
+#define LEAPRAID_DB_OPERATIONAL 0x20000000
+#define LEAPRAID_DB_FAULT 0x40000000
+
+#define LEAPRAID_DB_MASK 0xF0000000
+
+#define LEAPRAID_DB_OVER_TEMPERATURE 0x2810
+
+#define LEAPRAID_DB_USED 0x08000000
+#define LEAPRAID_DB_DATA_MASK 0x0000FFFF
+#define LEAPRAID_DB_FUNC_SHIFT 24
+#define LEAPRAID_DB_ADD_DWORDS_SHIFT 16
+
+/* maximum number of retries waiting for doorbell to become ready */
+#define LEAPRAID_DB_RETRY_COUNT_MAX 10
+/* maximum number of retries waiting for doorbell to become operational */
+#define LEAPRAID_DB_WAIT_OPERATIONAL 10
+/* sleep interval (in seconds) between doorbell polls */
+#define LEAPRAID_DB_POLL_INTERVAL_S 1
+
+/* maximum number of retries waiting for host to end recovery */
+#define LEAPRAID_WAIT_SHOST_RECOVERY 30
+
+/* diagnostic register definitions */
+#define LEAPRAID_DIAG_WRITE_ENABLE 0x00000080
+#define LEAPRAID_DIAG_RESET 0x00000004
+#define LEAPRAID_DIAG_HOLD_ADAPTER_RESET 0x00000002
+
+/* interrupt status register definitions */
+#define LEAPRAID_HOST2ADAPTER_DB_STATUS 0x80000000
+#define LEAPRAID_ADAPTER2HOST_DB_STATUS 0x00000001
+
+/* the number of debug register */
+#define LEAPRAID_DEBUGLOG_SZ_MAX 16
+
+/* reply post host register defines */
+#define REP_POST_HOST_IDX_REG_CNT 16
+#define LEAPRAID_RPHI_MSIX_IDX_SHIFT 24
+
+/* vphy flags */
+#define LEAPRAID_SAS_PHYINFO_VPHY 0x00001000
+
+/* linux driver init fw */
+#define LEAPRAID_WHOINIT_LINUX_DRIVER 0x04
+
+/* rdpq array mode */
+#define LEAPRAID_ADAPTER_INIT_MSGFLG_RDPQ_ARRAY_MODE 0x01
+
+/* request description flags */
+#define LEAPRAID_REQ_DESC_FLG_SCSI_IO 0x00
+#define LEAPRAID_REQ_DESC_FLG_HPR 0x06
+#define LEAPRAID_REQ_DESC_FLG_DFLT_TYPE 0x08
+
+/* reply description flags */
+#define LEAPRAID_RPY_DESC_FLG_TYPE_MASK 0x0F
+#define LEAPRAID_RPY_DESC_FLG_SCSI_IO_SUCCESS 0x00
+#define LEAPRAID_RPY_DESC_FLG_ADDRESS_REPLY 0x01
+#define LEAPRAID_RPY_DESC_FLG_FP_SCSI_IO_SUCCESS 0x06
+#define LEAPRAID_RPY_DESC_FLG_UNUSED 0x0F
+
+/* MPI functions */
+#define LEAPRAID_FUNC_SCSIIO_REQ 0x00
+#define LEAPRAID_FUNC_SCSI_TMF 0x01
+#define LEAPRAID_FUNC_ADAPTER_INIT 0x02
+#define LEAPRAID_FUNC_GET_ADAPTER_FEATURES 0x03
+#define LEAPRAID_FUNC_CONFIG_OP 0x04
+#define LEAPRAID_FUNC_SCAN_DEV 0x06
+#define LEAPRAID_FUNC_EVENT_NOTIFY 0x07
+#define LEAPRAID_FUNC_FW_DOWNLOAD 0x09
+#define LEAPRAID_FUNC_FW_UPLOAD 0x12
+#define LEAPRAID_FUNC_RAID_ACTION 0x15
+#define LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH 0x16
+#define LEAPRAID_FUNC_SCSI_ENC_PROCESSOR 0x18
+#define LEAPRAID_FUNC_SMP_PASSTHROUGH 0x1A
+#define LEAPRAID_FUNC_SAS_IO_UNIT_CTRL 0x1B
+#define LEAPRAID_FUNC_SATA_PASSTHROUGH 0x1C
+#define LEAPRAID_FUNC_ADAPTER_UNIT_RESET 0x40
+#define LEAPRAID_FUNC_HANDSHAKE 0x42
+#define LEAPRAID_FUNC_LOGBUF_INIT 0x57
+
+/* adapter status values */
+#define LEAPRAID_ADAPTER_STATUS_MASK 0x7FFF
+#define LEAPRAID_ADAPTER_STATUS_SUCCESS 0x0000
+#define LEAPRAID_ADAPTER_STATUS_BUSY 0x0002
+#define LEAPRAID_ADAPTER_STATUS_INTERNAL_ERROR 0x0004
+#define LEAPRAID_ADAPTER_STATUS_INSUFFICIENT_RESOURCES 0x0006
+#define LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_ACTION 0x0020
+#define LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_TYPE 0x0021
+#define LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_PAGE 0x0022
+#define LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_DATA 0x0023
+#define LEAPRAID_ADAPTER_STATUS_CONFIG_NO_DEFAULTS 0x0024
+#define LEAPRAID_ADAPTER_STATUS_CONFIG_CANT_COMMIT 0x0025
+#define LEAPRAID_ADAPTER_STATUS_SCSI_RECOVERED_ERROR 0x0040
+#define LEAPRAID_ADAPTER_STATUS_SCSI_DEVICE_NOT_THERE 0x0043
+#define LEAPRAID_ADAPTER_STATUS_SCSI_DATA_OVERRUN 0x0044
+#define LEAPRAID_ADAPTER_STATUS_SCSI_DATA_UNDERRUN 0x0045
+#define LEAPRAID_ADAPTER_STATUS_SCSI_IO_DATA_ERROR 0x0046
+#define LEAPRAID_ADAPTER_STATUS_SCSI_PROTOCOL_ERROR 0x0047
+#define LEAPRAID_ADAPTER_STATUS_SCSI_TASK_TERMINATED 0x0048
+#define LEAPRAID_ADAPTER_STATUS_SCSI_RESIDUAL_MISMATCH 0x0049
+#define LEAPRAID_ADAPTER_STATUS_SCSI_TASK_MGMT_FAILED 0x004A
+#define LEAPRAID_ADAPTER_STATUS_SCSI_ADAPTER_TERMINATED 0x004B
+#define LEAPRAID_ADAPTER_STATUS_SCSI_EXT_TERMINATED 0x004C
+
+/* sge flags */
+#define LEAPRAID_SGE_FLG_LAST_ONE 0x80
+#define LEAPRAID_SGE_FLG_EOB 0x40
+#define LEAPRAID_SGE_FLG_EOL 0x01
+#define LEAPRAID_SGE_FLG_SHIFT 24
+#define LEAPRAID_SGE_FLG_SIMPLE_ONE 0x10
+#define LEAPRAID_SGE_FLG_SYSTEM_ADDR 0x00
+#define LEAPRAID_SGE_FLG_H2C 0x04
+#define LEAPRAID_SGE_FLG_32 0x00
+#define LEAPRAID_SGE_FLG_64 0x02
+
+#define LEAPRAID_IEEE_SGE_FLG_EOL 0x40
+#define LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE 0x00
+#define LEAPRAID_IEEE_SGE_FLG_CHAIN_ONE 0x80
+#define LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR 0x00
+
+#define LEAPRAID_SGE_OFFSET_SIZE 4
+
+/* page and ext page type */
+#define LEAPRAID_CFG_PT_IO_UNIT 0x00
+#define LEAPRAID_CFG_PT_ADAPTER 0x01
+#define LEAPRAID_CFG_PT_BIOS 0x02
+#define LEAPRAID_CFG_PT_RAID_VOLUME 0x08
+#define LEAPRAID_CFG_PT_RAID_PHYSDISK 0x0A
+#define LEAPRAID_CFG_PT_EXTENDED 0x0F
+#define LEAPRAID_CFG_EXTPT_SAS_IO_UNIT 0x10
+#define LEAPRAID_CFG_EXTPT_SAS_EXP 0x11
+#define LEAPRAID_CFG_EXTPT_SAS_DEV 0x12
+#define LEAPRAID_CFG_EXTPT_SAS_PHY 0x13
+#define LEAPRAID_CFG_EXTPT_ENC 0x15
+#define LEAPRAID_CFG_EXTPT_RAID_CONFIG 0x16
+
+/* config page address */
+#define LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP 0x00000000
+#define LEAPRAID_SAS_ENC_CFG_PGAD_HDL 0x10000000
+#define LEAPRAID_SAS_DEV_CFG_PGAD_HDL 0x20000000
+#define LEAPRAID_SAS_EXP_CFG_PGAD_HDL_PHY_NUM 0x10000000
+#define LEAPRAID_SAS_EXP_CFD_PGAD_HDL 0x20000000
+#define LEAPRAID_SAS_EXP_CFG_PGAD_PHYNUM_SHIFT 16
+#define LEAPRAID_RAID_VOL_CFG_PGAD_HDL 0x10000000
+#define LEAPRAID_SAS_PHY_CFG_PGAD_PHY_NUMBER 0x00000000
+#define LEAPRAID_PHYSDISK_CFG_PGAD_PHYSDISKNUM 0x10000000
+
+/* config page operations */
+#define LEAPRAID_CFG_ACT_PAGE_HEADER 0x00
+#define LEAPRAID_CFG_ACT_PAGE_READ_CUR 0x01
+#define LEAPRAID_CFG_ACT_PAGE_WRITE_CUR 0x02
+
+/* bios pages */
+#define LEAPRAID_CFG_PAGE_NUM_BIOS2 0x2
+#define LEAPRAID_CFG_PAGE_NUM_BIOS3 0x3
+
+/* sas device pages */
+#define LEAPRAID_CFG_PAGE_NUM_DEV0 0x0
+
+/* sas device page 0 flags */
+#define LEAPRAID_SAS_DEV_P0_FLG_FP_CAP 0x2000
+#define LEAPRAID_SAS_DEV_P0_FLG_SATA_SMART 0x0040
+#define LEAPRAID_SAS_DEV_P0_FLG_ENC_LEVEL_VALID 0x0002
+#define LEAPRAID_SAS_DEV_P0_FLG_DEV_PRESENT 0x0001
+
+/* sas IO unit pages */
+#define LEAPRAID_CFG_PAGE_NUM_IOUNIT0 0x0
+#define LEAPRAID_CFG_PAGE_NUM_IOUNIT1 0x1
+
+/* sas expander pages */
+#define LEAPRAID_CFG_PAGE_NUM_EXP0 0x0
+#define LEAPRAID_CFG_PAGE_NUM_EXP1 0x1
+
+/* sas enclosure page */
+#define LEAPRAID_CFG_PAGE_NUM_ENC0 0x0
+
+/* sas phy page */
+#define LEAPRAID_CFG_PAGE_NUM_PHY0 0x0
+
+/* raid volume pages */
+#define LEAPRAID_CFG_PAGE_NUM_VOL0 0x0
+#define LEAPRAID_CFG_PAGE_NUM_VOL1 0x1
+
+/* physical disk page */
+#define LEAPRAID_CFG_PAGE_NUM_PD0 0x0
+
+/* adapter page */
+#define LEAPRAID_CFG_PAGE_NUM_ADAPTER1 0x1
+
+#define LEAPRAID_CFG_UNIT_SIZE 4
+
+/* raid volume type and state */
+#define LEAPRAID_VOL_STATE_MISSING 0x00
+#define LEAPRAID_VOL_STATE_FAILED 0x01
+#define LEAPRAID_VOL_STATE_INITIALIZING 0x02
+#define LEAPRAID_VOL_STATE_ONLINE 0x03
+#define LEAPRAID_VOL_STATE_DEGRADED 0x04
+#define LEAPRAID_VOL_STATE_OPTIMAL 0x05
+#define LEAPRAID_VOL_TYPE_RAID0 0x00
+#define LEAPRAID_VOL_TYPE_RAID1E 0x01
+#define LEAPRAID_VOL_TYPE_RAID1 0x02
+#define LEAPRAID_VOL_TYPE_RAID10 0x05
+#define LEAPRAID_VOL_TYPE_UNKNOWN 0xFF
+
+/* raid volume element flags */
+#define LEAPRAID_RAIDCFG_P0_EFLG_MASK_ELEMENT_TYPE 0x000F
+#define LEAPRAID_RAIDCFG_P0_EFLG_VOL_PHYS_DISK_ELEMENT 0x0001
+#define LEAPRAID_RAIDCFG_P0_EFLG_HOT_SPARE_ELEMENT 0x0002
+#define LEAPRAID_RAIDCFG_P0_EFLG_OCE_ELEMENT 0x0003
+
+/* raid action */
+#define LEAPRAID_RAID_ACT_SYSTEM_SHUTDOWN_INITIATED 0x20
+#define LEAPRAID_RAID_ACT_PHYSDISK_HIDDEN 0x24
+
+/* sas negotiated link rates */
+#define LEAPRAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL 0x0F
+#define LEAPRAID_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE 0x00
+#define LEAPRAID_SAS_NEG_LINK_RATE_PHY_DISABLED 0x01
+#define LEAPRAID_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED 0x02
+#define LEAPRAID_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE 0x03
+#define LEAPRAID_SAS_NEG_LINK_RATE_PORT_SELECTOR 0x04
+#define LEAPRAID_SAS_NEG_LINK_RATE_SMP_RESETTING 0x05
+
+#define LEAPRAID_SAS_NEG_LINK_RATE_1_5 0x08
+#define LEAPRAID_SAS_NEG_LINK_RATE_3_0 0x09
+#define LEAPRAID_SAS_NEG_LINK_RATE_6_0 0x0A
+#define LEAPRAID_SAS_NEG_LINK_RATE_12_0 0x0B
+
+#define LEAPRAID_SAS_PRATE_MIN_RATE_MASK 0x0F
+#define LEAPRAID_SAS_HWRATE_MIN_RATE_MASK 0x0F
+
+/* scsi IO control bits */
+#define LEAPRAID_SCSIIO_CTRL_ADDCDBLEN_SHIFT 26
+#define LEAPRAID_SCSIIO_CTRL_NODATATRANSFER 0x00000000
+#define LEAPRAID_SCSIIO_CTRL_WRITE 0x01000000
+#define LEAPRAID_SCSIIO_CTRL_READ 0x02000000
+#define LEAPRAID_SCSIIO_CTRL_BIDIRECTIONAL 0x03000000
+#define LEAPRAID_SCSIIO_CTRL_SIMPLEQ 0x00000000
+#define LEAPRAID_SCSIIO_CTRL_ORDEREDQ 0x00000200
+#define LEAPRAID_SCSIIO_CTRL_CMDPRI 0x00000800
+
+/* scsi state and status */
+#define LEAPRAID_SCSI_STATUS_BUSY 0x08
+#define LEAPRAID_SCSI_STATUS_RESERVATION_CONFLICT 0x18
+#define LEAPRAID_SCSI_STATUS_TASK_SET_FULL 0x28
+
+#define LEAPRAID_SCSI_STATE_RESPONSE_INFO_VALID 0x10
+#define LEAPRAID_SCSI_STATE_TERMINATED 0x08
+#define LEAPRAID_SCSI_STATE_NO_SCSI_STATUS 0x04
+#define LEAPRAID_SCSI_STATE_AUTOSENSE_FAILED 0x02
+#define LEAPRAID_SCSI_STATE_AUTOSENSE_VALID 0x01
+
+/* scsi task management defines */
+#define LEAPRAID_TM_TASKTYPE_ABORT_TASK 0x01
+#define LEAPRAID_TM_TASKTYPE_ABRT_TASK_SET 0x02
+#define LEAPRAID_TM_TASKTYPE_TARGET_RESET 0x03
+#define LEAPRAID_TM_TASKTYPE_LOGICAL_UNIT_RESET 0x05
+#define LEAPRAID_TM_TASKTYPE_CLEAR_TASK_SET 0x06
+#define LEAPRAID_TM_TASKTYPE_QUERY_TASK 0x07
+#define LEAPRAID_TM_TASKTYPE_CLEAR_ACA 0x08
+#define LEAPRAID_TM_TASKTYPE_QUERY_TASK_SET 0x09
+#define LEAPRAID_TM_TASKTYPE_QUERY_ASYNC_EVENT 0x0A
+
+#define LEAPRAID_TM_MSGFLAGS_LINK_RESET 0x00
+#define LEAPRAID_TM_RSP_INVALID_FRAME 0x02
+#define LEAPRAID_TM_RSP_TM_SUCCEEDED 0x08
+#define LEAPRAID_TM_RSP_IO_QUEUED_ON_ADAPTER 0x80
+
+/* scsi sep request defines */
+#define LEAPRAID_SEP_REQ_ACT_WRITE_STATUS 0x00
+#define LEAPRAID_SEP_REQ_FLG_DEVHDL_ADDRESS 0x00
+#define LEAPRAID_SEP_REQ_FLG_ENCLOSURE_SLOT_ADDRESS 0x01
+#define LEAPRAID_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT 0x00000040
+
+/* the capabilities of the adapter */
+#define LEAPRAID_ADAPTER_FEATURES_CAP_ATOMIC_REQ 0x00080000
+#define LEAPRAID_ADAPTER_FEATURES_CAP_RDPQ_ARRAY_CAPABLE 0x00040000
+#define LEAPRAID_ADAPTER_FEATURES_CAP_EVENT_REPLAY 0x00002000
+#define LEAPRAID_ADAPTER_FEATURES_CAP_INTEGRATED_RAID 0x00001000
+
+/* event code definitions for the firmware */
+#define LEAPRAID_EVT_SAS_DEV_STATUS_CHANGE 0x000F
+#define LEAPRAID_EVT_SAS_DISCOVERY 0x0016
+#define LEAPRAID_EVT_SAS_TOPO_CHANGE_LIST 0x001C
+#define LEAPRAID_EVT_SAS_ENCL_DEV_STATUS_CHANGE 0x001D
+#define LEAPRAID_EVT_IR_CHANGE 0x0020
+#define LEAPRAID_EVT_TURN_ON_PFA_LED 0xFFFC
+#define LEAPRAID_EVT_SCAN_DEV_DONE 0xFFFD
+#define LEAPRAID_EVT_REMOVE_DEAD_DEV 0xFFFF
+#define LEAPRAID_MAX_EVENT_NUM 128
+
+#define LEAPRAID_EVT_SAS_DEV_STAT_RC_INTERNAL_DEV_RESET 0x08
+#define LEAPRAID_EVT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET 0x0E
+
+/* raid configuration change event */
+#define LEAPRAID_EVT_IR_RC_VOLUME_ADD 0x01
+#define LEAPRAID_EVT_IR_RC_VOLUME_DELETE 0x02
+#define LEAPRAID_EVT_IR_RC_PD_HIDDEN_TO_ADD 0x03
+#define LEAPRAID_EVT_IR_RC_PD_UNHIDDEN_TO_DELETE 0x04
+#define LEAPRAID_EVT_IR_RC_PD_CREATED_TO_HIDE 0x05
+#define LEAPRAID_EVT_IR_RC_PD_DELETED_TO_EXPOSE 0x06
+
+/* sas topology change event */
+#define LEAPRAID_EVT_SAS_TOPO_ES_NO_EXPANDER 0x00
+#define LEAPRAID_EVT_SAS_TOPO_ES_ADDED 0x01
+#define LEAPRAID_EVT_SAS_TOPO_ES_NOT_RESPONDING 0x02
+#define LEAPRAID_EVT_SAS_TOPO_ES_RESPONDING 0x03
+
+#define LEAPRAID_EVT_SAS_TOPO_RC_MASK 0x0F
+#define LEAPRAID_EVT_SAS_TOPO_RC_CLEAR_MASK 0xF0
+#define LEAPRAID_EVT_SAS_TOPO_RC_TARG_ADDED 0x01
+#define LEAPRAID_EVT_SAS_TOPO_RC_TARG_NOT_RESPONDING 0x02
+#define LEAPRAID_EVT_SAS_TOPO_RC_PHY_CHANGED 0x03
+
+/* sas discovery event defines */
+#define LEAPRAID_EVT_SAS_DISC_RC_STARTED 0x01
+#define LEAPRAID_EVT_SAS_DISC_RC_COMPLETED 0x02
+
+/* enclosure device status change event */
+#define LEAPRAID_EVT_SAS_ENCL_RC_ADDED 0x01
+#define LEAPRAID_EVT_SAS_ENCL_RC_NOT_RESPONDING 0x02
+
+/* device type and identifiers */
+#define LEAPRAID_DEVTYP_SEP 0x00004000
+#define LEAPRAID_DEVTYP_SSP_TGT 0x00000400
+#define LEAPRAID_DEVTYP_STP_TGT 0x00000200
+#define LEAPRAID_DEVTYP_SMP_TGT 0x00000100
+#define LEAPRAID_DEVTYP_SATA_DEV 0x00000080
+#define LEAPRAID_DEVTYP_SSP_INIT 0x00000040
+#define LEAPRAID_DEVTYP_STP_INIT 0x00000020
+#define LEAPRAID_DEVTYP_SMP_INIT 0x00000010
+#define LEAPRAID_DEVTYP_SATA_HOST 0x00000008
+
+#define LEAPRAID_DEVTYP_MASK_DEV_TYPE 0x00000007
+#define LEAPRAID_DEVTYP_NO_DEV 0x00000000
+#define LEAPRAID_DEVTYP_END_DEV 0x00000001
+#define LEAPRAID_DEVTYP_EDGE_EXPANDER 0x00000002
+#define LEAPRAID_DEVTYP_FANOUT_EXPANDER 0x00000003
+
+/* sas control operation */
+#define LEAPRAID_SAS_OP_PHY_LINK_RESET 0x06
+#define LEAPRAID_SAS_OP_PHY_HARD_RESET 0x07
+#define LEAPRAID_SAS_OP_SET_PARAMETER 0x0F
+
+/* boot device defines */
+#define LEAPRAID_BOOTDEV_FORM_MASK 0x0F
+#define LEAPRAID_BOOTDEV_FORM_NONE 0x00
+#define LEAPRAID_BOOTDEV_FORM_SAS_WWID 0x05
+#define LEAPRAID_BOOTDEV_FORM_ENC_SLOT 0x06
+#define LEAPRAID_BOOTDEV_FORM_DEV_NAME 0x07
+
+/**
+ * struct leapraid_reg_base - Register layout of the LeapRAID controller
+ *
+ * @db: Doorbell register used to signal commands or status to firmware
+ * @ws: Write sequence register for synchronizing doorbell operations
+ * @host_diag: Diagnostic register used for status or debug reporting
+ * @r1: Reserved
+ * @host_int_status: Interrupt status register reporting active interrupts
+ * @host_int_mask: Interrupt mask register enabling or disabling sources
+ * @r2: Reserved
+ * @rep_msg_host_idx: Reply message index for the next available reply slot
+ * @r3: Reserved
+ * @debug_log: DebugLog registers for firmware debug and diagnostic output
+ * @r4: Reserved
+ * @atomic_req_desc_post: Atomic register for single descriptor posting
+ * @adapter_log_buf_pos: Adapter log buffer write position
+ * @host_log_buf_pos: Host log buffer write position
+ * @r5: Reserved
+ * @rep_post_reg_idx: Array of reply post index registers, one per queue.
+ * The number of entries is defined by
+ * REP_POST_HOST_IDX_REG_CNT.
+ */
+struct leapraid_reg_base {
+ __le32 db;
+ __le32 ws;
+ __le32 host_diag;
+ __le32 r1[9];
+ __le32 host_int_status;
+ __le32 host_int_mask;
+ __le32 r2[4];
+ __le32 rep_msg_host_idx;
+ __le32 r3[13];
+ __le32 debug_log[LEAPRAID_DEBUGLOG_SZ_MAX];
+ __le32 r4[2];
+ __le32 atomic_req_desc_post;
+ __le32 adapter_log_buf_pos;
+ __le32 host_log_buf_pos;
+ __le32 r5[142];
+ struct leapraid_rep_post_reg_idx {
+ __le32 idx;
+ __le32 r1;
+ __le32 r2;
+ __le32 r3;
+ } rep_post_reg_idx[REP_POST_HOST_IDX_REG_CNT];
+} __packed;
+
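For review context only (not part of this patch), a minimal sketch of how a
completion path might acknowledge consumed replies through the per-queue index
registers above. The example_* helper is hypothetical, and the encoding (MSI-X
index in bits 31:24 via LEAPRAID_RPHI_MSIX_IDX_SHIFT) is an assumption modeled
on similar fusion-style adapters:

/* Hypothetical sketch: acknowledge replies consumed on one queue. */
static void example_update_rep_post_idx(struct leapraid_reg_base __iomem *regs,
					u8 msix_idx, u32 new_idx)
{
	/* assumed layout: queue index in the low bits, MSI-X index on top */
	writel((msix_idx << LEAPRAID_RPHI_MSIX_IDX_SHIFT) | new_idx,
	       &regs->rep_post_reg_idx[msix_idx % REP_POST_HOST_IDX_REG_CNT].idx);
}
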
+/**
+ * struct leapraid_atomic_req_desc - Atomic request descriptor
+ *
+ * @flg: Descriptor flag indicating the type of request (e.g. SCSI I/O)
+ * @msix_idx: MSI-X vector index used for interrupt routing
+ * @taskid: Unique task identifier associated with this request
+ */
+struct leapraid_atomic_req_desc {
+ u8 flg;
+ u8 msix_idx;
+ __le16 taskid;
+};
+
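For illustration (not part of this patch), a sketch of posting such a
descriptor through the atomic_req_desc_post register declared above; the
example_* helper and the single-writel protocol are assumptions:

/* Hypothetical sketch: post one atomic request descriptor. */
static void example_post_atomic_desc(struct leapraid_reg_base __iomem *regs,
				     u8 msix_idx, u16 taskid)
{
	struct leapraid_atomic_req_desc desc = {
		.flg = LEAPRAID_REQ_DESC_FLG_SCSI_IO,
		.msix_idx = msix_idx,
		.taskid = cpu_to_le16(taskid),
	};
	u32 image;

	/* the 4-byte descriptor is written as a single 32-bit value */
	memcpy(&image, &desc, sizeof(image));
	writel(image, &regs->atomic_req_desc_post);
}
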
+/**
+ * union leapraid_rep_desc_union - Unified reply descriptor format
+ *
+ * @dflt_rep: Default reply descriptor containing basic completion info
+ * @dflt_rep.rep_flg: Reply flag indicating reply type or status
+ * @dflt_rep.msix_idx: MSI-X index for interrupt routing
+ * @dflt_rep.taskid: Task identifier matching the submitted request
+ * @r1: Reserved
+ *
+ * @addr_rep: Address reply descriptor used when firmware returns a
+ * memory address associated with the reply
+ * @addr_rep.rep_flg: Reply flag indicating reply type or status
+ * @addr_rep.msix_idx: MSI-X index for interrupt routing
+ * @addr_rep.taskid: Task identifier matching the submitted request
+ * @addr_rep.rep_frame_addr: Physical address of the reply frame
+ *
+ * @words: Raw 64-bit representation of the reply descriptor
+ * @u: Alternative access using 32-bit low/high words
+ * @u.low: Lower 32 bits of the descriptor
+ * @u.high: Upper 32 bits of the descriptor
+ */
+union leapraid_rep_desc_union {
+ struct leapraid_rep_desc {
+ u8 rep_flg;
+ u8 msix_idx;
+ __le16 taskid;
+ u8 r1[4];
+ } dflt_rep;
+ struct leapraid_add_rep_desc {
+ u8 rep_flg;
+ u8 msix_idx;
+ __le16 taskid;
+ __le32 rep_frame_addr;
+ } addr_rep;
+ __le64 words;
+ struct {
+ u32 low;
+ u32 high;
+ } u;
+} __packed __aligned(4);
+
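A hypothetical consumer loop showing how the type mask and the unused sentinel
above would typically be applied when draining a reply queue; the all-ones
re-arm convention is an assumption borrowed from similar adapters, not taken
from this patch:

/* Hypothetical sketch: drain one reply queue until an unused entry. */
static void example_drain_replies(union leapraid_rep_desc_union *q,
				  int queue_depth, int *head)
{
	union leapraid_rep_desc_union *rd;
	u8 type;

	for (;;) {
		rd = &q[*head];
		type = rd->dflt_rep.rep_flg & LEAPRAID_RPY_DESC_FLG_TYPE_MASK;
		if (type == LEAPRAID_RPY_DESC_FLG_UNUSED)
			break;	/* queue is empty */
		if (type == LEAPRAID_RPY_DESC_FLG_ADDRESS_REPLY) {
			/* full frame is at rd->addr_rep.rep_frame_addr */
		}
		/* assumed convention: all-ones marks the slot unused again */
		rd->words = cpu_to_le64(ULLONG_MAX);
		*head = (*head + 1) % queue_depth;
	}
}
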
+/**
+ * struct leapraid_req - Generic request header
+ *
+ * @func_dep1: Function-dependent parameter (low 16 bits)
+ * @r1: Reserved
+ * @func: Function code identifying the command type
+ * @r2: Reserved
+ */
+struct leapraid_req {
+ __le16 func_dep1;
+ u8 r1;
+ u8 func;
+ u8 r2[8];
+};
+
+/**
+ * struct leapraid_rep - Generic reply header
+ *
+ * @r1: Reserved
+ * @msg_len: Length of the reply message in bytes
+ * @function: Function code corresponding to the request
+ * @r2: Reserved
+ * @adapter_status: Status code reported by the adapter
+ * @r3: Reserved
+ */
+struct leapraid_rep {
+ u8 r1[2];
+ u8 msg_len;
+ u8 function;
+ u8 r2[10];
+ __le16 adapter_status;
+ u8 r3[4];
+};
+
+/**
+ * struct leapraid_sge_simple32 - 32-bit simple scatter-gather entry
+ *
+ * @flg_and_len: Combined field for flags and segment length
+ * @addr: 32-bit physical address of the data buffer
+ */
+struct leapraid_sge_simple32 {
+ __le32 flg_and_len;
+ __le32 addr;
+};
+
+/**
+ * struct leapraid_sge_simple64 - 64-bit simple scatter-gather entry
+ *
+ * @flg_and_len: Combined field for flags and segment length
+ * @addr: 64-bit physical address of the data buffer
+ */
+struct leapraid_sge_simple64 {
+ __le32 flg_and_len;
+ __le64 addr;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_sge_simple_union - Unified 32/64-bit SGE representation
+ *
+ * @flg_and_len: Combined field for flags and segment length
+ * @u.addr32: 32-bit address field
+ * @u.addr64: 64-bit address field
+ */
+struct leapraid_sge_simple_union {
+ __le32 flg_and_len;
+ union {
+ __le32 addr32;
+ __le64 addr64;
+ } u;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_sge_chain_union - Chained scatter-gather entry
+ *
+ * @len: Length of the chain descriptor
+ * @next_chain_offset: Offset to the next SGE chain
+ * @flg: Flags indicating chain or termination properties
+ * @u.addr32: 32-bit physical address
+ * @u.addr64: 64-bit physical address
+ */
+struct leapraid_sge_chain_union {
+ __le16 len;
+ u8 next_chain_offset;
+ u8 flg;
+ union {
+ __le32 addr32;
+ __le64 addr64;
+ } u;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_ieee_sge_simple32 - IEEE 32-bit simple SGE format
+ *
+ * @addr: 32-bit physical address of the data buffer
+ * @flg_and_len: Combined field for flags and data length
+ */
+struct leapraid_ieee_sge_simple32 {
+ __le32 addr;
+ __le32 flg_and_len;
+};
+
+/**
+ * struct leapraid_ieee_sge_simple64 - IEEE 64-bit simple SGE format
+ *
+ * @addr: 64-bit physical address of the data buffer
+ * @len: Length of the data segment
+ * @r1: Reserved
+ * @flg: Flags indicating transfer properties
+ */
+struct leapraid_ieee_sge_simple64 {
+ __le64 addr;
+ __le32 len;
+ u8 r1[3];
+ u8 flg;
+} __packed __aligned(4);
+
+/**
+ * union leapraid_ieee_sge_simple_union - Unified IEEE SGE format
+ *
+ * @simple32: IEEE 32-bit simple SGE entry
+ * @simple64: IEEE 64-bit simple SGE entry
+ */
+union leapraid_ieee_sge_simple_union {
+ struct leapraid_ieee_sge_simple32 simple32;
+ struct leapraid_ieee_sge_simple64 simple64;
+};
+
+/**
+ * union leapraid_ieee_sge_chain_union - Unified IEEE SGE chain format
+ *
+ * @chain32: IEEE 32-bit chain SGE entry
+ * @chain64: IEEE 64-bit chain SGE entry
+ */
+union leapraid_ieee_sge_chain_union {
+ struct leapraid_ieee_sge_simple32 chain32;
+ struct leapraid_ieee_sge_simple64 chain64;
+};
+
+/**
+ * struct leapraid_chain64_ieee_sg - 64-bit IEEE chain SGE descriptor
+ *
+ * @addr: Physical address of the next chain segment
+ * @len: Length of the current SGE
+ * @r1: Reserved
+ * @next_chain_offset: Offset to the next chain element
+ * @flg: Flags that describe SGE attributes
+ */
+struct leapraid_chain64_ieee_sg {
+ __le64 addr;
+ __le32 len;
+ u8 r1[2];
+ u8 next_chain_offset;
+ u8 flg;
+} __packed __aligned(4);
+
+/**
+ * union leapraid_ieee_sge_io_union - IEEE-style SGE union for I/O
+ *
+ * @ieee_simple: Simple IEEE SGE descriptor
+ * @ieee_chain: IEEE chain SGE descriptor
+ */
+union leapraid_ieee_sge_io_union {
+ struct leapraid_ieee_sge_simple64 ieee_simple;
+ struct leapraid_chain64_ieee_sg ieee_chain;
+};
+
+/**
+ * union leapraid_simple_sge_union - Union of simple SGE descriptors
+ *
+ * @leapio_simple: LeapIO-style simple SGE
+ * @ieee_simple: IEEE-style simple SGE
+ */
+union leapraid_simple_sge_union {
+ struct leapraid_sge_simple_union leapio_simple;
+ union leapraid_ieee_sge_simple_union ieee_simple;
+};
+
+/**
+ * union leapraid_sge_io_union - Combined SGE union for all I/O types
+ *
+ * @leapio_simple: LeapIO simple SGE format
+ * @leapio_chain: LeapIO chain SGE format
+ * @ieee_simple: IEEE simple SGE format
+ * @ieee_chain: IEEE chain SGE format
+ */
+union leapraid_sge_io_union {
+ struct leapraid_sge_simple_union leapio_simple;
+ struct leapraid_sge_chain_union leapio_chain;
+ union leapraid_ieee_sge_simple_union ieee_simple;
+ union leapraid_ieee_sge_chain_union ieee_chain;
+};
+
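To make the flag usage above concrete, a hedged sketch of describing one
contiguous DMA buffer with a single IEEE simple element; the example_* helper
is hypothetical and assumes the surrounding request frame was already zeroed:

/* Hypothetical sketch: one IEEE simple SGE, last in the list. */
static void example_build_one_sge(union leapraid_sge_io_union *sgl,
				  dma_addr_t dma, u32 len)
{
	struct leapraid_ieee_sge_simple64 *sge = &sgl->ieee_simple.simple64;

	sge->addr = cpu_to_le64(dma);
	sge->len = cpu_to_le32(len);
	sge->flg = LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
		   LEAPRAID_IEEE_SGE_FLG_EOL;
}
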
+/**
+ * struct leapraid_cfg_pg_header - Standard configuration page header
+ *
+ * @r1: Reserved
+ * @page_len: Length of the page in 4-byte units
+ * @page_num: Page number
+ * @page_type: Page type
+ */
+struct leapraid_cfg_pg_header {
+ u8 r1;
+ u8 page_len;
+ u8 page_num;
+ u8 page_type;
+};
+
+/**
+ * struct leapraid_cfg_ext_pg_header - Extended configuration page header
+ *
+ * @r1: Reserved
+ * @r2: Reserved
+ * @page_num: Page number
+ * @page_type: Page type
+ * @ext_page_len: Extended page length
+ * @ext_page_type: Extended page type
+ * @r3: Reserved
+ */
+struct leapraid_cfg_ext_pg_header {
+ u8 r1;
+ u8 r2;
+ u8 page_num;
+ u8 page_type;
+ __le16 ext_page_len;
+ u8 ext_page_type;
+ u8 r3;
+};
+
+/**
+ * struct leapraid_cfg_req - Configuration request message
+ *
+ * @action: Requested action type
+ * @sgl_flag: SGL flag field
+ * @chain_offset: Offset to next chain SGE
+ * @func: Function code
+ * @ext_page_len: Extended page length
+ * @ext_page_type: Extended page type
+ * @msg_flag: Message flags
+ * @r1: Reserved
+ * @header: Configuration page header
+ * @page_addr: Address of the page buffer
+ * @page_buf_sge: SGE describing the page buffer
+ */
+struct leapraid_cfg_req {
+ u8 action;
+ u8 sgl_flag;
+ u8 chain_offset;
+ u8 func;
+ __le16 ext_page_len;
+ u8 ext_page_type;
+ u8 msg_flag;
+ u8 r1[12];
+ struct leapraid_cfg_pg_header header;
+ __le32 page_addr;
+ union leapraid_sge_io_union page_buf_sge;
+};
+
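As a usage sketch (not taken from this patch), reading the current copy of SAS
device page 0 for a given handle would combine the action, page type and page
address defines above roughly as follows; example_* is hypothetical and the
preceding header query is elided:

/* Hypothetical sketch: request a read of SAS device page 0. */
static void example_fill_cfg_read(struct leapraid_cfg_req *req, u16 dev_hdl)
{
	memset(req, 0, sizeof(*req));
	req->func = LEAPRAID_FUNC_CONFIG_OP;
	req->action = LEAPRAID_CFG_ACT_PAGE_READ_CUR;
	req->header.page_type = LEAPRAID_CFG_PT_EXTENDED;
	req->ext_page_type = LEAPRAID_CFG_EXTPT_SAS_DEV;
	req->header.page_num = LEAPRAID_CFG_PAGE_NUM_DEV0;
	/* page address: handle-addressed form, handle in the low 16 bits */
	req->page_addr = cpu_to_le32(LEAPRAID_SAS_DEV_CFG_PGAD_HDL | dev_hdl);
}
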
+/**
+ * struct leapraid_cfg_rep - Configuration reply message
+ *
+ * @action: Action type from the request
+ * @r1: Reserved
+ * @msg_len: Message length in bytes
+ * @func: Function code
+ * @ext_page_len: Extended page length
+ * @ext_page_type: Extended page type
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @adapter_status: Adapter status code
+ * @r3: Reserved
+ * @header: Configuration page header
+ */
+struct leapraid_cfg_rep {
+ u8 action;
+ u8 r1;
+ u8 msg_len;
+ u8 func;
+ __le16 ext_page_len;
+ u8 ext_page_type;
+ u8 msg_flag;
+ u8 r2[6];
+ __le16 adapter_status;
+ u8 r3[4];
+ struct leapraid_cfg_pg_header header;
+};
+
+/**
+ * struct leapraid_boot_dev_format_sas_wwid - Boot device identified by SAS WWID
+ *
+ * @sas_addr: SAS address of the device
+ * @lun: Logical unit number
+ * @r1: Reserved
+ */
+struct leapraid_boot_dev_format_sas_wwid {
+ __le64 sas_addr;
+ u8 lun[8];
+ u8 r1[8];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_boot_dev_format_enc_slot - Boot device identified by enclosure slot
+ *
+ * @enc_lid: Enclosure logical ID
+ * @r1: Reserved
+ * @slot_num: Slot number in the enclosure
+ * @r2: Reserved
+ */
+struct leapraid_boot_dev_format_enc_slot {
+ __le64 enc_lid;
+ u8 r1[8];
+ __le16 slot_num;
+ u8 r2[6];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_boot_dev_format_dev_name - Boot device by device name
+ *
+ * @dev_name: Device name identifier
+ * @lun: Logical unit number
+ * @r1: Reserved
+ */
+struct leapraid_boot_dev_format_dev_name {
+ __le64 dev_name;
+ u8 lun[8];
+ u8 r1[8];
+} __packed __aligned(4);
+
+/**
+ * union leapraid_boot_dev_format - Boot device format union
+ *
+ * @sas_wwid: Format using SAS WWID and LUN
+ * @enc_slot: Format using enclosure slot and ID
+ * @dev_name: Format using device name and LUN
+ */
+union leapraid_boot_dev_format {
+ struct leapraid_boot_dev_format_sas_wwid sas_wwid;
+ struct leapraid_boot_dev_format_enc_slot enc_slot;
+ struct leapraid_boot_dev_format_dev_name dev_name;
+};
+
+/**
+ * struct leapraid_bios_page2 - BIOS configuration page 2
+ *
+ * @header: Configuration page header
+ * @r1: Reserved
+ * @requested_boot_dev_form: Format type of the requested boot device
+ * @r2: Reserved
+ * @requested_boot_dev: Boot device requested by BIOS or user
+ * @requested_alt_boot_dev_form: Format of the alternate boot device
+ * @r3: Reserved
+ * @requested_alt_boot_dev: Alternate boot device requested
+ * @current_boot_dev_form: Format type of the active boot device
+ * @r4: Reserved
+ * @current_boot_dev: Currently active boot device in use
+ */
+struct leapraid_bios_page2 {
+ struct leapraid_cfg_pg_header header;
+ u8 r1[24];
+ u8 requested_boot_dev_form;
+ u8 r2[3];
+ union leapraid_boot_dev_format requested_boot_dev;
+ u8 requested_alt_boot_dev_form;
+ u8 r3[3];
+ union leapraid_boot_dev_format requested_alt_boot_dev;
+ u8 current_boot_dev_form;
+ u8 r4[3];
+ union leapraid_boot_dev_format current_boot_dev;
+};
+
+/**
+ * struct leapraid_bios_page3 - BIOS configuration page 3
+ *
+ * @header: Configuration page header
+ * @r1: Reserved
+ * @bios_version: BIOS firmware version number
+ * @r2: Reserved
+ */
+struct leapraid_bios_page3 {
+ struct leapraid_cfg_pg_header header;
+ u8 r1[4];
+ __le32 bios_version;
+ u8 r2[84];
+};
+
+/**
+ * struct leapraid_raidvol0_phys_disk - Physical disk in RAID volume
+ *
+ * @r1: Reserved
+ * @phys_disk_num: Physical disk number within the RAID volume
+ * @r2: Reserved
+ */
+struct leapraid_raidvol0_phys_disk {
+ u8 r1[2];
+ u8 phys_disk_num;
+ u8 r2;
+};
+
+/**
+ * struct leapraid_raidvol_p0 - RAID volume configuration page 0
+ *
+ * @header: Configuration page header
+ * @dev_hdl: Device handle for the RAID volume
+ * @volume_state: State of the RAID volume
+ * @volume_type: RAID type
+ * @r1: Reserved
+ * @num_phys_disks: Number of physical disks in the volume
+ * @r2: Reserved
+ * @phys_disk: Array of physical disks in this volume
+ */
+struct leapraid_raidvol_p0 {
+ struct leapraid_cfg_pg_header header;
+ __le16 dev_hdl;
+ u8 volume_state;
+ u8 volume_type;
+ u8 r1[28];
+ u8 num_phys_disks;
+ u8 r2[3];
+ struct leapraid_raidvol0_phys_disk phys_disk[];
+};
+
+/**
+ * struct leapraid_raidvol_p1 - RAID volume configuration page 1
+ *
+ * @header: Configuration page header
+ * @dev_hdl: Device handle of the RAID volume
+ * @r1: Reserved
+ * @wwid: World-wide identifier for the volume
+ * @r2: Reserved
+ */
+struct leapraid_raidvol_p1 {
+ struct leapraid_cfg_pg_header header;
+ __le16 dev_hdl;
+ u8 r1[42];
+ __le64 wwid;
+ u8 r2[8];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_raidpd_p0 - Physical disk configuration page 0
+ *
+ * @header: Configuration page header
+ * @dev_hdl: Device handle of the physical disk
+ * @r1: Reserved
+ * @phys_disk_num: Physical disk number
+ * @r2: Reserved
+ */
+struct leapraid_raidpd_p0 {
+ struct leapraid_cfg_pg_header header;
+ __le16 dev_hdl;
+ u8 r1;
+ u8 phys_disk_num;
+ u8 r2[112];
+};
+
+/**
+ * struct leapraid_sas_io_unit0_phy_info - PHY info for SAS I/O unit
+ *
+ * @port: Port number the PHY belongs to
+ * @port_flg: Flags describing port status
+ * @phy_flg: Flags describing PHY status
+ * @neg_link_rate: Negotiated link rate of the PHY
+ * @controller_phy_dev_info: Controller PHY device info
+ * @attached_dev_hdl: Handle of attached device
+ * @controller_dev_hdl: Handle of the controller device
+ * @r1: Reserved
+ */
+struct leapraid_sas_io_unit0_phy_info {
+ u8 port;
+ u8 port_flg;
+ u8 phy_flg;
+ u8 neg_link_rate;
+ __le32 controller_phy_dev_info;
+ __le16 attached_dev_hdl;
+ __le16 controller_dev_hdl;
+ u8 r1[8];
+};
+
+/**
+ * struct leapraid_sas_io_unit_p0 - SAS I/O unit configuration page 0
+ *
+ * @header: Extended configuration page header
+ * @r1: Reserved
+ * @phy_num: Number of PHYs in this unit
+ * @r2: Reserved
+ * @phy_info: Array of PHY information
+ */
+struct leapraid_sas_io_unit_p0 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 r1[4];
+ u8 phy_num;
+ u8 r2[3];
+ struct leapraid_sas_io_unit0_phy_info phy_info[];
+};
+
+/**
+ * struct leapraid_sas_io_unit1_phy_info - Placeholder for SAS unit page 1 PHY
+ *
+ * @r1: Reserved
+ */
+struct leapraid_sas_io_unit1_phy_info {
+ u8 r1[12];
+};
+
+/**
+ * struct leapraid_sas_io_unit_page1 - SAS I/O unit configuration page 1
+ *
+ * @header: Extended configuration page header
+ * @r1: Reserved
+ * @narrowport_max_queue_depth: Maximum queue depth for narrow ports
+ * @r2: Reserved
+ * @wideport_max_queue_depth: Maximum queue depth for wide ports
+ * @r3: Reserved
+ * @sata_max_queue_depth: Maximum SATA queue depth
+ * @r4: Reserved
+ * @phy_info: Array of PHY info structures
+ */
+struct leapraid_sas_io_unit_page1 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 r1[2];
+ __le16 narrowport_max_queue_depth;
+ u8 r2[2];
+ __le16 wideport_max_queue_depth;
+ u8 r3;
+ u8 sata_max_queue_depth;
+ u8 r4[2];
+ struct leapraid_sas_io_unit1_phy_info phy_info[];
+};
+
+/**
+ * struct leapraid_exp_p0 - SAS expander page 0
+ *
+ * @header: Extended page header
+ * @physical_port: Physical port number
+ * @r1: Reserved
+ * @enc_hdl: Enclosure handle
+ * @sas_address: SAS address of the expander
+ * @r2: Reserved
+ * @dev_hdl: Device handle of this expander
+ * @parent_dev_hdl: Device handle of parent expander
+ * @r3: Reserved
+ * @phy_num: Number of PHYs
+ * @r4: Reserved
+ */
+struct leapraid_exp_p0 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 physical_port;
+ u8 r1;
+ __le16 enc_hdl;
+ __le64 sas_address;
+ u8 r2[4];
+ __le16 dev_hdl;
+ __le16 parent_dev_hdl;
+ u8 r3[4];
+ u8 phy_num;
+ u8 r4[27];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_exp_p1 - SAS expander page 1
+ *
+ * @header: Extended page header
+ * @r1: Reserved
+ * @p_link_rate: PHY link rate
+ * @hw_link_rate: Hardware supported link rate
+ * @attached_dev_hdl: Attached device handle
+ * @r2: Reserved
+ * @neg_link_rate: Negotiated link rate
+ * @r3: Reserved
+ */
+struct leapraid_exp_p1 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 r1[8];
+ u8 p_link_rate;
+ u8 hw_link_rate;
+ __le16 attached_dev_hdl;
+ u8 r2[11];
+ u8 neg_link_rate;
+ u8 r3[12];
+};
+
+/**
+ * struct leapraid_sas_dev_p0 - SAS device page 0
+ *
+ * @header: Extended configuration page header
+ * @slot: Slot number
+ * @enc_hdl: Enclosure handle
+ * @sas_address: SAS address
+ * @parent_dev_hdl: Parent device handle
+ * @phy_num: Number of PHYs
+ * @r1: Reserved
+ * @dev_hdl: Device handle
+ * @r2: Reserved
+ * @dev_info: Device information
+ * @flg: Flags
+ * @physical_port: Physical port number
+ * @max_port_connections: Maximum port connections
+ * @dev_name: Device name
+ * @port_groups: Number of port groups
+ * @r3: Reserved
+ * @enc_level: Enclosure level
+ * @connector_name: Connector identifier
+ * @r4: Reserved
+ */
+struct leapraid_sas_dev_p0 {
+ struct leapraid_cfg_ext_pg_header header;
+ __le16 slot;
+ __le16 enc_hdl;
+ __le64 sas_address;
+ __le16 parent_dev_hdl;
+ u8 phy_num;
+ u8 r1;
+ __le16 dev_hdl;
+ u8 r2[2];
+ __le32 dev_info;
+ __le16 flg;
+ u8 physical_port;
+ u8 max_port_connections;
+ __le64 dev_name;
+ u8 port_groups;
+ u8 r3[2];
+ u8 enc_level;
+ u8 connector_name[4];
+ u8 r4[4];
+} __packed __aligned(4);
+
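A small hedged example of how a driver would typically consume this page; the
example_* helper and the "end devices only" policy are assumptions, not
statements about this driver:

/* Hypothetical sketch: minimal checks on SAS device page 0. */
static bool example_dev_usable(const struct leapraid_sas_dev_p0 *p0)
{
	u32 info = le32_to_cpu(p0->dev_info);

	if (!(le16_to_cpu(p0->flg) & LEAPRAID_SAS_DEV_P0_FLG_DEV_PRESENT))
		return false;
	return (info & LEAPRAID_DEVTYP_MASK_DEV_TYPE) ==
	       LEAPRAID_DEVTYP_END_DEV;
}
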
+/**
+ * struct leapraid_sas_phy_p0 - SAS PHY configuration page 0
+ *
+ * @header: Extended configuration page header
+ * @r1: Reserved
+ * @attached_dev_hdl: Handle of attached device
+ * @r2: Reserved
+ * @p_link_rate: PHY link rate
+ * @hw_link_rate: Hardware supported link rate
+ * @r3: Reserved
+ * @phy_info: PHY information
+ * @neg_link_rate: Negotiated link rate
+ * @r4: Reserved
+ */
+struct leapraid_sas_phy_p0 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 r1[4];
+ __le16 attached_dev_hdl;
+ u8 r2[6];
+ u8 p_link_rate;
+ u8 hw_link_rate;
+ u8 r3[2];
+ __le32 phy_info;
+ u8 neg_link_rate;
+ u8 r4[3];
+};
+
+/**
+ * struct leapraid_enc_p0 - SAS enclosure page 0
+ *
+ * @header: Extended configuration page header
+ * @r1: Reserved
+ * @enc_lid: Enclosure logical ID
+ * @r2: Reserved
+ * @enc_hdl: Enclosure handle
+ * @r3: Reserved
+ */
+struct leapraid_enc_p0 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 r1[4];
+ __le64 enc_lid;
+ u8 r2[2];
+ __le16 enc_hdl;
+ u8 r3[15];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_raid_cfg_p0_element - RAID configuration element
+ *
+ * @element_flg: Element flags
+ * @vol_dev_hdl: Volume device handle
+ * @r1: Reserved
+ * @phys_disk_dev_hdl: Physical disk device handle
+ */
+struct leapraid_raid_cfg_p0_element {
+ __le16 element_flg;
+ __le16 vol_dev_hdl;
+ u8 r1[2];
+ __le16 phys_disk_dev_hdl;
+};
+
+/**
+ * struct leapraid_raid_cfg_p0 - RAID configuration page 0
+ *
+ * @header: Extended configuration page header
+ * @r1: Reserved
+ * @cfg_num: Configuration number
+ * @r2: Reserved
+ * @elements_num: Number of RAID elements
+ * @r3: Reserved
+ * @cfg_element: Array of RAID elements
+ */
+struct leapraid_raid_cfg_p0 {
+ struct leapraid_cfg_ext_pg_header header;
+ u8 r1[3];
+ u8 cfg_num;
+ u8 r2[32];
+ u8 elements_num;
+ u8 r3[3];
+ struct leapraid_raid_cfg_p0_element cfg_element[];
+};
+
+/**
+ * union leapraid_mpi_scsi_io_cdb_union - SCSI I/O CDB or simple SGE
+ *
+ * @cdb32: 32-byte SCSI command descriptor block
+ * @sge: Simple SGE format
+ */
+union leapraid_mpi_scsi_io_cdb_union {
+ u8 cdb32[32];
+ struct leapraid_sge_simple_union sge;
+};
+
+/**
+ * struct leapraid_mpi_scsiio_req - MPI SCSI I/O request
+ *
+ * @dev_hdl: Device handle for the target
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r1: Reserved
+ * @msg_flg: Message flags
+ * @r2: Reserved
+ * @sense_buffer_low_add: Lower 32-bit address of sense buffer
+ * @dma_flag: DMA flags
+ * @r3: Reserved
+ * @sense_buffer_len: Sense buffer length
+ * @r4: Reserved
+ * @sgl_offset0: SGL offset 0
+ * @sgl_offset1: SGL offset 1
+ * @sgl_offset2: SGL offset 2
+ * @sgl_offset3: SGL offset 3
+ * @skip_count: Bytes to skip before transfer
+ * @data_len: Length of data transfer
+ * @bi_dir_data_len: Bi-directional transfer length
+ * @io_flg: I/O flags
+ * @eedp_flag: EEDP flags
+ * @eedp_block_size: EEDP block size
+ * @r5: Reserved
+ * @secondary_ref_tag: Secondary reference tag
+ * @secondary_app_tag: Secondary application tag
+ * @app_tag_trans_mask: Application tag mask
+ * @lun: Logical Unit Number
+ * @ctrl: Control flags
+ * @cdb: SCSI Command Descriptor Block or simple SGE
+ * @sgl: Scatter-gather list
+ */
+struct leapraid_mpi_scsiio_req {
+ __le16 dev_hdl;
+ u8 chain_offset;
+ u8 func;
+ u8 r1[3];
+ u8 msg_flg;
+ u8 r2[4];
+ __le32 sense_buffer_low_add;
+ u8 dma_flag;
+ u8 r3;
+ u8 sense_buffer_len;
+ u8 r4;
+ u8 sgl_offset0;
+ u8 sgl_offset1;
+ u8 sgl_offset2;
+ u8 sgl_offset3;
+ __le32 skip_count;
+ __le32 data_len;
+ __le32 bi_dir_data_len;
+ __le16 io_flg;
+ __le16 eedp_flag;
+ __le16 eedp_block_size;
+ u8 r5[2];
+ __le32 secondary_ref_tag;
+ __le16 secondary_app_tag;
+ __le16 app_tag_trans_mask;
+ u8 lun[8];
+ __le32 ctrl;
+ union leapraid_mpi_scsi_io_cdb_union cdb;
+ union leapraid_sge_io_union sgl;
+};
+
+/**
+ * union leapraid_scsi_io_cdb_union - SCSI I/O CDB or IEEE simple SGE
+ *
+ * @cdb32: 32-byte SCSI CDB
+ * @sge: IEEE simple 64-bit SGE
+ */
+union leapraid_scsi_io_cdb_union {
+ u8 cdb32[32];
+ struct leapraid_ieee_sge_simple64 sge;
+};
+
+/**
+ * struct leapraid_scsiio_req - SCSI I/O request
+ *
+ * @dev_hdl: Device handle
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r1: Reserved
+ * @msg_flg: Message flags
+ * @r2: Reserved
+ * @sense_buffer_low_add: Lower 32-bit address of sense buffer
+ * @dma_flag: DMA flag
+ * @r3: Reserved
+ * @sense_buffer_len: Sense buffer length
+ * @r4: Reserved
+ * @sgl_offset0: SGL offset 0
+ * @sgl_offset1: SGL offset 1
+ * @sgl_offset2: SGL offset 2
+ * @sgl_offset3: SGL offset 3
+ * @skip_count: Bytes to skip before transfer
+ * @data_len: Length of data transfer
+ * @bi_dir_data_len: Bi-directional transfer length
+ * @io_flg: I/O flags
+ * @eedp_flag: EEDP flags
+ * @eedp_block_size: EEDP block size
+ * @r5: Reserved
+ * @secondary_ref_tag: Secondary reference tag
+ * @secondary_app_tag: Secondary application tag
+ * @app_tag_trans_mask: Application tag mask
+ * @lun: Logical Unit Number
+ * @ctrl: Control flags
+ * @cdb: SCSI Command Descriptor Block or simple SGE
+ * @sgl: Scatter-gather list
+ */
+struct leapraid_scsiio_req {
+ __le16 dev_hdl;
+ u8 chain_offset;
+ u8 func;
+ u8 r1[3];
+ u8 msg_flg;
+ u8 r2[4];
+ __le32 sense_buffer_low_add;
+ u8 dma_flag;
+ u8 r3;
+ u8 sense_buffer_len;
+ u8 r4;
+ u8 sgl_offset0;
+ u8 sgl_offset1;
+ u8 sgl_offset2;
+ u8 sgl_offset3;
+ __le32 skip_count;
+ __le32 data_len;
+ __le32 bi_dir_data_len;
+ __le16 io_flg;
+ __le16 eedp_flag;
+ __le16 eedp_block_size;
+ u8 r5[2];
+ __le32 secondary_ref_tag;
+ __le16 secondary_app_tag;
+ __le16 app_tag_trans_mask;
+ u8 lun[8];
+ __le32 ctrl;
+ union leapraid_scsi_io_cdb_union cdb;
+ union leapraid_ieee_sge_io_union sgl;
+};
+
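For orientation, a hedged sketch of filling the fixed part of this request for
a simple read; example_* is hypothetical, and sense buffer plus SGL setup are
left to the surrounding code:

/* Hypothetical sketch: fixed part of a data-in SCSI I/O request. */
static void example_fill_scsiio(struct leapraid_scsiio_req *req, u16 dev_hdl,
				const u8 *cdb, u8 cdb_len, u32 xfer_len)
{
	memset(req, 0, sizeof(*req));
	req->func = LEAPRAID_FUNC_SCSIIO_REQ;
	req->dev_hdl = cpu_to_le16(dev_hdl);
	req->data_len = cpu_to_le32(xfer_len);
	memcpy(req->cdb.cdb32, cdb, min_t(u8, cdb_len, 32));
	req->ctrl = cpu_to_le32(LEAPRAID_SCSIIO_CTRL_READ |
				LEAPRAID_SCSIIO_CTRL_SIMPLEQ);
}
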
+/**
+ * struct leapraid_scsiio_rep - SCSI I/O response
+ *
+ * @dev_hdl: Device handle
+ * @msg_len: Length of response message
+ * @func: Function code
+ * @r1: Reserved
+ * @msg_flg: Message flags
+ * @r2: Reserved
+ * @scsi_status: SCSI status
+ * @scsi_state: SCSI state
+ * @adapter_status: Adapter status
+ * @r3: Reserved
+ * @transfer_count: Number of bytes transferred
+ * @sense_count: Number of sense bytes
+ * @resp_info: Additional response info
+ * @task_tag: Task identifier
+ * @scsi_status_qualifier: SCSI status qualifier
+ * @bi_dir_trans_count: Bi-directional transfer count
+ * @r4: Reserved
+ */
+struct leapraid_scsiio_rep {
+ __le16 dev_hdl;
+ u8 msg_len;
+ u8 func;
+ u8 r1[3];
+ u8 msg_flg;
+ u8 r2[4];
+ u8 scsi_status;
+ u8 scsi_state;
+ __le16 adapter_status;
+ u8 r3[4];
+ __le32 transfer_count;
+ __le32 sense_count;
+ __le32 resp_info;
+ __le16 task_tag;
+ __le16 scsi_status_qualifier;
+ __le32 bi_dir_trans_count;
+ __le32 r4[3];
+};
+
+/**
+ * struct leapraid_scsi_tm_req - SCSI Task Management request
+ *
+ * @dev_hdl: Device handle
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r1: Reserved
+ * @task_type: Task management function type
+ * @r2: Reserved
+ * @msg_flg: Message flags
+ * @r3: Reserved
+ * @lun: Logical Unit Number
+ * @r4: Reserved
+ * @task_mid: Task identifier
+ * @r5: Reserved
+ */
+struct leapraid_scsi_tm_req {
+ __le16 dev_hdl;
+ u8 chain_offset;
+ u8 func;
+ u8 r1;
+ u8 task_type;
+ u8 r2;
+ u8 msg_flg;
+ u8 r3[4];
+ u8 lun[8];
+ u8 r4[28];
+ __le16 task_mid;
+ u8 r5[2];
+};
+
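A hedged sketch of the most common task management use, aborting a single
outstanding command; example_* is hypothetical:

/* Hypothetical sketch: abort one task identified by its task mid. */
static void example_fill_abort_tm(struct leapraid_scsi_tm_req *tm,
				  u16 dev_hdl, u16 task_mid)
{
	memset(tm, 0, sizeof(*tm));
	tm->func = LEAPRAID_FUNC_SCSI_TMF;
	tm->dev_hdl = cpu_to_le16(dev_hdl);
	tm->task_type = LEAPRAID_TM_TASKTYPE_ABORT_TASK;
	tm->task_mid = cpu_to_le16(task_mid);
}
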
+/**
+ * struct leapraid_scsi_tm_rep - SCSI Task Management response
+ *
+ * @dev_hdl: Device handle
+ * @msg_len: Length of response message
+ * @func: Function code
+ * @resp_code: Response code
+ * @task_type: Task management type
+ * @r1: Reserved
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @adapter_status: Adapter status
+ * @r3: Reserved
+ * @termination_count: Count of terminated tasks
+ * @response_info: Additional response info
+ */
+struct leapraid_scsi_tm_rep {
+ __le16 dev_hdl;
+ u8 msg_len;
+ u8 func;
+ u8 resp_code;
+ u8 task_type;
+ u8 r1;
+ u8 msg_flag;
+ u8 r2[6];
+ __le16 adapter_status;
+ u8 r3[4];
+ __le32 termination_count;
+ __le32 response_info;
+};
+
+/**
+ * struct leapraid_sep_req - SEP (SCSI Enclosure Processor) request
+ *
+ * @dev_hdl: Device handle
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @act: Action to perform
+ * @flg: Flags
+ * @r1: Reserved
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @slot_status: Slot status
+ * @r3: Reserved
+ * @slot: Slot number
+ * @enc_hdl: Enclosure handle
+ */
+struct leapraid_sep_req {
+ __le16 dev_hdl;
+ u8 chain_offset;
+ u8 func;
+ u8 act;
+ u8 flg;
+ u8 r1;
+ u8 msg_flag;
+ u8 r2[4];
+ __le32 slot_status;
+ u8 r3[12];
+ __le16 slot;
+ __le16 enc_hdl;
+};
+
+/**
+ * struct leapraid_sep_rep - SEP response
+ *
+ * @dev_hdl: Device handle
+ * @msg_len: Message length
+ * @func: Function code
+ * @act: Action performed
+ * @flg: Flags
+ * @r1: Reserved
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @adapter_status: Adapter status
+ * @r3: Reserved
+ * @slot_status: Slot status
+ * @r4: Reserved
+ * @slot: Slot number
+ * @enc_hdl: Enclosure handle
+ */
+struct leapraid_sep_rep {
+ __le16 dev_hdl;
+ u8 msg_len;
+ u8 func;
+ u8 act;
+ u8 flg;
+ u8 r1;
+ u8 msg_flag;
+ u8 r2[6];
+ __le16 adapter_status;
+ u8 r3[4];
+ __le32 slot_status;
+ u8 r4[4];
+ __le16 slot;
+ __le16 enc_hdl;
+};
+
+/**
+ * struct leapraid_adapter_init_req - Adapter initialization request
+ *
+ * @who_init: Initiator of the initialization
+ * @r1: Reserved
+ * @chain_offset: Chain offset
+ * @func: Function code
+ * @r2: Reserved
+ * @msg_flg: Message flags
+ * @driver_ver: Driver version
+ * @msg_ver: Message version
+ * @header_ver: Header version
+ * @host_buf_addr: Host buffer address (non adapter-ref)
+ * @r4: Reserved
+ * @host_buf_size: Host buffer size (non adapter-ref)
+ * @host_msix_vectors: Number of host MSI-X vectors
+ * @r6: Reserved
+ * @req_frame_size: Request frame size
+ * @rep_desc_qd: Reply descriptor queue depth
+ * @rep_msg_qd: Reply message queue depth
+ * @sense_buffer_add_high: High 32-bit of sense buffer address
+ * @rep_msg_dma_high: High 32-bit of reply message DMA address
+ * @task_desc_base_addr: Base address of task descriptors
+ * @rep_desc_q_arr_addr: Address of reply descriptor queue array
+ * @rep_msg_addr_dma: Reply message DMA address
+ * @time_stamp: Timestamp
+ */
+struct leapraid_adapter_init_req {
+ u8 who_init;
+ u8 r1;
+ u8 chain_offset;
+ u8 func;
+ u8 r2[3];
+ u8 msg_flg;
+ __le32 driver_ver;
+ __le16 msg_ver;
+ __le16 header_ver;
+ __le32 host_buf_addr;
+ u8 r4[2];
+ u8 host_buf_size;
+ u8 host_msix_vectors;
+ u8 r6[2];
+ __le16 req_frame_size;
+ __le16 rep_desc_qd;
+ __le16 rep_msg_qd;
+ __le32 sense_buffer_add_high;
+ __le32 rep_msg_dma_high;
+ __le64 task_desc_base_addr;
+ __le64 rep_desc_q_arr_addr;
+ __le64 rep_msg_addr_dma;
+ __le64 time_stamp;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_rep_desc_q_arr - Reply descriptor queue array
+ *
+ * @rep_desc_base_addr: Base address of the reply descriptors
+ * @r1: Reserved
+ */
+struct leapraid_rep_desc_q_arr {
+ __le64 rep_desc_base_addr;
+ __le64 r1;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_adapter_init_rep - Adapter initialization reply
+ *
+ * @who_init: Initiator of the initialization
+ * @r1: Reserved
+ * @msg_len: Length of reply message
+ * @func: Function code
+ * @r2: Reserved
+ * @msg_flag: Message flags
+ * @r3: Reserved
+ * @adapter_status: Adapter status
+ * @r4: Reserved
+ */
+struct leapraid_adapter_init_rep {
+ u8 who_init;
+ u8 r1;
+ u8 msg_len;
+ u8 func;
+ u8 r2[3];
+ u8 msg_flag;
+ u8 r3[6];
+ __le16 adapter_status;
+ u8 r4[4];
+};
+
+/**
+ * struct leapraid_adapter_log_req - Adapter log request
+ *
+ * @action: Action code
+ * @type: Log type
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r1: Reserved
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @mbox: Mailbox for command-specific parameters
+ * @sge: Scatter-gather entry for data buffer
+ */
+struct leapraid_adapter_log_req {
+ u8 action;
+ u8 type;
+ u8 chain_offset;
+ u8 func;
+ u8 r1[3];
+ u8 msg_flag;
+ u8 r2[4];
+ union {
+ u8 b[12];
+ __le16 s[6];
+ __le32 w[3];
+ } mbox;
+ struct leapraid_sge_simple64 sge;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_adapter_log_rep - Adapter log reply
+ *
+ * @action: Action code echoed
+ * @type: Log type echoed
+ * @msg_len: Length of message
+ * @func: Function code
+ * @r1: Reserved
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @adapter_status: Status returned by adapter
+ */
+struct leapraid_adapter_log_rep {
+ u8 action;
+ u8 type;
+ u8 msg_len;
+ u8 func;
+ u8 r1[3];
+ u8 msg_flag;
+ u8 r2[6];
+ __le16 adapter_status;
+};
+
+/**
+ * struct leapraid_adapter_features_req - Request adapter features
+ *
+ * @r1: Reserved
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r2: Reserved
+ * @msg_flag: Message flags
+ * @r3: Reserved
+ */
+struct leapraid_adapter_features_req {
+ u8 r1[2];
+ u8 chain_offset;
+ u8 func;
+ u8 r2[3];
+ u8 msg_flag;
+ u8 r3[4];
+};
+
+/**
+ * struct leapraid_adapter_features_rep - Adapter features reply
+ *
+ * @msg_ver: Message version
+ * @msg_len: Length of reply message
+ * @func: Function code
+ * @header_ver: Header version
+ * @r1: Reserved
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @adapter_status: Adapter status
+ * @r3: Reserved
+ * @sata_max_qdepth: Max SATA queue depth
+ * @who_init: Who initialized the adapter
+ * @r4: Reserved
+ * @max_msix_vectors: Max MSI-X vectors supported
+ * @req_slot: Number of request slots
+ * @r5: Reserved
+ * @adapter_caps: Adapter capabilities
+ * @fw_version: Firmware version
+ * @sas_wide_max_qdepth: Max wide SAS queue depth
+ * @sas_narrow_max_qdepth: Max narrow SAS queue depth
+ * @r6: Reserved
+ * @hp_slot: Number of high-priority slots
+ * @r7: Reserved
+ * @max_volumes: Maximum supported volumes
+ * @max_dev_hdl: Maximum device handle
+ * @r8: Reserved
+ * @min_dev_hdl: Minimum device handle
+ * @r9: Reserved
+ */
+struct leapraid_adapter_features_rep {
+	__le16 msg_ver;
+	u8 msg_len;
+	u8 func;
+	__le16 header_ver;
+	u8 r1;
+	u8 msg_flag;
+	u8 r2[6];
+	__le16 adapter_status;
+ u8 r3[4];
+ u8 sata_max_qdepth;
+ u8 who_init;
+ u8 r4;
+ u8 max_msix_vectors;
+ __le16 req_slot;
+ u8 r5[2];
+ __le32 adapter_caps;
+ __le32 fw_version;
+ __le16 sas_wide_max_qdepth;
+ __le16 sas_narrow_max_qdepth;
+ u8 r6[10];
+ __le16 hp_slot;
+ u8 r7[3];
+ u8 max_volumes;
+ __le16 max_dev_hdl;
+ u8 r8[2];
+ __le16 min_dev_hdl;
+ u8 r9[6];
+};
+
+/**
+ * struct leapraid_scan_dev_req - Request to scan devices
+ *
+ * @r1: Reserved
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r2: Reserved
+ * @msg_flag: Message flags
+ * @r3: Reserved
+ */
+struct leapraid_scan_dev_req {
+ u8 r1[2];
+ u8 chain_offset;
+ u8 func;
+ u8 r2[3];
+ u8 msg_flag;
+ u8 r3[4];
+};
+
+/**
+ * struct leapraid_scan_dev_rep - Scan devices reply
+ *
+ * @r1: Reserved
+ * @msg_len: Length of message
+ * @func: Function code
+ * @r2: Reserved
+ * @msg_flag: Message flags
+ * @r3: Reserved
+ * @adapter_status: Adapter status
+ * @r4: Reserved
+ */
+struct leapraid_scan_dev_rep {
+ u8 r1[2];
+ u8 msg_len;
+ u8 func;
+ u8 r2[3];
+ u8 msg_flag;
+ u8 r3[6];
+ __le16 adapter_status;
+ u8 r4[4];
+};
+
+/**
+ * struct leapraid_evt_notify_req - Event notification request
+ *
+ * @r1: Reserved
+ * @chain_offset: Offset for chained SGE
+ * @func: Function code
+ * @r2: Reserved
+ * @msg_flag: Message flags
+ * @r3: Reserved
+ * @evt_masks: Event masks to enable notifications
+ * @r4: Reserved
+ */
+struct leapraid_evt_notify_req {
+ u8 r1[2];
+ u8 chain_offset;
+ u8 func;
+ u8 r2[3];
+ u8 msg_flag;
+ u8 r3[12];
+ __le32 evt_masks[4];
+ u8 r4[8];
+};
+
+/**
+ * struct leapraid_evt_notify_rep - Event notification reply
+ *
+ * @evt_data_len: Length of event data
+ * @msg_len: Length of message
+ * @func: Function code
+ * @r1: Reserved
+ * @r2: Reserved
+ * @msg_flag: Message flags
+ * @r3: Reserved
+ * @adapter_status: Adapter status
+ * @r4: Reserved
+ * @evt: Event code
+ * @r5: Reserved
+ * @evt_data: Event data array
+ */
+struct leapraid_evt_notify_rep {
+ __le16 evt_data_len;
+ u8 msg_len;
+ u8 func;
+ u8 r1[2];
+ u8 r2;
+ u8 msg_flag;
+ u8 r3[6];
+ __le16 adapter_status;
+ u8 r4[4];
+ __le16 evt;
+ u8 r5[6];
+ __le32 evt_data[];
+};
+
+/**
+ * struct leapraid_evt_data_sas_dev_status_change - SAS device status change
+ *
+ * @task_tag: Task identifier
+ * @reason_code: Reason for status change
+ * @physical_port: Physical port number
+ * @r1: Reserved
+ * @dev_hdl: Device handle
+ * @r2: Reserved
+ * @sas_address: SAS address of device
+ * @lun: Logical Unit Number
+ */
+struct leapraid_evt_data_sas_dev_status_change {
+ __le16 task_tag;
+ u8 reason_code;
+ u8 physical_port;
+ u8 r1[2];
+ __le16 dev_hdl;
+ u8 r2[4];
+ __le64 sas_address;
+ u8 lun[8];
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_evt_data_ir_change - IR (Integrated RAID) change event data
+ *
+ * @r1: Reserved
+ * @reason_code: Reason for IR change
+ * @r2: Reserved
+ * @vol_dev_hdl: Volume device handle
+ * @phys_disk_dev_hdl: Physical disk device handle
+ */
+struct leapraid_evt_data_ir_change {
+ u8 r1;
+ u8 reason_code;
+ u8 r2[2];
+ __le16 vol_dev_hdl;
+ __le16 phys_disk_dev_hdl;
+};
+
+/**
+ * struct leapraid_evt_data_sas_disc - SAS discovery event data
+ *
+ * @r1: Reserved
+ * @reason_code: Reason for discovery event
+ * @physical_port: Physical port number where event occurred
+ * @r2: Reserved
+ */
+struct leapraid_evt_data_sas_disc {
+ u8 r1;
+ u8 reason_code;
+ u8 physical_port;
+ u8 r2[5];
+};
+
+/**
+ * struct leapraid_evt_sas_topo_phy_entry - SAS topology PHY entry
+ *
+ * @attached_dev_hdl: Device handle attached to PHY
+ * @link_rate: Current link rate
+ * @phy_status: PHY status flags
+ */
+struct leapraid_evt_sas_topo_phy_entry {
+ __le16 attached_dev_hdl;
+ u8 link_rate;
+ u8 phy_status;
+};
+
+/**
+ * struct leapraid_evt_data_sas_topo_change_list - SAS topology change list
+ *
+ * @encl_hdl: Enclosure handle
+ * @exp_dev_hdl: Expander device handle
+ * @num_phys: Number of PHYs in this entry
+ * @r1: Reserved
+ * @entry_num: Entry index
+ * @start_phy_num: Start PHY number
+ * @exp_status: Expander status
+ * @physical_port: Physical port number
+ * @phy: Array of SAS PHY entries
+ */
+struct leapraid_evt_data_sas_topo_change_list {
+ __le16 encl_hdl;
+ __le16 exp_dev_hdl;
+ u8 num_phys;
+ u8 r1[3];
+ u8 entry_num;
+ u8 start_phy_num;
+ u8 exp_status;
+ u8 physical_port;
+ struct leapraid_evt_sas_topo_phy_entry phy[];
+};
+
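To show how the reason-code masks above fit together, a hedged sketch of
walking one change list; example_* is hypothetical:

/* Hypothetical sketch: decode per-PHY reason codes in a topology event. */
static void
example_walk_topo(const struct leapraid_evt_data_sas_topo_change_list *ev)
{
	int i;

	for (i = 0; i < ev->num_phys; i++) {
		u8 rc = ev->phy[i].phy_status &
			LEAPRAID_EVT_SAS_TOPO_RC_MASK;

		switch (rc) {
		case LEAPRAID_EVT_SAS_TOPO_RC_TARG_ADDED:
			/* le16_to_cpu(ev->phy[i].attached_dev_hdl) appeared
			 * on PHY ev->start_phy_num + i
			 */
			break;
		case LEAPRAID_EVT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
			/* the device behind this handle needs teardown */
			break;
		}
	}
}
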
+/**
+ * struct leapraid_evt_data_sas_enc_dev_status_change - SAS enclosure device status
+ *
+ * @enc_hdl: Enclosure handle
+ * @reason_code: Reason code for status change
+ * @physical_port: Physical port number
+ * @encl_logical_id: Enclosure logical ID
+ * @num_slots: Number of slots in enclosure
+ * @start_slot: First affected slot
+ * @phy_bits: Bitmap of affected PHYs
+ */
+struct leapraid_evt_data_sas_enc_dev_status_change {
+ __le16 enc_hdl;
+ u8 reason_code;
+ u8 physical_port;
+ __le64 encl_logical_id;
+ __le16 num_slots;
+ __le16 start_slot;
+ __le32 phy_bits;
+};
+
+/**
+ * struct leapraid_io_unit_ctrl_req - IO unit control request
+ *
+ * @op: Operation code
+ * @r1: Reserved
+ * @chain_offset: SGE chain offset
+ * @func: Function code
+ * @dev_hdl: Device handle
+ * @adapter_para: Adapter parameter selector
+ * @msg_flag: Message flags
+ * @r2: Reserved
+ * @phy_num: PHY number
+ * @r3: Reserved
+ * @adapter_para_value: Value for adapter parameter
+ * @adapter_para_value2: Optional second parameter value
+ * @r4: Reserved
+ */
+struct leapraid_io_unit_ctrl_req {
+ u8 op;
+ u8 r1;
+ u8 chain_offset;
+ u8 func;
+	__le16 dev_hdl;
+ u8 adapter_para;
+ u8 msg_flag;
+ u8 r2[6];
+ u8 phy_num;
+ u8 r3[17];
+ __le32 adapter_para_value;
+ __le32 adapter_para_value2;
+ u8 r4[4];
+};
+
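One more hedged sketch, a PHY link reset issued through this frame, matching
the ops checked by the ioctl path further below; example_* is hypothetical,
and pairing LEAPRAID_FUNC_SAS_IO_UNIT_CTRL with this request layout is an
assumption:

/* Hypothetical sketch: ask the IO unit to link-reset one PHY. */
static void example_fill_phy_reset(struct leapraid_io_unit_ctrl_req *req,
				   u8 phy_num)
{
	memset(req, 0, sizeof(*req));
	req->func = LEAPRAID_FUNC_SAS_IO_UNIT_CTRL;	/* assumed func code */
	req->op = LEAPRAID_SAS_OP_PHY_LINK_RESET;
	req->phy_num = phy_num;
}
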
+/**
+ * struct leapraid_io_unit_ctrl_rep - IO unit control reply
+ *
+ * @op: Operation code echoed
+ * @r1: Reserved
+ * @func: Function code
+ * @dev_hdl: Device handle
+ * @r2: Reserved
+ */
+struct leapraid_io_unit_ctrl_rep {
+ u8 op;
+ u8 r1[2];
+ u8 func;
+ __le16 dev_hdl;
+ u8 r2[14];
+};
+
+/**
+ * struct leapraid_raid_act_req - RAID action request
+ *
+ * @act: RAID action code
+ * @r1: Reserved
+ * @func: Function code
+ * @r2: Reserved
+ * @phys_disk_num: Physical disk number the action targets
+ * @r3: Reserved
+ * @action_data_sge: SGE describing action-specific data
+ */
+struct leapraid_raid_act_req {
+ u8 act;
+ u8 r1[2];
+ u8 func;
+ u8 r2[2];
+ u8 phys_disk_num;
+ u8 r3[13];
+ struct leapraid_sge_simple_union action_data_sge;
+};
+
+/**
+ * struct leapraid_raid_act_rep - RAID action reply
+ *
+ * @act: RAID action code echoed
+ * @r1: Reserved
+ * @func: Function code
+ * @vol_dev_hdl: Volume device handle
+ * @r2: Reserved
+ * @adapter_status: Status returned by adapter
+ * @r3: Reserved
+ */
+struct leapraid_raid_act_rep {
+ u8 act;
+ u8 r1[2];
+ u8 func;
+ __le16 vol_dev_hdl;
+ u8 r2[8];
+ __le16 adapter_status;
+ u8 r3[76];
+};
+
+/**
+ * struct leapraid_smp_passthrough_req - SMP passthrough request
+ *
+ * @passthrough_flg: Passthrough flags
+ * @physical_port: Target PHY port
+ * @r1: Reserved
+ * @func: Function code
+ * @req_data_len: Request data length
+ * @r2: Reserved
+ * @sas_address: SAS address of target device
+ * @r3: Reserved
+ * @sgl: Scatter-gather list describing request buffer
+ */
+struct leapraid_smp_passthrough_req {
+ u8 passthrough_flg;
+ u8 physical_port;
+ u8 r1;
+ u8 func;
+ __le16 req_data_len;
+ u8 r2[10];
+ __le64 sas_address;
+ u8 r3[8];
+ union leapraid_simple_sge_union sgl;
+} __packed __aligned(4);
+
+/**
+ * struct leapraid_smp_passthrough_rep - SMP passthrough reply
+ *
+ * @passthrough_flg: Passthrough flags echoed
+ * @physical_port: Target PHY port
+ * @r1: Reserved
+ * @func: Function code
+ * @resp_data_len: Length of response data
+ * @r2: Reserved
+ * @adapter_status: Adapter status
+ * @r3: Reserved
+ */
+struct leapraid_smp_passthrough_rep {
+ u8 passthrough_flg;
+ u8 physical_port;
+ u8 r1;
+ u8 func;
+ __le16 resp_data_len;
+ u8 r2[8];
+ __le16 adapter_status;
+ u8 r3[12];
+};
+
+/**
+ * struct leapraid_sas_io_unit_ctrl_req - SAS IO unit control request
+ *
+ * @op: Operation code
+ * @r1: Reserved
+ * @func: Function code
+ * @dev_hdl: Device handle
+ * @r2: Reserved
+ */
+struct leapraid_sas_io_unit_ctrl_req {
+ u8 op;
+ u8 r1[2];
+ u8 func;
+ __le16 dev_hdl;
+ u8 r2[38];
+};
+
+#endif /* LEAPRAID_H */
diff --git a/drivers/scsi/leapraid/leapraid_app.c b/drivers/scsi/leapraid/leapraid_app.c
new file mode 100644
index 000000000000..f838bd5aa20e
--- /dev/null
+++ b/drivers/scsi/leapraid/leapraid_app.c
@@ -0,0 +1,675 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 LeapIO Tech Inc.
+ *
+ * LeapRAID Storage and RAID Controller driver.
+ */
+
+#include <linux/compat.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+
+#include "leapraid_func.h"
+
+/* ioctl device file */
+#define LEAPRAID_DEV_NAME "leapraid_ctl"
+
+/* ioctl version */
+#define LEAPRAID_IOCTL_VERSION 0x07
+
+/* ioctl command */
+#define LEAPRAID_ADAPTER_INFO 17
+#define LEAPRAID_COMMAND 20
+#define LEAPRAID_EVENTQUERY 21
+#define LEAPRAID_EVENTREPORT 23
+
+/**
+ * struct leapraid_ioctl_header - IOCTL command header
+ * @adapter_id: Adapter identifier
+ * @port_number: Port identifier
+ * @max_data_size: Maximum data size for transfer
+ */
+struct leapraid_ioctl_header {
+ u32 adapter_id;
+ u32 port_number;
+ u32 max_data_size;
+};
+
+/**
+ * struct leapraid_ioctl_diag_reset - Diagnostic reset request
+ * @hdr: Common IOCTL header
+ */
+struct leapraid_ioctl_diag_reset {
+ struct leapraid_ioctl_header hdr;
+};
+
+/**
+ * struct leapraid_ioctl_pci_info - PCI device information
+ * @u: Union holding PCI bus/device/function information
+ * @u.bits.dev: PCI device number
+ * @u.bits.func: PCI function number
+ * @u.bits.bus: PCI bus number
+ * @u.word: Combined representation of PCI BDF
+ * @seg_id: PCI segment identifier
+ */
+struct leapraid_ioctl_pci_info {
+ union {
+ struct {
+ u32 dev:5;
+ u32 func:3;
+ u32 bus:24;
+ } bits;
+ u32 word;
+ } u;
+ u32 seg_id;
+};
+
+/**
+ * struct leapraid_ioctl_adapter_info - Adapter information for IOCTL
+ * @hdr: IOCTL header
+ * @adapter_type: Adapter type identifier
+ * @port_number: Port number
+ * @pci_id: PCI device ID
+ * @revision: Revision number
+ * @sub_dev: Subsystem device ID
+ * @sub_vendor: Subsystem vendor ID
+ * @r0: Reserved
+ * @fw_ver: Firmware version
+ * @bios_ver: BIOS version
+ * @driver_ver: Driver version
+ * @r1: Reserved
+ * @scsi_id: SCSI ID
+ * @r2: Reserved
+ * @pci_info: PCI information structure
+ */
+struct leapraid_ioctl_adapter_info {
+ struct leapraid_ioctl_header hdr;
+ u32 adapter_type;
+ u32 port_number;
+ u32 pci_id;
+ u32 revision;
+ u32 sub_dev;
+ u32 sub_vendor;
+ u32 r0;
+ u32 fw_ver;
+ u32 bios_ver;
+ u8 driver_ver[32];
+ u8 r1;
+ u8 scsi_id;
+ u16 r2;
+ struct leapraid_ioctl_pci_info pci_info;
+};
+
+/**
+ * struct leapraid_ioctl_command - IOCTL command structure
+ * @hdr: IOCTL header
+ * @timeout: Command timeout
+ * @rep_msg_buf_ptr: User pointer to reply message buffer
+ * @c2h_buf_ptr: User pointer to card-to-host data buffer
+ * @h2c_buf_ptr: User pointer to host-to-card data buffer
+ * @sense_data_ptr: User pointer to sense data buffer
+ * @max_rep_bytes: Maximum reply bytes
+ * @c2h_size: Card-to-host data size
+ * @h2c_size: Host-to-card data size
+ * @max_sense_bytes: Maximum sense data bytes
+ * @data_sge_offset: Data SGE offset
+ * @mf: Message frame data (flexible array)
+ */
+struct leapraid_ioctl_command {
+ struct leapraid_ioctl_header hdr;
+ u32 timeout;
+ void __user *rep_msg_buf_ptr;
+ void __user *c2h_buf_ptr;
+ void __user *h2c_buf_ptr;
+ void __user *sense_data_ptr;
+ u32 max_rep_bytes;
+ u32 c2h_size;
+ u32 h2c_size;
+ u32 max_sense_bytes;
+ u32 data_sge_offset;
+ u8 mf[];
+};
+
+static struct leapraid_adapter *leapraid_ctl_lookup_adapter(int adapter_id)
+{
+ struct leapraid_adapter *adapter;
+
+ spin_lock(&leapraid_adapter_lock);
+ list_for_each_entry(adapter, &leapraid_adapter_list, list) {
+ if (adapter->adapter_attr.id == adapter_id) {
+ spin_unlock(&leapraid_adapter_lock);
+ return adapter;
+ }
+ }
+ spin_unlock(&leapraid_adapter_lock);
+
+ return NULL;
+}
+
+static void leapraid_cli_scsiio_cmd(struct leapraid_adapter *adapter,
+ struct leapraid_req *ctl_sp_mpi_req, u16 taskid,
+ dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size,
+ u16 dev_hdl, void *psge)
+{
+ struct leapraid_mpi_scsiio_req *scsiio_request =
+ (struct leapraid_mpi_scsiio_req *)ctl_sp_mpi_req;
+
+ scsiio_request->sense_buffer_len = SCSI_SENSE_BUFFERSIZE;
+ scsiio_request->sense_buffer_low_add =
+ leapraid_get_sense_buffer_dma(adapter, taskid);
+	memset(&adapter->driver_cmds.ctl_cmd.sense, 0, SCSI_SENSE_BUFFERSIZE);
+ leapraid_build_ieee_sg(adapter, psge, h2c_dma_addr,
+ h2c_size, c2h_dma_addr, c2h_size);
+ if (scsiio_request->func == LEAPRAID_FUNC_SCSIIO_REQ)
+ leapraid_fire_scsi_io(adapter, taskid, dev_hdl);
+ else
+ leapraid_fire_task(adapter, taskid);
+}
+
+static void leapraid_ctl_smp_passthrough_cmd(struct leapraid_adapter *adapter,
+ struct leapraid_req *ctl_sp_mpi_req,
+ u16 taskid,
+ dma_addr_t h2c_dma_addr,
+ size_t h2c_size,
+ dma_addr_t c2h_dma_addr,
+ size_t c2h_size,
+ void *psge, void *h2c)
+{
+ struct leapraid_smp_passthrough_req *smp_pt_req =
+ (struct leapraid_smp_passthrough_req *)ctl_sp_mpi_req;
+ u8 *data;
+
+ if (!adapter->adapter_attr.enable_mp)
+ smp_pt_req->physical_port = LEAPRAID_DISABLE_MP_PORT_ID;
+ if (smp_pt_req->passthrough_flg & LEAPRAID_SMP_PT_FLAG_SGL_PTR)
+ data = (u8 *)&smp_pt_req->sgl;
+ else
+ data = h2c;
+
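+ /*
+  * data[1] and data[10] index into the raw SMP request frame: byte 1
+  * is the SMP function code and byte 10 is taken to be the phy
+  * operation (offsets assumed from the SMP frame layout). Operations
+  * 1 and 2 imply a phy reset, so note that a link reset is in flight.
+  */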
+ if (data[1] == LEAPRAID_SMP_FN_REPORT_PHY_ERR_LOG &&
+ (data[10] == 1 || data[10] == 2))
+ adapter->reset_desc.adapter_link_resetting = true;
+ leapraid_build_ieee_sg(adapter, psge, h2c_dma_addr,
+ h2c_size, c2h_dma_addr, c2h_size);
+ leapraid_fire_task(adapter, taskid);
+}
+
+static void leapraid_ctl_fire_ieee_cmd(struct leapraid_adapter *adapter,
+ dma_addr_t h2c_dma_addr,
+ size_t h2c_size,
+ dma_addr_t c2h_dma_addr,
+ size_t c2h_size,
+ void *psge, u16 taskid)
+{
+ leapraid_build_ieee_sg(adapter, psge, h2c_dma_addr, h2c_size,
+ c2h_dma_addr, c2h_size);
+ leapraid_fire_task(adapter, taskid);
+}
+
+static void leapraid_ctl_sata_passthrough_cmd(struct leapraid_adapter *adapter,
+ dma_addr_t h2c_dma_addr,
+ size_t h2c_size,
+ dma_addr_t c2h_dma_addr,
+ size_t c2h_size,
+ void *psge, u16 taskid)
+{
+ leapraid_ctl_fire_ieee_cmd(adapter, h2c_dma_addr,
+ h2c_size, c2h_dma_addr,
+ c2h_size, psge, taskid);
+}
+
+static void leapraid_ctl_load_fw_cmd(struct leapraid_adapter *adapter,
+ dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size,
+ void *psge, u16 taskid)
+{
+ leapraid_ctl_fire_ieee_cmd(adapter, h2c_dma_addr,
+ h2c_size, c2h_dma_addr,
+ c2h_size, psge, taskid);
+}
+
+static void leapraid_ctl_fire_mpi_cmd(struct leapraid_adapter *adapter,
+ dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size,
+ void *psge, u16 taskid)
+{
+ leapraid_build_mpi_sg(adapter, psge, h2c_dma_addr,
+ h2c_size, c2h_dma_addr, c2h_size);
+ leapraid_fire_task(adapter, taskid);
+}
+
+static void leapraid_ctl_sas_io_unit_ctrl_cmd(struct leapraid_adapter *adapter,
+ struct leapraid_req *ctl_sp_mpi_req,
+ dma_addr_t h2c_dma_addr,
+ size_t h2c_size,
+ dma_addr_t c2h_dma_addr,
+ size_t c2h_size,
+ void *psge, u16 taskid)
+{
+ struct leapraid_sas_io_unit_ctrl_req *sas_io_unit_ctrl_req =
+ (struct leapraid_sas_io_unit_ctrl_req *)ctl_sp_mpi_req;
+
+ if (sas_io_unit_ctrl_req->op == LEAPRAID_SAS_OP_PHY_HARD_RESET ||
+ sas_io_unit_ctrl_req->op == LEAPRAID_SAS_OP_PHY_LINK_RESET)
+ adapter->reset_desc.adapter_link_resetting = true;
+ leapraid_ctl_fire_mpi_cmd(adapter, h2c_dma_addr,
+ h2c_size, c2h_dma_addr,
+ c2h_size, psge, taskid);
+}
+
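+/*
+ * Core of the LEAPRAID_COMMAND ioctl: validate the user-supplied message
+ * frame, stage optional host-to-card and card-to-host DMA buffers, dispatch
+ * by MPI function code, wait for completion, then copy reply, data and
+ * sense back to user space. A timed-out command escalates to a target or
+ * full adapter reset.
+ */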
+static long leapraid_ctl_do_command(struct leapraid_adapter *adapter,
+ struct leapraid_ioctl_command *karg,
+ void __user *mf)
+{
+ struct leapraid_req *leap_mpi_req = NULL;
+ struct leapraid_req *ctl_sp_mpi_req = NULL;
+ u16 taskid;
+ void *h2c = NULL;
+ size_t h2c_size = 0;
+ dma_addr_t h2c_dma_addr = 0;
+ void *c2h = NULL;
+ size_t c2h_size = 0;
+ dma_addr_t c2h_dma_addr = 0;
+ void *psge;
+ unsigned long timeout;
+ u16 dev_hdl = LEAPRAID_INVALID_DEV_HANDLE;
+ bool issue_reset = false;
+ u32 sz;
+ long rc = 0;
+
+ rc = leapraid_check_adapter_is_op(adapter);
+ if (rc)
+ goto out;
+
+ leap_mpi_req = kzalloc(LEAPRAID_REQUEST_SIZE, GFP_KERNEL);
+ if (!leap_mpi_req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (karg->data_sge_offset > ((UINT_MAX) / LEAPRAID_SGE_OFFSET_SIZE) ||
+     karg->data_sge_offset * LEAPRAID_SGE_OFFSET_SIZE > LEAPRAID_REQUEST_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (copy_from_user(leap_mpi_req, mf,
+ karg->data_sge_offset * LEAPRAID_SGE_OFFSET_SIZE)) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ taskid = adapter->driver_cmds.ctl_cmd.taskid;
+
+ adapter->driver_cmds.ctl_cmd.status = LEAPRAID_CMD_PENDING;
+ memset((void *)(&adapter->driver_cmds.ctl_cmd.reply), 0,
+ LEAPRAID_REPLY_SIEZ);
+ ctl_sp_mpi_req = leapraid_get_task_desc(adapter, taskid);
+ memset(ctl_sp_mpi_req, 0, LEAPRAID_REQUEST_SIZE);
+ memcpy(ctl_sp_mpi_req,
+ leap_mpi_req,
+ karg->data_sge_offset * LEAPRAID_SGE_OFFSET_SIZE);
+
+ if (ctl_sp_mpi_req->func == LEAPRAID_FUNC_SCSIIO_REQ ||
+ ctl_sp_mpi_req->func == LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH ||
+ ctl_sp_mpi_req->func == LEAPRAID_FUNC_SATA_PASSTHROUGH) {
+ dev_hdl = le16_to_cpu(ctl_sp_mpi_req->func_dep1);
+ if (!dev_hdl || dev_hdl > adapter->adapter_attr.features.max_dev_handle) {
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (WARN_ON(ctl_sp_mpi_req->func == LEAPRAID_FUNC_SCSI_TMF)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ h2c_size = karg->h2c_size;
+ c2h_size = karg->c2h_size;
+ if (h2c_size) {
+ h2c = dma_alloc_coherent(&adapter->pdev->dev, h2c_size,
+ &h2c_dma_addr, GFP_ATOMIC);
+ if (!h2c) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (copy_from_user(h2c, karg->h2c_buf_ptr, h2c_size)) {
+ rc = -EFAULT;
+ goto out;
+ }
+ }
+ if (c2h_size) {
+ c2h = dma_alloc_coherent(&adapter->pdev->dev,
+ c2h_size, &c2h_dma_addr, GFP_ATOMIC);
+ if (!c2h) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ }
+
+ psge = (void *)ctl_sp_mpi_req + (karg->data_sge_offset *
+ LEAPRAID_SGE_OFFSET_SIZE);
+ init_completion(&adapter->driver_cmds.ctl_cmd.done);
+
+ switch (ctl_sp_mpi_req->func) {
+ case LEAPRAID_FUNC_SCSIIO_REQ:
+ case LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH:
+ if (test_bit(dev_hdl, (unsigned long *)adapter->dev_topo.dev_removing)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ leapraid_ctl_scsiio_cmd(adapter, ctl_sp_mpi_req, taskid,
+ h2c_dma_addr, h2c_size,
+ c2h_dma_addr, c2h_size,
+ dev_hdl, psge);
+ break;
+ case LEAPRAID_FUNC_SMP_PASSTHROUGH:
+ if (!h2c) {
+ rc = -EINVAL;
+ goto out;
+ }
+ leapraid_ctl_smp_passthrough_cmd(adapter,
+ ctl_sp_mpi_req, taskid,
+ h2c_dma_addr, h2c_size,
+ c2h_dma_addr, c2h_size,
+ psge, h2c);
+ break;
+ case LEAPRAID_FUNC_SATA_PASSTHROUGH:
+ if (test_bit(dev_hdl, (unsigned long *)adapter->dev_topo.dev_removing)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ leapraid_ctl_sata_passthrough_cmd(adapter, h2c_dma_addr,
+ h2c_size, c2h_dma_addr,
+ c2h_size, psge, taskid);
+ break;
+ case LEAPRAID_FUNC_FW_DOWNLOAD:
+ case LEAPRAID_FUNC_FW_UPLOAD:
+ leapraid_ctl_load_fw_cmd(adapter, h2c_dma_addr,
+ h2c_size, c2h_dma_addr,
+ c2h_size, psge, taskid);
+ break;
+ case LEAPRAID_FUNC_SAS_IO_UNIT_CTRL:
+ leapraid_ctl_sas_io_unit_ctrl_cmd(adapter, ctl_sp_mpi_req,
+ h2c_dma_addr, h2c_size,
+ c2h_dma_addr, c2h_size,
+ psge, taskid);
+ break;
+ default:
+ leapraid_ctl_fire_mpi_cmd(adapter, h2c_dma_addr,
+ h2c_size, c2h_dma_addr,
+ c2h_size, psge, taskid);
+ break;
+ }
+
+ timeout = karg->timeout;
+ if (timeout < LEAPRAID_CTL_CMD_TIMEOUT)
+ timeout = LEAPRAID_CTL_CMD_TIMEOUT;
+ wait_for_completion_timeout(&adapter->driver_cmds.ctl_cmd.done,
+ timeout * HZ);
+
+ if ((leap_mpi_req->func == LEAPRAID_FUNC_SMP_PASSTHROUGH ||
+ leap_mpi_req->func == LEAPRAID_FUNC_SAS_IO_UNIT_CTRL) &&
+ adapter->reset_desc.adapter_link_resetting) {
+ adapter->reset_desc.adapter_link_resetting = false;
+ }
+ if (!(adapter->driver_cmds.ctl_cmd.status & LEAPRAID_CMD_DONE)) {
+ issue_reset =
+ leapraid_check_reset(
+ adapter->driver_cmds.ctl_cmd.status);
+ goto reset;
+ }
+
+ if (c2h_size) {
+ if (copy_to_user(karg->c2h_buf_ptr, c2h, c2h_size)) {
+ rc = -ENODATA;
+ goto out;
+ }
+ }
+ if (karg->max_rep_bytes) {
+ sz = min_t(u32, karg->max_rep_bytes, LEAPRAID_REPLY_SIEZ);
+ if (copy_to_user(karg->rep_msg_buf_ptr,
+ (void *)&adapter->driver_cmds.ctl_cmd.reply,
+ sz)) {
+ rc = -ENODATA;
+ goto out;
+ }
+ }
+
+ if (karg->max_sense_bytes &&
+ (leap_mpi_req->func == LEAPRAID_FUNC_SCSIIO_REQ ||
+ leap_mpi_req->func == LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH)) {
+ if (!karg->sense_data_ptr)
+ goto out;
+
+ sz = min_t(u32, karg->max_sense_bytes, SCSI_SENSE_BUFFERSIZE);
+ if (copy_to_user(karg->sense_data_ptr,
+ (void *)&adapter->driver_cmds.ctl_cmd.sense,
+ sz)) {
+ rc = -ENODATA;
+ goto out;
+ }
+ }
+reset:
+ if (issue_reset) {
+ rc = -ENODATA;
+ if (leap_mpi_req->func == LEAPRAID_FUNC_SCSIIO_REQ ||
+ leap_mpi_req->func == LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH ||
+ leap_mpi_req->func == LEAPRAID_FUNC_SATA_PASSTHROUGH) {
+ dev_err(&adapter->pdev->dev,
+ "fire tgt reset: hdl=0x%04x\n",
+ le16_to_cpu(leap_mpi_req->func_dep1));
+ leapraid_issue_locked_tm(adapter,
+ le16_to_cpu(leap_mpi_req->func_dep1), 0, 0, 0,
+ LEAPRAID_TM_TASKTYPE_TARGET_RESET, taskid,
+ LEAPRAID_TM_MSGFLAGS_LINK_RESET);
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, FULL_RESET);
+ }
+ }
+out:
+ if (c2h)
+ dma_free_coherent(&adapter->pdev->dev, c2h_size,
+ c2h, c2h_dma_addr);
+ if (h2c)
+ dma_free_coherent(&adapter->pdev->dev, h2c_size,
+ h2c, h2c_dma_addr);
+ kfree(leap_mpi_req);
+ adapter->driver_cmds.ctl_cmd.status = LEAPRAID_CMD_NOT_USED;
+ return rc;
+}
+
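+/*
+ * LEAPRAID_ADAPTER_INFO handler: snapshot the PCI identity, firmware and
+ * BIOS versions and the "name-version" driver string into a kernel buffer
+ * and copy it out to user space in one piece.
+ */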
+static long leapraid_ctl_get_adapter_info(struct leapraid_adapter *adapter,
+ void __user *arg)
+{
+ struct leapraid_ioctl_adapter_info *karg;
+ ssize_t __maybe_unused ret;
+ u8 revision;
+
+ karg = kzalloc(sizeof(*karg), GFP_KERNEL);
+ if (!karg)
+ return -ENOMEM;
+
+ pci_read_config_byte(adapter->pdev, PCI_CLASS_REVISION, &revision);
+ karg->revision = revision;
+ karg->pci_id = adapter->pdev->device;
+ karg->sub_dev = adapter->pdev->subsystem_device;
+ karg->sub_vendor = adapter->pdev->subsystem_vendor;
+ karg->pci_info.u.bits.bus = adapter->pdev->bus->number;
+ karg->pci_info.u.bits.dev = PCI_SLOT(adapter->pdev->devfn);
+ karg->pci_info.u.bits.func = PCI_FUNC(adapter->pdev->devfn);
+ karg->pci_info.seg_id = pci_domain_nr(adapter->pdev->bus);
+ karg->fw_ver = adapter->adapter_attr.features.fw_version;
+ ret = strscpy(karg->driver_ver, LEAPRAID_DRIVER_NAME,
+ sizeof(karg->driver_ver));
+ strcat(karg->driver_ver, "-");
+ strcat(karg->driver_ver, LEAPRAID_DRIVER_VERSION);
+ karg->adapter_type = LEAPRAID_IOCTL_VERSION;
+ karg->bios_ver = adapter->adapter_attr.bios_version;
+ if (copy_to_user(arg, karg,
+ sizeof(struct leapraid_ioctl_adapter_info))) {
+ kfree(karg);
+ return -EFAULT;
+ }
+
+ kfree(karg);
+ return 0;
+}
+
+static long leapraid_ctl_ioctl_main(struct file *file, unsigned int cmd,
+ void __user *arg, u8 compat)
+{
+ struct leapraid_ioctl_header ioctl_header;
+ struct leapraid_adapter *adapter;
+ long rc = -ENOIOCTLCMD;
+ int count;
+
+ if (copy_from_user(&ioctl_header, (char __user *)arg,
+ sizeof(struct leapraid_ioctl_header)))
+ return -EFAULT;
+
+ adapter = leapraid_ctl_lookup_adapter(ioctl_header.adapter_id);
+ if (!adapter)
+ return -ENODEV;
+
+ mutex_lock(&adapter->access_ctrl.pci_access_lock);
+
+ rc = leapraid_check_adapter_is_op(adapter);
+ if (rc)
+ goto out;
+
+ count = LEAPRAID_WAIT_SHOST_RECOVERY;
+ while (count--) {
+ if (!adapter->access_ctrl.shost_recovering)
+ break;
+ ssleep(1);
+ }
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering ||
+ adapter->scan_dev_desc.driver_loading ||
+ adapter->access_ctrl.host_removing) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if (file->f_flags & O_NONBLOCK) {
+ if (!mutex_trylock(&adapter->driver_cmds.ctl_cmd.mutex)) {
+ rc = -EAGAIN;
+ goto out;
+ }
+ } else if (mutex_lock_interruptible(&adapter->driver_cmds.ctl_cmd.mutex)) {
+ rc = -ERESTARTSYS;
+ goto out;
+ }
+
+ switch (_IOC_NR(cmd)) {
+ case LEAPRAID_ADAPTER_INFO:
+ if (_IOC_SIZE(cmd) == sizeof(struct leapraid_ioctl_adapter_info))
+ rc = leapraid_ctl_get_adapter_info(adapter, arg);
+ break;
+ case LEAPRAID_COMMAND:
+ {
+ struct leapraid_ioctl_command __user *uarg;
+ struct leapraid_ioctl_command karg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ rc = -EFAULT;
+ break;
+ }
+
+ if (karg.hdr.adapter_id != ioctl_header.adapter_id) {
+ rc = -EINVAL;
+ break;
+ }
+
+ if (_IOC_SIZE(cmd) == sizeof(struct leapraid_ioctl_command)) {
+ uarg = arg;
+ rc = leapraid_ctl_do_command(adapter, &karg,
+ &uarg->mf);
+ }
+ break;
+ }
+ case LEAPRAID_EVENTQUERY:
+ case LEAPRAID_EVENTREPORT:
+ rc = 0;
+ break;
+ default:
+ pr_err("unknown ioctl opcode=0x%08x\n", cmd);
+ break;
+ }
+ mutex_unlock(&adapter->driver_cmds.ctl_cmd.mutex);
+
+out:
+ mutex_unlock(&adapter->access_ctrl.pci_access_lock);
+ return rc;
+}
+
+static long leapraid_ctl_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return leapraid_ctl_ioctl_main(file, cmd,
+ (void __user *)arg, 0);
+}
+
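+/*
+ * Map the firmware log buffer into user space. Note this always maps the
+ * buffer of the first adapter on the global list; per-adapter selection,
+ * if any, presumably happens elsewhere (not visible in this function).
+ */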
+static int leapraid_fw_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct leapraid_adapter *adapter;
+ unsigned long length;
+ unsigned long pfn;
+
+ length = vma->vm_end - vma->vm_start;
+
+ adapter = list_first_entry(&leapraid_adapter_list,
+ struct leapraid_adapter, list);
+
+ if (length > (LEAPRAID_SYS_LOG_BUF_SIZE +
+ LEAPRAID_SYS_LOG_BUF_RESERVE)) {
+ dev_err(&adapter->pdev->dev,
+ "requested mapping size is too large!\n");
+ return -EINVAL;
+ }
+
+ if (!adapter->fw_log_desc.fw_log_buffer) {
+ dev_err(&adapter->pdev->dev, "no log buffer!\n");
+ return -EINVAL;
+ }
+
+ pfn = virt_to_phys(adapter->fw_log_desc.fw_log_buffer) >> PAGE_SHIFT;
+
+ if (remap_pfn_range(vma, vma->vm_start, pfn, length,
+ vma->vm_page_prot)) {
+ dev_err(&adapter->pdev->dev,
+ "failed to map memory to user space!\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static const struct file_operations leapraid_ctl_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = leapraid_ctl_ioctl,
+ .mmap = leapraid_fw_mmap,
+};
+
+static struct miscdevice leapraid_ctl_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = LEAPRAID_DEV_NAME,
+ .fops = &leapraid_ctl_fops,
+};
+
+void leapraid_ctl_init(void)
+{
+ if (misc_register(&leapraid_ctl_dev) < 0)
+ pr_err("%s can't register misc device\n", LEAPRAID_DRIVER_NAME);
+}
+
+void leapraid_ctl_exit(void)
+{
+ misc_deregister(&leapraid_ctl_dev);
+}
diff --git a/drivers/scsi/leapraid/leapraid_func.c b/drivers/scsi/leapraid/leapraid_func.c
new file mode 100644
index 000000000000..c83c30f56805
--- /dev/null
+++ b/drivers/scsi/leapraid/leapraid_func.c
@@ -0,0 +1,8264 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 LeapIO Tech Inc.
+ *
+ * LeapRAID Storage and RAID Controller driver.
+ */
+
+#include <linux/module.h>
+
+#include "leapraid_func.h"
+
+static int msix_disable;
+module_param(msix_disable, int, 0444);
+MODULE_PARM_DESC(msix_disable,
+ "disable msix routed interrupts (default=0)");
+
+static int smart_poll;
+module_param(smart_poll, int, 0444);
+MODULE_PARM_DESC(smart_poll,
+ "check SATA drive health via SMART polling: (default=0)");
+
+static int interrupt_mode;
+module_param(interrupt_mode, int, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+ "intr mode: 0 for MSI-X, 1 for MSI, 2 for legacy. (default=0)");
+
+static int max_msix_vectors = -1;
+module_param(max_msix_vectors, int, 0444);
+MODULE_PARM_DESC(max_msix_vectors, "max msix vectors (default=-1)");
+
+static void leapraid_remove_device(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev);
+static void leapraid_set_led(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev, bool on);
+static void leapraid_ublk_io_dev(struct leapraid_adapter *adapter,
+ u64 sas_address,
+ struct leapraid_card_port *port);
+static int leapraid_make_adapter_available(struct leapraid_adapter *adapter);
+static int leapraid_fw_log_init(struct leapraid_adapter *adapter);
+static int leapraid_make_adapter_ready(struct leapraid_adapter *adapter,
+ enum reset_type type);
+
+static inline bool leapraid_is_end_dev(u32 dev_type)
+{
+ return (dev_type & LEAPRAID_DEVTYP_END_DEV) &&
+ ((dev_type & LEAPRAID_DEVTYP_SSP_TGT) ||
+ (dev_type & LEAPRAID_DEVTYP_STP_TGT) ||
+ (dev_type & LEAPRAID_DEVTYP_SATA_DEV));
+}
+
+bool leapraid_pci_removed(struct leapraid_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u32 vendor_id;
+
+ if (pci_bus_read_config_dword(pdev->bus, pdev->devfn, PCI_VENDOR_ID,
+ &vendor_id))
+ return true;
+
+ return ((vendor_id & LEAPRAID_PCI_VENDOR_ID_MASK) !=
+ LEAPRAID_VENDOR_ID);
+}
+
+static bool leapraid_pci_active(struct leapraid_adapter *adapter)
+{
+ return !(adapter->access_ctrl.pcie_recovering ||
+ leapraid_pci_removed(adapter));
+}
+
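+/*
+ * Translate the low 32 bits of a reply frame's DMA address, as posted by
+ * the hardware, back into its kernel virtual address in the reply pool.
+ */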
+void *leapraid_get_reply_vaddr(struct leapraid_adapter *adapter, u32 rep_paddr)
+{
+ if (!rep_paddr)
+ return NULL;
+
+ return adapter->mem_desc.rep_msg +
+ (rep_paddr - (u32)adapter->mem_desc.rep_msg_dma);
+}
+
+void *leapraid_get_task_desc(struct leapraid_adapter *adapter, u16 taskid)
+{
+ return (void *)(adapter->mem_desc.task_desc +
+ (taskid * LEAPRAID_REQUEST_SIZE));
+}
+
+void *leapraid_get_sense_buffer(struct leapraid_adapter *adapter, u16 taskid)
+{
+ return (void *)(adapter->mem_desc.sense_data +
+ ((taskid - 1) * SCSI_SENSE_BUFFERSIZE));
+}
+
+__le32 leapraid_get_sense_buffer_dma(struct leapraid_adapter *adapter,
+ u16 taskid)
+{
+ return cpu_to_le32(adapter->mem_desc.sense_data_dma +
+ ((taskid - 1) * SCSI_SENSE_BUFFERSIZE));
+}
+
+void leapraid_mask_int(struct leapraid_adapter *adapter)
+{
+ u32 reg;
+
+ adapter->mask_int = true;
+ reg = leapraid_readl(&adapter->iomem_base->host_int_mask);
+ reg |= LEAPRAID_TO_SYS_DB_MASK | LEAPRAID_REPLY_INT_MASK |
+ LEAPRAID_RESET_IRQ_MASK;
+ writel(reg, &adapter->iomem_base->host_int_mask);
+ leapraid_readl(&adapter->iomem_base->host_int_mask);
+}
+
+void leapraid_unmask_int(struct leapraid_adapter *adapter)
+{
+ u32 reg;
+
+ reg = leapraid_readl(&adapter->iomem_base->host_int_mask);
+ reg &= ~LEAPRAID_REPLY_INT_MASK;
+ writel(reg, &adapter->iomem_base->host_int_mask);
+ adapter->mask_int = false;
+}
+
+static void leapraid_flush_io_and_panic(struct leapraid_adapter *adapter)
+{
+ adapter->access_ctrl.adapter_thermal_alert = true;
+ leapraid_smart_polling_stop(adapter);
+ leapraid_fw_log_stop(adapter);
+ leapraid_mq_polling_pause(adapter);
+ leapraid_clean_active_scsi_cmds(adapter);
+}
+
+static void leapraid_check_panic_needed(struct leapraid_adapter *adapter,
+ u32 db, u32 adapter_state)
+{
+ bool fault_1 = adapter_state == LEAPRAID_DB_MASK;
+ bool fault_2 = (adapter_state == LEAPRAID_DB_FAULT) &&
+ ((db & LEAPRAID_DB_DATA_MASK) == LEAPRAID_DB_OVER_TEMPERATURE);
+
+ if (!fault_1 && !fault_2)
+ return;
+
+ if (fault_1)
+ pr_err("%s, doorbell status 0xFFFF!\n", __func__);
+ else
+ pr_err("%s, adapter overheating detected!\n", __func__);
+
+ leapraid_flush_io_and_panic(adapter);
+ panic("%s overheating detected, panic now!!!\n", __func__);
+}
+
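+/*
+ * Read the doorbell register and extract the adapter state bits. As a side
+ * effect, a dead doorbell (reading all ones) or an over-temperature fault
+ * flushes outstanding I/O and panics the system.
+ */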
+u32 leapraid_get_adapter_state(struct leapraid_adapter *adapter)
+{
+ u32 db;
+ u32 adapter_state;
+
+ db = leapraid_readl(&adapter->iomem_base->db);
+ adapter_state = db & LEAPRAID_DB_MASK;
+ leapraid_check_panic_needed(adapter, db, adapter_state);
+ return adapter_state;
+}
+
+static bool leapraid_wait_adapter_ready(struct leapraid_adapter *adapter)
+{
+ u32 cur_state;
+ u32 cnt = LEAPRAID_ADAPTER_READY_MAX_RETRY;
+
+ do {
+ cur_state = leapraid_get_adapter_state(adapter);
+ if (cur_state == LEAPRAID_DB_READY)
+ return true;
+ if (cur_state == LEAPRAID_DB_FAULT)
+ break;
+ usleep_range(LEAPRAID_ADAPTER_READY_SLEEP_MIN_US,
+ LEAPRAID_ADAPTER_READY_SLEEP_MAX_US);
+ } while (--cnt);
+
+ return false;
+}
+
+static int leapraid_db_wait_int_host(struct leapraid_adapter *adapter)
+{
+ u32 cnt = LEAPRAID_DB_WAIT_MAX_RETRY;
+
+ do {
+ if (leapraid_readl(&adapter->iomem_base->host_int_status) &
+ LEAPRAID_ADAPTER2HOST_DB_STATUS)
+ return 0;
+ udelay(LEAPRAID_DB_WAIT_DELAY_US);
+ } while (--cnt);
+
+ return -EFAULT;
+}
+
+static int leapraid_db_wait_ack_and_clear_int(struct leapraid_adapter *adapter)
+{
+ u32 adapter_state;
+ u32 int_status;
+ u32 cnt;
+
+ cnt = LEAPRAID_ADAPTER_READY_MAX_RETRY;
+ do {
+ int_status =
+ leapraid_readl(&adapter->iomem_base->host_int_status);
+ if (!(int_status & LEAPRAID_HOST2ADAPTER_DB_STATUS)) {
+ return 0;
+ } else if (int_status & LEAPRAID_ADAPTER2HOST_DB_STATUS) {
+ adapter_state = leapraid_get_adapter_state(adapter);
+ if (adapter_state == LEAPRAID_DB_FAULT)
+ return -EFAULT;
+ } else if (int_status == 0xFFFFFFFF) {
+ goto out;
+ }
+
+ usleep_range(LEAPRAID_ADAPTER_READY_SLEEP_MIN_US,
+ LEAPRAID_ADAPTER_READY_SLEEP_MAX_US);
+ } while (--cnt);
+
+out:
+ return -EFAULT;
+}
+
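+/*
+ * Doorbell handshake: announce the request length, stream the request one
+ * dword at a time through the doorbell (waiting for an ack after each
+ * write), then read the reply back one 16-bit word at a time. Any missed
+ * interrupt or ack fails the whole handshake.
+ */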
+static int leapraid_handshake_func(struct leapraid_adapter *adapter,
+ int req_bytes, u32 *req,
+ int rep_bytes, u16 *rep)
+{
+ int failed, i;
+
+ if ((leapraid_readl(&adapter->iomem_base->db) &
+ LEAPRAID_DB_USED)) {
+ dev_err(&adapter->pdev->dev, "doorbell used\n");
+ return -EFAULT;
+ }
+
+ if (leapraid_readl(&adapter->iomem_base->host_int_status) &
+ LEAPRAID_ADAPTER2HOST_DB_STATUS)
+ writel(0, &adapter->iomem_base->host_int_status);
+
+ writel(((LEAPRAID_FUNC_HANDSHAKE << LEAPRAID_DB_FUNC_SHIFT) |
+ ((req_bytes / LEAPRAID_DWORDS_BYTE_SIZE) <<
+ LEAPRAID_DB_ADD_DWORDS_SHIFT)),
+ &adapter->iomem_base->db);
+
+ if (leapraid_db_wait_int_host(adapter)) {
+ dev_err(&adapter->pdev->dev, "%d:wait db interrupt timeout\n",
+ __LINE__);
+ return -EFAULT;
+ }
+
+ writel(0, &adapter->iomem_base->host_int_status);
+
+ if (leapraid_db_wait_ack_and_clear_int(adapter)) {
+ dev_err(&adapter->pdev->dev, "%d:wait ack failure\n",
+ __LINE__);
+ return -EFAULT;
+ }
+
+ for (i = 0, failed = 0;
+ i < req_bytes / LEAPRAID_DWORDS_BYTE_SIZE && !failed;
+ i++) {
+ writel((u32)(req[i]), &adapter->iomem_base->db);
+ if (leapraid_db_wait_ack_and_clear_int(adapter))
+ failed = 1;
+ }
+ if (failed) {
+ dev_err(&adapter->pdev->dev, "%d:wait ack failure\n",
+ __LINE__);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < rep_bytes / LEAPRAID_WORD_BYTE_SIZE; i++) {
+ if (leapraid_db_wait_int_host(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%d:wait db interrupt timeout\n", __LINE__);
+ return -EFAULT;
+ }
+ rep[i] = (u16)(leapraid_readl(&adapter->iomem_base->db)
+ & LEAPRAID_DB_DATA_MASK);
+ writel(0, &adapter->iomem_base->host_int_status);
+ }
+
+ if (leapraid_db_wait_int_host(adapter)) {
+ dev_err(&adapter->pdev->dev, "%d:wait db interrupt timeout\n",
+ __LINE__);
+ return -EFAULT;
+ }
+
+ writel(0, &adapter->iomem_base->host_int_status);
+
+ return 0;
+}
+
+int leapraid_check_adapter_is_op(struct leapraid_adapter *adapter)
+{
+ int wait_count = LEAPRAID_DB_WAIT_OPERATIONAL;
+
+ do {
+ if (leapraid_pci_removed(adapter))
+ return -EFAULT;
+
+ if (leapraid_get_adapter_state(adapter) ==
+ LEAPRAID_DB_OPERATIONAL)
+ return 0;
+
+ dev_info(&adapter->pdev->dev,
+ "waiting for adapter to become op status(cnt=%d)\n",
+ LEAPRAID_DB_WAIT_OPERATIONAL - wait_count);
+
+ ssleep(1);
+ } while (--wait_count);
+
+ dev_err(&adapter->pdev->dev,
+ "adapter failed to become op state, last state=%d\n",
+ leapraid_get_adapter_state(adapter));
+
+ return -EFAULT;
+}
+
+struct leapraid_io_req_tracker *leapraid_get_io_tracker_from_taskid(
+ struct leapraid_adapter *adapter, u16 taskid)
+{
+ struct scsi_cmnd *scmd;
+
+ if (WARN_ON(!taskid))
+ return NULL;
+
+ if (WARN_ON(taskid > adapter->shost->can_queue))
+ return NULL;
+
+ scmd = leapraid_get_scmd_from_taskid(adapter, taskid);
+ if (scmd)
+ return leapraid_get_scmd_priv(scmd);
+
+ return NULL;
+}
+
+static u8 leapraid_get_cb_idx(struct leapraid_adapter *adapter, u16 taskid)
+{
+ struct leapraid_driver_cmd *sp_cmd;
+ u8 cb_idx = 0xFF;
+
+ if (WARN_ON(!taskid))
+ return cb_idx;
+
+ list_for_each_entry(sp_cmd, &adapter->driver_cmds.special_cmd_list,
+ list)
+ if (taskid == sp_cmd->taskid ||
+ taskid == sp_cmd->hp_taskid ||
+ taskid == sp_cmd->inter_taskid)
+ return sp_cmd->cb_idx;
+
+ WARN_ON(cb_idx == 0xFF);
+ return cb_idx;
+}
+
+struct scsi_cmnd *leapraid_get_scmd_from_taskid(
+ struct leapraid_adapter *adapter, u16 taskid)
+{
+ struct leapraid_scsiio_req *leap_mpi_req;
+ struct leapraid_io_req_tracker *st;
+ struct scsi_cmnd *scmd;
+ u32 uniq_tag;
+
+ if (!taskid || taskid > adapter->shost->can_queue)
+ return NULL;
+
+ uniq_tag = taskid - 1;
+ leap_mpi_req = leapraid_get_task_desc(adapter, taskid);
+ if (!leap_mpi_req->dev_hdl)
+ return NULL;
+
+ scmd = scsi_host_find_tag(adapter->shost, uniq_tag);
+ if (scmd) {
+ st = leapraid_get_scmd_priv(scmd);
+ if (st && st->taskid == taskid)
+ return scmd;
+ }
+
+ return NULL;
+}
+
+u16 leapraid_alloc_scsiio_taskid(struct leapraid_adapter *adapter,
+ struct scsi_cmnd *scmd)
+{
+ struct leapraid_io_req_tracker *request;
+ u16 taskid;
+ u32 tag = scmd->request->tag;
+
+ scmd->host_scribble =
+ (unsigned char *)(&adapter->mem_desc.io_tracker[tag]);
+ request = leapraid_get_scmd_priv(scmd);
+ taskid = tag + 1;
+ request->taskid = taskid;
+ request->scmd = scmd;
+ return taskid;
+}
+
+static void leapraid_check_pending_io(struct leapraid_adapter *adapter)
+{
+ if (adapter->access_ctrl.shost_recovering &&
+ adapter->reset_desc.pending_io_cnt) {
+ if (adapter->reset_desc.pending_io_cnt == 1)
+ wake_up(&adapter->reset_desc.reset_wait_queue);
+ adapter->reset_desc.pending_io_cnt--;
+ }
+}
+
+static void leapraid_clear_io_tracker(struct leapraid_adapter *adapter,
+ struct leapraid_io_req_tracker *io_tracker)
+{
+ if (!io_tracker)
+ return;
+
+ if (WARN_ON(io_tracker->taskid == 0))
+ return;
+
+ io_tracker->scmd = NULL;
+}
+
+static bool leapraid_is_fixed_taskid(struct leapraid_adapter *adapter,
+ u16 taskid)
+{
+ return (taskid == adapter->driver_cmds.ctl_cmd.taskid ||
+ taskid == adapter->driver_cmds.driver_scsiio_cmd.taskid ||
+ taskid == adapter->driver_cmds.tm_cmd.hp_taskid ||
+ taskid == adapter->driver_cmds.ctl_cmd.hp_taskid ||
+ taskid == adapter->driver_cmds.scan_dev_cmd.inter_taskid ||
+ taskid == adapter->driver_cmds.timestamp_sync_cmd.inter_taskid ||
+ taskid == adapter->driver_cmds.raid_action_cmd.inter_taskid ||
+ taskid == adapter->driver_cmds.transport_cmd.inter_taskid ||
+ taskid == adapter->driver_cmds.cfg_op_cmd.inter_taskid ||
+ taskid == adapter->driver_cmds.enc_cmd.inter_taskid ||
+ taskid == adapter->driver_cmds.notify_event_cmd.inter_taskid);
+}
+
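+/*
+ * Release a taskid. Taskids reserved for driver-internal commands are
+ * fixed for the adapter's lifetime and are never freed here.
+ */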
+void leapraid_free_taskid(struct leapraid_adapter *adapter, u16 taskid)
+{
+ struct leapraid_io_req_tracker *io_tracker;
+ void *task_desc;
+
+ if (leapraid_is_fixed_taskid(adapter, taskid))
+ return;
+
+ if (taskid <= adapter->shost->can_queue) {
+ io_tracker = leapraid_get_io_tracker_from_taskid(adapter,
+ taskid);
+ if (!io_tracker) {
+ leapraid_check_pending_io(adapter);
+ return;
+ }
+
+ task_desc = leapraid_get_task_desc(adapter, taskid);
+ memset(task_desc, 0, LEAPRAID_REQUEST_SIZE);
+ leapraid_clear_io_tracker(adapter, io_tracker);
+ leapraid_check_pending_io(adapter);
+ }
+}
+
+static u8 leapraid_get_msix_idx(struct leapraid_adapter *adapter,
+ struct scsi_cmnd *scmd)
+{
+ return adapter->notification_desc.msix_cpu_map[raw_smp_processor_id()];
+}
+
+static u8 leapraid_get_and_set_msix_idx_from_taskid(
+ struct leapraid_adapter *adapter, u16 taskid)
+{
+ struct leapraid_io_req_tracker *io_tracker = NULL;
+
+ if (taskid <= adapter->shost->can_queue)
+ io_tracker = leapraid_get_io_tracker_from_taskid(adapter,
+ taskid);
+
+ if (!io_tracker)
+ return leapraid_get_msix_idx(adapter, NULL);
+
+ io_tracker->msix_io = leapraid_get_msix_idx(adapter, io_tracker->scmd);
+
+ return io_tracker->msix_io;
+}
+
+void leapraid_fire_scsi_io(struct leapraid_adapter *adapter, u16 taskid,
+ u16 handle)
+{
+ struct leapraid_atomic_req_desc desc;
+
+ desc.flg = LEAPRAID_REQ_DESC_FLG_SCSI_IO;
+ desc.msix_idx = leapraid_get_and_set_msix_idx_from_taskid(adapter,
+ taskid);
+ desc.taskid = cpu_to_le16(taskid);
+ writel((__force u32)cpu_to_le32(*((u32 *)&desc)),
+ &adapter->iomem_base->atomic_req_desc_post);
+}
+
+void leapraid_fire_hpr_task(struct leapraid_adapter *adapter, u16 taskid,
+ u16 msix_task)
+{
+ struct leapraid_atomic_req_desc desc;
+
+ desc.flg = LEAPRAID_REQ_DESC_FLG_HPR;
+ desc.msix_idx = msix_task;
+ desc.taskid = cpu_to_le16(taskid);
+ writel((__force u32)cpu_to_le32(*((u32 *)&desc)),
+ &adapter->iomem_base->atomic_req_desc_post);
+}
+
+void leapraid_fire_task(struct leapraid_adapter *adapter, u16 taskid)
+{
+ struct leapraid_atomic_req_desc desc;
+
+ desc.flg = LEAPRAID_REQ_DESC_FLG_DFLT_TYPE;
+ desc.msix_idx = leapraid_get_and_set_msix_idx_from_taskid(adapter,
+ taskid);
+ desc.taskid = cpu_to_le16(taskid);
+ writel((__force u32)cpu_to_le32(*((u32 *)&desc)),
+ &adapter->iomem_base->atomic_req_desc_post);
+}
+
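+/*
+ * Complete every outstanding SCSI command during reset or teardown:
+ * DID_NO_CONNECT when the adapter is gone for good (removed, failed reset,
+ * thermal alert), DID_RESET when a retry after recovery makes sense.
+ */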
+void leapraid_clean_active_scsi_cmds(struct leapraid_adapter *adapter)
+{
+ struct leapraid_io_req_tracker *io_tracker;
+ struct scsi_cmnd *scmd;
+ u16 taskid;
+
+ for (taskid = 1; taskid <= adapter->shost->can_queue; taskid++) {
+ scmd = leapraid_get_scmd_from_taskid(adapter, taskid);
+ if (!scmd)
+ continue;
+
+ io_tracker = leapraid_get_scmd_priv(scmd);
+ if (io_tracker && io_tracker->taskid == 0)
+ continue;
+
+ scsi_dma_unmap(scmd);
+ leapraid_clear_io_tracker(adapter, io_tracker);
+ if (!leapraid_pci_active(adapter) ||
+ adapter->reset_desc.adapter_reset_results != 0 ||
+ adapter->access_ctrl.adapter_thermal_alert ||
+ adapter->access_ctrl.host_removing)
+ scmd->result = DID_NO_CONNECT << LEAPRAID_SCSI_HOST_SHIFT;
+ else
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ scmd->scsi_done(scmd);
+ }
+}
+
+static void leapraid_clean_active_driver_cmd(
+ struct leapraid_driver_cmd *driver_cmd)
+{
+ if (driver_cmd->status & LEAPRAID_CMD_PENDING) {
+ driver_cmd->status |= LEAPRAID_CMD_RESET;
+ complete(&driver_cmd->done);
+ }
+}
+
+static void leapraid_clean_active_driver_cmds(struct leapraid_adapter *adapter)
+{
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.timestamp_sync_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.raid_action_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.driver_scsiio_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.tm_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.transport_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.enc_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.notify_event_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.cfg_op_cmd);
+ leapraid_clean_active_driver_cmd(&adapter->driver_cmds.ctl_cmd);
+
+ if (adapter->driver_cmds.scan_dev_cmd.status & LEAPRAID_CMD_PENDING) {
+ adapter->scan_dev_desc.scan_dev_failed = true;
+ adapter->driver_cmds.scan_dev_cmd.status |= LEAPRAID_CMD_RESET;
+ if (adapter->scan_dev_desc.driver_loading) {
+ adapter->scan_dev_desc.scan_start_failed =
+ LEAPRAID_ADAPTER_STATUS_INTERNAL_ERROR;
+ adapter->scan_dev_desc.scan_start = false;
+ } else {
+ complete(&adapter->driver_cmds.scan_dev_cmd.done);
+ }
+ }
+}
+
+static void leapraid_clean_active_cmds(struct leapraid_adapter *adapter)
+{
+ leapraid_clean_active_driver_cmds(adapter);
+ memset(adapter->dev_topo.pending_dev_add, 0,
+ adapter->dev_topo.pending_dev_add_sz);
+ memset(adapter->dev_topo.dev_removing, 0,
+ adapter->dev_topo.dev_removing_sz);
+ leapraid_clean_active_fw_evt(adapter);
+ leapraid_clean_active_scsi_cmds(adapter);
+}
+
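+/*
+ * Handle a target whose firmware handle stopped responding: clear its
+ * pending-add bit, mark the scsi target deleted and invalidate the stale
+ * handle so no new I/O is issued to it.
+ */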
+static void leapraid_tgt_not_responding(struct leapraid_adapter *adapter,
+ u16 hdl)
+{
+ struct leapraid_starget_priv *starget_priv = NULL;
+ struct leapraid_sas_dev *sas_dev = NULL;
+ unsigned long flags = 0;
+ u32 adapter_state = 0;
+
+ if (adapter->access_ctrl.pcie_recovering)
+ return;
+
+ adapter_state = leapraid_get_adapter_state(adapter);
+ if (adapter_state != LEAPRAID_DB_OPERATIONAL)
+ return;
+
+ if (test_bit(hdl, (unsigned long *)adapter->dev_topo.pd_hdls))
+ return;
+
+ clear_bit(hdl, (unsigned long *)adapter->dev_topo.pending_dev_add);
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ if (sas_dev && sas_dev->starget && sas_dev->starget->hostdata) {
+ starget_priv = sas_dev->starget->hostdata;
+ starget_priv->deleted = true;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (starget_priv)
+ starget_priv->hdl = LEAPRAID_INVALID_DEV_HANDLE;
+
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+}
+
+static void leapraid_tgt_rst_send(struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_starget_priv *starget_priv = NULL;
+ struct leapraid_sas_dev *sas_dev = NULL;
+ struct leapraid_card_port *port = NULL;
+ u64 sas_address = 0;
+ unsigned long flags;
+ u32 adapter_state;
+
+ if (adapter->access_ctrl.pcie_recovering)
+ return;
+
+ adapter_state = leapraid_get_adapter_state(adapter);
+ if (adapter_state != LEAPRAID_DB_OPERATIONAL)
+ return;
+
+ if (test_bit(hdl, (unsigned long *)adapter->dev_topo.pd_hdls))
+ return;
+
+ clear_bit(hdl, (unsigned long *)adapter->dev_topo.pending_dev_add);
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ if (sas_dev && sas_dev->starget && sas_dev->starget->hostdata) {
+ starget_priv = sas_dev->starget->hostdata;
+ starget_priv->deleted = true;
+ sas_address = sas_dev->sas_addr;
+ port = sas_dev->card_port;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (starget_priv) {
+ leapraid_ublk_io_dev(adapter, sas_address, port);
+ starget_priv->hdl = LEAPRAID_INVALID_DEV_HANDLE;
+ }
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+}
+
+static inline void leapraid_single_mpi_sg_append(struct leapraid_adapter *adapter,
+ void *sge, u32 flag_and_len,
+ dma_addr_t dma_addr)
+{
+ if (adapter->adapter_attr.use_32_dma_mask) {
+ ((struct leapraid_sge_simple32 *)sge)->flg_and_len =
+ cpu_to_le32(flag_and_len |
+ (LEAPRAID_SGE_FLG_32 |
+ LEAPRAID_SGE_FLG_SYSTEM_ADDR) <<
+ LEAPRAID_SGE_FLG_SHIFT);
+ ((struct leapraid_sge_simple32 *)sge)->addr =
+ cpu_to_le32(dma_addr);
+ } else {
+ ((struct leapraid_sge_simple64 *)sge)->flg_and_len =
+ cpu_to_le32(flag_and_len |
+ (LEAPRAID_SGE_FLG_64 |
+ LEAPRAID_SGE_FLG_SYSTEM_ADDR) <<
+ LEAPRAID_SGE_FLG_SHIFT);
+ ((struct leapraid_sge_simple64 *)sge)->addr =
+ cpu_to_le64(dma_addr);
+ }
+}
+
+static inline void leapraid_single_ieee_sg_append(void *sge, u8 flag,
+ u8 next_chain_offset,
+ u32 len,
+ dma_addr_t dma_addr)
+{
+ ((struct leapraid_chain64_ieee_sg *)sge)->flg = flag;
+ ((struct leapraid_chain64_ieee_sg *)sge)->next_chain_offset =
+ next_chain_offset;
+ ((struct leapraid_chain64_ieee_sg *)sge)->len = cpu_to_le32(len);
+ ((struct leapraid_chain64_ieee_sg *)sge)->addr = cpu_to_le64(dma_addr);
+}
+
+static void leapraid_build_nodata_mpi_sg(struct leapraid_adapter *adapter,
+ void *sge)
+{
+ leapraid_single_mpi_sg_append(adapter,
+ sge,
+ (u32)((LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL |
+ LEAPRAID_SGE_FLG_SIMPLE_ONE) <<
+ LEAPRAID_SGE_FLG_SHIFT),
+ -1);
+}
+
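+/*
+ * Build an MPI SGL for up to two data buffers: a single host-to-card
+ * element, a single card-to-host element, or an h2c element followed by a
+ * terminating c2h element. With no data, a zero-length end-of-list element
+ * is emitted instead.
+ */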
+void leapraid_build_mpi_sg(struct leapraid_adapter *adapter, void *sge,
+ dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size)
+{
+ if (h2c_size && !c2h_size) {
+ leapraid_single_mpi_sg_append(adapter,
+ sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL |
+ LEAPRAID_SGE_FLG_H2C) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ h2c_size,
+ h2c_dma_addr);
+ } else if (!h2c_size && c2h_size) {
+ leapraid_single_mpi_sg_append(adapter,
+ sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ c2h_size,
+ c2h_dma_addr);
+ } else if (h2c_size && c2h_size) {
+ leapraid_single_mpi_sg_append(adapter,
+ sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_H2C) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ h2c_size,
+ h2c_dma_addr);
+ if (adapter->adapter_attr.use_32_dma_mask)
+ sge += sizeof(struct leapraid_sge_simple32);
+ else
+ sge += sizeof(struct leapraid_sge_simple64);
+ leapraid_single_mpi_sg_append(adapter,
+ sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ c2h_size,
+ c2h_dma_addr);
+ } else {
+ return leapraid_build_nodata_mpi_sg(adapter, sge);
+ }
+}
+
+void leapraid_build_ieee_nodata_sg(struct leapraid_adapter *adapter, void *sge)
+{
+ leapraid_single_ieee_sg_append(sge,
+ (LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR |
+ LEAPRAID_IEEE_SGE_FLG_EOL),
+ 0, 0, -1);
+}
+
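+/*
+ * Build the IEEE SGL for a SCSI command. Short scatter lists are embedded
+ * in the request frame; longer ones spill into per-command chain segments,
+ * each chain element pointing at the next segment until the final simple
+ * element carries the end-of-list flag.
+ */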
+int leapraid_build_scmd_ieee_sg(struct leapraid_adapter *adapter,
+ struct scsi_cmnd *scmd, u16 taskid)
+{
+ struct leapraid_scsiio_req *scsiio_req;
+ struct leapraid_io_req_tracker *io_tracker;
+ struct scatterlist *scmd_sg_cur;
+ int sg_entries_left;
+ void *sg_entry_cur;
+ void *host_chain;
+ dma_addr_t host_chain_dma;
+ u8 host_chain_cursor;
+ u32 sg_entries_in_cur_seg;
+ u32 chain_offset_in_cur_seg;
+ u32 chain_len_in_cur_seg;
+
+ io_tracker = leapraid_get_scmd_priv(scmd);
+ scsiio_req = leapraid_get_task_desc(adapter, taskid);
+ scmd_sg_cur = scsi_sglist(scmd);
+ sg_entries_left = scsi_dma_map(scmd);
+ if (sg_entries_left < 0)
+ return -ENOMEM;
+ sg_entry_cur = &scsiio_req->sgl;
+ if (sg_entries_left <= LEAPRAID_SGL_INLINE_THRESHOLD)
+ goto fill_last_seg;
+
+ scsiio_req->chain_offset = LEAPRAID_CHAIN_OFFSET_DWORDS;
+ leapraid_single_ieee_sg_append(sg_entry_cur,
+ LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR,
+ 0, sg_dma_len(scmd_sg_cur),
+ sg_dma_address(scmd_sg_cur));
+ scmd_sg_cur = sg_next(scmd_sg_cur);
+ sg_entry_cur += LEAPRAID_IEEE_SGE64_ENTRY_SIZE;
+ sg_entries_left--;
+
+ host_chain_cursor = 0;
+ host_chain = io_tracker->chain +
+ host_chain_cursor * LEAPRAID_CHAIN_SEG_SIZE;
+ host_chain_dma = io_tracker->chain_dma +
+ host_chain_cursor * LEAPRAID_CHAIN_SEG_SIZE;
+ host_chain_cursor += 1;
+ for (;;) {
+ sg_entries_in_cur_seg =
+ (sg_entries_left <= LEAPRAID_MAX_SGES_IN_CHAIN) ?
+ sg_entries_left : LEAPRAID_MAX_SGES_IN_CHAIN;
+ chain_offset_in_cur_seg =
+ (sg_entries_left == (int)sg_entries_in_cur_seg) ?
+ 0 : sg_entries_in_cur_seg;
+ chain_len_in_cur_seg = sg_entries_in_cur_seg *
+ LEAPRAID_IEEE_SGE64_ENTRY_SIZE;
+ if (chain_offset_in_cur_seg)
+ chain_len_in_cur_seg += LEAPRAID_IEEE_SGE64_ENTRY_SIZE;
+
+ leapraid_single_ieee_sg_append(sg_entry_cur,
+ LEAPRAID_IEEE_SGE_FLG_CHAIN_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR,
+ chain_offset_in_cur_seg, chain_len_in_cur_seg,
+ host_chain_dma);
+ sg_entry_cur = host_chain;
+ if (!chain_offset_in_cur_seg)
+ goto fill_last_seg;
+
+ while (sg_entries_in_cur_seg) {
+ leapraid_single_ieee_sg_append(sg_entry_cur,
+ LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR,
+ 0, sg_dma_len(scmd_sg_cur),
+ sg_dma_address(scmd_sg_cur));
+ scmd_sg_cur = sg_next(scmd_sg_cur);
+ sg_entry_cur += LEAPRAID_IEEE_SGE64_ENTRY_SIZE;
+ sg_entries_left--;
+ sg_entries_in_cur_seg--;
+ }
+ host_chain = io_tracker->chain +
+ host_chain_cursor * LEAPRAID_CHAIN_SEG_SIZE;
+ host_chain_dma = io_tracker->chain_dma +
+ host_chain_cursor * LEAPRAID_CHAIN_SEG_SIZE;
+ host_chain_cursor += 1;
+ }
+
+fill_last_seg:
+ while (sg_entries_left > 0) {
+ u32 flags = LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR;
+ if (sg_entries_left == 1)
+ flags |= LEAPRAID_IEEE_SGE_FLG_EOL;
+ leapraid_single_ieee_sg_append(sg_entry_cur, flags,
+ 0, sg_dma_len(scmd_sg_cur),
+ sg_dma_address(scmd_sg_cur));
+ scmd_sg_cur = sg_next(scmd_sg_cur);
+ sg_entry_cur += LEAPRAID_IEEE_SGE64_ENTRY_SIZE;
+ sg_entries_left--;
+ }
+ return 0;
+}
+
+void leapraid_build_ieee_sg(struct leapraid_adapter *adapter, void *sge,
+ dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size)
+{
+ if (h2c_size && !c2h_size) {
+ leapraid_single_ieee_sg_append(sge,
+ LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_EOL |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR,
+ 0,
+ h2c_size,
+ h2c_dma_addr);
+ } else if (!h2c_size && c2h_size) {
+ leapraid_single_ieee_sg_append(sge,
+ LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_EOL |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR,
+ 0,
+ c2h_size,
+ c2h_dma_addr);
+ } else if (h2c_size && c2h_size) {
+ leapraid_single_ieee_sg_append(sge,
+ LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR,
+ 0,
+ h2c_size,
+ h2c_dma_addr);
+ sge += LEAPRAID_IEEE_SGE64_ENTRY_SIZE;
+ leapraid_single_ieee_sg_append(sge,
+ LEAPRAID_IEEE_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_IEEE_SGE_FLG_SYSTEM_ADDR |
+ LEAPRAID_IEEE_SGE_FLG_EOL,
+ 0,
+ c2h_size,
+ c2h_dma_addr);
+ } else {
+ return leapraid_build_ieee_nodata_sg(adapter, sge);
+ }
+}
+
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_from_tgt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_starget_priv *tgt_priv)
+{
+ assert_spin_locked(&adapter->dev_topo.sas_dev_lock);
+ if (tgt_priv->sas_dev)
+ leapraid_sdev_get(tgt_priv->sas_dev);
+
+ return tgt_priv->sas_dev;
+}
+
+struct leapraid_sas_dev *leapraid_get_sas_dev_from_tgt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_starget_priv *tgt_priv)
+{
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_from_tgt(adapter, tgt_priv);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return sas_dev;
+}
+
+static struct leapraid_card_port *leapraid_get_port_by_id(
+ struct leapraid_adapter *adapter,
+ u8 port_id, bool skip_dirty)
+{
+ struct leapraid_card_port *port;
+ struct leapraid_card_port *dirty_port = NULL;
+
+ if (!adapter->adapter_attr.enable_mp)
+ port_id = LEAPRAID_DISABLE_MP_PORT_ID;
+
+ list_for_each_entry(port, &adapter->dev_topo.card_port_list, list) {
+ if (port->port_id != port_id)
+ continue;
+
+ if (!(port->flg & LEAPRAID_CARD_PORT_FLG_DIRTY))
+ return port;
+
+ if (skip_dirty && !dirty_port)
+ dirty_port = port;
+ }
+
+ if (dirty_port)
+ return dirty_port;
+
+ if (unlikely(!adapter->adapter_attr.enable_mp)) {
+ port = kzalloc(sizeof(*port), GFP_ATOMIC);
+ if (!port)
+ return NULL;
+
+ port->port_id = LEAPRAID_DISABLE_MP_PORT_ID;
+ list_add_tail(&port->list, &adapter->dev_topo.card_port_list);
+ return port;
+ }
+
+ return NULL;
+}
+
+struct leapraid_vphy *leapraid_get_vphy_by_phy(struct leapraid_card_port *port,
+ u32 phy_seq_num)
+{
+ struct leapraid_vphy *vphy;
+
+ if (!port || !port->vphys_mask)
+ return NULL;
+
+ list_for_each_entry(vphy, &port->vphys_list, list) {
+ if (vphy->phy_mask & BIT(phy_seq_num))
+ return vphy;
+ }
+
+ return NULL;
+}
+
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_by_addr_and_rphy(
+ struct leapraid_adapter *adapter, u64 sas_address,
+ struct sas_rphy *rphy)
+{
+ struct leapraid_sas_dev *sas_dev;
+
+ assert_spin_locked(&adapter->dev_topo.sas_dev_lock);
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_list, list)
+ if (sas_dev->sas_addr == sas_address &&
+ sas_dev->rphy == rphy) {
+ leapraid_sdev_get(sas_dev);
+ return sas_dev;
+ }
+
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_init_list,
+ list)
+ if (sas_dev->sas_addr == sas_address &&
+ sas_dev->rphy == rphy) {
+ leapraid_sdev_get(sas_dev);
+ return sas_dev;
+ }
+
+ return NULL;
+}
+
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_by_addr(
+ struct leapraid_adapter *adapter,
+ u64 sas_address, struct leapraid_card_port *port)
+{
+ struct leapraid_sas_dev *sas_dev;
+
+ if (!port)
+ return NULL;
+
+ assert_spin_locked(&adapter->dev_topo.sas_dev_lock);
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_list, list)
+ if (sas_dev->sas_addr == sas_address &&
+ sas_dev->card_port == port) {
+ leapraid_sdev_get(sas_dev);
+ return sas_dev;
+ }
+
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_init_list,
+ list)
+ if (sas_dev->sas_addr == sas_address &&
+ sas_dev->card_port == port) {
+ leapraid_sdev_get(sas_dev);
+ return sas_dev;
+ }
+
+ return NULL;
+}
+
+struct leapraid_sas_dev *leapraid_get_sas_dev_by_addr(
+ struct leapraid_adapter *adapter,
+ u64 sas_address, struct leapraid_card_port *port)
+{
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+
+ if (!port)
+ return NULL;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(adapter, sas_address,
+ port);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return sas_dev;
+}
+
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_by_hdl(
+ struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_sas_dev *sas_dev;
+
+ assert_spin_locked(&adapter->dev_topo.sas_dev_lock);
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_list, list)
+ if (sas_dev->hdl == hdl) {
+ leapraid_sdev_get(sas_dev);
+ return sas_dev;
+ }
+
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_init_list,
+ list)
+ if (sas_dev->hdl == hdl) {
+ leapraid_sdev_get(sas_dev);
+ return sas_dev;
+ }
+
+ return NULL;
+}
+
+struct leapraid_sas_dev *leapraid_get_sas_dev_by_hdl(
+ struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return sas_dev;
+}
+
+void leapraid_sas_dev_remove(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev)
+{
+ unsigned long flags;
+ bool del_from_list;
+
+ if (!sas_dev)
+ return;
+
+ del_from_list = false;
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ if (!list_empty(&sas_dev->list)) {
+ list_del_init(&sas_dev->list);
+ del_from_list = true;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (del_from_list)
+ leapraid_sdev_put(sas_dev);
+}
+
+static void leapraid_sas_dev_remove_by_hdl(struct leapraid_adapter *adapter,
+ u16 hdl)
+{
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ bool del_from_list;
+
+ if (adapter->access_ctrl.shost_recovering)
+ return;
+
+ del_from_list = false;
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ if (sas_dev) {
+ if (!list_empty(&sas_dev->list)) {
+ list_del_init(&sas_dev->list);
+ del_from_list = true;
+ leapraid_sdev_put(sas_dev);
+ }
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (del_from_list) {
+ leapraid_remove_device(adapter, sas_dev);
+ leapraid_sdev_put(sas_dev);
+ }
+}
+
+void leapraid_sas_dev_remove_by_sas_address(struct leapraid_adapter *adapter,
+ u64 sas_address,
+ struct leapraid_card_port *port)
+{
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ bool del_from_list;
+
+ if (adapter->access_ctrl.shost_recovering)
+ return;
+
+ del_from_list = false;
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(adapter, sas_address,
+ port);
+ if (sas_dev) {
+ if (!list_empty(&sas_dev->list)) {
+ list_del_init(&sas_dev->list);
+ del_from_list = true;
+ leapraid_sdev_put(sas_dev);
+ }
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (del_from_list) {
+ leapraid_remove_device(adapter, sas_dev);
+ leapraid_sdev_put(sas_dev);
+ }
+}
+
+struct leapraid_raid_volume *leapraid_raid_volume_find_by_id(
+ struct leapraid_adapter *adapter, uint id, uint channel)
+{
+ struct leapraid_raid_volume *raid_volume;
+
+ list_for_each_entry(raid_volume, &adapter->dev_topo.raid_volume_list,
+ list) {
+ if (raid_volume->id == id &&
+ raid_volume->channel == channel) {
+ return raid_volume;
+ }
+ }
+
+ return NULL;
+}
+
+struct leapraid_raid_volume *leapraid_raid_volume_find_by_hdl(
+ struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_raid_volume *raid_volume;
+
+ list_for_each_entry(raid_volume, &adapter->dev_topo.raid_volume_list,
+ list) {
+ if (raid_volume->hdl == hdl)
+ return raid_volume;
+ }
+
+ return NULL;
+}
+
+static struct leapraid_raid_volume *leapraid_raid_volume_find_by_wwid(
+ struct leapraid_adapter *adapter, u64 wwid)
+{
+ struct leapraid_raid_volume *raid_volume;
+
+ list_for_each_entry(raid_volume, &adapter->dev_topo.raid_volume_list,
+ list) {
+ if (raid_volume->wwid == wwid)
+ return raid_volume;
+ }
+
+ return NULL;
+}
+
+static void leapraid_raid_volume_add(struct leapraid_adapter *adapter,
+ struct leapraid_raid_volume *raid_volume)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ list_add_tail(&raid_volume->list, &adapter->dev_topo.raid_volume_list);
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+}
+
+void leapraid_raid_volume_remove(struct leapraid_adapter *adapter,
+ struct leapraid_raid_volume *raid_volume)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ list_del(&raid_volume->list);
+ kfree(raid_volume);
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+}
+
+static struct leapraid_enc_node *leapraid_enc_find_by_hdl(
+ struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_enc_node *enc_dev;
+
+ list_for_each_entry(enc_dev, &adapter->dev_topo.enc_list, list) {
+ if (le16_to_cpu(enc_dev->pg0.enc_hdl) == hdl)
+ return enc_dev;
+ }
+
+ return NULL;
+}
+
+struct leapraid_topo_node *leapraid_exp_find_by_sas_address(
+ struct leapraid_adapter *adapter,
+ u64 sas_address, struct leapraid_card_port *port)
+{
+ struct leapraid_topo_node *sas_exp;
+
+ if (!port)
+ return NULL;
+
+ list_for_each_entry(sas_exp, &adapter->dev_topo.exp_list, list) {
+ if (sas_exp->sas_address == sas_address &&
+ sas_exp->card_port == port)
+ return sas_exp;
+ }
+
+ return NULL;
+}
+
+bool leapraid_scmd_find_by_tgt(struct leapraid_adapter *adapter, uint id,
+ uint channel)
+{
+ struct scsi_cmnd *scmd;
+ int taskid;
+
+ for (taskid = 1; taskid <= adapter->shost->can_queue; taskid++) {
+ scmd = leapraid_get_scmd_from_taskid(adapter, taskid);
+ if (!scmd)
+ continue;
+
+ if (scmd->device->id == id && scmd->device->channel == channel)
+ return true;
+ }
+
+ return false;
+}
+
+bool leapraid_scmd_find_by_lun(struct leapraid_adapter *adapter, uint id,
+ unsigned int lun, uint channel)
+{
+ struct scsi_cmnd *scmd;
+ int taskid;
+
+ for (taskid = 1; taskid <= adapter->shost->can_queue; taskid++) {
+ scmd = leapraid_get_scmd_from_taskid(adapter, taskid);
+ if (!scmd)
+ continue;
+
+ if (scmd->device->id == id &&
+ scmd->device->channel == channel &&
+ scmd->device->lun == lun)
+ return true;
+ }
+
+ return false;
+}
+
+static struct leapraid_topo_node *leapraid_exp_find_by_hdl(
+ struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_topo_node *sas_exp;
+
+ list_for_each_entry(sas_exp, &adapter->dev_topo.exp_list, list) {
+ if (sas_exp->hdl == hdl)
+ return sas_exp;
+ }
+
+ return NULL;
+}
+
+static enum leapraid_card_port_checking_flg leapraid_get_card_port_feature(
+ struct leapraid_card_port *old_card_port,
+ struct leapraid_card_port *card_port,
+ struct leapraid_card_port_feature *feature)
+{
+ feature->dirty_flg =
+ old_card_port->flg & LEAPRAID_CARD_PORT_FLG_DIRTY;
+ feature->same_addr =
+ old_card_port->sas_address == card_port->sas_address;
+ feature->exact_phy =
+ old_card_port->phy_mask == card_port->phy_mask;
+ feature->phy_overlap =
+ old_card_port->phy_mask & card_port->phy_mask;
+ feature->same_port =
+ old_card_port->port_id == card_port->port_id;
+ feature->cur_chking_old_port = old_card_port;
+
+ if (!feature->dirty_flg || !feature->same_addr)
+ return CARD_PORT_SKIP_CHECKING;
+
+ return CARD_PORT_FURTHER_CHECKING_NEEDED;
+}
+
+static int leapraid_process_card_port_feature(
+ struct leapraid_card_port_feature *feature)
+{
+ struct leapraid_card_port *old_card_port;
+
+ old_card_port = feature->cur_chking_old_port;
+ if (feature->exact_phy) {
+ feature->checking_state = SAME_PORT_WITH_NOTHING_CHANGED;
+ feature->expected_old_port = old_card_port;
+ return 1;
+ } else if (feature->phy_overlap) {
+ if (feature->same_port) {
+ feature->checking_state =
+ SAME_PORT_WITH_PARTIALLY_CHANGED_PHYS;
+ feature->expected_old_port = old_card_port;
+ } else if (feature->checking_state !=
+ SAME_PORT_WITH_PARTIALLY_CHANGED_PHYS) {
+ feature->checking_state =
+ SAME_ADDR_WITH_PARTIALLY_CHANGED_PHYS;
+ feature->expected_old_port = old_card_port;
+ }
+ } else {
+ if (feature->checking_state !=
+ SAME_PORT_WITH_PARTIALLY_CHANGED_PHYS &&
+ feature->checking_state !=
+ SAME_ADDR_WITH_PARTIALLY_CHANGED_PHYS) {
+ feature->checking_state = SAME_ADDR_ONLY;
+ feature->expected_old_port = old_card_port;
+ feature->same_addr_port_count++;
+ }
+ }
+
+ return 0;
+}
+
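+/*
+ * Classify a freshly discovered card port against existing dirty ports
+ * with the same SAS address: identical phy mask, overlapping phys (on the
+ * same or a different port id), or an address-only match, so the caller
+ * can reuse, update or recreate the port object.
+ */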
+static int leapraid_check_card_port(struct leapraid_adapter *adapter,
+ struct leapraid_card_port *card_port,
+ struct leapraid_card_port **expected_card_port,
+ int *count)
+{
+ struct leapraid_card_port *old_card_port;
+ struct leapraid_card_port_feature feature;
+
+ *expected_card_port = NULL;
+ memset(&feature, 0, sizeof(struct leapraid_card_port_feature));
+ feature.expected_old_port = NULL;
+ feature.same_addr_port_count = 0;
+ feature.checking_state = NEW_CARD_PORT;
+
+ list_for_each_entry(old_card_port, &adapter->dev_topo.card_port_list,
+ list) {
+ if (leapraid_get_card_port_feature(old_card_port, card_port,
+ &feature))
+ continue;
+
+ if (leapraid_process_card_port_feature(&feature))
+ break;
+ }
+
+ if (feature.checking_state == SAME_ADDR_ONLY)
+ *count = feature.same_addr_port_count;
+
+ *expected_card_port = feature.expected_old_port;
+ return feature.checking_state;
+}
+
+static void leapraid_del_phy_part_of_anther_port(
+ struct leapraid_adapter *adapter,
+ struct leapraid_card_port *card_port_table, int index,
+ u8 port_count, int offset)
+{
+ struct leapraid_topo_node *card_topo_node;
+ bool found = false;
+ int i;
+
+ card_topo_node = &adapter->dev_topo.card;
+ for (i = 0; i < port_count; i++) {
+ if (i == index)
+ continue;
+
+ if (card_port_table[i].phy_mask & BIT(offset)) {
+ leapraid_transport_detach_phy_to_port(adapter,
+ card_topo_node,
+ &card_topo_node->card_phy[offset]);
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ card_port_table[index].phy_mask |= BIT(offset);
+}
+
+static void leapraid_add_or_del_phys_from_existing_port(
+ struct leapraid_adapter *adapter,
+ struct leapraid_card_port *card_port,
+ struct leapraid_card_port *card_port_table,
+ int index, u8 port_count)
+{
+ struct leapraid_topo_node *card_topo_node;
+ u32 phy_mask_diff;
+ u32 offset = 0;
+
+ card_topo_node = &adapter->dev_topo.card;
+ phy_mask_diff = card_port->phy_mask ^
+ card_port_table[index].phy_mask;
+ for (offset = 0; offset < adapter->dev_topo.card.phys_num; offset++) {
+ if (!(phy_mask_diff & BIT(offset)))
+ continue;
+
+ if (!(card_port_table[index].phy_mask & BIT(offset))) {
+ leapraid_del_phy_part_of_anther_port(adapter,
+ card_port_table,
+ index, port_count,
+ offset);
+ continue;
+ }
+
+ if (card_topo_node->card_phy[offset].phy_is_assigned)
+ leapraid_transport_detach_phy_to_port(adapter,
+ card_topo_node,
+ &card_topo_node->card_phy[offset]);
+
+ leapraid_transport_attach_phy_to_port(adapter,
+ card_topo_node, &card_topo_node->card_phy[offset],
+ card_port->sas_address,
+ card_port);
+ }
+}
+
+struct leapraid_sas_dev *leapraid_get_next_sas_dev_from_init_list(
+ struct leapraid_adapter *adapter)
+{
+ struct leapraid_sas_dev *sas_dev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ if (!list_empty(&adapter->dev_topo.sas_dev_init_list)) {
+ sas_dev = list_first_entry(&adapter->dev_topo.sas_dev_init_list,
+ struct leapraid_sas_dev, list);
+ leapraid_sdev_get(sas_dev);
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return sas_dev;
+}
+
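+/*
+ * Match a device against a BIOS-requested boot device entry. Depending on
+ * the configured form this compares the SAS WWID, the enclosure logical id
+ * plus slot number, or the device name.
+ */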
+static bool leapraid_check_boot_dev_internal(u64 sas_address, u64 dev_name,
+ u64 enc_lid, u16 slot,
+ struct leapraid_boot_dev *boot_dev,
+ u8 form)
+{
+ if (!boot_dev)
+ return false;
+
+ switch (form & LEAPRAID_BOOTDEV_FORM_MASK) {
+ case LEAPRAID_BOOTDEV_FORM_SAS_WWID:
+ if (!sas_address)
+ return false;
+
+ return sas_address ==
+ le64_to_cpu(((struct leapraid_boot_dev_format_sas_wwid *)(
+ boot_dev->pg_dev))->sas_addr);
+ case LEAPRAID_BOOTDEV_FORM_ENC_SLOT:
+ if (!enc_lid)
+ return false;
+
+ return (enc_lid == le64_to_cpu(((struct leapraid_boot_dev_format_enc_slot *)(
+ boot_dev->pg_dev))->enc_lid) &&
+ slot == le16_to_cpu(((struct leapraid_boot_dev_format_enc_slot *)(
+ boot_dev->pg_dev))->slot_num));
+ case LEAPRAID_BOOTDEV_FORM_DEV_NAME:
+ if (!dev_name)
+ return false;
+
+ return dev_name == le64_to_cpu(((struct leapraid_boot_dev_format_dev_name *)(
+ boot_dev->pg_dev))->dev_name);
+ case LEAPRAID_BOOTDEV_FORM_NONE:
+ default:
+ return false;
+ }
+}
+
+static void leapraid_try_set_boot_dev(struct leapraid_boot_dev *boot_dev,
+ u64 sas_addr, u64 dev_name,
+ u64 enc_lid, u16 slot,
+ void *dev, u32 chnl)
+{
+ bool matched = false;
+
+ if (boot_dev->dev)
+ return;
+
+ matched = leapraid_check_boot_dev_internal(sas_addr, dev_name, enc_lid,
+ slot, boot_dev,
+ boot_dev->form);
+ if (matched) {
+ boot_dev->dev = dev;
+ boot_dev->chnl = chnl;
+ }
+}
+
+static void leapraid_check_boot_dev(struct leapraid_adapter *adapter,
+ void *dev, u32 chnl)
+{
+ u64 sas_addr = 0;
+ u64 dev_name = 0;
+ u64 enc_lid = 0;
+ u16 slot = 0;
+
+ if (!adapter->scan_dev_desc.driver_loading)
+ return;
+
+ switch (chnl) {
+ case RAID_CHANNEL:
+ {
+ struct leapraid_raid_volume *raid_volume =
+ (struct leapraid_raid_volume *)dev;
+
+ sas_addr = raid_volume->wwid;
+ break;
+ }
+ default:
+ {
+ struct leapraid_sas_dev *sas_dev =
+ (struct leapraid_sas_dev *)dev;
+ sas_addr = sas_dev->sas_addr;
+ dev_name = sas_dev->dev_name;
+ enc_lid = sas_dev->enc_lid;
+ slot = sas_dev->slot;
+ break;
+ }
+ }
+
+ leapraid_try_set_boot_dev(&adapter->boot_devs.requested_boot_dev,
+ sas_addr, dev_name, enc_lid,
+ slot, dev, chnl);
+ leapraid_try_set_boot_dev(&adapter->boot_devs.requested_alt_boot_dev,
+ sas_addr, dev_name, enc_lid,
+ slot, dev, chnl);
+ leapraid_try_set_boot_dev(&adapter->boot_devs.current_boot_dev,
+ sas_addr, dev_name, enc_lid,
+ slot, dev, chnl);
+}
+
+static void leapraid_build_and_fire_cfg_req(struct leapraid_adapter *adapter,
+ struct leapraid_cfg_req *leap_mpi_cfgp_req,
+ struct leapraid_cfg_rep *leap_mpi_cfgp_rep)
+{
+ struct leapraid_cfg_req *local_leap_cfg_req;
+
+ memset(leap_mpi_cfgp_rep, 0, sizeof(struct leapraid_cfg_rep));
+ memset((void *)(&adapter->driver_cmds.cfg_op_cmd.reply), 0,
+ sizeof(struct leapraid_cfg_rep));
+ adapter->driver_cmds.cfg_op_cmd.status = LEAPRAID_CMD_PENDING;
+ local_leap_cfg_req = leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.cfg_op_cmd.inter_taskid);
+ memcpy(local_leap_cfg_req, leap_mpi_cfgp_req,
+ sizeof(struct leapraid_cfg_req));
+ init_completion(&adapter->driver_cmds.cfg_op_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.cfg_op_cmd.inter_taskid);
+ wait_for_completion_timeout(&adapter->driver_cmds.cfg_op_cmd.done,
+ LEAPRAID_CFG_OP_TIMEOUT * HZ);
+}
+
+static int leapraid_req_cfg_func(struct leapraid_adapter *adapter,
+ struct leapraid_cfg_req *leap_mpi_cfgp_req,
+ struct leapraid_cfg_rep *leap_mpi_cfgp_rep,
+ void *target_cfg_pg, void *real_cfg_pg_addr,
+ u16 target_real_cfg_pg_sz)
+{
+ u32 adapter_status = UINT_MAX;
+ bool issue_reset = false;
+ u8 retry_cnt;
+ int rc;
+
+ retry_cnt = 0;
+ mutex_lock(&adapter->driver_cmds.cfg_op_cmd.mutex);
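+	/*
+	 * Requests aborted by a hard reset (LEAPRAID_CMD_RESET) are
+	 * retried up to LEAPRAID_CFG_REQ_RETRY_TIMES; a plain timeout
+	 * instead escalates to a hard reset after the out label.
+	 */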
+retry:
+ if (retry_cnt) {
+ if (retry_cnt > LEAPRAID_CFG_REQ_RETRY_TIMES) {
+ rc = -EFAULT;
+ goto out;
+ }
+ dev_warn(&adapter->pdev->dev,
+ "cfg-req: retry request, cnt=%u\n", retry_cnt);
+ }
+
+ rc = leapraid_check_adapter_is_op(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev,
+ "cfg-req: adapter not operational\n");
+ goto out;
+ }
+
+ leapraid_build_and_fire_cfg_req(adapter, leap_mpi_cfgp_req,
+ leap_mpi_cfgp_rep);
+ if (!(adapter->driver_cmds.cfg_op_cmd.status & LEAPRAID_CMD_DONE)) {
+ retry_cnt++;
+ if (adapter->driver_cmds.cfg_op_cmd.status &
+ LEAPRAID_CMD_RESET) {
+ dev_warn(&adapter->pdev->dev,
+ "cfg-req: cmd gg due to hard reset\n");
+ goto retry;
+ }
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering) {
+ dev_err(&adapter->pdev->dev,
+ "cfg-req: cmd not done during %s, skip reset\n",
+ adapter->access_ctrl.shost_recovering ?
+ "shost recovery" : "pcie recovery");
+ issue_reset = false;
+ rc = -EFAULT;
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "cfg-req: cmd timeout, issuing hard reset\n");
+ issue_reset = true;
+ }
+
+ goto out;
+ }
+
+ if (adapter->driver_cmds.cfg_op_cmd.status &
+ LEAPRAID_CMD_REPLY_VALID) {
+ memcpy(leap_mpi_cfgp_rep,
+ (void *)(&adapter->driver_cmds.cfg_op_cmd.reply),
+ sizeof(struct leapraid_cfg_rep));
+ adapter_status = le16_to_cpu(
+ leap_mpi_cfgp_rep->adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (adapter_status == LEAPRAID_ADAPTER_STATUS_SUCCESS) {
+ if (target_cfg_pg && real_cfg_pg_addr &&
+ target_real_cfg_pg_sz)
+ if (leap_mpi_cfgp_req->action ==
+ LEAPRAID_CFG_ACT_PAGE_READ_CUR)
+ memcpy(target_cfg_pg,
+ real_cfg_pg_addr,
+ target_real_cfg_pg_sz);
+ } else {
+ if (adapter_status !=
+ LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_PAGE)
+ dev_err(&adapter->pdev->dev,
+ "cfg-rep: adapter_status=0x%x\n",
+ adapter_status);
+ rc = -EFAULT;
+ }
+ } else {
+ dev_err(&adapter->pdev->dev, "cfg-rep: reply invalid\n");
+ rc = -EFAULT;
+ }
+
+out:
+ adapter->driver_cmds.cfg_op_cmd.status = LEAPRAID_CMD_NOT_USED;
+ mutex_unlock(&adapter->driver_cmds.cfg_op_cmd.mutex);
+ if (issue_reset) {
+ if (adapter->scan_dev_desc.first_scan_dev_fired) {
+ dev_info(&adapter->pdev->dev,
+ "%s:%d cfg-req: failure, issuing reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, FULL_RESET);
+ rc = -EFAULT;
+ } else {
+ dev_warn(&adapter->pdev->dev,
+ "cfg-req: cmd gg during init, skip reset\n");
+ rc = -EFAULT;
+ }
+ }
+ return rc;
+}
+
+static int leapraid_request_cfg_pg_header(struct leapraid_adapter *adapter,
+ struct leapraid_cfg_req *leap_mpi_cfgp_req,
+ struct leapraid_cfg_rep *leap_mpi_cfgp_rep)
+{
+ return leapraid_req_cfg_func(adapter, leap_mpi_cfgp_req,
+ leap_mpi_cfgp_rep, NULL, NULL, 0);
+}
+
+static int leapraid_request_cfg_pg(struct leapraid_adapter *adapter,
+ struct leapraid_cfg_req *leap_mpi_cfgp_req,
+ struct leapraid_cfg_rep *leap_mpi_cfgp_rep,
+ void *target_cfg_pg, void *real_cfg_pg_addr,
+ u16 target_real_cfg_pg_sz)
+{
+ return leapraid_req_cfg_func(adapter, leap_mpi_cfgp_req,
+ leap_mpi_cfgp_rep, target_cfg_pg,
+ real_cfg_pg_addr, target_real_cfg_pg_sz);
+}
+
+int leapraid_op_config_page(struct leapraid_adapter *adapter,
+ void *target_cfg_pg, union cfg_param_1 cfgp1,
+ union cfg_param_2 cfgp2,
+ enum config_page_action cfg_op)
+{
+ struct leapraid_cfg_req leap_mpi_cfgp_req;
+ struct leapraid_cfg_rep leap_mpi_cfgp_rep;
+ u16 real_cfg_pg_sz = 0;
+ void *real_cfg_pg_addr = NULL;
+ dma_addr_t real_cfg_pg_dma = 0;
+ u32 __page_size;
+ int rc;
+
+ memset(&leap_mpi_cfgp_req, 0, sizeof(struct leapraid_cfg_req));
+ leap_mpi_cfgp_req.func = LEAPRAID_FUNC_CONFIG_OP;
+ leap_mpi_cfgp_req.action = LEAPRAID_CFG_ACT_PAGE_HEADER;
+
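+	/*
+	 * Config pages are read in two steps: a PAGE_HEADER request first
+	 * reports the real page length, then READ_CURRENT transfers the
+	 * page into a DMA buffer of that length.
+	 */
+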
+ switch (cfg_op) {
+ case GET_BIOS_PG3:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_BIOS;
+ leap_mpi_cfgp_req.header.page_num =
+ LEAPRAID_CFG_PAGE_NUM_BIOS3;
+ __page_size = sizeof(struct leapraid_bios_page3);
+ break;
+ case GET_BIOS_PG2:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_BIOS;
+ leap_mpi_cfgp_req.header.page_num =
+ LEAPRAID_CFG_PAGE_NUM_BIOS2;
+ __page_size = sizeof(struct leapraid_bios_page2);
+ break;
+ case GET_SAS_DEVICE_PG0:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type = LEAPRAID_CFG_EXTPT_SAS_DEV;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_DEV0;
+ __page_size = sizeof(struct leapraid_sas_dev_p0);
+ break;
+ case GET_SAS_IOUNIT_PG0:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type =
+ LEAPRAID_CFG_EXTPT_SAS_IO_UNIT;
+ leap_mpi_cfgp_req.header.page_num =
+ LEAPRAID_CFG_PAGE_NUM_IOUNIT0;
+ __page_size = cfgp1.size;
+ break;
+ case GET_SAS_IOUNIT_PG1:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type =
+ LEAPRAID_CFG_EXTPT_SAS_IO_UNIT;
+ leap_mpi_cfgp_req.header.page_num =
+ LEAPRAID_CFG_PAGE_NUM_IOUNIT1;
+ __page_size = cfgp1.size;
+ break;
+ case GET_SAS_EXPANDER_PG0:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type = LEAPRAID_CFG_EXTPT_SAS_EXP;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_EXP0;
+ __page_size = sizeof(struct leapraid_exp_p0);
+ break;
+ case GET_SAS_EXPANDER_PG1:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type = LEAPRAID_CFG_EXTPT_SAS_EXP;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_EXP1;
+ __page_size = sizeof(struct leapraid_exp_p1);
+ break;
+ case GET_SAS_ENCLOSURE_PG0:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type = LEAPRAID_CFG_EXTPT_ENC;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_ENC0;
+ __page_size = sizeof(struct leapraid_enc_p0);
+ break;
+ case GET_PHY_PG0:
+ leap_mpi_cfgp_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ leap_mpi_cfgp_req.ext_page_type = LEAPRAID_CFG_EXTPT_SAS_PHY;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_PHY0;
+ __page_size = sizeof(struct leapraid_sas_phy_p0);
+ break;
+ case GET_RAID_VOLUME_PG0:
+ leap_mpi_cfgp_req.header.page_type =
+ LEAPRAID_CFG_PT_RAID_VOLUME;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_VOL0;
+ __page_size = cfgp1.size;
+ break;
+ case GET_RAID_VOLUME_PG1:
+ leap_mpi_cfgp_req.header.page_type =
+ LEAPRAID_CFG_PT_RAID_VOLUME;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_VOL1;
+ __page_size = sizeof(struct leapraid_raidvol_p1);
+ break;
+ case GET_PHY_DISK_PG0:
+ leap_mpi_cfgp_req.header.page_type =
+ LEAPRAID_CFG_PT_RAID_PHYSDISK;
+ leap_mpi_cfgp_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_PD0;
+ __page_size = sizeof(struct leapraid_raidpd_p0);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "unsupported config page action=%d!\n", cfg_op);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ leapraid_build_nodata_mpi_sg(adapter,
+ &leap_mpi_cfgp_req.page_buf_sge);
+ rc = leapraid_request_cfg_pg_header(adapter,
+ &leap_mpi_cfgp_req,
+ &leap_mpi_cfgp_rep);
+ if (rc) {
+ dev_err(&adapter->pdev->dev,
+ "cfg-req: header failed rc=%dn", rc);
+ goto out;
+ }
+
+ if (cfg_op == GET_SAS_DEVICE_PG0 ||
+ cfg_op == GET_SAS_EXPANDER_PG0 ||
+ cfg_op == GET_SAS_ENCLOSURE_PG0 ||
+ cfg_op == GET_RAID_VOLUME_PG1)
+ leap_mpi_cfgp_req.page_addr = cpu_to_le32(cfgp1.form |
+ cfgp2.handle);
+ else if (cfg_op == GET_PHY_DISK_PG0)
+ leap_mpi_cfgp_req.page_addr = cpu_to_le32(cfgp1.form |
+ cfgp2.form_specific);
+ else if (cfg_op == GET_RAID_VOLUME_PG0)
+ leap_mpi_cfgp_req.page_addr =
+ cpu_to_le32(cfgp2.handle |
+ LEAPRAID_RAID_VOL_CFG_PGAD_HDL);
+ else if (cfg_op == GET_SAS_EXPANDER_PG1)
+ leap_mpi_cfgp_req.page_addr =
+ cpu_to_le32(cfgp2.handle |
+ (cfgp1.phy_number <<
+ LEAPRAID_SAS_EXP_CFG_PGAD_PHYNUM_SHIFT) |
+ LEAPRAID_SAS_EXP_CFG_PGAD_HDL_PHY_NUM);
+ else if (cfg_op == GET_PHY_PG0)
+ leap_mpi_cfgp_req.page_addr = cpu_to_le32(cfgp1.phy_number |
+ LEAPRAID_SAS_PHY_CFG_PGAD_PHY_NUMBER);
+
+ leap_mpi_cfgp_req.action = LEAPRAID_CFG_ACT_PAGE_READ_CUR;
+
+ leap_mpi_cfgp_req.header.page_num = leap_mpi_cfgp_rep.header.page_num;
+ leap_mpi_cfgp_req.header.page_type =
+ leap_mpi_cfgp_rep.header.page_type;
+ leap_mpi_cfgp_req.header.page_len = leap_mpi_cfgp_rep.header.page_len;
+ leap_mpi_cfgp_req.ext_page_len = leap_mpi_cfgp_rep.ext_page_len;
+ leap_mpi_cfgp_req.ext_page_type = leap_mpi_cfgp_rep.ext_page_type;
+
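+	/* page lengths are reported in 4-byte (LEAPRAID_CFG_UNIT_SIZE)
+	 * units */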
+	real_cfg_pg_sz = (leap_mpi_cfgp_req.header.page_len) ?
+		leap_mpi_cfgp_req.header.page_len * LEAPRAID_CFG_UNIT_SIZE :
+		le16_to_cpu(leap_mpi_cfgp_rep.ext_page_len) * LEAPRAID_CFG_UNIT_SIZE;
+ real_cfg_pg_addr = dma_alloc_coherent(&adapter->pdev->dev,
+ real_cfg_pg_sz,
+ &real_cfg_pg_dma,
+ GFP_KERNEL);
+ if (!real_cfg_pg_addr) {
+ dev_err(&adapter->pdev->dev, "cfg-req: dma alloc failed\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (leap_mpi_cfgp_req.action == LEAPRAID_CFG_ACT_PAGE_WRITE_CUR) {
+ leapraid_single_mpi_sg_append(adapter,
+ &leap_mpi_cfgp_req.page_buf_sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL |
+ LEAPRAID_SGE_FLG_H2C) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ real_cfg_pg_sz,
+ real_cfg_pg_dma);
+ memcpy(real_cfg_pg_addr, target_cfg_pg,
+ min_t(u16, real_cfg_pg_sz, __page_size));
+ } else {
+ memset(target_cfg_pg, 0, __page_size);
+ leapraid_single_mpi_sg_append(adapter,
+ &leap_mpi_cfgp_req.page_buf_sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ real_cfg_pg_sz,
+ real_cfg_pg_dma);
+ memset(real_cfg_pg_addr, 0,
+ min_t(u16, real_cfg_pg_sz, __page_size));
+ }
+
+ rc = leapraid_request_cfg_pg(adapter,
+ &leap_mpi_cfgp_req,
+ &leap_mpi_cfgp_rep,
+ target_cfg_pg,
+ real_cfg_pg_addr,
+ min_t(u16, real_cfg_pg_sz, __page_size));
+ if (rc) {
+ u32 adapter_status;
+
+ adapter_status = le16_to_cpu(leap_mpi_cfgp_rep.adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (adapter_status !=
+ LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_PAGE)
+ dev_err(&adapter->pdev->dev,
+ "cfg-req: rc=%d, pg_info: 0x%x, 0x%x, %d\n",
+ rc, leap_mpi_cfgp_req.header.page_type,
+ leap_mpi_cfgp_req.ext_page_type,
+ leap_mpi_cfgp_req.header.page_num);
+ }
+
+ if (real_cfg_pg_addr)
+ dma_free_coherent(&adapter->pdev->dev,
+ real_cfg_pg_sz,
+ real_cfg_pg_addr,
+ real_cfg_pg_dma);
+out:
+ return rc;
+}
+
+static int leapraid_cfg_get_volume_hdl_dispatch(
+ struct leapraid_adapter *adapter,
+ struct leapraid_cfg_req *cfg_req,
+ struct leapraid_cfg_rep *cfg_rep,
+ struct leapraid_raid_cfg_p0 *raid_cfg_p0,
+ void *real_cfg_pg_addr,
+ u16 real_cfg_pg_sz,
+ u16 raid_cfg_p0_sz,
+ u16 pd_hdl, u16 *vol_hdl)
+{
+ u16 phys_disk_dev_hdl;
+ u16 adapter_status;
+ u16 element_type;
+ int config_num;
+ int rc, i;
+
+ config_num = 0xff;
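+	/*
+	 * Walk the RAID configuration pages: starting from config number
+	 * 0xff, each page returned carries the next config number to
+	 * fetch.  INVALID_PAGE ends the walk with no matching volume.
+	 */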
+ while (true) {
+ cfg_req->page_addr =
+ cpu_to_le32(config_num +
+ LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP);
+ rc = leapraid_request_cfg_pg(
+ adapter, cfg_req, cfg_rep,
+ raid_cfg_p0, real_cfg_pg_addr,
+ min_t(u16, real_cfg_pg_sz, raid_cfg_p0_sz));
+ adapter_status = le16_to_cpu(cfg_rep->adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (rc) {
+ if (adapter_status ==
+ LEAPRAID_ADAPTER_STATUS_CONFIG_INVALID_PAGE) {
+ *vol_hdl = 0;
+ return 0;
+ }
+ return rc;
+ }
+
+ if (adapter_status != LEAPRAID_ADAPTER_STATUS_SUCCESS)
+ return -1;
+
+ for (i = 0; i < raid_cfg_p0->elements_num; i++) {
+ element_type =
+ le16_to_cpu(raid_cfg_p0->cfg_element[i].element_flg) &
+ LEAPRAID_RAIDCFG_P0_EFLG_MASK_ELEMENT_TYPE;
+
+ switch (element_type) {
+ case LEAPRAID_RAIDCFG_P0_EFLG_VOL_PHYS_DISK_ELEMENT:
+ case LEAPRAID_RAIDCFG_P0_EFLG_OCE_ELEMENT:
+ phys_disk_dev_hdl =
+ le16_to_cpu(raid_cfg_p0->cfg_element[i]
+ .phys_disk_dev_hdl);
+ if (phys_disk_dev_hdl == pd_hdl) {
+ *vol_hdl =
+ le16_to_cpu
+ (raid_cfg_p0->cfg_element[i]
+ .vol_dev_hdl);
+ return 0;
+ }
+ break;
+
+ case LEAPRAID_RAIDCFG_P0_EFLG_HOT_SPARE_ELEMENT:
+ *vol_hdl = 0;
+ return 0;
+ default:
+ break;
+ }
+ }
+ config_num = raid_cfg_p0->cfg_num;
+ }
+}
+
+int leapraid_cfg_get_volume_hdl(struct leapraid_adapter *adapter,
+ u16 pd_hdl, u16 *vol_hdl)
+{
+ struct leapraid_raid_cfg_p0 *raid_cfg_p0 = NULL;
+ struct leapraid_cfg_req cfg_req;
+ struct leapraid_cfg_rep cfg_rep;
+ dma_addr_t real_cfg_pg_dma = 0;
+ void *real_cfg_pg_addr = NULL;
+ u16 real_cfg_pg_sz = 0;
+ int rc, raid_cfg_p0_sz;
+
+ *vol_hdl = 0;
+ memset(&cfg_req, 0, sizeof(struct leapraid_cfg_req));
+ cfg_req.func = LEAPRAID_FUNC_CONFIG_OP;
+ cfg_req.action = LEAPRAID_CFG_ACT_PAGE_HEADER;
+ cfg_req.header.page_type = LEAPRAID_CFG_PT_EXTENDED;
+ cfg_req.ext_page_type = LEAPRAID_CFG_EXTPT_RAID_CONFIG;
+ cfg_req.header.page_num = LEAPRAID_CFG_PAGE_NUM_VOL0;
+
+ leapraid_build_nodata_mpi_sg(adapter, &cfg_req.page_buf_sge);
+ rc = leapraid_request_cfg_pg_header(adapter, &cfg_req, &cfg_rep);
+ if (rc)
+ goto out;
+
+ cfg_req.action = LEAPRAID_CFG_ACT_PAGE_READ_CUR;
+ raid_cfg_p0_sz = le16_to_cpu(cfg_rep.ext_page_len) *
+ LEAPRAID_CFG_UNIT_SIZE;
+ raid_cfg_p0 = kmalloc(raid_cfg_p0_sz, GFP_KERNEL);
+ if (!raid_cfg_p0) {
+		rc = -ENOMEM;
+ goto out;
+ }
+
+ real_cfg_pg_sz = (cfg_req.header.page_len) ?
+ cfg_req.header.page_len * LEAPRAID_CFG_UNIT_SIZE :
+ le16_to_cpu(cfg_rep.ext_page_len) * LEAPRAID_CFG_UNIT_SIZE;
+
+ real_cfg_pg_addr = dma_alloc_coherent(&adapter->pdev->dev,
+ real_cfg_pg_sz, &real_cfg_pg_dma,
+ GFP_KERNEL);
+ if (!real_cfg_pg_addr) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ memset(raid_cfg_p0, 0, raid_cfg_p0_sz);
+ leapraid_single_mpi_sg_append(adapter,
+ &cfg_req.page_buf_sge,
+ ((LEAPRAID_SGE_FLG_SIMPLE_ONE |
+ LEAPRAID_SGE_FLG_LAST_ONE |
+ LEAPRAID_SGE_FLG_EOB |
+ LEAPRAID_SGE_FLG_EOL) <<
+ LEAPRAID_SGE_FLG_SHIFT) |
+ real_cfg_pg_sz,
+ real_cfg_pg_dma);
+ memset(real_cfg_pg_addr, 0,
+ min_t(u16, real_cfg_pg_sz, raid_cfg_p0_sz));
+
+ rc = leapraid_cfg_get_volume_hdl_dispatch(adapter,
+ &cfg_req, &cfg_rep,
+ raid_cfg_p0,
+ real_cfg_pg_addr,
+ real_cfg_pg_sz,
+ raid_cfg_p0_sz,
+ pd_hdl, vol_hdl);
+
+out:
+ if (real_cfg_pg_addr)
+ dma_free_coherent(&adapter->pdev->dev,
+ real_cfg_pg_sz, real_cfg_pg_addr,
+ real_cfg_pg_dma);
+ kfree(raid_cfg_p0);
+ return rc;
+}
+
+static int leapraid_get_adapter_phys(struct leapraid_adapter *adapter,
+ u8 *nr_phys)
+{
+ struct leapraid_sas_io_unit_p0 sas_io_unit_page0;
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ int rc = 0;
+
+ *nr_phys = 0;
+ cfgp1.size = sizeof(struct leapraid_sas_io_unit_p0);
+ rc = leapraid_op_config_page(adapter, &sas_io_unit_page0, cfgp1,
+ cfgp2, GET_SAS_IOUNIT_PG0);
+ if (rc)
+ return rc;
+
+ *nr_phys = sas_io_unit_page0.phy_num;
+
+ return 0;
+}
+
+static int leapraid_cfg_get_number_pds(struct leapraid_adapter *adapter,
+ u16 hdl, u8 *num_pds)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_raidvol_p0 raidvol_p0;
+ int rc;
+
+ *num_pds = 0;
+ cfgp1.size = sizeof(struct leapraid_raidvol_p0);
+ cfgp2.handle = hdl;
+ rc = leapraid_op_config_page(adapter, &raidvol_p0, cfgp1,
+ cfgp2, GET_RAID_VOLUME_PG0);
+
+ if (!rc)
+ *num_pds = raidvol_p0.num_phys_disks;
+
+ return rc;
+}
+
+int leapraid_cfg_get_volume_wwid(struct leapraid_adapter *adapter,
+ u16 vol_hdl, u64 *wwid)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_raidvol_p1 raidvol_p1;
+ int rc;
+
+ *wwid = 0;
+ cfgp1.form = LEAPRAID_RAID_VOL_CFG_PGAD_HDL;
+ cfgp2.handle = vol_hdl;
+ rc = leapraid_op_config_page(adapter, &raidvol_p1, cfgp1,
+ cfgp2, GET_RAID_VOLUME_PG1);
+ if (!rc)
+ *wwid = le64_to_cpu(raidvol_p1.wwid);
+
+ return rc;
+}
+
+static int leapraid_get_sas_io_unit_page0(struct leapraid_adapter *adapter,
+ struct leapraid_sas_io_unit_p0 *sas_io_unit_p0,
+ u16 sas_iou_pg0_sz)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+
+ cfgp1.size = sas_iou_pg0_sz;
+ return leapraid_op_config_page(adapter, sas_io_unit_p0, cfgp1,
+ cfgp2, GET_SAS_IOUNIT_PG0);
+}
+
+static int leapraid_get_sas_address(struct leapraid_adapter *adapter,
+ u16 hdl, u64 *sas_address)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_dev_p0 sas_dev_p0;
+
+ *sas_address = 0;
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = hdl;
+ if ((leapraid_op_config_page(adapter, &sas_dev_p0, cfgp1,
+ cfgp2, GET_SAS_DEVICE_PG0)))
+ return -ENXIO;
+
+ if (hdl <= adapter->dev_topo.card.phys_num &&
+ (!(le32_to_cpu(sas_dev_p0.dev_info) & LEAPRAID_DEVTYP_SEP)))
+ *sas_address = adapter->dev_topo.card.sas_address;
+ else
+ *sas_address = le64_to_cpu(sas_dev_p0.sas_address);
+
+ return 0;
+}
+
+int leapraid_get_volume_cap(struct leapraid_adapter *adapter,
+ struct leapraid_raid_volume *raid_volume)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_raidvol_p0 *raidvol_p0;
+ struct leapraid_sas_dev_p0 sas_dev_p0;
+ struct leapraid_raidpd_p0 raidpd_p0;
+ u8 num_pds;
+ u16 sz;
+
+ if ((leapraid_cfg_get_number_pds(adapter, raid_volume->hdl,
+ &num_pds)) || !num_pds)
+ return -EFAULT;
+
+ raid_volume->pd_num = num_pds;
+ sz = offsetof(struct leapraid_raidvol_p0, phys_disk) +
+ (num_pds * sizeof(struct leapraid_raidvol0_phys_disk));
+ raidvol_p0 = kzalloc(sz, GFP_KERNEL);
+ if (!raidvol_p0)
+ return -EFAULT;
+
+ cfgp1.size = sz;
+ cfgp2.handle = raid_volume->hdl;
+ if ((leapraid_op_config_page(adapter, raidvol_p0, cfgp1, cfgp2,
+ GET_RAID_VOLUME_PG0))) {
+ kfree(raidvol_p0);
+ return -EFAULT;
+ }
+
+ raid_volume->vol_type = raidvol_p0->volume_type;
+ cfgp1.form = LEAPRAID_PHYSDISK_CFG_PGAD_PHYSDISKNUM;
+ cfgp2.form_specific = raidvol_p0->phys_disk[0].phys_disk_num;
+ if (!(leapraid_op_config_page(adapter, &raidpd_p0, cfgp1, cfgp2,
+ GET_PHY_DISK_PG0))) {
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = le16_to_cpu(raidpd_p0.dev_hdl);
+ if (!(leapraid_op_config_page(adapter, &sas_dev_p0, cfgp1,
+ cfgp2, GET_SAS_DEVICE_PG0))) {
+ raid_volume->dev_info =
+ le32_to_cpu(sas_dev_p0.dev_info);
+ }
+ }
+
+ kfree(raidvol_p0);
+ return 0;
+}
+
+static void leapraid_fw_log_work(struct work_struct *work)
+{
+ struct leapraid_adapter *adapter = container_of(work,
+ struct leapraid_adapter, fw_log_desc.fw_log_work.work);
+ struct leapraid_fw_log_info *infom;
+ unsigned long flags;
+
+ infom = (struct leapraid_fw_log_info *)(adapter->fw_log_desc.fw_log_buffer +
+ LEAPRAID_SYS_LOG_BUF_SIZE);
+
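+	/*
+	 * On the first poll, snapshot the host and adapter log buffer
+	 * positions from the controller; later polls push the host
+	 * position back and refresh the adapter position before
+	 * re-arming the work.
+	 */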
+ if (adapter->fw_log_desc.fw_log_init_flag == 0) {
+ infom->user_position =
+ leapraid_readl(&adapter->iomem_base->host_log_buf_pos);
+ infom->adapter_position =
+ leapraid_readl(&adapter->iomem_base->adapter_log_buf_pos);
+ adapter->fw_log_desc.fw_log_init_flag++;
+ }
+
+ writel(infom->user_position, &adapter->iomem_base->host_log_buf_pos);
+ infom->adapter_position =
+ leapraid_readl(&adapter->iomem_base->adapter_log_buf_pos);
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ if (adapter->fw_log_desc.fw_log_wq)
+ queue_delayed_work(adapter->fw_log_desc.fw_log_wq,
+ &adapter->fw_log_desc.fw_log_work,
+ msecs_to_jiffies(LEAPRAID_PCIE_LOG_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+}
+
+void leapraid_fw_log_stop(struct leapraid_adapter *adapter)
+{
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ if (!adapter->fw_log_desc.open_pcie_trace)
+ return;
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ wq = adapter->fw_log_desc.fw_log_wq;
+ adapter->fw_log_desc.fw_log_wq = NULL;
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+ if (wq) {
+ if (!cancel_delayed_work_sync(&adapter->fw_log_desc.fw_log_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+ }
+}
+
+void leapraid_fw_log_start(struct leapraid_adapter *adapter)
+{
+ unsigned long flags;
+
+ if (!adapter->fw_log_desc.open_pcie_trace)
+ return;
+
+ if (adapter->fw_log_desc.fw_log_wq)
+ return;
+
+ INIT_DELAYED_WORK(&adapter->fw_log_desc.fw_log_work,
+ leapraid_fw_log_work);
+ snprintf(adapter->fw_log_desc.fw_log_wq_name,
+ sizeof(adapter->fw_log_desc.fw_log_wq_name),
+ "poll_%s%u_fw_log",
+ LEAPRAID_DRIVER_NAME, adapter->adapter_attr.id);
+ adapter->fw_log_desc.fw_log_wq =
+ create_singlethread_workqueue(
+ adapter->fw_log_desc.fw_log_wq_name);
+ if (!adapter->fw_log_desc.fw_log_wq)
+ return;
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ if (adapter->fw_log_desc.fw_log_wq)
+ queue_delayed_work(adapter->fw_log_desc.fw_log_wq,
+ &adapter->fw_log_desc.fw_log_work,
+ msecs_to_jiffies(LEAPRAID_PCIE_LOG_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+}
+
+static void leapraid_timestamp_sync(struct leapraid_adapter *adapter)
+{
+ struct leapraid_io_unit_ctrl_req *io_unit_ctrl_req;
+ ktime_t current_time;
+ bool issue_reset = false;
+ u64 time_stamp = 0;
+
+ mutex_lock(&adapter->driver_cmds.timestamp_sync_cmd.mutex);
+ adapter->driver_cmds.timestamp_sync_cmd.status = LEAPRAID_CMD_PENDING;
+ io_unit_ctrl_req =
+ leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.timestamp_sync_cmd.inter_taskid);
+ memset(io_unit_ctrl_req, 0, sizeof(struct leapraid_io_unit_ctrl_req));
+ io_unit_ctrl_req->func = LEAPRAID_FUNC_SAS_IO_UNIT_CTRL;
+ io_unit_ctrl_req->op = LEAPRAID_SAS_OP_SET_PARAMETER;
+ io_unit_ctrl_req->adapter_para = LEAPRAID_SET_PARAMETER_SYNC_TIMESTAMP;
+
+ current_time = ktime_get_real();
+ time_stamp = ktime_to_ms(current_time);
+
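+	/* the 64-bit millisecond timestamp is passed to firmware as two
+	 * 32-bit parameters, low word first */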
+ io_unit_ctrl_req->adapter_para_value =
+ cpu_to_le32(time_stamp & 0xFFFFFFFF);
+ io_unit_ctrl_req->adapter_para_value2 =
+ cpu_to_le32(time_stamp >> 32);
+ init_completion(&adapter->driver_cmds.timestamp_sync_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.timestamp_sync_cmd.inter_taskid);
+ wait_for_completion_timeout(&adapter->driver_cmds.timestamp_sync_cmd.done,
+ LEAPRAID_TIMESTAMP_SYNC_CMD_TIMEOUT * HZ);
+ if (!(adapter->driver_cmds.timestamp_sync_cmd.status &
+ LEAPRAID_CMD_DONE))
+ issue_reset =
+ leapraid_check_reset(
+ adapter->driver_cmds.timestamp_sync_cmd.status);
+
+ if (issue_reset) {
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, FULL_RESET);
+ }
+
+ adapter->driver_cmds.timestamp_sync_cmd.status = LEAPRAID_CMD_NOT_USED;
+ mutex_unlock(&adapter->driver_cmds.timestamp_sync_cmd.mutex);
+}
+
+static bool leapraid_should_skip_fault_check(struct leapraid_adapter *adapter)
+{
+ unsigned long flags;
+ bool skip;
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ skip = adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering ||
+ adapter->access_ctrl.host_removing;
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+
+ return skip;
+}
+
+static void leapraid_check_scheduled_fault_work(struct work_struct *work)
+{
+ struct leapraid_adapter *adapter;
+ unsigned long flags;
+ u32 adapter_state;
+ int rc;
+
+ adapter = container_of(work, struct leapraid_adapter,
+ reset_desc.fault_reset_work.work);
+
+ if (leapraid_should_skip_fault_check(adapter))
+ goto scheduled_timer;
+
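+	/* a non-operational adapter is brought back with a hard reset
+	 * before the periodic timestamp sync below */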
+ adapter_state = leapraid_get_adapter_state(adapter);
+ if (adapter_state != LEAPRAID_DB_OPERATIONAL) {
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ rc = leapraid_hard_reset_handler(adapter, FULL_RESET);
+ dev_warn(&adapter->pdev->dev, "%s: hard reset: %s\n",
+ __func__, (rc == 0) ? "success" : "failed");
+
+ adapter_state = leapraid_get_adapter_state(adapter);
+ if (rc && adapter_state != LEAPRAID_DB_OPERATIONAL)
+ return;
+ }
+
+ if (++adapter->timestamp_sync_cnt >=
+ LEAPRAID_TIMESTAMP_SYNC_INTERVAL) {
+ adapter->timestamp_sync_cnt = 0;
+ leapraid_timestamp_sync(adapter);
+ }
+
+scheduled_timer:
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ if (adapter->reset_desc.fault_reset_wq)
+ queue_delayed_work(adapter->reset_desc.fault_reset_wq,
+ &adapter->reset_desc.fault_reset_work,
+ msecs_to_jiffies(LEAPRAID_FAULT_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+}
+
+void leapraid_check_scheduled_fault_start(struct leapraid_adapter *adapter)
+{
+ unsigned long flags;
+
+ if (adapter->reset_desc.fault_reset_wq)
+ return;
+
+ adapter->timestamp_sync_cnt = 0;
+ INIT_DELAYED_WORK(&adapter->reset_desc.fault_reset_work,
+ leapraid_check_scheduled_fault_work);
+ snprintf(adapter->reset_desc.fault_reset_wq_name,
+ sizeof(adapter->reset_desc.fault_reset_wq_name),
+ "poll_%s%u_status",
+ LEAPRAID_DRIVER_NAME, adapter->adapter_attr.id);
+ adapter->reset_desc.fault_reset_wq =
+ create_singlethread_workqueue(
+ adapter->reset_desc.fault_reset_wq_name);
+ if (!adapter->reset_desc.fault_reset_wq) {
+ dev_err(&adapter->pdev->dev,
+ "create single thread workqueue failed!\n");
+ return;
+ }
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ if (adapter->reset_desc.fault_reset_wq)
+ queue_delayed_work(adapter->reset_desc.fault_reset_wq,
+ &adapter->reset_desc.fault_reset_work,
+ msecs_to_jiffies(LEAPRAID_FAULT_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+}
+
+void leapraid_check_scheduled_fault_stop(struct leapraid_adapter *adapter)
+{
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ wq = adapter->reset_desc.fault_reset_wq;
+ adapter->reset_desc.fault_reset_wq = NULL;
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+
+ if (!wq)
+ return;
+
+ if (!cancel_delayed_work_sync(&adapter->reset_desc.fault_reset_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+}
+
+static bool leapraid_ready_for_scsi_io(struct leapraid_adapter *adapter,
+ u16 hdl)
+{
+ if (adapter->access_ctrl.pcie_recovering ||
+ adapter->access_ctrl.shost_recovering)
+ return false;
+
+ if (leapraid_check_adapter_is_op(adapter))
+ return false;
+
+ if (hdl == LEAPRAID_INVALID_DEV_HANDLE)
+ return false;
+
+ if (test_bit(hdl, (unsigned long *)adapter->dev_topo.dev_removing))
+ return false;
+
+ return true;
+}
+
+static int leapraid_dispatch_scsi_io(struct leapraid_adapter *adapter,
+ struct leapraid_scsi_cmd_desc *cmd_desc)
+{
+ struct scsi_device *sdev;
+ struct leapraid_sdev_priv *sdev_priv;
+ struct scsi_cmnd *scmd;
+ void *dma_buffer = NULL;
+ dma_addr_t dma_addr = 0;
+ u8 sdev_flg = 0;
+ bool issue_reset = false;
+ int rc = 0;
+
+ if (WARN_ON(!adapter->driver_cmds.internal_scmd))
+ return -EINVAL;
+
+ if (!leapraid_ready_for_scsi_io(adapter, cmd_desc->hdl))
+ return -EINVAL;
+
+ mutex_lock(&adapter->driver_cmds.driver_scsiio_cmd.mutex);
+ if (adapter->driver_cmds.driver_scsiio_cmd.status !=
+ LEAPRAID_CMD_NOT_USED) {
+ rc = -EAGAIN;
+ goto out;
+ }
+ adapter->driver_cmds.driver_scsiio_cmd.status = LEAPRAID_CMD_PENDING;
+
+ __shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (sdev_priv->starget_priv->hdl == cmd_desc->hdl &&
+ sdev_priv->lun == cmd_desc->lun) {
+ sdev_flg = 1;
+ break;
+ }
+ }
+
+ if (!sdev_flg) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (cmd_desc->data_length) {
+ dma_buffer = dma_alloc_coherent(&adapter->pdev->dev,
+ cmd_desc->data_length,
+ &dma_addr, GFP_ATOMIC);
+ if (!dma_buffer) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (cmd_desc->dir == DMA_TO_DEVICE)
+ memcpy(dma_buffer, cmd_desc->data_buffer,
+ cmd_desc->data_length);
+ }
+
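+	/* build the command on the driver's reserved internal scmd, with
+	 * a single-entry sg list backed by the coherent bounce buffer
+	 * allocated above */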
+ scmd = adapter->driver_cmds.internal_scmd;
+ scmd->device = sdev;
+ scmd->cmd_len = cmd_desc->cdb_length;
+ memcpy(scmd->cmnd, cmd_desc->cdb, cmd_desc->cdb_length);
+ scmd->sc_data_direction = cmd_desc->dir;
+ scmd->sdb.length = cmd_desc->data_length;
+ scmd->sdb.table.nents = 1;
+ scmd->sdb.table.orig_nents = 1;
+ sg_init_one(scmd->sdb.table.sgl, dma_buffer, cmd_desc->data_length);
+ init_completion(&adapter->driver_cmds.driver_scsiio_cmd.done);
+ if (leapraid_queuecommand(adapter->shost, scmd)) {
+ adapter->driver_cmds.driver_scsiio_cmd.status &=
+ ~LEAPRAID_CMD_PENDING;
+ complete(&adapter->driver_cmds.driver_scsiio_cmd.done);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ wait_for_completion_timeout(&adapter->driver_cmds.driver_scsiio_cmd.done,
+ cmd_desc->time_out * HZ);
+
+ if (!(adapter->driver_cmds.driver_scsiio_cmd.status &
+ LEAPRAID_CMD_DONE)) {
+ issue_reset =
+ leapraid_check_reset(
+ adapter->driver_cmds.driver_scsiio_cmd.status);
+ rc = -ENODATA;
+ goto reset;
+ }
+
+ rc = adapter->driver_cmds.internal_scmd->result;
+ if (!rc && cmd_desc->dir == DMA_FROM_DEVICE)
+ memcpy(cmd_desc->data_buffer, dma_buffer,
+ cmd_desc->data_length);
+
+reset:
+ if (issue_reset) {
+ rc = -ENODATA;
+ dev_err(&adapter->pdev->dev, "fire tgt reset: hdl=0x%04x\n",
+ cmd_desc->hdl);
+ leapraid_issue_locked_tm(adapter, cmd_desc->hdl, 0, 0, 0,
+ LEAPRAID_TM_TASKTYPE_TARGET_RESET,
+ adapter->driver_cmds.driver_scsiio_cmd.taskid,
+ LEAPRAID_TM_MSGFLAGS_LINK_RESET);
+ }
+out:
+ if (dma_buffer)
+ dma_free_coherent(&adapter->pdev->dev,
+ cmd_desc->data_length, dma_buffer, dma_addr);
+ adapter->driver_cmds.driver_scsiio_cmd.status = LEAPRAID_CMD_NOT_USED;
+ mutex_unlock(&adapter->driver_cmds.driver_scsiio_cmd.mutex);
+ return rc;
+}
+
+static int leapraid_dispatch_logsense(struct leapraid_adapter *adapter,
+ u16 hdl, u32 lun)
+{
+ struct leapraid_scsi_cmd_desc *desc;
+ int rc = 0;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
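+	/* build a LOG SENSE command; byte 8 of the returned log page is
+	 * compared against LEAPRAID_LOGSENSE_SMART_CODE below to detect
+	 * a SMART trip */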
+ desc->hdl = hdl;
+ desc->lun = lun;
+ desc->data_length = LEAPRAID_LOGSENSE_DATA_LENGTH;
+ desc->dir = DMA_FROM_DEVICE;
+ desc->cdb_length = LEAPRAID_LOGSENSE_CDB_LENGTH;
+ desc->cdb[0] = LOG_SENSE;
+ desc->cdb[2] = LEAPRAID_LOGSENSE_CDB_CODE;
+ desc->cdb[8] = desc->data_length;
+ desc->raid_member = false;
+ desc->time_out = LEAPRAID_LOGSENSE_TIMEOUT;
+
+ desc->data_buffer = kzalloc(desc->data_length, GFP_KERNEL);
+ if (!desc->data_buffer) {
+ kfree(desc);
+ return -ENOMEM;
+ }
+
+ rc = leapraid_dispatch_scsi_io(adapter, desc);
+	if (!rc &&
+	    ((char *)desc->data_buffer)[8] == LEAPRAID_LOGSENSE_SMART_CODE)
+		leapraid_smart_fault_detect(adapter, hdl);
+
+ kfree(desc->data_buffer);
+ kfree(desc);
+
+ return rc;
+}
+
+static bool leapraid_smart_poll_check(struct leapraid_adapter *adapter,
+ struct leapraid_sdev_priv *sdev_priv,
+ u32 reset_flg)
+{
+ struct leapraid_sas_dev *sas_dev = NULL;
+
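+	/* poll only unblocked end devices that support SATA SMART,
+	 * skipping RAID members/volumes and devices whose fault LED is
+	 * already on (a reset clears the cached LED state) */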
+ if (!sdev_priv || !sdev_priv->starget_priv->card_port)
+ goto out;
+
+ sas_dev = leapraid_get_sas_dev_by_addr(adapter,
+ sdev_priv->starget_priv->sas_address,
+ sdev_priv->starget_priv->card_port);
+ if (!sas_dev || !sas_dev->support_smart)
+ goto out;
+
+ if (reset_flg)
+ sas_dev->led_on = false;
+ else if (sas_dev->led_on)
+ goto out;
+
+ if ((sdev_priv->starget_priv->flg & LEAPRAID_TGT_FLG_RAID_MEMBER) ||
+ (sdev_priv->starget_priv->flg & LEAPRAID_TGT_FLG_VOLUME) ||
+ sdev_priv->block)
+ goto out;
+
+ leapraid_sdev_put(sas_dev);
+ return true;
+
+out:
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+ return false;
+}
+
+static void leapraid_sata_smart_poll_work(struct work_struct *work)
+{
+ struct leapraid_adapter *adapter =
+ container_of(work, struct leapraid_adapter,
+ smart_poll_desc.smart_poll_work.work);
+ struct scsi_device *sdev;
+ struct leapraid_sdev_priv *sdev_priv;
+ static u32 reset_cnt;
+ bool reset_flg = false;
+
+ if (leapraid_check_adapter_is_op(adapter))
+ goto out;
+
+ reset_flg = (reset_cnt < adapter->reset_desc.reset_cnt);
+ reset_cnt = adapter->reset_desc.reset_cnt;
+
+ __shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (leapraid_smart_poll_check(adapter, sdev_priv, reset_flg))
+ leapraid_dispatch_logsense(adapter,
+ sdev_priv->starget_priv->hdl,
+ sdev_priv->lun);
+ }
+
+out:
+ if (adapter->smart_poll_desc.smart_poll_wq)
+ queue_delayed_work(adapter->smart_poll_desc.smart_poll_wq,
+ &adapter->smart_poll_desc.smart_poll_work,
+ msecs_to_jiffies(LEAPRAID_SMART_POLLING_INTERVAL));
+}
+
+void leapraid_smart_polling_start(struct leapraid_adapter *adapter)
+{
+ if (adapter->smart_poll_desc.smart_poll_wq || !smart_poll)
+ return;
+
+ INIT_DELAYED_WORK(&adapter->smart_poll_desc.smart_poll_work,
+ leapraid_sata_smart_poll_work);
+
+ snprintf(adapter->smart_poll_desc.smart_poll_wq_name,
+ sizeof(adapter->smart_poll_desc.smart_poll_wq_name),
+ "poll_%s%u_smart_poll",
+ LEAPRAID_DRIVER_NAME,
+ adapter->adapter_attr.id);
+ adapter->smart_poll_desc.smart_poll_wq =
+ create_singlethread_workqueue(
+ adapter->smart_poll_desc.smart_poll_wq_name);
+ if (!adapter->smart_poll_desc.smart_poll_wq)
+ return;
+ queue_delayed_work(adapter->smart_poll_desc.smart_poll_wq,
+ &adapter->smart_poll_desc.smart_poll_work,
+ msecs_to_jiffies(LEAPRAID_SMART_POLLING_INTERVAL));
+}
+
+void leapraid_smart_polling_stop(struct leapraid_adapter *adapter)
+{
+ struct workqueue_struct *wq;
+
+ if (!adapter->smart_poll_desc.smart_poll_wq)
+ return;
+
+ wq = adapter->smart_poll_desc.smart_poll_wq;
+ adapter->smart_poll_desc.smart_poll_wq = NULL;
+
+ if (wq) {
+ if (!cancel_delayed_work_sync(&adapter->smart_poll_desc.smart_poll_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+ }
+}
+
+static void leapraid_fw_work(struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt);
+
+static void leapraid_fw_evt_free(struct kref *r)
+{
+ struct leapraid_fw_evt_work *fw_evt;
+
+ fw_evt = container_of(r, struct leapraid_fw_evt_work, refcnt);
+
+ kfree(fw_evt->evt_data);
+ kfree(fw_evt);
+}
+
+static void leapraid_fw_evt_get(struct leapraid_fw_evt_work *fw_evt)
+{
+ kref_get(&fw_evt->refcnt);
+}
+
+static void leapraid_fw_evt_put(struct leapraid_fw_evt_work *fw_work)
+{
+ kref_put(&fw_work->refcnt, leapraid_fw_evt_free);
+}
+
+static struct leapraid_fw_evt_work *leapraid_alloc_fw_evt_work(void)
+{
+ struct leapraid_fw_evt_work *fw_evt =
+ kzalloc(sizeof(*fw_evt), GFP_ATOMIC);
+ if (!fw_evt)
+ return NULL;
+
+ kref_init(&fw_evt->refcnt);
+ return fw_evt;
+}
+
+static void leapraid_run_fw_evt_work(struct work_struct *work)
+{
+ struct leapraid_fw_evt_work *fw_evt =
+ container_of(work, struct leapraid_fw_evt_work, work);
+
+ leapraid_fw_work(fw_evt->adapter, fw_evt);
+}
+
+static void leapraid_fw_evt_add(struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt)
+{
+ unsigned long flags;
+
+ if (!adapter->fw_evt_s.fw_evt_thread)
+ return;
+
+ spin_lock_irqsave(&adapter->fw_evt_s.fw_evt_lock, flags);
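+	/* take two references: one for the event list, one for the queued
+	 * work; each consumer drops its own */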
+ leapraid_fw_evt_get(fw_evt);
+ INIT_LIST_HEAD(&fw_evt->list);
+ list_add_tail(&fw_evt->list, &adapter->fw_evt_s.fw_evt_list);
+ INIT_WORK(&fw_evt->work, leapraid_run_fw_evt_work);
+ leapraid_fw_evt_get(fw_evt);
+ queue_work(adapter->fw_evt_s.fw_evt_thread, &fw_evt->work);
+ spin_unlock_irqrestore(&adapter->fw_evt_s.fw_evt_lock, flags);
+}
+
+static void leapraid_del_fw_evt_from_list(struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->fw_evt_s.fw_evt_lock, flags);
+ if (!list_empty(&fw_evt->list)) {
+ list_del_init(&fw_evt->list);
+ leapraid_fw_evt_put(fw_evt);
+ }
+ spin_unlock_irqrestore(&adapter->fw_evt_s.fw_evt_lock, flags);
+}
+
+static struct leapraid_fw_evt_work *leapraid_next_fw_evt(
+ struct leapraid_adapter *adapter)
+{
+ struct leapraid_fw_evt_work *fw_evt = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->fw_evt_s.fw_evt_lock, flags);
+ if (!list_empty(&adapter->fw_evt_s.fw_evt_list)) {
+ fw_evt = list_first_entry(&adapter->fw_evt_s.fw_evt_list,
+ struct leapraid_fw_evt_work, list);
+ list_del_init(&fw_evt->list);
+ leapraid_fw_evt_put(fw_evt);
+ }
+ spin_unlock_irqrestore(&adapter->fw_evt_s.fw_evt_lock, flags);
+ return fw_evt;
+}
+
+void leapraid_clean_active_fw_evt(struct leapraid_adapter *adapter)
+{
+ struct leapraid_fw_evt_work *fw_evt;
+ bool rc = false;
+
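+	/*
+	 * Drain the firmware event queue: every queued event is cancelled
+	 * and its list reference dropped.  The event currently being
+	 * processed is only cancelled when it is a dead-device removal;
+	 * otherwise it is left to finish with cur_evt cleared.
+	 */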
+ if ((list_empty(&adapter->fw_evt_s.fw_evt_list) &&
+ !adapter->fw_evt_s.cur_evt) || !adapter->fw_evt_s.fw_evt_thread)
+ return;
+
+ adapter->fw_evt_s.fw_evt_cleanup = 1;
+ if (adapter->access_ctrl.shost_recovering &&
+ adapter->fw_evt_s.cur_evt)
+ adapter->fw_evt_s.cur_evt->ignore = 1;
+
+ while ((fw_evt = leapraid_next_fw_evt(adapter)) ||
+ (fw_evt = adapter->fw_evt_s.cur_evt)) {
+ if (fw_evt == adapter->fw_evt_s.cur_evt &&
+ adapter->fw_evt_s.cur_evt->evt_type !=
+ LEAPRAID_EVT_REMOVE_DEAD_DEV) {
+ adapter->fw_evt_s.cur_evt = NULL;
+ continue;
+ }
+
+ rc = cancel_work_sync(&fw_evt->work);
+
+ if (rc)
+ leapraid_fw_evt_put(fw_evt);
+ }
+ adapter->fw_evt_s.fw_evt_cleanup = 0;
+}
+
+static void leapraid_internal_dev_ublk(struct scsi_device *sdev,
+ struct leapraid_sdev_priv *sdev_priv)
+{
+ int rc = 0;
+
+ sdev_printk(KERN_WARNING, sdev,
+ "hdl 0x%04x: now internal unblkg dev\n",
+ sdev_priv->starget_priv->hdl);
+ sdev_priv->block = false;
+ rc = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
+ if (rc == -EINVAL) {
+ sdev_printk(KERN_WARNING, sdev,
+ "hdl 0x%04x: unblkg failed, rc=%d\n",
+ sdev_priv->starget_priv->hdl, rc);
+ sdev_priv->block = true;
+ rc = scsi_internal_device_block_nowait(sdev);
+ if (rc)
+ sdev_printk(KERN_WARNING, sdev,
+ "hdl 0x%04x: blkg failed: earlier unblkg err, rc=%d\n",
+ sdev_priv->starget_priv->hdl, rc);
+
+ sdev_priv->block = false;
+ rc = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
+ if (rc)
+ sdev_printk(KERN_WARNING, sdev,
+ "hdl 0x%04x: ublkg failed again, rc=%d\n",
+ sdev_priv->starget_priv->hdl, rc);
+ }
+}
+
+static void leapraid_internal_ublk_io_dev_to_running(struct scsi_device *sdev)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+
+ sdev_priv = sdev->hostdata;
+ sdev_priv->block = false;
+ scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
+ sdev_printk(KERN_WARNING, sdev, "%s: ublk hdl 0x%04x\n",
+ __func__, sdev_priv->starget_priv->hdl);
+}
+
+static void leapraid_ublk_io_dev_to_running(
+ struct leapraid_adapter *adapter, u64 sas_addr,
+ struct leapraid_card_port *card_port)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv)
+ continue;
+
+ if (sdev_priv->starget_priv->sas_address != sas_addr ||
+ sdev_priv->starget_priv->card_port != card_port)
+ continue;
+
+ if (sdev_priv->block)
+ leapraid_internal_ublk_io_dev_to_running(sdev);
+ }
+}
+
+static void leapraid_ublk_io_dev(struct leapraid_adapter *adapter,
+ u64 sas_addr,
+ struct leapraid_card_port *card_port)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv || !sdev_priv->starget_priv)
+ continue;
+
+ if (sdev_priv->starget_priv->sas_address != sas_addr)
+ continue;
+
+ if (sdev_priv->starget_priv->card_port != card_port)
+ continue;
+
+ if (sdev_priv->block)
+ leapraid_internal_dev_ublk(sdev, sdev_priv);
+
+ scsi_device_set_state(sdev, SDEV_OFFLINE);
+ }
+}
+
+static void leapraid_ublk_io_all_dev(struct leapraid_adapter *adapter)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct leapraid_starget_priv *stgt_priv;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+
+ if (!sdev_priv)
+ continue;
+
+ stgt_priv = sdev_priv->starget_priv;
+ if (!stgt_priv || stgt_priv->deleted)
+ continue;
+
+ if (!sdev_priv->block)
+ continue;
+
+ sdev_printk(KERN_WARNING, sdev, "hdl 0x%04x: blkg...\n",
+ sdev_priv->starget_priv->hdl);
+ leapraid_internal_dev_ublk(sdev, sdev_priv);
+ }
+}
+
+static void __maybe_unused leapraid_internal_dev_blk(
+ struct scsi_device *sdev,
+ struct leapraid_sdev_priv *sdev_priv)
+{
+ int rc = 0;
+
+ sdev_printk(KERN_INFO, sdev, "internal blkg hdl 0x%04x\n",
+ sdev_priv->starget_priv->hdl);
+ sdev_priv->block = true;
+ rc = scsi_internal_device_block_nowait(sdev);
+	if (rc == -EINVAL)
+		sdev_printk(KERN_WARNING, sdev,
+			    "hdl 0x%04x: blkg failed, rc=%d\n",
+			    sdev_priv->starget_priv->hdl, rc);
+}
+
+static void __maybe_unused leapraid_blkio_dev(struct leapraid_adapter *adapter,
+ u16 hdl)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct leapraid_sas_dev *sas_dev;
+ struct scsi_device *sdev;
+
+ sas_dev = leapraid_get_sas_dev_by_hdl(adapter, hdl);
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv)
+ continue;
+
+ if (sdev_priv->starget_priv->hdl != hdl)
+ continue;
+
+ if (sdev_priv->block)
+ continue;
+
+ if (sas_dev && sas_dev->pend_sas_rphy_add)
+ continue;
+
+ if (sdev_priv->sep) {
+ sdev_printk(KERN_INFO, sdev,
+ "sep hdl 0x%04x skip blkg\n",
+ sdev_priv->starget_priv->hdl);
+ continue;
+ }
+
+ leapraid_internal_dev_blk(sdev, sdev_priv);
+ }
+
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+}
+
+static void leapraid_imm_blkio_to_end_dev(struct leapraid_adapter *adapter,
+ struct leapraid_sas_port *sas_port)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct leapraid_sas_dev *sas_dev;
+ struct scsi_device *sdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(
+ adapter,
+ sas_port->remote_identify.sas_address,
+ sas_port->card_port);
+
+ if (sas_dev) {
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv)
+ continue;
+
+ if (sdev_priv->starget_priv->hdl != sas_dev->hdl)
+ continue;
+
+ if (sdev_priv->block)
+ continue;
+
+			if (sas_dev->pend_sas_rphy_add)
+ continue;
+
+ if (sdev_priv->sep) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s skip dev blk for sep hdl 0x%04x\n",
+ __func__,
+ sdev_priv->starget_priv->hdl);
+ continue;
+ }
+
+ leapraid_internal_dev_blk(sdev, sdev_priv);
+ }
+
+ leapraid_sdev_put(sas_dev);
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+}
+
+static void leapraid_imm_blkio_set_end_dev_blk_hdls(
+ struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node_exp)
+{
+ struct leapraid_sas_port *sas_port;
+
+ list_for_each_entry(sas_port,
+ &topo_node_exp->sas_port_list, port_list) {
+ if (sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE) {
+ leapraid_imm_blkio_to_end_dev(adapter, sas_port);
+ }
+ }
+}
+
+static void leapraid_imm_blkio_to_kids_attchd_to_ex(
+ struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node_exp);
+
+static void leapraid_imm_blkio_to_sib_exp(
+ struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node_exp)
+{
+ struct leapraid_topo_node *topo_node_exp_sib;
+ struct leapraid_sas_port *sas_port;
+
+ list_for_each_entry(sas_port,
+ &topo_node_exp->sas_port_list, port_list) {
+ if (sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE) {
+ topo_node_exp_sib =
+ leapraid_exp_find_by_sas_address(
+ adapter,
+ sas_port->remote_identify.sas_address,
+ sas_port->card_port);
+ leapraid_imm_blkio_to_kids_attchd_to_ex(
+ adapter,
+ topo_node_exp_sib);
+ }
+ }
+}
+
+static void leapraid_imm_blkio_to_kids_attchd_to_ex(
+ struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node_exp)
+{
+ if (!topo_node_exp)
+ return;
+
+ leapraid_imm_blkio_set_end_dev_blk_hdls(adapter, topo_node_exp);
+
+ leapraid_imm_blkio_to_sib_exp(adapter, topo_node_exp);
+}
+
+static void leapraid_report_sdev_directly(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev)
+{
+ struct leapraid_sas_port *sas_port;
+
+ sas_port = leapraid_transport_port_add(adapter,
+ sas_dev->hdl,
+ sas_dev->parent_sas_addr,
+ sas_dev->card_port);
+ if (!sas_port) {
+ leapraid_sas_dev_remove(adapter, sas_dev);
+ return;
+ }
+
+ if (!sas_dev->starget) {
+ if (!adapter->scan_dev_desc.driver_loading) {
+ leapraid_transport_port_remove(adapter,
+ sas_dev->sas_addr,
+ sas_dev->parent_sas_addr,
+ sas_dev->card_port);
+ leapraid_sas_dev_remove(adapter, sas_dev);
+ }
+ return;
+ }
+
+ clear_bit(sas_dev->hdl,
+ (unsigned long *)adapter->dev_topo.pending_dev_add);
+}
+
+static struct leapraid_sas_dev *leapraid_init_sas_dev(
+ struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev_p0 *sas_dev_pg0,
+ struct leapraid_card_port *card_port, u16 hdl,
+ u64 parent_sas_addr, u64 sas_addr, u32 dev_info)
+{
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_enc_node *enc_dev;
+
+ sas_dev = kzalloc(sizeof(*sas_dev), GFP_KERNEL);
+ if (!sas_dev)
+ return NULL;
+
+ kref_init(&sas_dev->refcnt);
+ sas_dev->hdl = hdl;
+ sas_dev->dev_info = dev_info;
+ sas_dev->sas_addr = sas_addr;
+ sas_dev->card_port = card_port;
+ sas_dev->parent_sas_addr = parent_sas_addr;
+ sas_dev->phy = sas_dev_pg0->phy_num;
+ sas_dev->enc_hdl = le16_to_cpu(sas_dev_pg0->enc_hdl);
+ sas_dev->dev_name = le64_to_cpu(sas_dev_pg0->dev_name);
+ sas_dev->port_type = sas_dev_pg0->max_port_connections;
+ sas_dev->slot = sas_dev->enc_hdl ? le16_to_cpu(sas_dev_pg0->slot) : 0;
+ sas_dev->support_smart = (le16_to_cpu(sas_dev_pg0->flg) &
+ LEAPRAID_SAS_DEV_P0_FLG_SATA_SMART);
+ if (le16_to_cpu(sas_dev_pg0->flg) &
+ LEAPRAID_SAS_DEV_P0_FLG_ENC_LEVEL_VALID) {
+ sas_dev->enc_level = sas_dev_pg0->enc_level;
+ memcpy(sas_dev->connector_name, sas_dev_pg0->connector_name, 4);
+ sas_dev->connector_name[4] = '\0';
+ } else {
+ sas_dev->enc_level = 0;
+ sas_dev->connector_name[0] = '\0';
+ }
+ if (le16_to_cpu(sas_dev_pg0->enc_hdl)) {
+ enc_dev = leapraid_enc_find_by_hdl(adapter,
+ le16_to_cpu(sas_dev_pg0->enc_hdl));
+ sas_dev->enc_lid = enc_dev ?
+ le64_to_cpu(enc_dev->pg0.enc_lid) : 0;
+ }
+	dev_info(&adapter->pdev->dev,
+		 "add dev: hdl=0x%04x, sas addr=0x%016llx, port_type=0x%02x\n",
+ hdl, sas_dev->sas_addr, sas_dev->port_type);
+
+ return sas_dev;
+}
+
+static void leapraid_add_dev(struct leapraid_adapter *adapter, u16 hdl)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_dev_p0 sas_dev_pg0;
+ struct leapraid_card_port *card_port;
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ u64 parent_sas_addr;
+ u32 dev_info;
+ u64 sas_addr;
+ u8 port_id;
+
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = hdl;
+ if ((leapraid_op_config_page(adapter, &sas_dev_pg0,
+ cfgp1, cfgp2, GET_SAS_DEVICE_PG0)))
+ return;
+
+ dev_info = le32_to_cpu(sas_dev_pg0.dev_info);
+ if (!(leapraid_is_end_dev(dev_info)))
+ return;
+
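+	/* mark the handle pending until the device is fully reported (or
+	 * found to be already known) */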
+ set_bit(hdl, (unsigned long *)adapter->dev_topo.pending_dev_add);
+ sas_addr = le64_to_cpu(sas_dev_pg0.sas_address);
+ if (!(le16_to_cpu(sas_dev_pg0.flg) &
+ LEAPRAID_SAS_DEV_P0_FLG_DEV_PRESENT))
+ return;
+
+ port_id = sas_dev_pg0.physical_port;
+ card_port = leapraid_get_port_by_id(adapter, port_id, false);
+ if (!card_port)
+ return;
+
+ sas_dev = leapraid_get_sas_dev_by_addr(adapter, sas_addr, card_port);
+ if (sas_dev) {
+ clear_bit(hdl,
+ (unsigned long *)adapter->dev_topo.pending_dev_add);
+ leapraid_sdev_put(sas_dev);
+ return;
+ }
+
+ if (leapraid_get_sas_address(adapter,
+ le16_to_cpu(sas_dev_pg0.parent_dev_hdl),
+ &parent_sas_addr))
+ return;
+
+ sas_dev = leapraid_init_sas_dev(adapter, &sas_dev_pg0, card_port,
+ hdl, parent_sas_addr, sas_addr,
+ dev_info);
+ if (!sas_dev)
+ return;
+ if (adapter->scan_dev_desc.wait_scan_dev_done) {
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ leapraid_sdev_get(sas_dev);
+ list_add_tail(&sas_dev->list,
+ &adapter->dev_topo.sas_dev_init_list);
+ leapraid_check_boot_dev(adapter, sas_dev, 0);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ } else {
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ leapraid_sdev_get(sas_dev);
+ list_add_tail(&sas_dev->list, &adapter->dev_topo.sas_dev_list);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ leapraid_report_sdev_directly(adapter, sas_dev);
+ }
+}
+
+static void leapraid_remove_device(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev)
+{
+ struct leapraid_starget_priv *starget_priv;
+
+ if (sas_dev->led_on) {
+ leapraid_set_led(adapter, sas_dev, false);
+ sas_dev->led_on = false;
+ }
+
+ if (sas_dev->starget && sas_dev->starget->hostdata) {
+ starget_priv = sas_dev->starget->hostdata;
+ starget_priv->deleted = true;
+ leapraid_ublk_io_dev(adapter,
+ sas_dev->sas_addr, sas_dev->card_port);
+ starget_priv->hdl = LEAPRAID_INVALID_DEV_HANDLE;
+ }
+
+ leapraid_transport_port_remove(adapter,
+ sas_dev->sas_addr,
+ sas_dev->parent_sas_addr,
+ sas_dev->card_port);
+
+ dev_info(&adapter->pdev->dev,
+ "remove dev: hdl=0x%04x, sas addr=0x%016llx\n",
+ sas_dev->hdl, (unsigned long long)sas_dev->sas_addr);
+}
+
+static struct leapraid_vphy *leapraid_alloc_vphy(struct leapraid_adapter *adapter,
+ u8 port_id, u8 phy_num)
+{
+ struct leapraid_card_port *port;
+ struct leapraid_vphy *vphy;
+
+ port = leapraid_get_port_by_id(adapter, port_id, false);
+ if (!port)
+ return NULL;
+
+ vphy = leapraid_get_vphy_by_phy(port, phy_num);
+ if (vphy)
+ return vphy;
+
+ vphy = kzalloc(sizeof(*vphy), GFP_KERNEL);
+ if (!vphy)
+ return NULL;
+
+ if (!port->vphys_mask)
+ INIT_LIST_HEAD(&port->vphys_list);
+
+ port->vphys_mask |= BIT(phy_num);
+ vphy->phy_mask |= BIT(phy_num);
+ list_add_tail(&vphy->list, &port->vphys_list);
+ return vphy;
+}
+
+static int leapraid_add_port_to_card_port_list(struct leapraid_adapter *adapter,
+ u8 port_id, bool refresh)
+{
+ struct leapraid_card_port *card_port;
+
+ card_port = leapraid_get_port_by_id(adapter, port_id, false);
+ if (card_port)
+ return 0;
+
+ card_port = kzalloc(sizeof(*card_port), GFP_KERNEL);
+ if (!card_port)
+ return -ENOMEM;
+
+ card_port->port_id = port_id;
+ dev_info(&adapter->pdev->dev,
+ "port: %d is added to card_port list\n",
+ card_port->port_id);
+
+	if (refresh && adapter->access_ctrl.shost_recovering)
+		card_port->flg = LEAPRAID_CARD_PORT_FLG_NEW;
+ list_add_tail(&card_port->list, &adapter->dev_topo.card_port_list);
+ return 0;
+}
+
+static void leapraid_sas_host_add(struct leapraid_adapter *adapter,
+ bool refresh)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_phy_p0 phy_pg0;
+ struct leapraid_sas_dev_p0 sas_dev_pg0;
+ struct leapraid_enc_p0 enc_pg0;
+ struct leapraid_sas_io_unit_p0 *sas_iou_pg0;
+ u16 sas_iou_pg0_sz;
+ u16 attached_hdl;
+ u8 phys_num;
+ u8 port_id;
+ u8 link_rate;
+ int i;
+
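+	/*
+	 * On the first call (!refresh) discover the PHY count and
+	 * allocate the card PHY array; on refresh (after reset) reuse it
+	 * and only update ports and link state.
+	 */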
+ if (!refresh) {
+ if (leapraid_get_adapter_phys(adapter, &phys_num) || !phys_num)
+ return;
+
+ adapter->dev_topo.card.card_phy =
+ kcalloc(phys_num,
+ sizeof(struct leapraid_card_phy), GFP_KERNEL);
+ if (!adapter->dev_topo.card.card_phy)
+ return;
+
+ adapter->dev_topo.card.phys_num = phys_num;
+ }
+
+ sas_iou_pg0_sz = offsetof(struct leapraid_sas_io_unit_p0, phy_info) +
+ (adapter->dev_topo.card.phys_num *
+ sizeof(struct leapraid_sas_io_unit0_phy_info));
+ sas_iou_pg0 = kzalloc(sas_iou_pg0_sz, GFP_KERNEL);
+ if (!sas_iou_pg0)
+ goto out;
+
+ if (leapraid_get_sas_io_unit_page0(adapter,
+ sas_iou_pg0,
+ sas_iou_pg0_sz))
+ goto out;
+
+ adapter->dev_topo.card.parent_dev = &adapter->shost->shost_gendev;
+ adapter->dev_topo.card.hdl =
+ le16_to_cpu(sas_iou_pg0->phy_info[0].controller_dev_hdl);
+ for (i = 0; i < adapter->dev_topo.card.phys_num; i++) {
+ if (!refresh) { /* add */
+ cfgp1.phy_number = i;
+ if (leapraid_op_config_page(adapter, &phy_pg0, cfgp1,
+ cfgp2, GET_PHY_PG0))
+ goto out;
+
+ port_id = sas_iou_pg0->phy_info[i].port;
+ if (leapraid_add_port_to_card_port_list(adapter,
+ port_id,
+ false))
+ goto out;
+
+ if ((le32_to_cpu(phy_pg0.phy_info) &
+ LEAPRAID_SAS_PHYINFO_VPHY) &&
+ (phy_pg0.neg_link_rate >> 4) >=
+ LEAPRAID_SAS_NEG_LINK_RATE_1_5) {
+ if (!leapraid_alloc_vphy(adapter, port_id, i))
+ goto out;
+ adapter->dev_topo.card.card_phy[i].vphy = true;
+ }
+
+ adapter->dev_topo.card.card_phy[i].hdl =
+ adapter->dev_topo.card.hdl;
+ adapter->dev_topo.card.card_phy[i].phy_id = i;
+ adapter->dev_topo.card.card_phy[i].card_port =
+ leapraid_get_port_by_id(adapter,
+ port_id,
+ false);
+ leapraid_transport_add_card_phy(
+ adapter,
+ &adapter->dev_topo.card.card_phy[i],
+ &phy_pg0, adapter->dev_topo.card.parent_dev);
+ } else { /* refresh */
+ link_rate = sas_iou_pg0->phy_info[i].neg_link_rate >> 4;
+ port_id = sas_iou_pg0->phy_info[i].port;
+ if (leapraid_add_port_to_card_port_list(adapter,
+ port_id,
+ true))
+ goto out;
+
+ if (le32_to_cpu(sas_iou_pg0->phy_info[i]
+ .controller_phy_dev_info) &
+ LEAPRAID_DEVTYP_SEP &&
+ link_rate >= LEAPRAID_SAS_NEG_LINK_RATE_1_5) {
+ cfgp1.phy_number = i;
+ if ((leapraid_op_config_page(adapter, &phy_pg0,
+ cfgp1, cfgp2,
+ GET_PHY_PG0)))
+ continue;
+
+ if ((le32_to_cpu(phy_pg0.phy_info) &
+ LEAPRAID_SAS_PHYINFO_VPHY)) {
+ if (!leapraid_alloc_vphy(adapter,
+ port_id,
+ i))
+ goto out;
+ adapter->dev_topo.card.card_phy[i].vphy = true;
+ }
+ }
+
+ adapter->dev_topo.card.card_phy[i].hdl =
+ adapter->dev_topo.card.hdl;
+ attached_hdl =
+ le16_to_cpu(sas_iou_pg0->phy_info[i].attached_dev_hdl);
+ if (attached_hdl && link_rate < LEAPRAID_SAS_NEG_LINK_RATE_1_5)
+ link_rate = LEAPRAID_SAS_NEG_LINK_RATE_1_5;
+
+ adapter->dev_topo.card.card_phy[i].card_port =
+ leapraid_get_port_by_id(adapter,
+ port_id,
+ false);
+ if (!adapter->dev_topo.card.card_phy[i].phy) {
+ cfgp1.phy_number = i;
+ if ((leapraid_op_config_page(adapter, &phy_pg0,
+ cfgp1, cfgp2,
+ GET_PHY_PG0)))
+ continue;
+
+ adapter->dev_topo.card.card_phy[i].phy_id = i;
+ leapraid_transport_add_card_phy(adapter,
+ &adapter->dev_topo.card.card_phy[i],
+ &phy_pg0,
+ adapter->dev_topo.card.parent_dev);
+ continue;
+ }
+
+ leapraid_transport_update_links(adapter,
+ adapter->dev_topo.card.sas_address,
+ attached_hdl, i, link_rate,
+ adapter->dev_topo.card.card_phy[i].card_port);
+ }
+ }
+
+ if (!refresh) {
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = adapter->dev_topo.card.hdl;
+ if ((leapraid_op_config_page(adapter, &sas_dev_pg0, cfgp1,
+ cfgp2, GET_SAS_DEVICE_PG0)))
+ goto out;
+
+ adapter->dev_topo.card.enc_hdl =
+ le16_to_cpu(sas_dev_pg0.enc_hdl);
+ adapter->dev_topo.card.sas_address =
+ le64_to_cpu(sas_dev_pg0.sas_address);
+ dev_info(&adapter->pdev->dev,
+ "add host: devhdl=0x%04x, sas addr=0x%016llx, phynums=%d\n",
+ adapter->dev_topo.card.hdl,
+ (unsigned long long)adapter->dev_topo.card.sas_address,
+ adapter->dev_topo.card.phys_num);
+
+ if (adapter->dev_topo.card.enc_hdl) {
+ cfgp1.form = LEAPRAID_SAS_ENC_CFG_PGAD_HDL;
+ cfgp2.handle = adapter->dev_topo.card.enc_hdl;
+ if (!(leapraid_op_config_page(adapter, &enc_pg0,
+ cfgp1, cfgp2,
+ GET_SAS_ENCLOSURE_PG0)))
+ adapter->dev_topo.card.enc_lid =
+ le64_to_cpu(enc_pg0.enc_lid);
+ }
+ }
+out:
+ kfree(sas_iou_pg0);
+}
+
+static int leapraid_internal_exp_add(struct leapraid_adapter *adapter,
+ struct leapraid_exp_p0 *exp_pg0,
+ union cfg_param_1 *cfgp1,
+ union cfg_param_2 *cfgp2,
+ u16 hdl)
+{
+ struct leapraid_topo_node *topo_node_exp;
+ struct leapraid_sas_port *sas_port = NULL;
+ struct leapraid_enc_node *enc_dev;
+ struct leapraid_exp_p1 exp_pg1;
+ int rc = 0;
+ unsigned long flags;
+ u8 port_id;
+ u16 parent_handle;
+ u64 sas_addr_parent = 0;
+ int i;
+
+ port_id = exp_pg0->physical_port;
+ parent_handle = le16_to_cpu(exp_pg0->parent_dev_hdl);
+
+ if (leapraid_get_sas_address(adapter, parent_handle, &sas_addr_parent))
+ return -1;
+
+ topo_node_exp = kzalloc(sizeof(*topo_node_exp), GFP_KERNEL);
+ if (!topo_node_exp)
+ return -1;
+
+ topo_node_exp->hdl = hdl;
+ topo_node_exp->phys_num = exp_pg0->phy_num;
+ topo_node_exp->sas_address_parent = sas_addr_parent;
+ topo_node_exp->sas_address = le64_to_cpu(exp_pg0->sas_address);
+ topo_node_exp->card_port =
+ leapraid_get_port_by_id(adapter, port_id, false);
+ if (!topo_node_exp->card_port) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ dev_info(&adapter->pdev->dev,
+ "add exp: sas addr=0x%016llx, hdl=0x%04x, phdl=0x%04x, phys=%d\n",
+ (unsigned long long)topo_node_exp->sas_address,
+ hdl, parent_handle,
+ topo_node_exp->phys_num);
+ if (!topo_node_exp->phys_num) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ topo_node_exp->card_phy =
+ kcalloc(topo_node_exp->phys_num,
+ sizeof(struct leapraid_card_phy), GFP_KERNEL);
+ if (!topo_node_exp->card_phy) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ INIT_LIST_HEAD(&topo_node_exp->sas_port_list);
+ sas_port = leapraid_transport_port_add(adapter, hdl, sas_addr_parent,
+ topo_node_exp->card_port);
+ if (!sas_port) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ topo_node_exp->parent_dev = &sas_port->rphy->dev;
+ topo_node_exp->rphy = sas_port->rphy;
+ for (i = 0; i < topo_node_exp->phys_num; i++) {
+ cfgp1->phy_number = i;
+ cfgp2->handle = hdl;
+ if ((leapraid_op_config_page(adapter, &exp_pg1, *cfgp1, *cfgp2,
+ GET_SAS_EXPANDER_PG1))) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ topo_node_exp->card_phy[i].hdl = hdl;
+ topo_node_exp->card_phy[i].phy_id = i;
+ topo_node_exp->card_phy[i].card_port =
+ leapraid_get_port_by_id(adapter, port_id, false);
+ if ((leapraid_transport_add_exp_phy(adapter,
+ &topo_node_exp->card_phy[i],
+ &exp_pg1,
+ topo_node_exp->parent_dev))) {
+ rc = -1;
+ goto out_fail;
+ }
+ }
+
+ if (topo_node_exp->enc_hdl) {
+ enc_dev = leapraid_enc_find_by_hdl(adapter,
+ topo_node_exp->enc_hdl);
+ if (enc_dev)
+ topo_node_exp->enc_lid =
+ le64_to_cpu(enc_dev->pg0.enc_lid);
+ }
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ list_add_tail(&topo_node_exp->list, &adapter->dev_topo.exp_list);
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+ return 0;
+
+out_fail:
+ if (sas_port)
+ leapraid_transport_port_remove(adapter,
+ topo_node_exp->sas_address,
+ sas_addr_parent,
+ topo_node_exp->card_port);
+ kfree(topo_node_exp);
+ return rc;
+}
+
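+/*
+ * Add the expander with the given handle. If its parent is another
+ * expander that is not known yet, add the parent first (recursively);
+ * do nothing if the expander is already in the topology.
+ */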
+static int leapraid_exp_add(struct leapraid_adapter *adapter, u16 hdl)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_topo_node *topo_node_exp;
+ struct leapraid_exp_p0 exp_pg0;
+ u16 parent_handle;
+ u64 sas_addr, sas_addr_parent = 0;
+ unsigned long flags;
+ u8 port_id;
+ int rc = 0;
+
+ if (!hdl)
+ return -EPERM;
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering)
+ return -EPERM;
+
+ cfgp1.form = LEAPRAID_SAS_EXP_CFD_PGAD_HDL;
+ cfgp2.handle = hdl;
+ if ((leapraid_op_config_page(adapter, &exp_pg0, cfgp1, cfgp2,
+ GET_SAS_EXPANDER_PG0)))
+ return -EPERM;
+
+ parent_handle = le16_to_cpu(exp_pg0.parent_dev_hdl);
+ if (leapraid_get_sas_address(adapter, parent_handle, &sas_addr_parent))
+ return -EPERM;
+
+ port_id = exp_pg0.physical_port;
+ if (sas_addr_parent != adapter->dev_topo.card.sas_address) {
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ topo_node_exp =
+ leapraid_exp_find_by_sas_address(adapter,
+ sas_addr_parent,
+ leapraid_get_port_by_id(adapter, port_id, false));
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+ if (!topo_node_exp) {
+ rc = leapraid_exp_add(adapter, parent_handle);
+ if (rc != 0)
+ return rc;
+ }
+ }
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ sas_addr = le64_to_cpu(exp_pg0.sas_address);
+ topo_node_exp =
+ leapraid_exp_find_by_sas_address(adapter, sas_addr,
+ leapraid_get_port_by_id(adapter, port_id, false));
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ if (topo_node_exp)
+ return 0;
+
+ return leapraid_internal_exp_add(adapter, &exp_pg0, &cfgp1,
+ &cfgp2, hdl);
+}
+
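+/*
+ * Tear down an expander node: remove all end devices and child
+ * expanders hanging off its ports, drop the transport port, then
+ * unlink and free the node itself.
+ */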
+static void leapraid_exp_node_rm(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node_exp)
+{
+ struct leapraid_sas_port *sas_port, *sas_port_next;
+ unsigned long flags;
+ int port_id;
+
+ list_for_each_entry_safe(sas_port, sas_port_next,
+ &topo_node_exp->sas_port_list,
+ port_list) {
+ if (adapter->access_ctrl.shost_recovering)
+ return;
+
+ switch (sas_port->remote_identify.device_type) {
+ case SAS_END_DEVICE:
+ leapraid_sas_dev_remove_by_sas_address(
+ adapter,
+ sas_port->remote_identify.sas_address,
+ sas_port->card_port);
+ break;
+ case SAS_EDGE_EXPANDER_DEVICE:
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ leapraid_exp_rm(
+ adapter,
+ sas_port->remote_identify.sas_address,
+ sas_port->card_port);
+ break;
+ default:
+ break;
+ }
+ }
+
+ port_id = topo_node_exp->card_port->port_id;
+ leapraid_transport_port_remove(adapter, topo_node_exp->sas_address,
+ topo_node_exp->sas_address_parent,
+ topo_node_exp->card_port);
+ dev_info(&adapter->pdev->dev,
+ "removing exp: port=%d, sas addr=0x%016llx, hdl=0x%04x\n",
+ port_id, (unsigned long long)topo_node_exp->sas_address,
+ topo_node_exp->hdl);
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ list_del(&topo_node_exp->list);
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+ kfree(topo_node_exp->card_phy);
+ kfree(topo_node_exp);
+}
+
+void leapraid_exp_rm(struct leapraid_adapter *adapter, u64 sas_addr,
+ struct leapraid_card_port *port)
+{
+ struct leapraid_topo_node *topo_node_exp;
+ unsigned long flags;
+
+ if (adapter->access_ctrl.shost_recovering)
+ return;
+
+ if (!port)
+ return;
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ topo_node_exp = leapraid_exp_find_by_sas_address(adapter,
+ sas_addr,
+ port);
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ if (topo_node_exp)
+ leapraid_exp_node_rm(adapter, topo_node_exp);
+}
+
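+/*
+ * Re-read SAS Device Page 0 after a phy change. If firmware moved the
+ * device to a new handle, update the cached handle and enclosure data;
+ * if the device is still present, mark its I/O path as running again.
+ */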
+static void leapraid_check_device(struct leapraid_adapter *adapter,
+ u64 parent_sas_address, u16 handle,
+ u8 phy_number, u8 link_rate)
+{
+ struct leapraid_sas_dev_p0 sas_device_pg0;
+ struct leapraid_sas_dev *sas_dev = NULL;
+ struct leapraid_enc_node *enclosure_dev = NULL;
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ unsigned long flags;
+ u64 sas_address;
+ struct scsi_target *starget;
+ struct leapraid_starget_priv *sas_target_priv_data;
+ u32 device_info;
+ struct leapraid_card_port *port;
+
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = handle;
+ if ((leapraid_op_config_page(adapter, &sas_device_pg0, cfgp1, cfgp2,
+ GET_SAS_DEVICE_PG0)))
+ return;
+
+ if (phy_number != sas_device_pg0.phy_num)
+ return;
+
+ device_info = le32_to_cpu(sas_device_pg0.dev_info);
+ if (!(leapraid_is_end_dev(device_info)))
+ return;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_address = le64_to_cpu(sas_device_pg0.sas_address);
+ port = leapraid_get_port_by_id(adapter, sas_device_pg0.physical_port,
+ false);
+ if (!port)
+ goto out_unlock;
+
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(adapter, sas_address,
+ port);
+ if (!sas_dev)
+ goto out_unlock;
+
+ if (unlikely(sas_dev->hdl != handle)) {
+ starget = sas_dev->starget;
+ sas_target_priv_data = starget->hostdata;
+ starget_printk(KERN_INFO, starget,
+ "hdl changed from 0x%04x to 0x%04x!\n",
+ sas_dev->hdl, handle);
+ sas_target_priv_data->hdl = handle;
+ sas_dev->hdl = handle;
+ if (le16_to_cpu(sas_device_pg0.flg) &
+ LEAPRAID_SAS_DEV_P0_FLG_ENC_LEVEL_VALID) {
+ sas_dev->enc_level =
+ sas_device_pg0.enc_level;
+ memcpy(sas_dev->connector_name,
+ sas_device_pg0.connector_name, 4);
+ sas_dev->connector_name[4] = '\0';
+ } else {
+ sas_dev->enc_level = 0;
+ sas_dev->connector_name[0] = '\0';
+ }
+ sas_dev->enc_hdl =
+ le16_to_cpu(sas_device_pg0.enc_hdl);
+ enclosure_dev =
+ leapraid_enc_find_by_hdl(adapter, sas_dev->enc_hdl);
+ if (enclosure_dev) {
+ sas_dev->enc_lid =
+ le64_to_cpu(enclosure_dev->pg0.enc_lid);
+ }
+ }
+
+ if (!(le16_to_cpu(sas_device_pg0.flg) &
+ LEAPRAID_SAS_DEV_P0_FLG_DEV_PRESENT))
+ goto out_unlock;
+
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ leapraid_ublk_io_dev_to_running(adapter, sas_address, port);
+ goto out;
+
+out_unlock:
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+out:
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+}
+
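+/*
+ * Walk the per-phy entries of a topology change event: update link
+ * rates on phy changes, add newly attached targets and remove targets
+ * that stopped responding. Finally drop the expander itself if the
+ * event reports it as not responding.
+ */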
+static int leapraid_internal_sas_topo_chg_evt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_card_port *card_port,
+ struct leapraid_topo_node *topo_node_exp,
+ struct leapraid_fw_evt_work *fw_evt,
+ u64 sas_addr, u8 max_phys)
+{
+ struct leapraid_evt_data_sas_topo_change_list *evt_data;
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ u8 phy_number;
+ u8 link_rate, prev_link_rate;
+ u16 reason_code;
+ u16 hdl;
+ int i;
+
+ evt_data = fw_evt->evt_data;
+ for (i = 0; i < evt_data->entry_num; i++) {
+ if (fw_evt->ignore)
+ return 0;
+
+ if (adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering)
+ return 0;
+
+ phy_number = evt_data->start_phy_num + i;
+ if (phy_number >= max_phys)
+ continue;
+
+ reason_code = evt_data->phy[i].phy_status &
+ LEAPRAID_EVT_SAS_TOPO_RC_MASK;
+
+ hdl = le16_to_cpu(evt_data->phy[i].attached_dev_hdl);
+ if (!hdl)
+ continue;
+
+ link_rate = evt_data->phy[i].link_rate >> 4;
+ prev_link_rate = evt_data->phy[i].link_rate & 0xF;
+ switch (reason_code) {
+ case LEAPRAID_EVT_SAS_TOPO_RC_PHY_CHANGED:
+ if (adapter->access_ctrl.shost_recovering)
+ break;
+
+ if (link_rate == prev_link_rate)
+ break;
+
+ leapraid_transport_update_links(adapter, sas_addr,
+ hdl, phy_number,
+ link_rate, card_port);
+ if (link_rate < LEAPRAID_SAS_NEG_LINK_RATE_1_5)
+ break;
+
+ leapraid_check_device(adapter, sas_addr, hdl,
+ phy_number, link_rate);
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock,
+ flags);
+ sas_dev =
+ leapraid_hold_lock_get_sas_dev_by_hdl(
+ adapter, hdl);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock,
+ flags);
+ if (sas_dev) {
+ leapraid_sdev_put(sas_dev);
+ break;
+ }
+ if (!test_bit(hdl, (unsigned long *)adapter->dev_topo.pending_dev_add))
+ break;
+
+ evt_data->phy[i].phy_status &=
+ LEAPRAID_EVT_SAS_TOPO_RC_CLEAR_MASK;
+ evt_data->phy[i].phy_status |=
+ LEAPRAID_EVT_SAS_TOPO_RC_TARG_ADDED;
+ fallthrough;
+
+ case LEAPRAID_EVT_SAS_TOPO_RC_TARG_ADDED:
+ if (adapter->access_ctrl.shost_recovering)
+ break;
+ leapraid_transport_update_links(adapter, sas_addr,
+ hdl, phy_number,
+ link_rate, card_port);
+ if (link_rate < LEAPRAID_SAS_NEG_LINK_RATE_1_5)
+ break;
+ leapraid_add_dev(adapter, hdl);
+ break;
+ case LEAPRAID_EVT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
+ leapraid_sas_dev_remove_by_hdl(adapter, hdl);
+ break;
+ }
+ }
+
+ if (evt_data->exp_status == LEAPRAID_EVT_SAS_TOPO_ES_NOT_RESPONDING &&
+ topo_node_exp)
+ leapraid_exp_rm(adapter, sas_addr, card_port);
+
+ return 0;
+}
+
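+/*
+ * Top-level handler for a SAS topology change event: make sure the
+ * host phys are registered, resolve which expander (or the host
+ * itself) the event refers to, then delegate the per-phy processing
+ * to leapraid_internal_sas_topo_chg_evt().
+ */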
+static int leapraid_sas_topo_chg_evt(struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt)
+{
+ struct leapraid_topo_node *topo_node_exp;
+ struct leapraid_card_port *card_port;
+ struct leapraid_evt_data_sas_topo_change_list *evt_data;
+ u16 phdl;
+ u8 max_phys;
+ u64 sas_addr;
+ unsigned long flags;
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering)
+ return 0;
+
+ evt_data = fw_evt->evt_data;
+ leapraid_sas_host_add(adapter, adapter->dev_topo.card.phys_num);
+
+ if (fw_evt->ignore)
+ return 0;
+
+ phdl = le16_to_cpu(evt_data->exp_dev_hdl);
+ card_port = leapraid_get_port_by_id(adapter,
+ evt_data->physical_port,
+ false);
+ if (evt_data->exp_status == LEAPRAID_EVT_SAS_TOPO_ES_ADDED)
+ if (leapraid_exp_add(adapter, phdl) != 0)
+ return 0;
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ topo_node_exp = leapraid_exp_find_by_hdl(adapter, phdl);
+ if (topo_node_exp) {
+ sas_addr = topo_node_exp->sas_address;
+ max_phys = topo_node_exp->phys_num;
+ card_port = topo_node_exp->card_port;
+ } else if (phdl < adapter->dev_topo.card.phys_num) {
+ sas_addr = adapter->dev_topo.card.sas_address;
+ max_phys = adapter->dev_topo.card.phys_num;
+ } else {
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock,
+ flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ return leapraid_internal_sas_topo_chg_evt(adapter, card_port,
+ topo_node_exp, fw_evt,
+ sas_addr, max_phys);
+}
+
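+/*
+ * Toggle no_uld_attach and reprobe so a RAID member disk is hidden
+ * from (or re-exposed to) the upper-layer drivers.
+ */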
+static void leapraid_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
+{
+ sdev->no_uld_attach = no_uld_attach ? 1 : 0;
+ sdev_printk(KERN_INFO, sdev,
+ "%s raid component to upper layer\n",
+ sdev->no_uld_attach ? "hide" : "expose");
+ WARN_ON(scsi_device_reprobe(sdev));
+}
+
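+/*
+ * An IR physical disk became visible: mark its handle in the pd_hdls
+ * bitmap and, if the device is not known yet, update the parent links
+ * and add it.
+ */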
+static void leapraid_sas_pd_add(struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_ir_change *evt_data)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_dev_p0 sas_dev_p0;
+ struct leapraid_sas_dev *sas_dev;
+ u64 sas_address;
+ u16 parent_hdl;
+ u16 hdl;
+
+ hdl = le16_to_cpu(evt_data->phys_disk_dev_hdl);
+ set_bit(hdl, (unsigned long *)adapter->dev_topo.pd_hdls);
+ sas_dev = leapraid_get_sas_dev_by_hdl(adapter, hdl);
+ if (sas_dev) {
+ leapraid_sdev_put(sas_dev);
+ dev_warn(&adapter->pdev->dev,
+ "dev handle 0x%x already exists\n", hdl);
+ return;
+ }
+
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = hdl;
+ if ((leapraid_op_config_page(adapter, &sas_dev_p0, cfgp1, cfgp2,
+ GET_SAS_DEVICE_PG0))) {
+ dev_warn(&adapter->pdev->dev, "failed to read dev page0\n");
+ return;
+ }
+
+ parent_hdl = le16_to_cpu(sas_dev_p0.parent_dev_hdl);
+ if (!leapraid_get_sas_address(adapter, parent_hdl, &sas_address))
+ leapraid_transport_update_links(adapter, sas_address, hdl,
+ sas_dev_p0.phy_num,
+ LEAPRAID_SAS_NEG_LINK_RATE_1_5,
+ leapraid_get_port_by_id(adapter,
+ sas_dev_p0.physical_port,
+ false));
+ leapraid_add_dev(adapter, hdl);
+}
+
+static void leapraid_sas_pd_delete(struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_ir_change *evt_data)
+{
+ u16 hdl;
+
+ hdl = le16_to_cpu(evt_data->phys_disk_dev_hdl);
+ leapraid_sas_dev_remove_by_hdl(adapter, hdl);
+}
+
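+/*
+ * A physical disk joined a volume: record the owning volume handle
+ * and WWID, flag the target as a RAID member and reprobe it so it is
+ * hidden from the upper layers.
+ */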
+static void leapraid_sas_pd_hide(struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_ir_change *evt_data)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct scsi_target *starget = NULL;
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ u64 volume_wwid = 0;
+ u16 volume_hdl = 0;
+ u16 hdl;
+
+ hdl = le16_to_cpu(evt_data->phys_disk_dev_hdl);
+ leapraid_cfg_get_volume_hdl(adapter, hdl, &volume_hdl);
+ if (volume_hdl)
+ leapraid_cfg_get_volume_wwid(adapter,
+ volume_hdl,
+ &volume_wwid);
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ if (!sas_dev) {
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return;
+ }
+
+ set_bit(hdl, (unsigned long *)adapter->dev_topo.pd_hdls);
+ if (sas_dev->starget && sas_dev->starget->hostdata) {
+ starget = sas_dev->starget;
+ starget_priv = starget->hostdata;
+ starget_priv->flg |= LEAPRAID_TGT_FLG_RAID_MEMBER;
+ sas_dev->volume_hdl = volume_hdl;
+ sas_dev->volume_wwid = volume_wwid;
+ leapraid_sdev_put(sas_dev);
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ if (starget) {
+ dev_info(&adapter->pdev->dev, "hide sas_dev, hdl=0x%x\n", hdl);
+ starget_for_each_device(starget,
+ (void *)1, leapraid_reprobe_lun);
+ }
+}
+
+static void leapraid_sas_pd_expose(
+ struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_ir_change *evt_data)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct scsi_target *starget = NULL;
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ u16 hdl;
+
+ hdl = le16_to_cpu(evt_data->phys_disk_dev_hdl);
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ if (!sas_dev) {
+ dev_warn(&adapter->pdev->dev,
+ "%s:%d: sas_dev not found, hdl=0x%x\n",
+ __func__, __LINE__, hdl);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return;
+ }
+
+ sas_dev->volume_hdl = 0;
+ sas_dev->volume_wwid = 0;
+ clear_bit(hdl, (unsigned long *)adapter->dev_topo.pd_hdls);
+ if (sas_dev->starget && sas_dev->starget->hostdata) {
+ starget = sas_dev->starget;
+ starget_priv = starget->hostdata;
+ starget_priv->flg &= ~LEAPRAID_TGT_FLG_RAID_MEMBER;
+ sas_dev->led_on = false;
+ leapraid_sdev_put(sas_dev);
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (starget) {
+ dev_info(&adapter->pdev->dev,
+ "expose sas_dev, hdl=0x%x\n", hdl);
+ starget_for_each_device(starget, NULL, leapraid_reprobe_lun);
+ }
+}
+
+static void leapraid_sas_volume_add(struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_ir_change *evt_data)
+{
+ struct leapraid_raid_volume *raid_volume;
+ unsigned long flags;
+ u64 wwid;
+ u16 hdl;
+
+ hdl = le16_to_cpu(evt_data->vol_dev_hdl);
+
+ if (leapraid_cfg_get_volume_wwid(adapter, hdl, &wwid)) {
+ dev_warn(&adapter->pdev->dev, "failed to read volume page1\n");
+ return;
+ }
+
+ if (!wwid) {
+ dev_warn(&adapter->pdev->dev, "invalid WWID(handle=0x%x)\n",
+ hdl);
+ return;
+ }
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_wwid(adapter, wwid);
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+
+ if (raid_volume) {
+ dev_warn(&adapter->pdev->dev,
+ "volume handle 0x%x already exists\n", hdl);
+ return;
+ }
+
+ raid_volume = kzalloc(sizeof(*raid_volume), GFP_KERNEL);
+ if (!raid_volume)
+ return;
+
+ raid_volume->id = adapter->dev_topo.sas_id++;
+ raid_volume->channel = RAID_CHANNEL;
+ raid_volume->hdl = hdl;
+ raid_volume->wwid = wwid;
+ leapraid_raid_volume_add(adapter, raid_volume);
+ if (!adapter->scan_dev_desc.wait_scan_dev_done) {
+ if (scsi_add_device(adapter->shost, RAID_CHANNEL,
+ raid_volume->id, 0))
+ leapraid_raid_volume_remove(adapter, raid_volume);
+ dev_info(&adapter->pdev->dev,
+ "add raid volume: hdl=0x%x, wwid=0x%llx\n", hdl, wwid);
+ } else {
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ leapraid_check_boot_dev(adapter, raid_volume, RAID_CHANNEL);
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock,
+ flags);
+ }
+}
+
+static void leapraid_sas_volume_delete(struct leapraid_adapter *adapter,
+ u16 hdl)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_raid_volume *raid_volume;
+ struct scsi_target *starget = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_hdl(adapter, hdl);
+ if (!raid_volume) {
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock,
+ flags);
+ dev_warn(&adapter->pdev->dev,
+ "%s:%d: volume handle 0x%x not found\n",
+ __func__, __LINE__, hdl);
+ return;
+ }
+
+ if (raid_volume->starget) {
+ starget = raid_volume->starget;
+ starget_priv = starget->hostdata;
+ starget_priv->deleted = true;
+ }
+
+ dev_info(&adapter->pdev->dev,
+ "delete raid volume: hdl=0x%x, wwid=0x%llx\n",
+ raid_volume->hdl, raid_volume->wwid);
+ list_del(&raid_volume->list);
+ kfree(raid_volume);
+
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+
+ if (starget)
+ scsi_remove_target(&starget->dev);
+}
+
+static void leapraid_sas_ir_chg_evt(struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt)
+{
+ struct leapraid_evt_data_ir_change *evt_data;
+
+ evt_data = fw_evt->evt_data;
+
+ switch (evt_data->reason_code) {
+ case LEAPRAID_EVT_IR_RC_VOLUME_ADD:
+ leapraid_sas_volume_add(adapter, evt_data);
+ break;
+ case LEAPRAID_EVT_IR_RC_VOLUME_DELETE:
+ leapraid_sas_volume_delete(adapter,
+ le16_to_cpu(evt_data->vol_dev_hdl));
+ break;
+ case LEAPRAID_EVT_IR_RC_PD_HIDDEN_TO_ADD:
+ leapraid_sas_pd_add(adapter, evt_data);
+ break;
+ case LEAPRAID_EVT_IR_RC_PD_UNHIDDEN_TO_DELETE:
+ leapraid_sas_pd_delete(adapter, evt_data);
+ break;
+ case LEAPRAID_EVT_IR_RC_PD_CREATED_TO_HIDE:
+ leapraid_sas_pd_hide(adapter, evt_data);
+ break;
+ case LEAPRAID_EVT_IR_RC_PD_DELETED_TO_EXPOSE:
+ leapraid_sas_pd_expose(adapter, evt_data);
+ break;
+ default:
+ break;
+ }
+}
+
+static void leapraid_sas_enc_dev_stat_add_node(
+ struct leapraid_adapter *adapter, u16 hdl)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_enc_node *enc_node = NULL;
+ int rc;
+
+ enc_node = kzalloc(sizeof(*enc_node), GFP_KERNEL);
+ if (!enc_node)
+ return;
+
+ cfgp1.form = LEAPRAID_SAS_ENC_CFG_PGAD_HDL;
+ cfgp2.handle = hdl;
+ rc = leapraid_op_config_page(adapter, &enc_node->pg0, cfgp1, cfgp2,
+ GET_SAS_ENCLOSURE_PG0);
+ if (rc) {
+ kfree(enc_node);
+ return;
+ }
+ list_add_tail(&enc_node->list, &adapter->dev_topo.enc_list);
+}
+
+static void leapraid_sas_enc_dev_stat_del_node(
+ struct leapraid_enc_node *enc_node)
+{
+ if (!enc_node)
+ return;
+
+ list_del(&enc_node->list);
+ kfree(enc_node);
+}
+
+static void leapraid_sas_enc_dev_stat_chg_evt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt)
+{
+ struct leapraid_enc_node *enc_node = NULL;
+ struct leapraid_evt_data_sas_enc_dev_status_change *evt_data;
+ u16 enc_hdl;
+
+ if (adapter->access_ctrl.shost_recovering)
+ return;
+
+ evt_data = fw_evt->evt_data;
+ enc_hdl = le16_to_cpu(evt_data->enc_hdl);
+ if (enc_hdl)
+ enc_node = leapraid_enc_find_by_hdl(adapter, enc_hdl);
+ switch (evt_data->reason_code) {
+ case LEAPRAID_EVT_SAS_ENCL_RC_ADDED:
+ if (!enc_node)
+ leapraid_sas_enc_dev_stat_add_node(adapter, enc_hdl);
+ break;
+ case LEAPRAID_EVT_SAS_ENCL_RC_NOT_RESPONDING:
+ leapraid_sas_enc_dev_stat_del_node(enc_node);
+ break;
+ default:
+ break;
+ }
+}
+
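+/*
+ * Flush the leftover init-list references, then drop end devices that
+ * did not respond to rediscovery: move them to a local list under the
+ * lock and remove them outside of it, clearing the resp flag on the
+ * survivors for the next pass.
+ */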
+static void leapraid_remove_unresp_sas_end_dev(
+ struct leapraid_adapter *adapter)
+{
+ struct leapraid_sas_dev *sas_dev, *sas_dev_next;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ list_for_each_entry_safe(sas_dev, sas_dev_next,
+ &adapter->dev_topo.sas_dev_init_list, list) {
+ list_del_init(&sas_dev->list);
+ leapraid_sdev_put(sas_dev);
+ }
+ list_for_each_entry_safe(sas_dev, sas_dev_next,
+ &adapter->dev_topo.sas_dev_list, list) {
+ if (!sas_dev->resp)
+ list_move_tail(&sas_dev->list, &head);
+ else
+ sas_dev->resp = false;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ list_for_each_entry_safe(sas_dev, sas_dev_next, &head, list) {
+ leapraid_remove_device(adapter, sas_dev);
+ list_del_init(&sas_dev->list);
+ leapraid_sdev_put(sas_dev);
+ }
+
+	dev_info(&adapter->pdev->dev,
+		 "unresponsive sas end devices removed\n");
+}
+
+static void leapraid_remove_unresp_raid_volumes(
+ struct leapraid_adapter *adapter)
+{
+ struct leapraid_raid_volume *raid_volume, *raid_volume_next;
+
+ list_for_each_entry_safe(raid_volume, raid_volume_next,
+ &adapter->dev_topo.raid_volume_list, list) {
+ if (!raid_volume->resp)
+ leapraid_sas_volume_delete(adapter, raid_volume->hdl);
+ else
+ raid_volume->resp = false;
+ }
+	dev_info(&adapter->pdev->dev,
+		 "unresponsive raid volumes removed\n");
+}
+
+static void leapraid_remove_unresp_sas_exp(struct leapraid_adapter *adapter)
+{
+ struct leapraid_topo_node *topo_node_exp, *topo_node_exp_next;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ list_for_each_entry_safe(topo_node_exp, topo_node_exp_next,
+ &adapter->dev_topo.exp_list, list) {
+ if (!topo_node_exp->resp)
+ list_move_tail(&topo_node_exp->list, &head);
+ else
+ topo_node_exp->resp = false;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ list_for_each_entry_safe(topo_node_exp, topo_node_exp_next,
+ &head, list)
+ leapraid_exp_node_rm(adapter, topo_node_exp);
+
+	dev_info(&adapter->pdev->dev,
+		 "unresponsive sas expanders removed\n");
+}
+
+static void leapraid_remove_unresp_dev(struct leapraid_adapter *adapter)
+{
+ leapraid_remove_unresp_sas_end_dev(adapter);
+ if (adapter->adapter_attr.raid_support)
+ leapraid_remove_unresp_raid_volumes(adapter);
+ leapraid_remove_unresp_sas_exp(adapter);
+ leapraid_ublk_io_all_dev(adapter);
+}
+
+static void leapraid_del_dirty_vphy(struct leapraid_adapter *adapter)
+{
+ struct leapraid_card_port *card_port, *card_port_next;
+ struct leapraid_vphy *vphy, *vphy_next;
+
+ list_for_each_entry_safe(card_port, card_port_next,
+ &adapter->dev_topo.card_port_list, list) {
+ if (!card_port->vphys_mask)
+ continue;
+
+ list_for_each_entry_safe(vphy, vphy_next,
+ &card_port->vphys_list, list) {
+ if (!(vphy->flg & LEAPRAID_VPHY_FLG_DIRTY))
+ continue;
+
+ card_port->vphys_mask &= ~vphy->phy_mask;
+ list_del(&vphy->list);
+ kfree(vphy);
+ }
+
+ if (!card_port->vphys_mask && !card_port->sas_address)
+ card_port->flg |= LEAPRAID_CARD_PORT_FLG_DIRTY;
+ }
+}
+
+static void leapraid_del_dirty_card_port(struct leapraid_adapter *adapter)
+{
+ struct leapraid_card_port *card_port, *card_port_next;
+
+ list_for_each_entry_safe(card_port, card_port_next,
+ &adapter->dev_topo.card_port_list, list) {
+ if (!(card_port->flg & LEAPRAID_CARD_PORT_FLG_DIRTY) ||
+ card_port->flg & LEAPRAID_CARD_PORT_FLG_NEW)
+ continue;
+
+ list_del(&card_port->list);
+ kfree(card_port);
+ }
+}
+
+static void leapraid_update_dev_qdepth(struct leapraid_adapter *adapter)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct leapraid_sas_dev *sas_dev;
+ struct scsi_device *sdev;
+ u16 qdepth;
+
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv || !sdev_priv->starget_priv)
+ continue;
+ sas_dev = sdev_priv->starget_priv->sas_dev;
+ if (sas_dev && sas_dev->dev_info & LEAPRAID_DEVTYP_SSP_TGT)
+ qdepth = (sas_dev->port_type > 1) ?
+ adapter->adapter_attr.wideport_max_queue_depth :
+ adapter->adapter_attr.narrowport_max_queue_depth;
+ else if (sas_dev && sas_dev->dev_info &
+ LEAPRAID_DEVTYP_SATA_DEV)
+ qdepth = adapter->adapter_attr.sata_max_queue_depth;
+ else
+ continue;
+
+ leapraid_adjust_sdev_queue_depth(sdev, qdepth);
+ }
+}
+
+static void leapraid_update_exp_links(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node_exp,
+ u16 hdl)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_exp_p1 exp_p1;
+ int i;
+
+ cfgp2.handle = hdl;
+ for (i = 0; i < topo_node_exp->phys_num; i++) {
+ cfgp1.phy_number = i;
+ if ((leapraid_op_config_page(adapter, &exp_p1, cfgp1, cfgp2,
+ GET_SAS_EXPANDER_PG1)))
+ return;
+
+ leapraid_transport_update_links(adapter,
+ topo_node_exp->sas_address,
+ le16_to_cpu(exp_p1.attached_dev_hdl),
+ i,
+ exp_p1.neg_link_rate >> 4,
+ topo_node_exp->card_port);
+ }
+}
+
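+/*
+ * Walk all expanders via the GET_NEXT handle loop (starting from
+ * 0xFFFF): refresh the links of known expanders and add the ones
+ * discovered after the reset.
+ */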
+static void leapraid_scan_exp_after_reset(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_topo_node *topo_node_exp;
+ struct leapraid_exp_p0 exp_p0;
+ unsigned long flags;
+ u16 hdl;
+ u8 port_id;
+
+ dev_info(&adapter->pdev->dev, "begin scanning expanders\n");
+
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (hdl = 0xFFFF, cfgp2.handle = hdl;
+ !leapraid_op_config_page(adapter, &exp_p0, cfgp1, cfgp2,
+ GET_SAS_EXPANDER_PG0);
+ cfgp2.handle = hdl) {
+ hdl = le16_to_cpu(exp_p0.dev_hdl);
+ port_id = exp_p0.physical_port;
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ topo_node_exp =
+ leapraid_exp_find_by_sas_address(adapter,
+ le64_to_cpu(exp_p0.sas_address),
+ leapraid_get_port_by_id(adapter,
+ port_id,
+ false));
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock,
+ flags);
+
+ if (topo_node_exp) {
+ leapraid_update_exp_links(adapter, topo_node_exp, hdl);
+ } else {
+ leapraid_exp_add(adapter, hdl);
+
+ dev_info(&adapter->pdev->dev,
+ "add exp: hdl=0x%04x, sas addr=0x%016llx\n",
+ hdl,
+ (unsigned long long)le64_to_cpu(
+ exp_p0.sas_address));
+ }
+ }
+
+ dev_info(&adapter->pdev->dev, "expanders scan complete\n");
+}
+
+static void leapraid_scan_phy_disks_after_reset(
+ struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ union cfg_param_1 cfgp1_extra = {0};
+ union cfg_param_2 cfgp2_extra = {0};
+ struct leapraid_sas_dev_p0 sas_dev_p0;
+ struct leapraid_raidpd_p0 raidpd_p0;
+ struct leapraid_sas_dev *sas_dev;
+ u8 phys_disk_num, port_id;
+ u16 hdl, parent_hdl;
+ u64 sas_addr;
+
+	dev_info(&adapter->pdev->dev, "begin scanning phys disks\n");
+
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (phys_disk_num = 0xFF, cfgp2.form_specific = phys_disk_num;
+ !leapraid_op_config_page(adapter, &raidpd_p0,
+ cfgp1, cfgp2, GET_PHY_DISK_PG0);
+ cfgp2.form_specific = phys_disk_num) {
+ phys_disk_num = raidpd_p0.phys_disk_num;
+ hdl = le16_to_cpu(raidpd_p0.dev_hdl);
+ sas_dev = leapraid_get_sas_dev_by_hdl(adapter, hdl);
+ if (sas_dev) {
+ leapraid_sdev_put(sas_dev);
+ continue;
+ }
+
+ cfgp1_extra.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2_extra.handle = hdl;
+ if (leapraid_op_config_page(adapter, &sas_dev_p0, cfgp1_extra,
+ cfgp2_extra, GET_SAS_DEVICE_PG0) !=
+ 0)
+ continue;
+
+ parent_hdl = le16_to_cpu(sas_dev_p0.parent_dev_hdl);
+ if (!leapraid_get_sas_address(adapter,
+ parent_hdl,
+ &sas_addr)) {
+ port_id = sas_dev_p0.physical_port;
+ leapraid_transport_update_links(
+ adapter, sas_addr, hdl,
+ sas_dev_p0.phy_num,
+ LEAPRAID_SAS_NEG_LINK_RATE_1_5,
+ leapraid_get_port_by_id(
+ adapter, port_id, false));
+ set_bit(hdl,
+ (unsigned long *)adapter->dev_topo.pd_hdls);
+
+ leapraid_add_dev(adapter, hdl);
+
+ dev_info(&adapter->pdev->dev,
+ "add phys disk: hdl=0x%04x, sas addr=0x%016llx\n",
+ hdl,
+ (unsigned long long)le64_to_cpu(
+ sas_dev_p0.sas_address));
+ }
+ }
+
+ dev_info(&adapter->pdev->dev, "phys disk scan complete\n");
+}
+
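+/*
+ * Walk RAID Volume Page 1 after a reset: for each volume that is not
+ * known yet, read Page 0 and, if it is optimal, online or degraded,
+ * synthesize a VOLUME_ADD event to register it.
+ */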
+static void leapraid_scan_vol_after_reset(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ union cfg_param_1 cfgp1_extra = {0};
+ union cfg_param_2 cfgp2_extra = {0};
+ struct leapraid_evt_data_ir_change evt_data;
+	struct leapraid_raid_volume *raid_volume;
+ struct leapraid_raidvol_p1 *vol_p1;
+ struct leapraid_raidvol_p0 *vol_p0;
+ unsigned long flags;
+ u16 hdl;
+
+ vol_p0 = kzalloc(sizeof(*vol_p0), GFP_KERNEL);
+ if (!vol_p0)
+ return;
+
+ vol_p1 = kzalloc(sizeof(*vol_p1), GFP_KERNEL);
+ if (!vol_p1) {
+ kfree(vol_p0);
+ return;
+ }
+
+ dev_info(&adapter->pdev->dev, "begin scanning volumes\n");
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (hdl = 0xFFFF, cfgp2.handle = hdl;
+ !leapraid_op_config_page(adapter, vol_p1, cfgp1,
+ cfgp2, GET_RAID_VOLUME_PG1);
+ cfgp2.handle = hdl) {
+ hdl = le16_to_cpu(vol_p1->dev_hdl);
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_wwid(
+ adapter,
+ le64_to_cpu(vol_p1->wwid));
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock,
+ flags);
+ if (raid_volume)
+ continue;
+
+ cfgp1_extra.size = sizeof(struct leapraid_raidvol_p0);
+ cfgp2_extra.handle = hdl;
+ if (leapraid_op_config_page(adapter, vol_p0, cfgp1_extra,
+ cfgp2_extra, GET_RAID_VOLUME_PG0))
+ continue;
+
+ if (vol_p0->volume_state == LEAPRAID_VOL_STATE_OPTIMAL ||
+ vol_p0->volume_state == LEAPRAID_VOL_STATE_ONLINE ||
+ vol_p0->volume_state == LEAPRAID_VOL_STATE_DEGRADED) {
+ memset(&evt_data, 0,
+ sizeof(struct leapraid_evt_data_ir_change));
+ evt_data.reason_code = LEAPRAID_EVT_IR_RC_VOLUME_ADD;
+ evt_data.vol_dev_hdl = vol_p1->dev_hdl;
+ leapraid_sas_volume_add(adapter, &evt_data);
+ dev_info(&adapter->pdev->dev,
+ "add volume: hdl=0x%04x\n",
+ vol_p1->dev_hdl);
+ }
+ }
+
+ kfree(vol_p0);
+ kfree(vol_p1);
+
+ dev_info(&adapter->pdev->dev, "volumes scan complete\n");
+}
+
+static void leapraid_scan_sas_dev_after_reset(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_dev_p0 sas_dev_p0;
+ struct leapraid_sas_dev *sas_dev;
+ u16 hdl, parent_hdl;
+ u64 sas_address;
+ u8 port_id;
+
+ dev_info(&adapter->pdev->dev,
+ "begin scanning sas end devices\n");
+
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (hdl = 0xFFFF, cfgp2.handle = hdl;
+ !leapraid_op_config_page(adapter, &sas_dev_p0, cfgp1, cfgp2,
+ GET_SAS_DEVICE_PG0);
+ cfgp2.handle = hdl) {
+ hdl = le16_to_cpu(sas_dev_p0.dev_hdl);
+ if (!(leapraid_is_end_dev(le32_to_cpu(sas_dev_p0.dev_info))))
+ continue;
+
+ port_id = sas_dev_p0.physical_port;
+ sas_dev = leapraid_get_sas_dev_by_addr(
+ adapter,
+ le64_to_cpu(sas_dev_p0.sas_address),
+ leapraid_get_port_by_id(
+ adapter,
+ port_id,
+ false));
+ if (sas_dev) {
+ leapraid_sdev_put(sas_dev);
+ continue;
+ }
+
+ parent_hdl = le16_to_cpu(sas_dev_p0.parent_dev_hdl);
+ if (!leapraid_get_sas_address(adapter, parent_hdl,
+ &sas_address)) {
+ leapraid_transport_update_links(
+ adapter,
+ sas_address,
+ hdl,
+ sas_dev_p0.phy_num,
+ LEAPRAID_SAS_NEG_LINK_RATE_1_5,
+ leapraid_get_port_by_id(adapter,
+ port_id,
+ false));
+ leapraid_add_dev(adapter, hdl);
+ dev_info(&adapter->pdev->dev,
+ "add sas dev: hdl=0x%04x, sas addr=0x%016llx\n",
+ hdl,
+ (unsigned long long)le64_to_cpu(
+ sas_dev_p0.sas_address));
+ }
+ }
+
+ dev_info(&adapter->pdev->dev, "sas end devices scan complete\n");
+}
+
+static void leapraid_scan_all_dev_after_reset(struct leapraid_adapter *adapter)
+{
+ dev_info(&adapter->pdev->dev, "begin scanning devices\n");
+
+ leapraid_sas_host_add(adapter, adapter->dev_topo.card.phys_num);
+ leapraid_scan_exp_after_reset(adapter);
+ if (adapter->adapter_attr.raid_support) {
+ leapraid_scan_phy_disks_after_reset(adapter);
+ leapraid_scan_vol_after_reset(adapter);
+ }
+ leapraid_scan_sas_dev_after_reset(adapter);
+
+ dev_info(&adapter->pdev->dev, "devices scan complete\n");
+}
+
+static void leapraid_hardreset_async_logic(struct leapraid_adapter *adapter)
+{
+ leapraid_remove_unresp_dev(adapter);
+ leapraid_del_dirty_vphy(adapter);
+ leapraid_del_dirty_card_port(adapter);
+ leapraid_update_dev_qdepth(adapter);
+ leapraid_scan_all_dev_after_reset(adapter);
+
+ if (adapter->scan_dev_desc.driver_loading)
+ leapraid_scan_dev_done(adapter);
+}
+
+static int leapraid_send_enc_cmd(struct leapraid_adapter *adapter,
+ struct leapraid_sep_rep *sep_rep,
+ struct leapraid_sep_req *sep_req)
+{
+ void *req;
+ bool reset_flg = false;
+ int rc = 0;
+
+ mutex_lock(&adapter->driver_cmds.enc_cmd.mutex);
+ rc = leapraid_check_adapter_is_op(adapter);
+ if (rc)
+ goto out;
+
+ adapter->driver_cmds.enc_cmd.status = LEAPRAID_CMD_PENDING;
+ req = leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.enc_cmd.inter_taskid);
+ memset(req, 0, LEAPRAID_REQUEST_SIZE);
+ memcpy(req, sep_req, sizeof(struct leapraid_sep_req));
+ init_completion(&adapter->driver_cmds.enc_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.enc_cmd.inter_taskid);
+ wait_for_completion_timeout(&adapter->driver_cmds.enc_cmd.done,
+ LEAPRAID_ENC_CMD_TIMEOUT * HZ);
+ if (!(adapter->driver_cmds.enc_cmd.status & LEAPRAID_CMD_DONE)) {
+ reset_flg =
+ leapraid_check_reset(
+ adapter->driver_cmds.enc_cmd.status);
+ rc = -EFAULT;
+ goto do_hard_reset;
+ }
+
+ if (adapter->driver_cmds.enc_cmd.status & LEAPRAID_CMD_REPLY_VALID)
+ memcpy(sep_rep, (void *)(&adapter->driver_cmds.enc_cmd.reply),
+ sizeof(struct leapraid_sep_rep));
+do_hard_reset:
+ if (reset_flg) {
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, FULL_RESET);
+ }
+
+ adapter->driver_cmds.enc_cmd.status = LEAPRAID_CMD_NOT_USED;
+out:
+ mutex_unlock(&adapter->driver_cmds.enc_cmd.mutex);
+ return rc;
+}
+
+static void leapraid_set_led(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev, bool on)
+{
+ struct leapraid_sep_rep sep_rep;
+ struct leapraid_sep_req sep_req;
+
+ if (!sas_dev)
+ return;
+
+ memset(&sep_req, 0, sizeof(struct leapraid_sep_req));
+ memset(&sep_rep, 0, sizeof(struct leapraid_sep_rep));
+ sep_req.func = LEAPRAID_FUNC_SCSI_ENC_PROCESSOR;
+ sep_req.act = LEAPRAID_SEP_REQ_ACT_WRITE_STATUS;
+ if (on) {
+ sep_req.slot_status =
+ cpu_to_le32(LEAPRAID_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
+ sep_req.dev_hdl = cpu_to_le16(sas_dev->hdl);
+ sep_req.flg = LEAPRAID_SEP_REQ_FLG_DEVHDL_ADDRESS;
+ if (leapraid_send_enc_cmd(adapter, &sep_rep, &sep_req)) {
+ leapraid_sdev_put(sas_dev);
+ return;
+ }
+
+ sas_dev->led_on = true;
+ if (sep_rep.adapter_status)
+ leapraid_sdev_put(sas_dev);
+ } else {
+ sep_req.slot_status = 0;
+ sep_req.slot = cpu_to_le16(sas_dev->slot);
+ sep_req.dev_hdl = 0;
+ sep_req.enc_hdl = cpu_to_le16(sas_dev->enc_hdl);
+ sep_req.flg = LEAPRAID_SEP_REQ_FLG_ENCLOSURE_SLOT_ADDRESS;
+ if ((leapraid_send_enc_cmd(adapter, &sep_rep, &sep_req))) {
+ leapraid_sdev_put(sas_dev);
+ return;
+ }
+
+ if (sep_rep.adapter_status) {
+ leapraid_sdev_put(sas_dev);
+ return;
+ }
+ }
+}
+
+static void leapraid_fw_work(struct leapraid_adapter *adapter,
+ struct leapraid_fw_evt_work *fw_evt)
+{
+ struct leapraid_sas_dev *sas_dev;
+
+ adapter->fw_evt_s.cur_evt = fw_evt;
+ leapraid_del_fw_evt_from_list(adapter, fw_evt);
+ if (adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering) {
+ leapraid_fw_evt_put(fw_evt);
+ adapter->fw_evt_s.cur_evt = NULL;
+ return;
+ }
+ switch (fw_evt->evt_type) {
+ case LEAPRAID_EVT_SAS_DISCOVERY:
+ {
+ struct leapraid_evt_data_sas_disc *evt_data;
+
+ evt_data = fw_evt->evt_data;
+ if (evt_data->reason_code ==
+ LEAPRAID_EVT_SAS_DISC_RC_STARTED &&
+ !adapter->dev_topo.card.phys_num)
+ leapraid_sas_host_add(adapter, 0);
+ break;
+ }
+ case LEAPRAID_EVT_SAS_TOPO_CHANGE_LIST:
+ leapraid_sas_topo_chg_evt(adapter, fw_evt);
+ break;
+ case LEAPRAID_EVT_IR_CHANGE:
+ leapraid_sas_ir_chg_evt(adapter, fw_evt);
+ break;
+ case LEAPRAID_EVT_SAS_ENCL_DEV_STATUS_CHANGE:
+ leapraid_sas_enc_dev_stat_chg_evt(adapter, fw_evt);
+ break;
+ case LEAPRAID_EVT_REMOVE_DEAD_DEV:
+ while (scsi_host_in_recovery(adapter->shost) ||
+ adapter->access_ctrl.shost_recovering) {
+ if (adapter->access_ctrl.host_removing ||
+ adapter->fw_evt_s.fw_evt_cleanup)
+ goto out;
+
+ ssleep(1);
+ }
+ leapraid_hardreset_async_logic(adapter);
+ break;
+ case LEAPRAID_EVT_TURN_ON_PFA_LED:
+ sas_dev = leapraid_get_sas_dev_by_hdl(adapter,
+ fw_evt->dev_handle);
+ leapraid_set_led(adapter, sas_dev, true);
+ break;
+ case LEAPRAID_EVT_SCAN_DEV_DONE:
+ adapter->scan_dev_desc.scan_start = false;
+ break;
+ default:
+ break;
+ }
+out:
+ leapraid_fw_evt_put(fw_evt);
+ adapter->fw_evt_s.cur_evt = NULL;
+}
+
+static void leapraid_sas_dev_stat_chg_evt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_sas_dev_status_change *event_data)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_sas_dev *sas_dev = NULL;
+ u64 sas_address;
+ unsigned long flags;
+
+ switch (event_data->reason_code) {
+ case LEAPRAID_EVT_SAS_DEV_STAT_RC_INTERNAL_DEV_RESET:
+ case LEAPRAID_EVT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
+ break;
+ default:
+ return;
+ }
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+
+ sas_address = le64_to_cpu(event_data->sas_address);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(adapter,
+ sas_address,
+ leapraid_get_port_by_id(adapter,
+ event_data->physical_port,
+ false));
+
+ if (sas_dev && sas_dev->starget) {
+ starget_priv = sas_dev->starget->hostdata;
+ if (starget_priv) {
+ switch (event_data->reason_code) {
+ case LEAPRAID_EVT_SAS_DEV_STAT_RC_INTERNAL_DEV_RESET:
+ starget_priv->tm_busy = true;
+ break;
+ case LEAPRAID_EVT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
+ starget_priv->tm_busy = false;
+ break;
+ }
+ }
+ }
+
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+}
+
+static void leapraid_set_volume_delete_flag(struct leapraid_adapter *adapter,
+ u16 handle)
+{
+ struct leapraid_raid_volume *raid_volume;
+ struct leapraid_starget_priv *sas_target_priv_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_hdl(adapter, handle);
+ if (raid_volume && raid_volume->starget &&
+ raid_volume->starget->hostdata) {
+ sas_target_priv_data = raid_volume->starget->hostdata;
+ sas_target_priv_data->deleted = true;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+}
+
+static void leapraid_check_ir_change_evt(struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_ir_change *evt_data)
+{
+ u16 phys_disk_dev_hdl;
+
+ switch (evt_data->reason_code) {
+ case LEAPRAID_EVT_IR_RC_VOLUME_DELETE:
+ leapraid_set_volume_delete_flag(adapter,
+ le16_to_cpu(evt_data->vol_dev_hdl));
+ break;
+ case LEAPRAID_EVT_IR_RC_PD_UNHIDDEN_TO_DELETE:
+ phys_disk_dev_hdl =
+ le16_to_cpu(evt_data->phys_disk_dev_hdl);
+ clear_bit(phys_disk_dev_hdl,
+ (unsigned long *)adapter->dev_topo.pd_hdls);
+ leapraid_tgt_rst_send(adapter, phys_disk_dev_hdl);
+ break;
+ }
+}
+
+static void leapraid_topo_del_evts_process_exp_status(
+ struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_sas_topo_change_list *evt_data)
+{
+ struct leapraid_fw_evt_work *fw_evt = NULL;
+ struct leapraid_evt_data_sas_topo_change_list *loc_evt_data = NULL;
+ unsigned long flags;
+ u16 exp_hdl;
+
+ exp_hdl = le16_to_cpu(evt_data->exp_dev_hdl);
+
+ switch (evt_data->exp_status) {
+ case LEAPRAID_EVT_SAS_TOPO_ES_NOT_RESPONDING:
+ spin_lock_irqsave(&adapter->fw_evt_s.fw_evt_lock, flags);
+ list_for_each_entry(fw_evt,
+ &adapter->fw_evt_s.fw_evt_list, list) {
+ if (fw_evt->evt_type !=
+ LEAPRAID_EVT_SAS_TOPO_CHANGE_LIST ||
+ fw_evt->ignore)
+ continue;
+
+ loc_evt_data = fw_evt->evt_data;
+ if ((loc_evt_data->exp_status ==
+ LEAPRAID_EVT_SAS_TOPO_ES_ADDED ||
+ loc_evt_data->exp_status ==
+ LEAPRAID_EVT_SAS_TOPO_ES_RESPONDING) &&
+ le16_to_cpu(loc_evt_data->exp_dev_hdl) == exp_hdl)
+ fw_evt->ignore = 1;
+ }
+ spin_unlock_irqrestore(&adapter->fw_evt_s.fw_evt_lock, flags);
+ break;
+ default:
+ break;
+ }
+}
+
+static void leapraid_check_topo_del_evts(struct leapraid_adapter *adapter,
+ struct leapraid_evt_data_sas_topo_change_list *evt_data)
+{
+ int reason_code;
+ u16 hdl;
+ int i;
+
+ for (i = 0; i < evt_data->entry_num; i++) {
+ hdl = le16_to_cpu(evt_data->phy[i].attached_dev_hdl);
+ if (!hdl)
+ continue;
+
+ reason_code = evt_data->phy[i].phy_status &
+ LEAPRAID_EVT_SAS_TOPO_RC_MASK;
+ if (reason_code ==
+ LEAPRAID_EVT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
+ leapraid_tgt_not_responding(adapter, hdl);
+ }
+ leapraid_topo_del_evts_process_exp_status(adapter, evt_data);
+}
+
+static bool leapraid_async_process_evt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_evt_notify_rep *event_notify_rep)
+{
+ u16 evt = le16_to_cpu(event_notify_rep->evt);
+ bool exit_flag = false;
+
+ switch (evt) {
+ case LEAPRAID_EVT_SAS_DEV_STATUS_CHANGE:
+ leapraid_sas_dev_stat_chg_evt(adapter,
+ (struct leapraid_evt_data_sas_dev_status_change
+ *)event_notify_rep->evt_data);
+ break;
+ case LEAPRAID_EVT_IR_CHANGE:
+ leapraid_check_ir_change_evt(adapter,
+ (struct leapraid_evt_data_ir_change
+ *)event_notify_rep->evt_data);
+ break;
+ case LEAPRAID_EVT_SAS_TOPO_CHANGE_LIST:
+ leapraid_check_topo_del_evts(adapter,
+ (struct leapraid_evt_data_sas_topo_change_list
+ *)event_notify_rep->evt_data);
+ if (adapter->access_ctrl.shost_recovering) {
+ exit_flag = true;
+ return exit_flag;
+ }
+ break;
+ case LEAPRAID_EVT_SAS_DISCOVERY:
+ case LEAPRAID_EVT_SAS_ENCL_DEV_STATUS_CHANGE:
+ break;
+ default:
+ exit_flag = true;
+ return exit_flag;
+ }
+
+ return exit_flag;
+}
+
+static void leapraid_async_evt_cb_enqueue(
+ struct leapraid_adapter *adapter,
+ struct leapraid_evt_notify_rep *evt_notify_rep)
+{
+ struct leapraid_fw_evt_work *fw_evt;
+ u16 evt_sz;
+
+ fw_evt = leapraid_alloc_fw_evt_work();
+ if (!fw_evt)
+ return;
+
+ evt_sz = le16_to_cpu(evt_notify_rep->evt_data_len) * 4;
+ fw_evt->evt_data = kmemdup(evt_notify_rep->evt_data,
+ evt_sz, GFP_ATOMIC);
+ if (!fw_evt->evt_data) {
+ leapraid_fw_evt_put(fw_evt);
+ return;
+ }
+ fw_evt->adapter = adapter;
+ fw_evt->evt_type = le16_to_cpu(evt_notify_rep->evt);
+ leapraid_fw_evt_add(adapter, fw_evt);
+ leapraid_fw_evt_put(fw_evt);
+}
+
+static void leapraid_async_evt_cb(struct leapraid_adapter *adapter,
+ u8 msix_index, u32 rep_paddr)
+{
+ struct leapraid_evt_notify_rep *evt_notify_rep;
+
+ if (adapter->access_ctrl.pcie_recovering)
+ return;
+
+ evt_notify_rep = leapraid_get_reply_vaddr(adapter, rep_paddr);
+ if (unlikely(!evt_notify_rep))
+ return;
+
+ if (leapraid_async_process_evt(adapter, evt_notify_rep))
+ return;
+
+ leapraid_async_evt_cb_enqueue(adapter, evt_notify_rep);
+}
+
+static void leapraid_handle_async_event(struct leapraid_adapter *adapter,
+ u8 msix_index, u32 reply)
+{
+ struct leapraid_evt_notify_rep *leap_mpi_rep =
+ leapraid_get_reply_vaddr(adapter, reply);
+
+ if (!leap_mpi_rep)
+ return;
+
+ if (leap_mpi_rep->func != LEAPRAID_FUNC_EVENT_NOTIFY)
+ return;
+
+ leapraid_async_evt_cb(adapter, msix_index, reply);
+}
+
+void leapraid_async_turn_on_led(struct leapraid_adapter *adapter, u16 handle)
+{
+ struct leapraid_fw_evt_work *fw_event;
+
+ fw_event = leapraid_alloc_fw_evt_work();
+ if (!fw_event)
+ return;
+
+ fw_event->dev_handle = handle;
+ fw_event->adapter = adapter;
+ fw_event->evt_type = LEAPRAID_EVT_TURN_ON_PFA_LED;
+ leapraid_fw_evt_add(adapter, fw_event);
+ leapraid_fw_evt_put(fw_event);
+}
+
+static void leapraid_hardreset_barrier(struct leapraid_adapter *adapter)
+{
+ struct leapraid_fw_evt_work *fw_event;
+
+ fw_event = leapraid_alloc_fw_evt_work();
+ if (!fw_event)
+ return;
+
+ fw_event->adapter = adapter;
+ fw_event->evt_type = LEAPRAID_EVT_REMOVE_DEAD_DEV;
+ leapraid_fw_evt_add(adapter, fw_event);
+ leapraid_fw_evt_put(fw_event);
+}
+
+static void leapraid_scan_dev_complete(struct leapraid_adapter *adapter)
+{
+ struct leapraid_fw_evt_work *fw_evt;
+
+ fw_evt = leapraid_alloc_fw_evt_work();
+ if (!fw_evt)
+ return;
+
+ fw_evt->evt_type = LEAPRAID_EVT_SCAN_DEV_DONE;
+ fw_evt->adapter = adapter;
+ leapraid_fw_evt_add(adapter, fw_evt);
+ leapraid_fw_evt_put(fw_evt);
+}
+
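+/*
+ * Completion handler for internal driver commands: match the command
+ * by callback index, copy the reply frame, and special-case device
+ * scan completions and autosense data for the ctl command path.
+ */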
+static u8 leapraid_driver_cmds_done(struct leapraid_adapter *adapter,
+ u16 taskid, u8 msix_index,
+ u32 rep_paddr, u8 cb_idx)
+{
+ struct leapraid_rep *leap_mpi_rep =
+ leapraid_get_reply_vaddr(adapter, rep_paddr);
+ struct leapraid_driver_cmd *sp_cmd, *_sp_cmd = NULL;
+
+ list_for_each_entry(sp_cmd, &adapter->driver_cmds.special_cmd_list,
+ list)
+ if (cb_idx == sp_cmd->cb_idx) {
+ _sp_cmd = sp_cmd;
+ break;
+ }
+
+ if (WARN_ON(!_sp_cmd))
+ return 1;
+ if (WARN_ON(_sp_cmd->status == LEAPRAID_CMD_NOT_USED))
+ return 1;
+ if (WARN_ON(taskid != _sp_cmd->hp_taskid &&
+ taskid != _sp_cmd->taskid &&
+ taskid != _sp_cmd->inter_taskid))
+ return 1;
+
+ _sp_cmd->status |= LEAPRAID_CMD_DONE;
+ if (leap_mpi_rep) {
+ memcpy((void *)(&_sp_cmd->reply), leap_mpi_rep,
+ leap_mpi_rep->msg_len * 4);
+ _sp_cmd->status |= LEAPRAID_CMD_REPLY_VALID;
+
+ if (_sp_cmd->cb_idx == LEAPRAID_SCAN_DEV_CB_IDX) {
+ u16 adapter_status;
+
+ _sp_cmd->status &= ~LEAPRAID_CMD_PENDING;
+ adapter_status =
+ le16_to_cpu(leap_mpi_rep->adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (adapter_status != LEAPRAID_ADAPTER_STATUS_SUCCESS)
+ adapter->scan_dev_desc.scan_dev_failed = true;
+
+ if (_sp_cmd->async_scan_dev) {
+ if (adapter_status ==
+ LEAPRAID_ADAPTER_STATUS_SUCCESS) {
+ leapraid_scan_dev_complete(adapter);
+ } else {
+ adapter->scan_dev_desc.scan_start_failed =
+ adapter_status;
+ }
+ return 1;
+ }
+
+ complete(&_sp_cmd->done);
+ return 1;
+ }
+
+ if (_sp_cmd->cb_idx == LEAPRAID_CTL_CB_IDX) {
+ struct leapraid_scsiio_rep *scsiio_reply;
+
+ if (leap_mpi_rep->function ==
+ LEAPRAID_FUNC_SCSIIO_REQ ||
+ leap_mpi_rep->function ==
+ LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH) {
+ scsiio_reply =
+ (struct leapraid_scsiio_rep *)leap_mpi_rep;
+ if (scsiio_reply->scsi_state &
+ LEAPRAID_SCSI_STATE_AUTOSENSE_VALID)
+ memcpy((void *)(&adapter->driver_cmds.ctl_cmd.sense),
+ leapraid_get_sense_buffer(adapter, taskid),
+ min_t(u32,
+ SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(scsiio_reply->sense_count)));
+ }
+ }
+ }
+
+ _sp_cmd->status &= ~LEAPRAID_CMD_PENDING;
+ complete(&_sp_cmd->done);
+
+ return 1;
+}
+
+static void leapraid_request_descript_handler(struct leapraid_adapter *adapter,
+ union leapraid_rep_desc_union *rpf,
+ u8 req_desc_type, u8 msix_idx)
+{
+ u32 rep;
+ u16 taskid;
+
+ rep = 0;
+ taskid = le16_to_cpu(rpf->dflt_rep.taskid);
+ switch (req_desc_type) {
+ case LEAPRAID_RPY_DESC_FLG_FP_SCSI_IO_SUCCESS:
+ case LEAPRAID_RPY_DESC_FLG_SCSI_IO_SUCCESS:
+ if (taskid <= adapter->shost->can_queue ||
+ taskid == adapter->driver_cmds.driver_scsiio_cmd.taskid) {
+ leapraid_scsiio_done(adapter, taskid, msix_idx, 0);
+ } else {
+ if (leapraid_driver_cmds_done(adapter, taskid,
+ msix_idx, 0,
+ leapraid_get_cb_idx(adapter,
+ taskid)))
+ leapraid_free_taskid(adapter, taskid);
+ }
+ break;
+ case LEAPRAID_RPY_DESC_FLG_ADDRESS_REPLY:
+ rep = le32_to_cpu(rpf->addr_rep.rep_frame_addr);
+ if (rep > ((u32)adapter->mem_desc.rep_msg_dma +
+ adapter->adapter_attr.rep_msg_qd * LEAPRAID_REPLY_SIEZ) ||
+ rep < ((u32)adapter->mem_desc.rep_msg_dma))
+ rep = 0;
+ if (taskid) {
+ if (taskid <= adapter->shost->can_queue ||
+ taskid == adapter->driver_cmds.driver_scsiio_cmd.taskid) {
+ leapraid_scsiio_done(adapter, taskid,
+ msix_idx, rep);
+ } else {
+ if (leapraid_driver_cmds_done(adapter, taskid,
+ msix_idx, rep,
+ leapraid_get_cb_idx(adapter,
+ taskid)))
+ leapraid_free_taskid(adapter, taskid);
+ }
+ } else {
+ leapraid_handle_async_event(adapter, msix_idx, rep);
+ }
+
+ if (rep) {
+ adapter->rep_msg_host_idx =
+ (adapter->rep_msg_host_idx ==
+ (adapter->adapter_attr.rep_msg_qd - 1)) ?
+ 0 : adapter->rep_msg_host_idx + 1;
+ adapter->mem_desc.rep_msg_addr[adapter->rep_msg_host_idx] =
+ cpu_to_le32(rep);
+ wmb(); /* Make sure that all write ops are in order */
+ writel(adapter->rep_msg_host_idx,
+ &adapter->iomem_base->rep_msg_host_idx);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
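+/*
+ * Drain one reply queue: process descriptors until an unused entry is
+ * reached, recycling each handled slot, then publish the new post
+ * index to the adapter. Returns the number of completed commands.
+ */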
+int leapraid_rep_queue_handler(struct leapraid_rq *rq)
+{
+ struct leapraid_adapter *adapter = rq->adapter;
+ union leapraid_rep_desc_union *rep_desc;
+ u8 req_desc_type;
+ u64 finish_cmds;
+ u8 msix_idx;
+
+ msix_idx = rq->msix_idx;
+ finish_cmds = 0;
+ if (!atomic_add_unless(&rq->busy, LEAPRAID_BUSY_LIMIT,
+ LEAPRAID_BUSY_LIMIT))
+ return finish_cmds;
+
+ rep_desc = &rq->rep_desc[rq->rep_post_host_idx];
+ req_desc_type = rep_desc->dflt_rep.rep_flg &
+ LEAPRAID_RPY_DESC_FLG_TYPE_MASK;
+ if (req_desc_type == LEAPRAID_RPY_DESC_FLG_UNUSED) {
+ atomic_dec(&rq->busy);
+ return finish_cmds;
+ }
+
+ for (;;) {
+ if (rep_desc->u.low == UINT_MAX ||
+ rep_desc->u.high == UINT_MAX)
+ break;
+
+ leapraid_request_descript_handler(adapter, rep_desc,
+ req_desc_type, msix_idx);
+ dev_dbg(&adapter->pdev->dev,
+ "LEAPRAID_SCSIIO: Handled Desc taskid %d, msix %d\n",
+			le16_to_cpu(rep_desc->dflt_rep.taskid), msix_idx);
+ rep_desc->words = cpu_to_le64(ULLONG_MAX);
+ rq->rep_post_host_idx =
+ (rq->rep_post_host_idx ==
+ (adapter->adapter_attr.rep_desc_qd -
+ LEAPRAID_BUSY_LIMIT)) ?
+ 0 : rq->rep_post_host_idx + 1;
+ req_desc_type =
+ rq->rep_desc[rq->rep_post_host_idx].dflt_rep.rep_flg &
+ LEAPRAID_RPY_DESC_FLG_TYPE_MASK;
+ finish_cmds++;
+ if (req_desc_type == LEAPRAID_RPY_DESC_FLG_UNUSED)
+ break;
+ rep_desc = rq->rep_desc + rq->rep_post_host_idx;
+ }
+
+ if (!finish_cmds) {
+ atomic_dec(&rq->busy);
+ return finish_cmds;
+ }
+
+ wmb(); /* Make sure that all write ops are in order */
+ writel(rq->rep_post_host_idx | ((msix_idx & LEAPRAID_MSIX_GROUP_MASK) <<
+ LEAPRAID_RPHI_MSIX_IDX_SHIFT),
+ &adapter->iomem_base->rep_post_reg_idx[msix_idx /
+ LEAPRAID_MSIX_GROUP_SIZE].idx);
+ atomic_dec(&rq->busy);
+ return finish_cmds;
+}
+
+static irqreturn_t leapraid_irq_handler(int irq, void *bus_id)
+{
+ struct leapraid_rq *rq = bus_id;
+ struct leapraid_adapter *adapter = rq->adapter;
+
+ dev_dbg(&adapter->pdev->dev,
+		"LEAPRAID_SCSIIO: Received an interrupt, irq %d msix %d\n",
+ irq, rq->msix_idx);
+
+ if (adapter->mask_int)
+ return IRQ_NONE;
+
+ return ((leapraid_rep_queue_handler(rq) > 0) ?
+ IRQ_HANDLED : IRQ_NONE);
+}
+
+void leapraid_sync_irqs(struct leapraid_adapter *adapter, bool poll)
+{
+ struct leapraid_int_rq *int_rq;
+ struct leapraid_blk_mq_poll_rq *blk_mq_poll_rq;
+ unsigned int i;
+
+ if (!adapter->notification_desc.msix_enable)
+ return;
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering)
+ return;
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qdex; i++) {
+ int_rq = &adapter->notification_desc.int_rqs[i];
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering)
+ return;
+
+ if (int_rq->rq.msix_idx == 0)
+ continue;
+
+ synchronize_irq(pci_irq_vector(adapter->pdev, int_rq->rq.msix_idx));
+ if (poll)
+ leapraid_rep_queue_handler(&int_rq->rq);
+ }
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qcnt; i++) {
+ blk_mq_poll_rq =
+ &adapter->notification_desc.blk_mq_poll_rqs[i];
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering)
+ return;
+
+ if (blk_mq_poll_rq->rq.msix_idx == 0)
+ continue;
+
+ leapraid_rep_queue_handler(&blk_mq_poll_rq->rq);
+ }
+}
+
+void leapraid_mq_polling_pause(struct leapraid_adapter *adapter)
+{
+ int iopoll_q_count =
+ adapter->adapter_attr.rq_cnt -
+ adapter->notification_desc.iopoll_qdex;
+ int qid;
+
+ for (qid = 0; qid < iopoll_q_count; qid++)
+ atomic_set(&adapter->notification_desc.blk_mq_poll_rqs[qid].pause, 1);
+
+ for (qid = 0; qid < iopoll_q_count; qid++) {
+ while (atomic_read(&adapter->notification_desc.blk_mq_poll_rqs[qid].busy)) {
+ cpu_relax();
+ udelay(LEAPRAID_IO_POLL_DELAY_US);
+ }
+ }
+}
+
+void leapraid_mq_polling_resume(struct leapraid_adapter *adapter)
+{
+ int iopoll_q_count =
+ adapter->adapter_attr.rq_cnt -
+ adapter->notification_desc.iopoll_qdex;
+ int qid;
+
+ for (qid = 0; qid < iopoll_q_count; qid++)
+ atomic_set(&adapter->notification_desc.blk_mq_poll_rqs[qid].pause, 0);
+}
+
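+/*
+ * Write the magic key sequence to the write-sequence register until
+ * the adapter sets the diag write-enable bit, or give up after
+ * LEAPRAID_UNLOCK_RETRY_LIMIT attempts.
+ */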
+static int leapraid_unlock_host_diag(struct leapraid_adapter *adapter,
+ u32 *host_diag)
+{
+ const u32 unlock_seq[] = { 0x0, 0xF, 0x4, 0xB, 0x2, 0x7, 0xD };
+ const int max_retries = LEAPRAID_UNLOCK_RETRY_LIMIT;
+ int retry = 0;
+ unsigned int i;
+
+ *host_diag = 0;
+ while (retry++ <= max_retries) {
+ for (i = 0; i < ARRAY_SIZE(unlock_seq); i++)
+ writel(unlock_seq[i], &adapter->iomem_base->ws);
+
+ msleep(LEAPRAID_UNLOCK_SLEEP_MS);
+
+ *host_diag = leapraid_readl(&adapter->iomem_base->host_diag);
+ if (*host_diag & LEAPRAID_DIAG_WRITE_ENABLE)
+ return 0;
+ }
+
+	dev_err(&adapter->pdev->dev, "host diag unlock timed out!\n");
+ return -EFAULT;
+}
+
+static int leapraid_host_diag_reset(struct leapraid_adapter *adapter)
+{
+ u32 host_diag;
+ u32 cnt;
+
+ dev_info(&adapter->pdev->dev, "entering host diag reset!\n");
+ pci_cfg_access_lock(adapter->pdev);
+
+ mutex_lock(&adapter->reset_desc.host_diag_mutex);
+ if (leapraid_unlock_host_diag(adapter, &host_diag))
+ goto out;
+
+ writel(host_diag | LEAPRAID_DIAG_RESET,
+ &adapter->iomem_base->host_diag);
+
+ msleep(LEAPRAID_MSLEEP_NORMAL_MS);
+ for (cnt = 0; cnt < LEAPRAID_RESET_LOOP_COUNT_DEFAULT; cnt++) {
+ host_diag = leapraid_readl(&adapter->iomem_base->host_diag);
+ if (host_diag == LEAPRAID_INVALID_HOST_DIAG_VAL)
+ goto out;
+
+ if (!(host_diag & LEAPRAID_DIAG_RESET))
+ break;
+
+ msleep(LEAPRAID_RESET_POLL_INTERVAL_MS);
+ }
+
+	writel(host_diag & ~LEAPRAID_DIAG_HOLD_ADAPTER_RESET,
+	       &adapter->iomem_base->host_diag);
+	writel(0x0, &adapter->iomem_base->ws);
+	mutex_unlock(&adapter->reset_desc.host_diag_mutex);
+	if (!leapraid_wait_adapter_ready(adapter))
+		goto out_unlocked;
+
+	pci_cfg_access_unlock(adapter->pdev);
+	dev_info(&adapter->pdev->dev, "host diag reset succeeded!\n");
+	return 0;
+out:
+	mutex_unlock(&adapter->reset_desc.host_diag_mutex);
+out_unlocked:
+	pci_cfg_access_unlock(adapter->pdev);
+	dev_info(&adapter->pdev->dev, "host diag reset failed!\n");
+	return -EFAULT;
+}
+
+static int leapraid_find_matching_port(
+ struct leapraid_card_port *card_port_table,
+ u8 count, u8 port_id, u64 sas_addr)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (card_port_table[i].port_id == port_id &&
+ card_port_table[i].sas_address == sas_addr)
+ return i;
+ }
+ return -1;
+}
+
+static u8 leapraid_fill_card_port_table(
+ struct leapraid_adapter *adapter,
+ struct leapraid_sas_io_unit_p0 *sas_iounit_p0,
+ struct leapraid_card_port *new_card_port_table)
+{
+ u8 port_entry_num = 0, port_id;
+ u16 attached_hdl;
+ u64 attached_sas_addr;
+ int i, idx;
+
+ for (i = 0; i < adapter->dev_topo.card.phys_num; i++) {
+ if ((sas_iounit_p0->phy_info[i].neg_link_rate >> 4)
+ < LEAPRAID_SAS_NEG_LINK_RATE_1_5)
+ continue;
+
+ attached_hdl =
+ le16_to_cpu(sas_iounit_p0->phy_info[i].attached_dev_hdl);
+ if (leapraid_get_sas_address(adapter,
+ attached_hdl,
+ &attached_sas_addr) != 0)
+ continue;
+
+ port_id = sas_iounit_p0->phy_info[i].port;
+
+ idx = leapraid_find_matching_port(new_card_port_table,
+ port_entry_num,
+ port_id,
+ attached_sas_addr);
+ if (idx >= 0) {
+ new_card_port_table[idx].phy_mask |= BIT(i);
+ } else {
+ new_card_port_table[port_entry_num].port_id = port_id;
+ new_card_port_table[port_entry_num].phy_mask = BIT(i);
+ new_card_port_table[port_entry_num].sas_address =
+ attached_sas_addr;
+ port_entry_num++;
+ }
+ }
+
+ return port_entry_num;
+}
+
+static u8 leapraid_set_new_card_port_table_after_reset(
+ struct leapraid_adapter *adapter,
+ struct leapraid_card_port *new_card_port_table)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_io_unit_p0 *sas_iounit_p0 = NULL;
+ u8 port_entry_num = 0;
+ u16 sz;
+
+ sz = offsetof(struct leapraid_sas_io_unit_p0, phy_info) +
+ (adapter->dev_topo.card.phys_num *
+ sizeof(struct leapraid_sas_io_unit0_phy_info));
+ sas_iounit_p0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_p0)
+ return port_entry_num;
+
+ cfgp1.size = sz;
+ if ((leapraid_op_config_page(adapter, sas_iounit_p0, cfgp1, cfgp2,
+ GET_SAS_IOUNIT_PG0)) != 0)
+ goto out;
+
+ port_entry_num = leapraid_fill_card_port_table(adapter,
+ sas_iounit_p0,
+ new_card_port_table);
+out:
+ kfree(sas_iounit_p0);
+ return port_entry_num;
+}
+
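+/*
+ * Reconcile one entry of the post-reset port table with the existing
+ * card port list: adjust phy membership for partially changed ports,
+ * refresh the port id and phy mask, and clear the DIRTY flag on the
+ * matched port.
+ */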
+static void leapraid_update_existing_port(struct leapraid_adapter *adapter,
+ struct leapraid_card_port *new_table,
+ int entry_idx, int port_entry_num)
+{
+ struct leapraid_card_port *matched_card_port = NULL;
+ int matched_code;
+ int count = 0, lcount = 0;
+ u64 sas_addr;
+ int i;
+
+ matched_code = leapraid_check_card_port(adapter,
+ &new_table[entry_idx],
+ &matched_card_port,
+ &count);
+
+ if (!matched_card_port)
+ return;
+
+ if (matched_code == SAME_PORT_WITH_PARTIALLY_CHANGED_PHYS ||
+ matched_code == SAME_ADDR_WITH_PARTIALLY_CHANGED_PHYS) {
+ leapraid_add_or_del_phys_from_existing_port(adapter,
+ matched_card_port,
+ new_table,
+ entry_idx,
+ port_entry_num);
+ } else if (matched_code == SAME_ADDR_ONLY) {
+ sas_addr = new_table[entry_idx].sas_address;
+ for (i = 0; i < port_entry_num; i++) {
+ if (new_table[i].sas_address == sas_addr)
+ lcount++;
+ }
+ if (count > 1 || lcount > 1)
+ return;
+
+ leapraid_add_or_del_phys_from_existing_port(adapter,
+ matched_card_port,
+ new_table,
+ entry_idx,
+ port_entry_num);
+ }
+
+	matched_card_port->port_id = new_table[entry_idx].port_id;
+
+ matched_card_port->flg &= ~LEAPRAID_CARD_PORT_FLG_DIRTY;
+ matched_card_port->phy_mask = new_table[entry_idx].phy_mask;
+}
+
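+/*
+ * Rebuild the card port view after a hard reset: mark every known port
+ * DIRTY, then walk the freshly read port table and reconcile each
+ * entry against the existing list.
+ */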
+static void leapraid_update_card_port_after_reset(
+ struct leapraid_adapter *adapter)
+{
+ struct leapraid_card_port *new_card_port_table;
+ struct leapraid_card_port *matched_card_port = NULL;
+ u8 port_entry_num = 0;
+ u8 nr_phys;
+ int i;
+
+ if (leapraid_get_adapter_phys(adapter, &nr_phys) || !nr_phys)
+ return;
+
+ adapter->dev_topo.card.phys_num = nr_phys;
+ new_card_port_table = kcalloc(adapter->dev_topo.card.phys_num,
+ sizeof(struct leapraid_card_port),
+ GFP_KERNEL);
+ if (!new_card_port_table)
+ return;
+
+	port_entry_num =
+		leapraid_set_new_card_port_table_after_reset(adapter,
+							     new_card_port_table);
+	if (!port_entry_num)
+		goto out;
+
+	list_for_each_entry(matched_card_port,
+			    &adapter->dev_topo.card_port_list, list) {
+		matched_card_port->flg |= LEAPRAID_CARD_PORT_FLG_DIRTY;
+	}
+
+	matched_card_port = NULL;
+	for (i = 0; i < port_entry_num; i++)
+		leapraid_update_existing_port(adapter,
+					      new_card_port_table,
+					      i, port_entry_num);
+out:
+	kfree(new_card_port_table);
+}
+
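+/*
+ * A phy counts as a virtual phy when its link is up, the attached
+ * device reports SEP capability, and PHY page 0 has the VPHY bit set.
+ */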
+static bool leapraid_is_valid_vphy(
+ struct leapraid_adapter *adapter,
+ struct leapraid_sas_io_unit_p0 *sas_io_unit_p0,
+ int phy_index)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_phy_p0 phy_p0;
+
+ if ((sas_io_unit_p0->phy_info[phy_index].neg_link_rate >> 4) <
+ LEAPRAID_SAS_NEG_LINK_RATE_1_5)
+ return false;
+
+ if (!(le32_to_cpu(sas_io_unit_p0->phy_info[phy_index].controller_phy_dev_info) &
+ LEAPRAID_DEVTYP_SEP))
+ return false;
+
+ cfgp1.phy_number = phy_index;
+ if (leapraid_op_config_page(adapter, &phy_p0, cfgp1, cfgp2,
+ GET_PHY_PG0))
+ return false;
+
+ if (!(le32_to_cpu(phy_p0.phy_info) & LEAPRAID_SAS_PHYINFO_VPHY))
+ return false;
+
+ return true;
+}
+
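+/*
+ * Move a virtual phy to the card port identified by may_new_port_id,
+ * allocating that port if it does not exist yet, and rebind the
+ * attached SAS device accordingly.
+ */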
+static void leapraid_update_vphy_binding(struct leapraid_adapter *adapter,
+ struct leapraid_card_port *card_port,
+ struct leapraid_vphy *vphy,
+ int phy_index, u8 may_new_port_id,
+ u64 attached_sas_addr)
+{
+ struct leapraid_card_port *may_new_card_port;
+ struct leapraid_sas_dev *sas_dev;
+
+ may_new_card_port = leapraid_get_port_by_id(adapter,
+ may_new_port_id,
+ true);
+ if (!may_new_card_port) {
+ may_new_card_port = kzalloc(sizeof(*may_new_card_port),
+ GFP_KERNEL);
+ if (!may_new_card_port)
+ return;
+ may_new_card_port->port_id = may_new_port_id;
+		dev_info(&adapter->pdev->dev,
+			 "%s: new card port %p added, port=%d\n",
+			 __func__, may_new_card_port, may_new_port_id);
+ list_add_tail(&may_new_card_port->list,
+ &adapter->dev_topo.card_port_list);
+ }
+
+ if (card_port != may_new_card_port) {
+ if (!may_new_card_port->vphys_mask)
+ INIT_LIST_HEAD(&may_new_card_port->vphys_list);
+ may_new_card_port->vphys_mask |= BIT(phy_index);
+ card_port->vphys_mask &= ~BIT(phy_index);
+ list_move(&vphy->list, &may_new_card_port->vphys_list);
+
+ sas_dev = leapraid_get_sas_dev_by_addr(adapter,
+ attached_sas_addr,
+ card_port);
+ if (sas_dev)
+ sas_dev->card_port = may_new_card_port;
+ }
+
+ if (may_new_card_port->flg & LEAPRAID_CARD_PORT_FLG_DIRTY) {
+ may_new_card_port->sas_address = 0;
+ may_new_card_port->phy_mask = 0;
+ may_new_card_port->flg &= ~LEAPRAID_CARD_PORT_FLG_DIRTY;
+ }
+ vphy->flg &= ~LEAPRAID_VPHY_FLG_DIRTY;
+}
+
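+/*
+ * After a reset, mark every known virtual phy DIRTY, then re-read SAS
+ * IO unit page 0 and rebind each vphy that still responds to its
+ * (possibly new) card port.
+ */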
+static void leapraid_update_vphys_after_reset(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_io_unit_p0 *sas_iounit_p0 = NULL;
+ struct leapraid_card_port *card_port, *card_port_next;
+ struct leapraid_vphy *vphy, *vphy_next;
+ u64 attached_sas_addr;
+ u16 sz;
+ u16 attached_hdl;
+ bool found = false;
+ u8 port_id;
+ int i;
+
+ list_for_each_entry_safe(card_port, card_port_next,
+ &adapter->dev_topo.card_port_list, list) {
+ if (!card_port->vphys_mask)
+ continue;
+
+ list_for_each_entry_safe(vphy, vphy_next,
+ &card_port->vphys_list, list) {
+ vphy->flg |= LEAPRAID_VPHY_FLG_DIRTY;
+ }
+ }
+
+ sz = offsetof(struct leapraid_sas_io_unit_p0, phy_info) +
+ (adapter->dev_topo.card.phys_num *
+ sizeof(struct leapraid_sas_io_unit0_phy_info));
+ sas_iounit_p0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_p0)
+ return;
+
+ cfgp1.size = sz;
+ if ((leapraid_op_config_page(adapter, sas_iounit_p0, cfgp1, cfgp2,
+ GET_SAS_IOUNIT_PG0)) != 0)
+ goto out;
+
+ for (i = 0; i < adapter->dev_topo.card.phys_num; i++) {
+ if (!leapraid_is_valid_vphy(adapter, sas_iounit_p0, i))
+ continue;
+
+ attached_hdl =
+ le16_to_cpu(sas_iounit_p0->phy_info[i].attached_dev_hdl);
+ if (leapraid_get_sas_address(adapter, attached_hdl,
+ &attached_sas_addr) != 0)
+ continue;
+
+ found = false;
+ card_port = NULL;
+ card_port_next = NULL;
+ list_for_each_entry_safe(card_port, card_port_next,
+ &adapter->dev_topo.card_port_list,
+ list) {
+ if (!card_port->vphys_mask)
+ continue;
+
+ list_for_each_entry_safe(vphy, vphy_next,
+ &card_port->vphys_list,
+ list) {
+ if (!(vphy->flg & LEAPRAID_VPHY_FLG_DIRTY))
+ continue;
+
+ if (vphy->sas_address != attached_sas_addr)
+ continue;
+
+ if (!(vphy->phy_mask & BIT(i)))
+ vphy->phy_mask = BIT(i);
+
+ port_id = sas_iounit_p0->phy_info[i].port;
+
+ leapraid_update_vphy_binding(adapter,
+ card_port,
+ vphy,
+ i,
+ port_id,
+ attached_sas_addr);
+
+ found = true;
+ break;
+ }
+ if (found)
+ break;
+ }
+ }
+out:
+ kfree(sas_iounit_p0);
+}
+
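+/*
+ * Mark every attached target as deleted; devices that respond during
+ * the post-reset rescan below are un-marked again.
+ */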
+static void leapraid_mark_all_dev_deleted(struct leapraid_adapter *adapter)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, adapter->shost) {
+ sdev_priv = sdev->hostdata;
+ if (sdev_priv && sdev_priv->starget_priv)
+ sdev_priv->starget_priv->deleted = true;
+ }
+}
+
+static void leapraid_free_enc_list(struct leapraid_adapter *adapter)
+{
+ struct leapraid_enc_node *enc_dev, *enc_dev_next;
+
+ list_for_each_entry_safe(enc_dev, enc_dev_next,
+ &adapter->dev_topo.enc_list,
+ list) {
+ list_del(&enc_dev->list);
+ kfree(enc_dev);
+ }
+}
+
+static void leapraid_rebuild_enc_list_after_reset(
+ struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_enc_node *enc_node;
+ u16 enc_hdl;
+ int rc;
+
+ leapraid_free_enc_list(adapter);
+
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (enc_hdl = 0xFFFF; ; enc_hdl = le16_to_cpu(enc_node->pg0.enc_hdl)) {
+ enc_node = kzalloc(sizeof(*enc_node),
+ GFP_KERNEL);
+ if (!enc_node)
+ return;
+
+ cfgp2.handle = enc_hdl;
+ rc = leapraid_op_config_page(adapter, &enc_node->pg0, cfgp1,
+ cfgp2, GET_SAS_ENCLOSURE_PG0);
+ if (rc) {
+ kfree(enc_node);
+ return;
+ }
+
+ list_add_tail(&enc_node->list, &adapter->dev_topo.enc_list);
+ }
+}
+
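+/*
+ * Match a SAS device page 0 entry against the known device list by SAS
+ * address, slot and card port; on a hit, clear the deleted state and
+ * refresh the handle and enclosure information.
+ */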
+static void leapraid_mark_resp_sas_dev(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev_p0 *sas_dev_p0)
+{
+ struct leapraid_starget_priv *starget_priv = NULL;
+ struct leapraid_enc_node *enc_node = NULL;
+ struct leapraid_card_port *card_port;
+ struct leapraid_sas_dev *sas_dev;
+ struct scsi_target *starget;
+ unsigned long flags;
+
+ card_port = leapraid_get_port_by_id(adapter, sas_dev_p0->physical_port,
+ false);
+ if (sas_dev_p0->enc_hdl) {
+ enc_node = leapraid_enc_find_by_hdl(adapter,
+ le16_to_cpu(
+ sas_dev_p0->enc_hdl));
+ if (!enc_node)
+ dev_info(&adapter->pdev->dev,
+ "enc hdl 0x%04x has no matched enc dev\n",
+ le16_to_cpu(sas_dev_p0->enc_hdl));
+ }
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ list_for_each_entry(sas_dev, &adapter->dev_topo.sas_dev_list, list) {
+ if (sas_dev->sas_addr == le64_to_cpu(sas_dev_p0->sas_address) &&
+ sas_dev->slot == le16_to_cpu(sas_dev_p0->slot) &&
+ sas_dev->card_port == card_port) {
+ sas_dev->resp = true;
+ starget = sas_dev->starget;
+ if (starget && starget->hostdata) {
+ starget_priv = starget->hostdata;
+ starget_priv->tm_busy = false;
+ starget_priv->deleted = false;
+ } else {
+ starget_priv = NULL;
+ }
+
+ if (starget) {
+ starget_printk(KERN_INFO, starget,
+ "dev: hdl=0x%04x, sas addr=0x%016llx, port_id=%d\n",
+ sas_dev->hdl,
+ (unsigned long long)sas_dev->sas_addr,
+ sas_dev->card_port->port_id);
+ if (sas_dev->enc_hdl != 0)
+ starget_printk(KERN_INFO, starget,
+ "enc info: enc_lid=0x%016llx, slot=%d\n",
+ (unsigned long long)sas_dev->enc_lid,
+ sas_dev->slot);
+ }
+
+ if (le16_to_cpu(sas_dev_p0->flg) &
+ LEAPRAID_SAS_DEV_P0_FLG_ENC_LEVEL_VALID) {
+ sas_dev->enc_level = sas_dev_p0->enc_level;
+ memcpy(sas_dev->connector_name,
+ sas_dev_p0->connector_name, 4);
+ sas_dev->connector_name[4] = '\0';
+ } else {
+ sas_dev->enc_level = 0;
+ sas_dev->connector_name[0] = '\0';
+ }
+
+ sas_dev->enc_hdl =
+ le16_to_cpu(sas_dev_p0->enc_hdl);
+ if (enc_node) {
+ sas_dev->enc_lid =
+ le64_to_cpu(enc_node->pg0.enc_lid);
+ }
+ if (sas_dev->hdl == le16_to_cpu(sas_dev_p0->dev_hdl))
+ goto out;
+
+			dev_info(&adapter->pdev->dev,
+				 "hdl changed: 0x%04x -> 0x%04x\n",
+				 sas_dev->hdl,
+				 le16_to_cpu(sas_dev_p0->dev_hdl));
+			sas_dev->hdl = le16_to_cpu(sas_dev_p0->dev_hdl);
+ if (starget_priv)
+ starget_priv->hdl =
+ le16_to_cpu(sas_dev_p0->dev_hdl);
+ goto out;
+ }
+ }
+out:
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+}
+
+static void leapraid_search_resp_sas_dev(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_dev_p0 sas_dev_p0;
+ u32 device_info;
+
+ dev_info(&adapter->pdev->dev,
+ "begin searching for sas end devices\n");
+
+ if (list_empty(&adapter->dev_topo.sas_dev_list))
+ goto out;
+
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (cfgp2.handle = 0xFFFF;
+ !leapraid_op_config_page(adapter, &sas_dev_p0,
+ cfgp1, cfgp2, GET_SAS_DEVICE_PG0);
+ cfgp2.handle = le16_to_cpu(sas_dev_p0.dev_hdl)) {
+ device_info = le32_to_cpu(sas_dev_p0.dev_info);
+ if (!(leapraid_is_end_dev(device_info)))
+ continue;
+
+ leapraid_mark_resp_sas_dev(adapter, &sas_dev_p0);
+ }
+out:
+ dev_info(&adapter->pdev->dev,
+ "sas end devices searching complete\n");
+}
+
+static void leapraid_mark_resp_raid_volume(struct leapraid_adapter *adapter,
+ u64 wwid, u16 hdl)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_raid_volume *raid_volume;
+ struct scsi_target *starget;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ list_for_each_entry(raid_volume,
+ &adapter->dev_topo.raid_volume_list, list) {
+ if (raid_volume->wwid == wwid && raid_volume->starget) {
+ starget = raid_volume->starget;
+ if (starget && starget->hostdata) {
+ starget_priv = starget->hostdata;
+ starget_priv->deleted = false;
+ } else {
+ starget_priv = NULL;
+ }
+
+ raid_volume->resp = true;
+ spin_unlock_irqrestore(
+ &adapter->dev_topo.raid_volume_lock,
+ flags);
+
+ starget_printk(
+ KERN_INFO, raid_volume->starget,
+ "raid volume: hdl=0x%04x, wwid=0x%016llx\n",
+ hdl, (unsigned long long)raid_volume->wwid);
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock,
+ flags);
+ if (raid_volume->hdl == hdl) {
+ spin_unlock_irqrestore(
+ &adapter->dev_topo.raid_volume_lock,
+ flags);
+ return;
+ }
+
+ dev_info(&adapter->pdev->dev,
+ "hdl changed: 0x%04x -> 0x%04x\n",
+ raid_volume->hdl, hdl);
+
+ raid_volume->hdl = hdl;
+ if (starget_priv)
+ starget_priv->hdl = hdl;
+ spin_unlock_irqrestore(
+ &adapter->dev_topo.raid_volume_lock,
+ flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+}
+
+static void leapraid_search_resp_raid_volume(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_1 cfgp1_extra = {0};
+ union cfg_param_2 cfgp2 = {0};
+ union cfg_param_2 cfgp2_extra = {0};
+ struct leapraid_raidvol_p1 raidvol_p1;
+ struct leapraid_raidvol_p0 raidvol_p0;
+ struct leapraid_raidpd_p0 raidpd_p0;
+ u16 hdl;
+ u8 phys_disk_num;
+
+ if (!adapter->adapter_attr.raid_support)
+ return;
+
+ dev_info(&adapter->pdev->dev,
+ "begin searching for raid volumes\n");
+
+ if (list_empty(&adapter->dev_topo.raid_volume_list))
+ goto out;
+
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (hdl = 0xFFFF, cfgp2.handle = hdl;
+ !leapraid_op_config_page(adapter, &raidvol_p1, cfgp1, cfgp2,
+ GET_RAID_VOLUME_PG1);
+ cfgp2.handle = hdl) {
+ hdl = le16_to_cpu(raidvol_p1.dev_hdl);
+ cfgp1_extra.size = sizeof(struct leapraid_raidvol_p0);
+ cfgp2_extra.handle = hdl;
+ if (leapraid_op_config_page(adapter, &raidvol_p0, cfgp1_extra,
+ cfgp2_extra, GET_RAID_VOLUME_PG0))
+ continue;
+
+ if (raidvol_p0.volume_state == LEAPRAID_VOL_STATE_OPTIMAL ||
+ raidvol_p0.volume_state == LEAPRAID_VOL_STATE_ONLINE ||
+ raidvol_p0.volume_state == LEAPRAID_VOL_STATE_DEGRADED)
+ leapraid_mark_resp_raid_volume(
+ adapter,
+ le64_to_cpu(raidvol_p1.wwid),
+ hdl);
+ }
+
+ memset(adapter->dev_topo.pd_hdls, 0, adapter->dev_topo.pd_hdls_sz);
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (phys_disk_num = 0xFF, cfgp2.form_specific = phys_disk_num;
+ !leapraid_op_config_page(adapter, &raidpd_p0, cfgp1, cfgp2,
+ GET_PHY_DISK_PG0);
+ cfgp2.form_specific = phys_disk_num) {
+ phys_disk_num = raidpd_p0.phys_disk_num;
+ hdl = le16_to_cpu(raidpd_p0.dev_hdl);
+ set_bit(hdl, (unsigned long *)adapter->dev_topo.pd_hdls);
+ }
+out:
+ dev_info(&adapter->pdev->dev,
+ "raid volumes searching complete\n");
+}
+
+static void leapraid_mark_resp_exp(struct leapraid_adapter *adapter,
+ struct leapraid_exp_p0 *exp_pg0)
+{
+ struct leapraid_enc_node *enc_node = NULL;
+ struct leapraid_topo_node *topo_node_exp;
+ u16 enc_hdl = le16_to_cpu(exp_pg0->enc_hdl);
+ u64 sas_address = le64_to_cpu(exp_pg0->sas_address);
+ u16 hdl = le16_to_cpu(exp_pg0->dev_hdl);
+ u8 port_id = exp_pg0->physical_port;
+ struct leapraid_card_port *card_port = leapraid_get_port_by_id(adapter,
+ port_id,
+ false);
+ unsigned long flags;
+ int i;
+
+ if (enc_hdl)
+ enc_node = leapraid_enc_find_by_hdl(adapter, enc_hdl);
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ list_for_each_entry(topo_node_exp, &adapter->dev_topo.exp_list, list) {
+ if (topo_node_exp->sas_address != sas_address ||
+ topo_node_exp->card_port != card_port)
+ continue;
+
+ topo_node_exp->resp = true;
+ if (enc_node) {
+ topo_node_exp->enc_lid =
+ le64_to_cpu(enc_node->pg0.enc_lid);
+ topo_node_exp->enc_hdl = le16_to_cpu(exp_pg0->enc_hdl);
+ }
+ if (topo_node_exp->hdl == hdl)
+ goto out;
+
+ dev_info(&adapter->pdev->dev,
+ "hdl changed: 0x%04x -> 0x%04x\n",
+ topo_node_exp->hdl, hdl);
+ topo_node_exp->hdl = hdl;
+ for (i = 0; i < topo_node_exp->phys_num; i++)
+ topo_node_exp->card_phy[i].hdl = hdl;
+ goto out;
+ }
+out:
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+}
+
+static void leapraid_search_resp_exp(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_exp_p0 exp_p0;
+ u64 sas_address;
+ u16 hdl;
+ u8 port;
+
+ dev_info(&adapter->pdev->dev,
+ "begin searching for expanders\n");
+ if (list_empty(&adapter->dev_topo.exp_list))
+ goto out;
+
+ cfgp1.form = LEAPRAID_SAS_CFG_PGAD_GET_NEXT_LOOP;
+ for (hdl = 0xFFFF, cfgp2.handle = hdl;
+ !leapraid_op_config_page(adapter, &exp_p0, cfgp1, cfgp2,
+ GET_SAS_EXPANDER_PG0);
+ cfgp2.handle = hdl) {
+ hdl = le16_to_cpu(exp_p0.dev_hdl);
+ sas_address = le64_to_cpu(exp_p0.sas_address);
+ port = exp_p0.physical_port;
+
+		dev_info(&adapter->pdev->dev,
+			 "exp detected: hdl=0x%04x, sas=0x%016llx, port=%u\n",
+			 hdl, (unsigned long long)sas_address,
+			 ((adapter->adapter_attr.enable_mp) ? (port) :
+			  (LEAPRAID_DISABLE_MP_PORT_ID)));
+ leapraid_mark_resp_exp(adapter, &exp_p0);
+ }
+out:
+ dev_info(&adapter->pdev->dev,
+ "expander searching complete\n");
+}
+
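+/*
+ * Count outstanding SCSI commands and give them up to 10 seconds to
+ * drain before a reset proceeds.
+ */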
+void leapraid_wait_cmds_done(struct leapraid_adapter *adapter)
+{
+ struct leapraid_io_req_tracker *io_req_tracker;
+ unsigned long flags;
+ u16 i;
+
+ adapter->reset_desc.pending_io_cnt = 0;
+ if (!leapraid_pci_active(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s %s: pci error, device reset or unplugged!\n",
+ adapter->adapter_attr.name, __func__);
+ return;
+ }
+
+ if (leapraid_get_adapter_state(adapter) != LEAPRAID_DB_OPERATIONAL)
+ return;
+
+ spin_lock_irqsave(&adapter->dynamic_task_desc.task_lock, flags);
+ for (i = 1; i <= adapter->shost->can_queue; i++) {
+ io_req_tracker = leapraid_get_io_tracker_from_taskid(adapter,
+ i);
+ if (io_req_tracker && io_req_tracker->taskid != 0)
+ if (io_req_tracker->scmd)
+ adapter->reset_desc.pending_io_cnt++;
+ }
+ spin_unlock_irqrestore(&adapter->dynamic_task_desc.task_lock, flags);
+
+ if (!adapter->reset_desc.pending_io_cnt)
+ return;
+
+ wait_event_timeout(adapter->reset_desc.reset_wait_queue,
+ adapter->reset_desc.pending_io_cnt == 0, 10 * HZ);
+}
+
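+/*
+ * Serialized entry point for a hard reset. Concurrent callers sleep
+ * until recovery finishes and return the stored result. On success the
+ * topology (ports, vphys, enclosures, devices, volumes, expanders) is
+ * rebuilt from config pages.
+ */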
+int leapraid_hard_reset_handler(struct leapraid_adapter *adapter,
+ enum reset_type type)
+{
+ unsigned long flags;
+ int rc;
+
+ if (!mutex_trylock(&adapter->reset_desc.adapter_reset_mutex)) {
+ do {
+ ssleep(1);
+ } while (adapter->access_ctrl.shost_recovering);
+ return adapter->reset_desc.adapter_reset_results;
+ }
+
+ if (!leapraid_pci_active(adapter)) {
+ if (leapraid_pci_removed(adapter)) {
+ dev_info(&adapter->pdev->dev,
+ "pci_dev removed, pausing polling and cleaning cmds\n");
+ leapraid_mq_polling_pause(adapter);
+ leapraid_clean_active_scsi_cmds(adapter);
+ leapraid_mq_polling_resume(adapter);
+ }
+ rc = 0;
+ goto exit_pci_unavailable;
+ }
+
+ dev_info(&adapter->pdev->dev, "starting hard reset\n");
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ adapter->access_ctrl.shost_recovering = true;
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+
+ leapraid_wait_cmds_done(adapter);
+ leapraid_mask_int(adapter);
+ leapraid_mq_polling_pause(adapter);
+ rc = leapraid_make_adapter_ready(adapter, type);
+ if (rc) {
+ dev_err(&adapter->pdev->dev,
+ "failed to make adapter ready, rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = leapraid_fw_log_init(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "firmware log init failed\n");
+ goto out;
+ }
+
+ leapraid_clean_active_cmds(adapter);
+ if (adapter->scan_dev_desc.driver_loading &&
+ adapter->scan_dev_desc.scan_dev_failed) {
+		dev_err(&adapter->pdev->dev,
+			"device scan failed while driver was loading\n");
+ adapter->access_ctrl.host_removing = true;
+ rc = -EFAULT;
+ goto out;
+ }
+
+ rc = leapraid_make_adapter_available(adapter);
+ if (!rc) {
+ dev_info(&adapter->pdev->dev,
+ "adapter is now available, rebuilding topology\n");
+ if (adapter->adapter_attr.enable_mp) {
+ leapraid_update_card_port_after_reset(adapter);
+ leapraid_update_vphys_after_reset(adapter);
+ }
+ leapraid_mark_all_dev_deleted(adapter);
+ leapraid_rebuild_enc_list_after_reset(adapter);
+ leapraid_search_resp_sas_dev(adapter);
+ leapraid_search_resp_raid_volume(adapter);
+ leapraid_search_resp_exp(adapter);
+ leapraid_hardreset_barrier(adapter);
+ }
+out:
+ dev_info(&adapter->pdev->dev, "hard reset %s\n",
+ ((rc == 0) ? "SUCCESS" : "FAILED"));
+
+ spin_lock_irqsave(&adapter->reset_desc.adapter_reset_lock, flags);
+ adapter->reset_desc.adapter_reset_results = rc;
+ adapter->access_ctrl.shost_recovering = false;
+ spin_unlock_irqrestore(&adapter->reset_desc.adapter_reset_lock, flags);
+ adapter->reset_desc.reset_cnt++;
+ mutex_unlock(&adapter->reset_desc.adapter_reset_mutex);
+
+ if (rc)
+ leapraid_clean_active_scsi_cmds(adapter);
+	leapraid_mq_polling_resume(adapter);
+	return rc;
+
+exit_pci_unavailable:
+	mutex_unlock(&adapter->reset_desc.adapter_reset_mutex);
+	dev_info(&adapter->pdev->dev, "pcie unavailable!\n");
+	return rc;
+}
+
+static int leapraid_get_adapter_features(struct leapraid_adapter *adapter)
+{
+ struct leapraid_adapter_features_req leap_mpi_req;
+ struct leapraid_adapter_features_rep leap_mpi_rep;
+ u8 fw_major, fw_minor, fw_build, fw_release;
+ u32 db;
+ int r;
+
+ db = leapraid_readl(&adapter->iomem_base->db);
+ if (db & LEAPRAID_DB_USED ||
+ (db & LEAPRAID_DB_MASK) == LEAPRAID_DB_FAULT)
+ return -EFAULT;
+
+ if (((db & LEAPRAID_DB_MASK) != LEAPRAID_DB_READY) &&
+ ((db & LEAPRAID_DB_MASK) != LEAPRAID_DB_OPERATIONAL)) {
+ if (!leapraid_wait_adapter_ready(adapter))
+ return -EFAULT;
+ }
+
+ memset(&leap_mpi_req, 0, sizeof(struct leapraid_adapter_features_req));
+ memset(&leap_mpi_rep, 0, sizeof(struct leapraid_adapter_features_rep));
+ leap_mpi_req.func = LEAPRAID_FUNC_GET_ADAPTER_FEATURES;
+ r = leapraid_handshake_func(adapter,
+ sizeof(struct leapraid_adapter_features_req),
+ (u32 *)&leap_mpi_req,
+ sizeof(struct leapraid_adapter_features_rep),
+ (u16 *)&leap_mpi_rep);
+ if (r) {
+ dev_err(&adapter->pdev->dev,
+ "%s %s: handshake failed, r=%d\n",
+ adapter->adapter_attr.name, __func__, r);
+ return r;
+ }
+
+ memset(&adapter->adapter_attr.features, 0,
+ sizeof(struct leapraid_adapter_features));
+ adapter->adapter_attr.features.req_slot =
+ le16_to_cpu(leap_mpi_rep.req_slot);
+ adapter->adapter_attr.features.hp_slot =
+ le16_to_cpu(leap_mpi_rep.hp_slot);
+ adapter->adapter_attr.features.adapter_caps =
+ le32_to_cpu(leap_mpi_rep.adapter_caps);
+ adapter->adapter_attr.features.max_volumes =
+ leap_mpi_rep.max_volumes;
+ if (!adapter->adapter_attr.features.max_volumes)
+ adapter->adapter_attr.features.max_volumes =
+ LEAPRAID_MAX_VOLUMES_DEFAULT;
+ adapter->adapter_attr.features.max_dev_handle =
+ le16_to_cpu(leap_mpi_rep.max_dev_hdl);
+ if (!adapter->adapter_attr.features.max_dev_handle)
+ adapter->adapter_attr.features.max_dev_handle =
+ LEAPRAID_MAX_DEV_HANDLE_DEFAULT;
+ adapter->adapter_attr.features.min_dev_handle =
+ le16_to_cpu(leap_mpi_rep.min_dev_hdl);
+ if ((adapter->adapter_attr.features.adapter_caps &
+ LEAPRAID_ADAPTER_FEATURES_CAP_INTEGRATED_RAID))
+ adapter->adapter_attr.raid_support = true;
+ if (WARN_ON(!(adapter->adapter_attr.features.adapter_caps &
+ LEAPRAID_ADAPTER_FEATURES_CAP_ATOMIC_REQ)))
+ return -EFAULT;
+ adapter->adapter_attr.features.fw_version =
+ le32_to_cpu(leap_mpi_rep.fw_version);
+
+ fw_major = (adapter->adapter_attr.features.fw_version >> 24) & 0xFF;
+ fw_minor = (adapter->adapter_attr.features.fw_version >> 16) & 0xFF;
+ fw_build = (adapter->adapter_attr.features.fw_version >> 8) & 0xFF;
+ fw_release = adapter->adapter_attr.features.fw_version & 0xFF;
+
+ dev_info(&adapter->pdev->dev,
+ "Firmware version: %u.%u.%u.%u (0x%08x)\n",
+ fw_major, fw_minor, fw_build, fw_release,
+ adapter->adapter_attr.features.fw_version);
+
+ if (fw_major < 2) {
+ dev_err(&adapter->pdev->dev,
+ "Unsupported firmware major version, requires >= 2\n");
+ return -EFAULT;
+ }
+ adapter->shost->max_id = -1;
+
+ return 0;
+}
+
+static inline void leapraid_disable_pcie(struct leapraid_adapter *adapter)
+{
+ mutex_lock(&adapter->access_ctrl.pci_access_lock);
+ if (adapter->iomem_base) {
+ iounmap(adapter->iomem_base);
+ adapter->iomem_base = NULL;
+ }
+ if (pci_is_enabled(adapter->pdev)) {
+ pci_disable_pcie_error_reporting(adapter->pdev);
+ pci_release_regions(adapter->pdev);
+ pci_disable_device(adapter->pdev);
+ }
+ mutex_unlock(&adapter->access_ctrl.pci_access_lock);
+}
+
+static int leapraid_enable_pcie(struct leapraid_adapter *adapter)
+{
+ u64 dma_mask;
+ int rc;
+
+ rc = pci_enable_device(adapter->pdev);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "failed to enable PCI device\n");
+ return rc;
+ }
+
+ rc = pci_request_regions(adapter->pdev, LEAPRAID_DRIVER_NAME);
+ if (rc) {
+ dev_err(&adapter->pdev->dev,
+ "failed to obtain PCI resources\n");
+ goto disable_pcie;
+ }
+
+ if (sizeof(dma_addr_t) > 4) {
+ dma_mask = DMA_BIT_MASK(64);
+ adapter->adapter_attr.use_32_dma_mask = false;
+ } else {
+ dma_mask = DMA_BIT_MASK(32);
+ adapter->adapter_attr.use_32_dma_mask = true;
+ }
+
+ rc = dma_set_mask_and_coherent(&adapter->pdev->dev, dma_mask);
+ if (rc) {
+		dev_err(&adapter->pdev->dev,
+			"failed to set DMA mask 0x%llx\n",
+			(unsigned long long)dma_mask);
+ goto disable_pcie;
+ }
+ adapter->iomem_base = ioremap(pci_resource_start(adapter->pdev, 0),
+ sizeof(struct leapraid_reg_base));
+ if (!adapter->iomem_base) {
+ dev_err(&adapter->pdev->dev,
+ "failed to map memory for controller registers\n");
+ rc = -ENOMEM;
+ goto disable_pcie;
+ }
+
+ pci_enable_pcie_error_reporting(adapter->pdev);
+ pci_set_master(adapter->pdev);
+
+ return 0;
+
+disable_pcie:
+ return rc;
+}
+
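+/*
+ * Spread the online CPUs evenly across the interrupt reply queues in
+ * msix_cpu_map, independent of any PCI-provided affinity masks.
+ */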
+static void leapraid_cpus_on_irq(struct leapraid_adapter *adapter)
+{
+ struct leapraid_int_rq *int_rq;
+ unsigned int i, base_group, this_group;
+ unsigned int cpu, nr_cpus, total_msix, index = 0;
+
+ total_msix = adapter->notification_desc.iopoll_qdex;
+ nr_cpus = num_online_cpus();
+
+ if (!nr_cpus || !total_msix)
+ return;
+ base_group = nr_cpus / total_msix;
+
+ cpu = cpumask_first(cpu_online_mask);
+ for (index = 0; index < adapter->notification_desc.iopoll_qdex;
+ index++) {
+ int_rq = &adapter->notification_desc.int_rqs[index];
+
+ if (cpu >= nr_cpus)
+ break;
+
+ this_group = base_group +
+ (index < (nr_cpus % total_msix) ? 1 : 0);
+
+		for (i = 0; i < this_group; i++) {
+			if (cpu >= adapter->notification_desc.msix_cpu_map_sz)
+				return;
+			adapter->notification_desc.msix_cpu_map[cpu] =
+				int_rq->rq.msix_idx;
+			cpu = cpumask_next(cpu, cpu_online_mask);
+		}
+ }
+}
+
+static void leapraid_map_msix_to_cpu(struct leapraid_adapter *adapter)
+{
+ struct leapraid_int_rq *int_rq;
+ const cpumask_t *affinity_mask;
+ u32 i;
+ u16 cpu;
+
+ if (!adapter->adapter_attr.rq_cnt)
+ return;
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qdex; i++) {
+ int_rq = &adapter->notification_desc.int_rqs[i];
+ affinity_mask = pci_irq_get_affinity(adapter->pdev,
+ int_rq->rq.msix_idx);
+ if (!affinity_mask)
+ goto out;
+
+ for_each_cpu_and(cpu, affinity_mask, cpu_online_mask) {
+ if (cpu >= adapter->notification_desc.msix_cpu_map_sz)
+ break;
+
+ adapter->notification_desc.msix_cpu_map[cpu] =
+ int_rq->rq.msix_idx;
+ }
+ }
+out:
+ leapraid_cpus_on_irq(adapter);
+}
+
+static void leapraid_configure_reply_queue_affinity(
+ struct leapraid_adapter *adapter)
+{
+ if (!adapter || !adapter->notification_desc.msix_enable)
+ return;
+
+ leapraid_map_msix_to_cpu(adapter);
+}
+
+static void leapraid_free_irq(struct leapraid_adapter *adapter)
+{
+ struct leapraid_int_rq *int_rq;
+ unsigned int i;
+
+ if (!adapter->notification_desc.int_rqs)
+ return;
+
+ for (i = 0; i < adapter->notification_desc.int_rqs_allocated; i++) {
+		int_rq = &adapter->notification_desc.int_rqs[i];
+
+ irq_set_affinity_hint(pci_irq_vector(adapter->pdev,
+ int_rq->rq.msix_idx), NULL);
+ free_irq(pci_irq_vector(adapter->pdev, int_rq->rq.msix_idx),
+ &int_rq->rq);
+ }
+ adapter->notification_desc.int_rqs_allocated = 0;
+
+ if (!adapter->notification_desc.msix_enable)
+ return;
+
+ pci_free_irq_vectors(adapter->pdev);
+ adapter->notification_desc.msix_enable = false;
+
+ kfree(adapter->notification_desc.blk_mq_poll_rqs);
+ adapter->notification_desc.blk_mq_poll_rqs = NULL;
+
+ kfree(adapter->notification_desc.int_rqs);
+ adapter->notification_desc.int_rqs = NULL;
+
+ kfree(adapter->notification_desc.msix_cpu_map);
+ adapter->notification_desc.msix_cpu_map = NULL;
+}
+
+static inline int leapraid_msix_cnt(struct pci_dev *pdev)
+{
+ return pci_msix_vec_count(pdev);
+}
+
+static inline int leapraid_msi_cnt(struct pci_dev *pdev)
+{
+ return pci_msi_vec_count(pdev);
+}
+
+static int leapraid_setup_irqs(struct leapraid_adapter *adapter)
+{
+ unsigned int i;
+ int rc = 0;
+
+ if (interrupt_mode == 0) {
+ rc = pci_alloc_irq_vectors_affinity(
+ adapter->pdev,
+ adapter->notification_desc.iopoll_qdex,
+ adapter->notification_desc.iopoll_qdex,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, NULL);
+
+ if (rc < 0) {
+			dev_err(&adapter->pdev->dev,
+				"failed to allocate %d msi/msix vectors!\n",
+				adapter->notification_desc.iopoll_qdex);
+ return rc;
+ }
+ }
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qdex; i++) {
+ adapter->notification_desc.int_rqs[i].rq.adapter = adapter;
+ adapter->notification_desc.int_rqs[i].rq.msix_idx = i;
+ atomic_set(&adapter->notification_desc.int_rqs[i].rq.busy, 0);
+ if (interrupt_mode == 0)
+ snprintf(adapter->notification_desc.int_rqs[i].rq.name,
+ LEAPRAID_NAME_LENGTH, "%s%u-MSIx%u",
+ LEAPRAID_DRIVER_NAME,
+ adapter->adapter_attr.id, i);
+ else if (interrupt_mode == 1)
+ snprintf(adapter->notification_desc.int_rqs[i].rq.name,
+ LEAPRAID_NAME_LENGTH, "%s%u-MSI%u",
+ LEAPRAID_DRIVER_NAME,
+ adapter->adapter_attr.id, i);
+
+ rc = request_irq(pci_irq_vector(adapter->pdev, i),
+ leapraid_irq_handler,
+ IRQF_SHARED,
+ adapter->notification_desc.int_rqs[i].rq.name,
+ &adapter->notification_desc.int_rqs[i].rq);
+ if (rc) {
+ dev_err(&adapter->pdev->dev,
+ "MSI/MSIx: request_irq %s failed!\n",
+ adapter->notification_desc.int_rqs[i].rq.name);
+ return rc;
+ }
+ adapter->notification_desc.int_rqs_allocated++;
+ }
+
+ return 0;
+}
+
+static int leapraid_setup_legacy_int(struct leapraid_adapter *adapter)
+{
+ int rc;
+
+ adapter->notification_desc.int_rqs[0].rq.adapter = adapter;
+ adapter->notification_desc.int_rqs[0].rq.msix_idx = 0;
+ atomic_set(&adapter->notification_desc.int_rqs[0].rq.busy, 0);
+ snprintf(adapter->notification_desc.int_rqs[0].rq.name,
+ LEAPRAID_NAME_LENGTH, "%s%d-LegacyInt",
+ LEAPRAID_DRIVER_NAME, adapter->adapter_attr.id);
+
+ rc = pci_alloc_irq_vectors_affinity(
+ adapter->pdev,
+ adapter->notification_desc.iopoll_qdex,
+ adapter->notification_desc.iopoll_qdex,
+ PCI_IRQ_LEGACY | PCI_IRQ_AFFINITY,
+ NULL);
+ if (rc < 0) {
+		dev_err(&adapter->pdev->dev,
+			"legacy irq allocation failed!\n");
+ return rc;
+ }
+
+ rc = request_irq(pci_irq_vector(adapter->pdev, 0),
+ leapraid_irq_handler,
+ IRQF_SHARED,
+ adapter->notification_desc.int_rqs[0].rq.name,
+ &adapter->notification_desc.int_rqs[0].rq);
+ if (rc) {
+ irq_set_affinity_hint(pci_irq_vector(adapter->pdev, 0), NULL);
+ pci_free_irq_vectors(adapter->pdev);
+		dev_err(&adapter->pdev->dev,
+			"Legacy Int: request_irq %s failed!\n",
+			adapter->notification_desc.int_rqs[0].rq.name);
+ return -EBUSY;
+ }
+ adapter->notification_desc.int_rqs_allocated = 1;
+ return rc;
+}
+
+static int leapraid_set_legacy_int(struct leapraid_adapter *adapter)
+{
+ int rc;
+
+ adapter->notification_desc.msix_cpu_map_sz = num_online_cpus();
+ adapter->notification_desc.msix_cpu_map =
+ kzalloc(adapter->notification_desc.msix_cpu_map_sz,
+ GFP_KERNEL);
+ if (!adapter->notification_desc.msix_cpu_map)
+ return -ENOMEM;
+
+ adapter->adapter_attr.rq_cnt = 1;
+ adapter->notification_desc.iopoll_qdex =
+ adapter->adapter_attr.rq_cnt;
+ adapter->notification_desc.iopoll_qcnt = 0;
+ dev_info(&adapter->pdev->dev,
+ "Legacy Intr: req queue cnt=%d, intr=%d/poll=%d rep queues!\n",
+ adapter->adapter_attr.rq_cnt,
+ adapter->notification_desc.iopoll_qdex,
+ adapter->notification_desc.iopoll_qcnt);
+ adapter->notification_desc.int_rqs =
+ kcalloc(adapter->notification_desc.iopoll_qdex,
+ sizeof(struct leapraid_int_rq), GFP_KERNEL);
+ if (!adapter->notification_desc.int_rqs) {
+ dev_err(&adapter->pdev->dev,
+ "Legacy Intr: allocate %d intr rep queues failed!\n",
+ adapter->notification_desc.iopoll_qdex);
+ return -ENOMEM;
+ }
+
+ rc = leapraid_setup_legacy_int(adapter);
+
+ return rc;
+}
+
+static int leapraid_set_msix(struct leapraid_adapter *adapter)
+{
+ int iopoll_qcnt = 0;
+ unsigned int i;
+ int rc, msix_cnt;
+
+ if (msix_disable == 1)
+ goto legacy_int;
+
+ msix_cnt = leapraid_msix_cnt(adapter->pdev);
+ if (msix_cnt <= 0) {
+ dev_info(&adapter->pdev->dev, "msix unsupported!\n");
+ goto legacy_int;
+ }
+
+ if (reset_devices)
+ adapter->adapter_attr.rq_cnt = 1;
+ else
+ adapter->adapter_attr.rq_cnt = min_t(int,
+ num_online_cpus(),
+ msix_cnt);
+
+ if (max_msix_vectors > 0)
+ adapter->adapter_attr.rq_cnt = min_t(
+ int, max_msix_vectors, adapter->adapter_attr.rq_cnt);
+
+ if (iopoll_qcnt) {
+ adapter->notification_desc.blk_mq_poll_rqs =
+ kcalloc(iopoll_qcnt,
+ sizeof(struct leapraid_blk_mq_poll_rq),
+ GFP_KERNEL);
+ if (!adapter->notification_desc.blk_mq_poll_rqs)
+ return -ENOMEM;
+ adapter->adapter_attr.rq_cnt =
+ min(adapter->adapter_attr.rq_cnt + iopoll_qcnt,
+ msix_cnt);
+ }
+
+ adapter->notification_desc.iopoll_qdex =
+ adapter->adapter_attr.rq_cnt - iopoll_qcnt;
+
+ adapter->notification_desc.iopoll_qcnt = iopoll_qcnt;
+ dev_info(&adapter->pdev->dev,
+ "MSIx: req queue cnt=%d, intr=%d/poll=%d rep queues!\n",
+ adapter->adapter_attr.rq_cnt,
+ adapter->notification_desc.iopoll_qdex,
+ adapter->notification_desc.iopoll_qcnt);
+
+ adapter->notification_desc.int_rqs =
+ kcalloc(adapter->notification_desc.iopoll_qdex,
+ sizeof(struct leapraid_int_rq), GFP_KERNEL);
+ if (!adapter->notification_desc.int_rqs) {
+ dev_err(&adapter->pdev->dev,
+ "MSIx: allocate %d interrupt reply queues failed!\n",
+ adapter->notification_desc.iopoll_qdex);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qcnt; i++) {
+ adapter->notification_desc.blk_mq_poll_rqs[i].rq.adapter =
+ adapter;
+ adapter->notification_desc.blk_mq_poll_rqs[i].rq.msix_idx =
+ i + adapter->notification_desc.iopoll_qdex;
+ atomic_set(&adapter->notification_desc.blk_mq_poll_rqs[i].rq.busy, 0);
+ snprintf(adapter->notification_desc.blk_mq_poll_rqs[i].rq.name,
+ LEAPRAID_NAME_LENGTH,
+ "%s%u-MQ-Poll%u", LEAPRAID_DRIVER_NAME,
+ adapter->adapter_attr.id, i);
+ atomic_set(&adapter->notification_desc.blk_mq_poll_rqs[i].busy, 0);
+ atomic_set(&adapter->notification_desc.blk_mq_poll_rqs[i].pause, 0);
+ }
+
+ adapter->notification_desc.msix_cpu_map_sz =
+ num_online_cpus();
+ adapter->notification_desc.msix_cpu_map =
+ kzalloc(adapter->notification_desc.msix_cpu_map_sz,
+ GFP_KERNEL);
+ if (!adapter->notification_desc.msix_cpu_map)
+ return -ENOMEM;
+
+ adapter->notification_desc.msix_enable = true;
+ rc = leapraid_setup_irqs(adapter);
+ if (rc) {
+ leapraid_free_irq(adapter);
+ adapter->notification_desc.msix_enable = false;
+ goto legacy_int;
+ }
+
+ return 0;
+
+legacy_int:
+ rc = leapraid_set_legacy_int(adapter);
+
+ return rc;
+}
+
+static int leapraid_set_msi(struct leapraid_adapter *adapter)
+{
+ int iopoll_qcnt = 0;
+ unsigned int i;
+ int rc, msi_cnt;
+
+ if (msix_disable == 1)
+ goto legacy_int1;
+
+ msi_cnt = leapraid_msi_cnt(adapter->pdev);
+ if (msi_cnt <= 0) {
+		dev_info(&adapter->pdev->dev, "msi unsupported!\n");
+ goto legacy_int1;
+ }
+
+ if (reset_devices)
+ adapter->adapter_attr.rq_cnt = 1;
+ else
+ adapter->adapter_attr.rq_cnt = min_t(int,
+ num_online_cpus(),
+ msi_cnt);
+
+ if (max_msix_vectors > 0)
+ adapter->adapter_attr.rq_cnt = min_t(
+ int, max_msix_vectors, adapter->adapter_attr.rq_cnt);
+
+
+ if (iopoll_qcnt) {
+ adapter->notification_desc.blk_mq_poll_rqs =
+ kcalloc(iopoll_qcnt,
+ sizeof(struct leapraid_blk_mq_poll_rq),
+ GFP_KERNEL);
+ if (!adapter->notification_desc.blk_mq_poll_rqs)
+ return -ENOMEM;
+
+ adapter->adapter_attr.rq_cnt =
+ min(adapter->adapter_attr.rq_cnt + iopoll_qcnt,
+ msi_cnt);
+ }
+
+ adapter->notification_desc.iopoll_qdex =
+ adapter->adapter_attr.rq_cnt - iopoll_qcnt;
+ rc = pci_alloc_irq_vectors_affinity(
+ adapter->pdev,
+ 1,
+ adapter->notification_desc.iopoll_qdex,
+ PCI_IRQ_MSI | PCI_IRQ_AFFINITY, NULL);
+ if (rc < 0) {
+		dev_err(&adapter->pdev->dev,
+			"failed to allocate %d msi vectors!\n",
+			adapter->notification_desc.iopoll_qdex);
+ goto legacy_int1;
+ }
+ if (rc != adapter->notification_desc.iopoll_qdex) {
+ adapter->notification_desc.iopoll_qdex = rc;
+ adapter->adapter_attr.rq_cnt =
+ adapter->notification_desc.iopoll_qdex + iopoll_qcnt;
+ }
+ adapter->notification_desc.iopoll_qcnt = iopoll_qcnt;
+ dev_info(&adapter->pdev->dev,
+ "MSI: req queue cnt=%d, intr=%d/poll=%d rep queues!\n",
+ adapter->adapter_attr.rq_cnt,
+ adapter->notification_desc.iopoll_qdex,
+ adapter->notification_desc.iopoll_qcnt);
+
+ adapter->notification_desc.int_rqs =
+ kcalloc(adapter->notification_desc.iopoll_qdex,
+ sizeof(struct leapraid_int_rq),
+ GFP_KERNEL);
+ if (!adapter->notification_desc.int_rqs) {
+ dev_err(&adapter->pdev->dev,
+ "MSI: allocate %d interrupt reply queues failed!\n",
+ adapter->notification_desc.iopoll_qdex);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qcnt; i++) {
+ adapter->notification_desc.blk_mq_poll_rqs[i].rq.adapter =
+ adapter;
+ adapter->notification_desc.blk_mq_poll_rqs[i].rq.msix_idx =
+ i + adapter->notification_desc.iopoll_qdex;
+ atomic_set(
+ &adapter->notification_desc.blk_mq_poll_rqs[i].rq.busy,
+ 0);
+ snprintf(adapter->notification_desc.blk_mq_poll_rqs[i].rq.name,
+ LEAPRAID_NAME_LENGTH,
+ "%s%u-MQ-Poll%u", LEAPRAID_DRIVER_NAME,
+ adapter->adapter_attr.id, i);
+ atomic_set(
+ &adapter->notification_desc.blk_mq_poll_rqs[i].busy,
+ 0);
+ atomic_set(
+ &adapter->notification_desc.blk_mq_poll_rqs[i].pause,
+ 0);
+ }
+
+ adapter->notification_desc.msix_cpu_map_sz = num_online_cpus();
+ adapter->notification_desc.msix_cpu_map =
+ kzalloc(adapter->notification_desc.msix_cpu_map_sz,
+ GFP_KERNEL);
+ if (!adapter->notification_desc.msix_cpu_map)
+ return -ENOMEM;
+
+ adapter->notification_desc.msix_enable = true;
+ rc = leapraid_setup_irqs(adapter);
+ if (rc) {
+ leapraid_free_irq(adapter);
+ adapter->notification_desc.msix_enable = false;
+ goto legacy_int1;
+ }
+
+ return 0;
+
+legacy_int1:
+ rc = leapraid_set_legacy_int(adapter);
+
+ return rc;
+}
+
+static int leapraid_set_notification(struct leapraid_adapter *adapter)
+{
+ int rc = 0;
+
+ if (interrupt_mode == 0) {
+ rc = leapraid_set_msix(adapter);
+ if (rc)
+ pr_err("%s enable MSI-X irq failed!\n", __func__);
+ } else if (interrupt_mode == 1) {
+ rc = leapraid_set_msi(adapter);
+ if (rc)
+ pr_err("%s enable MSI irq failed!\n", __func__);
+ } else if (interrupt_mode == 2) {
+ rc = leapraid_set_legacy_int(adapter);
+ if (rc)
+ pr_err("%s enable legacy irq failed!\n", __func__);
+ }
+
+ return rc;
+}
+
+static void leapraid_disable_pcie_and_notification(
+ struct leapraid_adapter *adapter)
+{
+ leapraid_free_irq(adapter);
+ leapraid_disable_pcie(adapter);
+}
+
+int leapraid_set_pcie_and_notification(struct leapraid_adapter *adapter)
+{
+ int rc;
+
+ rc = leapraid_enable_pcie(adapter);
+ if (rc)
+ goto out_fail;
+
+ leapraid_mask_int(adapter);
+
+ rc = leapraid_set_notification(adapter);
+ if (rc)
+ goto out_fail;
+
+ pci_save_state(adapter->pdev);
+
+ return 0;
+
+out_fail:
+ leapraid_disable_pcie_and_notification(adapter);
+ return rc;
+}
+
+void leapraid_disable_controller(struct leapraid_adapter *adapter)
+{
+ if (!adapter->iomem_base)
+ return;
+
+ leapraid_mask_int(adapter);
+
+ adapter->access_ctrl.shost_recovering = true;
+ leapraid_make_adapter_ready(adapter, PART_RESET);
+ adapter->access_ctrl.shost_recovering = false;
+
+ leapraid_disable_pcie_and_notification(adapter);
+}
+
+static int leapraid_adapter_unit_reset(struct leapraid_adapter *adapter)
+{
+ int rc = 0;
+
+ dev_info(&adapter->pdev->dev, "fire unit reset\n");
+ writel(LEAPRAID_FUNC_ADAPTER_UNIT_RESET << LEAPRAID_DB_FUNC_SHIFT,
+ &adapter->iomem_base->db);
+ if (leapraid_db_wait_ack_and_clear_int(adapter))
+ rc = -EFAULT;
+
+	if (!leapraid_wait_adapter_ready(adapter))
+		rc = -EFAULT;
+
+ dev_info(&adapter->pdev->dev, "unit reset: %s\n",
+ ((rc == 0) ? "SUCCESS" : "FAILED"));
+ return rc;
+}
+
+static int leapraid_make_adapter_ready(struct leapraid_adapter *adapter,
+ enum reset_type type)
+{
+ u32 db;
+ int rc;
+ int count;
+
+ if (!leapraid_pci_active(adapter))
+ return 0;
+
+ count = 0;
+ db = leapraid_readl(&adapter->iomem_base->db);
+ if ((db & LEAPRAID_DB_MASK) == LEAPRAID_DB_RESET) {
+ while ((db & LEAPRAID_DB_MASK) != LEAPRAID_DB_READY) {
+ if (count++ == LEAPRAID_DB_RETRY_COUNT_MAX) {
+ dev_err(&adapter->pdev->dev,
+ "wait adapter ready timeout\n");
+ return -EFAULT;
+ }
+ ssleep(1);
+ db = leapraid_readl(&adapter->iomem_base->db);
+ dev_info(&adapter->pdev->dev,
+ "wait adapter ready, count=%d, db=0x%x\n",
+ count, db);
+ }
+ }
+ if ((db & LEAPRAID_DB_MASK) == LEAPRAID_DB_READY)
+ return 0;
+
+ if (db & LEAPRAID_DB_USED)
+ goto full_reset;
+
+ if ((db & LEAPRAID_DB_MASK) == LEAPRAID_DB_FAULT)
+ goto full_reset;
+
+ if (type == FULL_RESET)
+ goto full_reset;
+
+ if ((db & LEAPRAID_DB_MASK) == LEAPRAID_DB_OPERATIONAL)
+ if (!(leapraid_adapter_unit_reset(adapter)))
+ return 0;
+
+full_reset:
+ rc = leapraid_host_diag_reset(adapter);
+ return rc;
+}
+
+static void leapraid_fw_log_exit(struct leapraid_adapter *adapter)
+{
+ if (!adapter->fw_log_desc.open_pcie_trace)
+ return;
+
+ if (adapter->fw_log_desc.fw_log_buffer) {
+ dma_free_coherent(&adapter->pdev->dev,
+ (LEAPRAID_SYS_LOG_BUF_SIZE +
+ LEAPRAID_SYS_LOG_BUF_RESERVE),
+ adapter->fw_log_desc.fw_log_buffer,
+ adapter->fw_log_desc.fw_log_buffer_dma);
+ adapter->fw_log_desc.fw_log_buffer = NULL;
+ }
+}
+
+static int leapraid_fw_log_init(struct leapraid_adapter *adapter)
+{
+ struct leapraid_adapter_log_req adapter_log_req;
+ struct leapraid_adapter_log_rep adapter_log_rep;
+ u16 adapter_status;
+ u64 buf_addr;
+ u32 rc;
+
+ if (!adapter->fw_log_desc.open_pcie_trace)
+ return 0;
+
+ if (!adapter->fw_log_desc.fw_log_buffer) {
+ adapter->fw_log_desc.fw_log_buffer =
+ dma_alloc_coherent(
+ &adapter->pdev->dev,
+ (LEAPRAID_SYS_LOG_BUF_SIZE +
+ LEAPRAID_SYS_LOG_BUF_RESERVE),
+ &adapter->fw_log_desc.fw_log_buffer_dma,
+ GFP_KERNEL);
+ if (!adapter->fw_log_desc.fw_log_buffer) {
+ dev_err(&adapter->pdev->dev,
+ "%s: log buf alloc failed.\n",
+ __func__);
+ return -ENOMEM;
+ }
+ }
+
+ memset(&adapter_log_req, 0, sizeof(struct leapraid_adapter_log_req));
+ adapter_log_req.func = LEAPRAID_FUNC_LOGBUF_INIT;
+ buf_addr = adapter->fw_log_desc.fw_log_buffer_dma;
+
+ adapter_log_req.mbox.w[0] =
+ cpu_to_le32((u32)(buf_addr & 0xFFFFFFFF));
+ adapter_log_req.mbox.w[1] =
+ cpu_to_le32((u32)((buf_addr >> 32) & 0xFFFFFFFF));
+ adapter_log_req.mbox.w[2] =
+ cpu_to_le32(LEAPRAID_SYS_LOG_BUF_SIZE);
+ rc = leapraid_handshake_func(adapter,
+ sizeof(struct leapraid_adapter_log_req),
+ (u32 *)&adapter_log_req,
+ sizeof(struct leapraid_adapter_log_rep),
+ (u16 *)&adapter_log_rep);
+ if (rc != 0) {
+ dev_err(&adapter->pdev->dev, "%s: handshake failed, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ adapter_status = le16_to_cpu(adapter_log_rep.adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (adapter_status != LEAPRAID_ADAPTER_STATUS_SUCCESS) {
+ dev_err(&adapter->pdev->dev, "%s: failed!\n", __func__);
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
+static void leapraid_free_host_memory(struct leapraid_adapter *adapter)
+{
+ unsigned int i;
+
+ if (adapter->mem_desc.task_desc) {
+ dma_free_coherent(&adapter->pdev->dev,
+ adapter->adapter_attr.task_desc_dma_size,
+ adapter->mem_desc.task_desc,
+ adapter->mem_desc.task_desc_dma);
+ adapter->mem_desc.task_desc = NULL;
+ }
+
+ if (adapter->mem_desc.sense_data) {
+ dma_free_coherent(
+ &adapter->pdev->dev,
+ adapter->adapter_attr.io_qd * SCSI_SENSE_BUFFERSIZE,
+ adapter->mem_desc.sense_data,
+ adapter->mem_desc.sense_data_dma);
+ adapter->mem_desc.sense_data = NULL;
+ }
+
+ if (adapter->mem_desc.rep_msg) {
+ dma_free_coherent(
+ &adapter->pdev->dev,
+ adapter->adapter_attr.rep_msg_qd * LEAPRAID_REPLY_SIEZ,
+ adapter->mem_desc.rep_msg,
+ adapter->mem_desc.rep_msg_dma);
+ adapter->mem_desc.rep_msg = NULL;
+ }
+
+ if (adapter->mem_desc.rep_msg_addr) {
+ dma_free_coherent(&adapter->pdev->dev,
+ adapter->adapter_attr.rep_msg_qd *
+ LEAPRAID_REP_MSG_ADDR_SIZE,
+ adapter->mem_desc.rep_msg_addr,
+ adapter->mem_desc.rep_msg_addr_dma);
+ adapter->mem_desc.rep_msg_addr = NULL;
+ }
+
+ if (adapter->mem_desc.rep_desc_seg_maint) {
+ for (i = 0; i < adapter->adapter_attr.rep_desc_q_seg_cnt;
+ i++) {
+ if (adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg) {
+ dma_free_coherent(
+ &adapter->pdev->dev,
+ (adapter->adapter_attr.rep_desc_qd *
+ LEAPRAID_REP_DESC_ENTRY_SIZE) *
+ LEAPRAID_REP_DESC_CHUNK_SIZE,
+ adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg,
+ adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg_dma);
+ adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg = NULL;
+ }
+ }
+
+ if (adapter->mem_desc.rep_desc_q_arr) {
+ dma_free_coherent(
+ &adapter->pdev->dev,
+ adapter->adapter_attr.rq_cnt *
+ LEAPRAID_REP_RQ_CNT_SIZE,
+ adapter->mem_desc.rep_desc_q_arr,
+ adapter->mem_desc.rep_desc_q_arr_dma);
+ adapter->mem_desc.rep_desc_q_arr = NULL;
+ }
+
+ for (i = 0; i < adapter->adapter_attr.rep_desc_q_seg_cnt; i++)
+ kfree(adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_maint);
+ kfree(adapter->mem_desc.rep_desc_seg_maint);
+ }
+
+ if (adapter->mem_desc.io_tracker) {
+ for (i = 0; i < (unsigned int)adapter->shost->can_queue; i++)
+ leapraid_internal_exit_cmd_priv(
+ adapter,
+ adapter->mem_desc.io_tracker + i);
+ kfree(adapter->mem_desc.io_tracker);
+ adapter->mem_desc.io_tracker = NULL;
+ }
+
+ dma_pool_destroy(adapter->mem_desc.sg_chain_pool);
+}
+
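+/* True if [start, start + size) does not cross a 4GB DMA boundary. */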
+static inline bool leapraid_is_in_same_4g_seg(dma_addr_t start, u32 size)
+{
+ return (upper_32_bits(start) == upper_32_bits(start + size - 1));
+}
+
+int leapraid_internal_init_cmd_priv(struct leapraid_adapter *adapter,
+ struct leapraid_io_req_tracker *io_tracker)
+{
+ io_tracker->chain =
+ dma_pool_alloc(adapter->mem_desc.sg_chain_pool,
+ GFP_KERNEL,
+ &io_tracker->chain_dma);
+
+ if (!io_tracker->chain)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int leapraid_internal_exit_cmd_priv(struct leapraid_adapter *adapter,
+ struct leapraid_io_req_tracker *io_tracker)
+{
+ if (io_tracker && io_tracker->chain)
+ dma_pool_free(adapter->mem_desc.sg_chain_pool,
+ io_tracker->chain,
+ io_tracker->chain_dma);
+
+ return 0;
+}
+
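+/*
+ * Size the queues from the reported adapter features and allocate all
+ * host-resident DMA memory. Buffers that must not cross a 4GB boundary
+ * trigger a retry with a 32-bit DMA mask (the -EAGAIN path below).
+ */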
+static int leapraid_request_host_memory(struct leapraid_adapter *adapter)
+{
+ struct leapraid_adapter_features *facts =
+ &adapter->adapter_attr.features;
+ u16 rep_desc_q_cnt_allocated;
+ unsigned int i, j;
+ int rc;
+
+ /* sg table size */
+ adapter->shost->sg_tablesize = LEAPRAID_SG_DEPTH;
+ if (reset_devices)
+ adapter->shost->sg_tablesize =
+ LEAPRAID_KDUMP_MIN_PHYS_SEGMENTS;
+	/* high priority cmds queue depth (fixed) */
+	adapter->dynamic_task_desc.hp_cmd_qd = LEAPRAID_FIXED_HP_CMDS;
+ /* internal cmds queue depth */
+ adapter->dynamic_task_desc.inter_cmd_qd = LEAPRAID_FIXED_INTER_CMDS;
+ /* adapter cmds total queue depth */
+ if (reset_devices)
+ adapter->adapter_attr.adapter_total_qd =
+ LEAPRAID_DEFAULT_CMD_QD_OFFSET +
+ adapter->dynamic_task_desc.inter_cmd_qd +
+ adapter->dynamic_task_desc.hp_cmd_qd;
+ else
+ adapter->adapter_attr.adapter_total_qd = facts->req_slot +
+ adapter->dynamic_task_desc.hp_cmd_qd;
+ /* reply message queue depth */
+ adapter->adapter_attr.rep_msg_qd =
+ adapter->adapter_attr.adapter_total_qd +
+ LEAPRAID_DEFAULT_CMD_QD_OFFSET;
+ /* reply descriptor queue depth */
+ adapter->adapter_attr.rep_desc_qd =
+ round_up(adapter->adapter_attr.adapter_total_qd +
+ adapter->adapter_attr.rep_msg_qd +
+ LEAPRAID_TASKID_OFFSET_CTRL_CMD,
+ LEAPRAID_REPLY_QD_ALIGNMENT);
+ /* scsi cmd io depth */
+ adapter->adapter_attr.io_qd =
+ adapter->adapter_attr.adapter_total_qd -
+ adapter->dynamic_task_desc.hp_cmd_qd -
+ adapter->dynamic_task_desc.inter_cmd_qd;
+ /* scsi host can queue */
+ adapter->shost->can_queue = adapter->adapter_attr.io_qd -
+ LEAPRAID_TASKID_OFFSET_SCSIIO_CMD;
+ adapter->driver_cmds.ctl_cmd.taskid = adapter->shost->can_queue +
+ LEAPRAID_TASKID_OFFSET_CTRL_CMD;
+ adapter->driver_cmds.driver_scsiio_cmd.taskid =
+ adapter->shost->can_queue +
+ LEAPRAID_TASKID_OFFSET_SCSIIO_CMD;
+
+ /* allocate task descriptor */
+try_again:
+ adapter->adapter_attr.task_desc_dma_size =
+ (adapter->adapter_attr.adapter_total_qd +
+ LEAPRAID_TASKID_OFFSET_CTRL_CMD) *
+ LEAPRAID_REQUEST_SIZE;
+ adapter->mem_desc.task_desc =
+ dma_alloc_coherent(&adapter->pdev->dev,
+ adapter->adapter_attr.task_desc_dma_size,
+ &adapter->mem_desc.task_desc_dma,
+ GFP_KERNEL);
+ if (!adapter->mem_desc.task_desc) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate task descriptor DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ /* allocate chain message pool */
+ adapter->mem_desc.sg_chain_pool_size =
+ LEAPRAID_DEFAULT_CHAINS_PER_IO * LEAPRAID_CHAIN_SEG_SIZE;
+ adapter->mem_desc.sg_chain_pool =
+ dma_pool_create("leapraid chain pool",
+ &adapter->pdev->dev,
+ adapter->mem_desc.sg_chain_pool_size, 16, 0);
+ if (!adapter->mem_desc.sg_chain_pool) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate chain message DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* allocate io tracker to ref scsi io */
+ adapter->mem_desc.io_tracker =
+ kcalloc(adapter->shost->can_queue,
+ sizeof(struct leapraid_io_req_tracker),
+ GFP_KERNEL);
+ if (!adapter->mem_desc.io_tracker) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; (int)i < adapter->shost->can_queue; i++) {
+ rc = leapraid_internal_init_cmd_priv(
+ adapter,
+ adapter->mem_desc.io_tracker + i);
+ if (rc)
+ goto out;
+ }
+
+
+ adapter->dynamic_task_desc.hp_taskid =
+ adapter->adapter_attr.io_qd +
+ LEAPRAID_HP_TASKID_OFFSET_CTL_CMD;
+ /* allocate static hp taskid */
+ adapter->driver_cmds.ctl_cmd.hp_taskid =
+ adapter->dynamic_task_desc.hp_taskid;
+ adapter->driver_cmds.tm_cmd.hp_taskid =
+ adapter->dynamic_task_desc.hp_taskid +
+ LEAPRAID_HP_TASKID_OFFSET_TM_CMD;
+
+ adapter->dynamic_task_desc.inter_taskid =
+ adapter->dynamic_task_desc.hp_taskid +
+ adapter->dynamic_task_desc.hp_cmd_qd;
+ adapter->driver_cmds.scan_dev_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid;
+ adapter->driver_cmds.cfg_op_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid +
+ LEAPRAID_TASKID_OFFSET_CFG_OP_CMD;
+ adapter->driver_cmds.transport_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid +
+ LEAPRAID_TASKID_OFFSET_TRANSPORT_CMD;
+ adapter->driver_cmds.timestamp_sync_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid +
+ LEAPRAID_TASKID_OFFSET_TIMESTAMP_SYNC_CMD;
+ adapter->driver_cmds.raid_action_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid +
+ LEAPRAID_TASKID_OFFSET_RAID_ACTION_CMD;
+ adapter->driver_cmds.enc_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid +
+ LEAPRAID_TASKID_OFFSET_ENC_CMD;
+ adapter->driver_cmds.notify_event_cmd.inter_taskid =
+ adapter->dynamic_task_desc.inter_taskid +
+ LEAPRAID_TASKID_OFFSET_NOTIFY_EVENT_CMD;
+ dev_info(&adapter->pdev->dev, "queue depth:\n");
+ dev_info(&adapter->pdev->dev, " host->can_queue: %d\n",
+ adapter->shost->can_queue);
+ dev_info(&adapter->pdev->dev, " io_qd: %d\n",
+ adapter->adapter_attr.io_qd);
+ dev_info(&adapter->pdev->dev, " hpr_cmd_qd: %d\n",
+ adapter->dynamic_task_desc.hp_cmd_qd);
+ dev_info(&adapter->pdev->dev, " inter_cmd_qd: %d\n",
+ adapter->dynamic_task_desc.inter_cmd_qd);
+ dev_info(&adapter->pdev->dev, " adapter_total_qd: %d\n",
+ adapter->adapter_attr.adapter_total_qd);
+
+ dev_info(&adapter->pdev->dev, "taskid range:\n");
+ dev_info(&adapter->pdev->dev,
+ " adapter->dynamic_task_desc.hp_taskid: %d\n",
+ adapter->dynamic_task_desc.hp_taskid);
+ dev_info(&adapter->pdev->dev,
+ " adapter->dynamic_task_desc.inter_taskid: %d\n",
+ adapter->dynamic_task_desc.inter_taskid);
+
+	/*
+	 * Allocate the driver-maintained sense data buffer; it must
+	 * reside within a single 4GB DMA segment.
+	 */
+ adapter->mem_desc.sense_data =
+ dma_alloc_coherent(
+ &adapter->pdev->dev,
+ adapter->adapter_attr.io_qd * SCSI_SENSE_BUFFERSIZE,
+ &adapter->mem_desc.sense_data_dma, GFP_KERNEL);
+ if (!adapter->mem_desc.sense_data) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate sense data DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (!leapraid_is_in_same_4g_seg(adapter->mem_desc.sense_data_dma,
+ adapter->adapter_attr.io_qd *
+ SCSI_SENSE_BUFFERSIZE)) {
+		dev_warn(&adapter->pdev->dev,
+			 "retrying with 32-bit DMA: sense data not within one 4GB segment!\n");
+ rc = -EAGAIN;
+ goto out;
+ }
+
+	/* reply frames must also reside within a single 4GB DMA segment */
+ adapter->mem_desc.rep_msg =
+ dma_alloc_coherent(&adapter->pdev->dev,
+ adapter->adapter_attr.rep_msg_qd *
+ LEAPRAID_REPLY_SIEZ,
+ &adapter->mem_desc.rep_msg_dma,
+ GFP_KERNEL);
+ if (!adapter->mem_desc.rep_msg) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate reply message DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (!leapraid_is_in_same_4g_seg(adapter->mem_desc.rep_msg_dma,
+ adapter->adapter_attr.rep_msg_qd *
+ LEAPRAID_REPLY_SIEZ)) {
+		dev_warn(&adapter->pdev->dev,
+			 "retrying with 32-bit DMA: reply messages not within one 4GB segment!\n");
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ /* address of reply frame */
+ adapter->mem_desc.rep_msg_addr =
+ dma_alloc_coherent(&adapter->pdev->dev,
+ adapter->adapter_attr.rep_msg_qd *
+ LEAPRAID_REP_MSG_ADDR_SIZE,
+ &adapter->mem_desc.rep_msg_addr_dma,
+ GFP_KERNEL);
+ if (!adapter->mem_desc.rep_msg_addr) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate reply message address DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ adapter->adapter_attr.rep_desc_q_seg_cnt =
+ DIV_ROUND_UP(adapter->adapter_attr.rq_cnt,
+ LEAPRAID_REP_DESC_CHUNK_SIZE);
+ adapter->mem_desc.rep_desc_seg_maint =
+ kcalloc(adapter->adapter_attr.rep_desc_q_seg_cnt,
+ sizeof(struct leapraid_rep_desc_seg_maint),
+ GFP_KERNEL);
+ if (!adapter->mem_desc.rep_desc_seg_maint) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rep_desc_q_cnt_allocated = 0;
+ for (i = 0; i < adapter->adapter_attr.rep_desc_q_seg_cnt; i++) {
+ adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_maint =
+ kcalloc(LEAPRAID_REP_DESC_CHUNK_SIZE,
+ sizeof(struct leapraid_rep_desc_maint),
+ GFP_KERNEL);
+ if (!adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_maint) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg =
+ dma_alloc_coherent(
+ &adapter->pdev->dev,
+ (adapter->adapter_attr.rep_desc_qd *
+ LEAPRAID_REP_DESC_ENTRY_SIZE) *
+ LEAPRAID_REP_DESC_CHUNK_SIZE,
+ &adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg_dma,
+ GFP_KERNEL);
+ if (!adapter->mem_desc.rep_desc_seg_maint[i].rep_desc_seg) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate reply descriptor segment DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (j = 0; j < LEAPRAID_REP_DESC_CHUNK_SIZE; j++) {
+ if (rep_desc_q_cnt_allocated >=
+ adapter->adapter_attr.rq_cnt)
+ break;
+ adapter->mem_desc
+ .rep_desc_seg_maint[i]
+ .rep_desc_maint[j]
+ .rep_desc =
+ (void *)((u8 *)(
+ adapter->mem_desc
+ .rep_desc_seg_maint[i]
+ .rep_desc_seg) +
+ j *
+ (adapter->adapter_attr.rep_desc_qd *
+ LEAPRAID_REP_DESC_ENTRY_SIZE));
+ adapter->mem_desc
+ .rep_desc_seg_maint[i]
+ .rep_desc_maint[j]
+ .rep_desc_dma =
+ adapter->mem_desc
+ .rep_desc_seg_maint[i]
+ .rep_desc_seg_dma +
+ j *
+ (adapter->adapter_attr.rep_desc_qd *
+ LEAPRAID_REP_DESC_ENTRY_SIZE);
+ rep_desc_q_cnt_allocated++;
+ }
+ }
+
+ if (!reset_devices) {
+ adapter->mem_desc.rep_desc_q_arr =
+ dma_alloc_coherent(
+ &adapter->pdev->dev,
+ adapter->adapter_attr.rq_cnt *
+ LEAPRAID_REP_RQ_CNT_SIZE,
+ &adapter->mem_desc.rep_desc_q_arr_dma,
+ GFP_KERNEL);
+ if (!adapter->mem_desc.rep_desc_q_arr) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate reply descriptor queue array DMA!\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ }
+
+ return 0;
+out:
+ if (rc == -EAGAIN) {
+ leapraid_free_host_memory(adapter);
+ adapter->adapter_attr.use_32_dma_mask = true;
+ rc = dma_set_mask_and_coherent(&adapter->pdev->dev,
+ DMA_BIT_MASK(32));
+ if (rc) {
+			dev_err(&adapter->pdev->dev,
+				"failed to set 32-bit DMA mask\n");
+ return rc;
+ }
+ goto try_again;
+ }
+ return rc;
+}
+
+static int leapraid_alloc_dev_topo_bitmaps(struct leapraid_adapter *adapter)
+{
+ adapter->dev_topo.pd_hdls_sz =
+ adapter->adapter_attr.features.max_dev_handle /
+ LEAPRAID_BITS_PER_BYTE;
+ if (adapter->adapter_attr.features.max_dev_handle %
+ LEAPRAID_BITS_PER_BYTE)
+ adapter->dev_topo.pd_hdls_sz++;
+ adapter->dev_topo.pd_hdls =
+ kzalloc(adapter->dev_topo.pd_hdls_sz, GFP_KERNEL);
+ if (!adapter->dev_topo.pd_hdls)
+ return -ENOMEM;
+
+ adapter->dev_topo.blocking_hdls =
+ kzalloc(adapter->dev_topo.pd_hdls_sz, GFP_KERNEL);
+ if (!adapter->dev_topo.blocking_hdls)
+ return -ENOMEM;
+
+	adapter->dev_topo.pending_dev_add_sz =
+		DIV_ROUND_UP(adapter->adapter_attr.features.max_dev_handle,
+			     LEAPRAID_BITS_PER_BYTE);
+ adapter->dev_topo.pending_dev_add =
+ kzalloc(adapter->dev_topo.pending_dev_add_sz, GFP_KERNEL);
+ if (!adapter->dev_topo.pending_dev_add)
+ return -ENOMEM;
+
+ adapter->dev_topo.dev_removing_sz =
+ adapter->dev_topo.pending_dev_add_sz;
+ adapter->dev_topo.dev_removing =
+ kzalloc(adapter->dev_topo.dev_removing_sz, GFP_KERNEL);
+ if (!adapter->dev_topo.dev_removing)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void leapraid_free_dev_topo_bitmaps(struct leapraid_adapter *adapter)
+{
+ kfree(adapter->dev_topo.pd_hdls);
+ kfree(adapter->dev_topo.blocking_hdls);
+ kfree(adapter->dev_topo.pending_dev_add);
+ kfree(adapter->dev_topo.dev_removing);
+}
+
+static int leapraid_init_driver_cmds(struct leapraid_adapter *adapter)
+{
+ u32 buffer_size = 0;
+ void *buffer;
+
+ INIT_LIST_HEAD(&adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.scan_dev_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.scan_dev_cmd.cb_idx = LEAPRAID_SCAN_DEV_CB_IDX;
+ list_add_tail(&adapter->driver_cmds.scan_dev_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.cfg_op_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.cfg_op_cmd.cb_idx = LEAPRAID_CONFIG_CB_IDX;
+ mutex_init(&adapter->driver_cmds.cfg_op_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.cfg_op_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.transport_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.transport_cmd.cb_idx = LEAPRAID_TRANSPORT_CB_IDX;
+ mutex_init(&adapter->driver_cmds.transport_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.transport_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.timestamp_sync_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.timestamp_sync_cmd.cb_idx =
+ LEAPRAID_TIMESTAMP_SYNC_CB_IDX;
+ mutex_init(&adapter->driver_cmds.timestamp_sync_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.timestamp_sync_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.raid_action_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.raid_action_cmd.cb_idx =
+ LEAPRAID_RAID_ACTION_CB_IDX;
+ mutex_init(&adapter->driver_cmds.raid_action_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.raid_action_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.driver_scsiio_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.driver_scsiio_cmd.cb_idx =
+ LEAPRAID_DRIVER_SCSIIO_CB_IDX;
+ mutex_init(&adapter->driver_cmds.driver_scsiio_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.driver_scsiio_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
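+	/*
+	 * The internal SCSI command is carved out of a single allocation:
+	 * a scsi_cmnd, followed by the I/O request tracker (exposed via
+	 * host_scribble), the sense buffer, one scatterlist entry and, in
+	 * the trailing 32 bytes of slack, the CDB (scmd->cmnd).
+	 */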
+ buffer_size = sizeof(struct scsi_cmnd) +
+ sizeof(struct leapraid_io_req_tracker) +
+ SCSI_SENSE_BUFFERSIZE +
+ sizeof(struct scatterlist);
+ buffer_size += 32;
+ buffer = kzalloc(buffer_size, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ adapter->driver_cmds.internal_scmd = buffer;
+ buffer = (void *)((u8 *)buffer +
+ sizeof(struct scsi_cmnd) +
+ sizeof(struct leapraid_io_req_tracker));
+ adapter->driver_cmds.internal_scmd->sense_buffer =
+ (unsigned char *)buffer;
+ buffer = (void *)((u8 *)buffer + SCSI_SENSE_BUFFERSIZE);
+ adapter->driver_cmds.internal_scmd->sdb.table.sgl =
+ (struct scatterlist *)buffer;
+ buffer = (void *)((u8 *)buffer + sizeof(struct scatterlist));
+ adapter->driver_cmds.internal_scmd->cmnd = buffer;
+ adapter->driver_cmds.internal_scmd->host_scribble =
+ (unsigned char *)(adapter->driver_cmds.internal_scmd + 1);
+
+ adapter->driver_cmds.enc_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.enc_cmd.cb_idx = LEAPRAID_ENC_CB_IDX;
+ mutex_init(&adapter->driver_cmds.enc_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.enc_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.notify_event_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.notify_event_cmd.cb_idx =
+ LEAPRAID_NOTIFY_EVENT_CB_IDX;
+ mutex_init(&adapter->driver_cmds.notify_event_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.notify_event_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.ctl_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.ctl_cmd.cb_idx = LEAPRAID_CTL_CB_IDX;
+ mutex_init(&adapter->driver_cmds.ctl_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.ctl_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ adapter->driver_cmds.tm_cmd.status = LEAPRAID_CMD_NOT_USED;
+ adapter->driver_cmds.tm_cmd.cb_idx = LEAPRAID_TM_CB_IDX;
+ mutex_init(&adapter->driver_cmds.tm_cmd.mutex);
+ list_add_tail(&adapter->driver_cmds.tm_cmd.list,
+ &adapter->driver_cmds.special_cmd_list);
+
+ return 0;
+}
+
+static void leapraid_unmask_evts(struct leapraid_adapter *adapter, u16 evt)
+{
+ if (evt >= LEAPRAID_MAX_EVENT_NUM)
+ return;
+
+ clear_bit(evt, (unsigned long *)adapter->fw_evt_s.leapraid_evt_masks);
+}
+
+static void leapraid_init_event_mask(struct leapraid_adapter *adapter)
+{
+ int i;
+
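+	/* Mask every event, then unmask only those the driver handles. */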
+ for (i = 0; i < LEAPRAID_EVT_MASK_COUNT; i++)
+ adapter->fw_evt_s.leapraid_evt_masks[i] = -1;
+ leapraid_unmask_evts(adapter, LEAPRAID_EVT_SAS_DISCOVERY);
+ leapraid_unmask_evts(adapter, LEAPRAID_EVT_SAS_TOPO_CHANGE_LIST);
+ leapraid_unmask_evts(adapter, LEAPRAID_EVT_SAS_ENCL_DEV_STATUS_CHANGE);
+ leapraid_unmask_evts(adapter, LEAPRAID_EVT_SAS_DEV_STATUS_CHANGE);
+ leapraid_unmask_evts(adapter, LEAPRAID_EVT_IR_CHANGE);
+}
+
+static void leapraid_prepare_adp_init_req(
+ struct leapraid_adapter *adapter,
+ struct leapraid_adapter_init_req *init_req)
+{
+ ktime_t cur_time;
+ int i;
+ u32 reply_post_free_ary_sz;
+
+ memset(init_req, 0, sizeof(struct leapraid_adapter_init_req));
+ init_req->func = LEAPRAID_FUNC_ADAPTER_INIT;
+ init_req->who_init = LEAPRAID_WHOINIT_LINUX_DRIVER;
+ init_req->msg_ver = cpu_to_le16(0x0100);
+ init_req->header_ver = cpu_to_le16(0x0000);
+
+ init_req->driver_ver = cpu_to_le32((LEAPRAID_MAJOR_VERSION << 24) |
+ (LEAPRAID_MINOR_VERSION << 16) |
+ (LEAPRAID_BUILD_VERSION << 8) |
+ LEAPRAID_RELEASE_VERSION);
+ if (adapter->notification_desc.msix_enable)
+ init_req->host_msix_vectors = adapter->adapter_attr.rq_cnt;
+
+ init_req->req_frame_size =
+ cpu_to_le16(LEAPRAID_REQUEST_SIZE / LEAPRAID_DWORDS_BYTE_SIZE);
+ init_req->rep_desc_qd =
+ cpu_to_le16(adapter->adapter_attr.rep_desc_qd);
+ init_req->rep_msg_qd =
+ cpu_to_le16(adapter->adapter_attr.rep_msg_qd);
+ init_req->sense_buffer_add_high =
+ cpu_to_le32((u64)adapter->mem_desc.sense_data_dma >> 32);
+ init_req->rep_msg_dma_high =
+ cpu_to_le32((u64)adapter->mem_desc.rep_msg_dma >> 32);
+ init_req->task_desc_base_addr =
+ cpu_to_le64((u64)adapter->mem_desc.task_desc_dma);
+ init_req->rep_msg_addr_dma =
+ cpu_to_le64((u64)adapter->mem_desc.rep_msg_addr_dma);
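+	/*
+	 * Normally the firmware is handed an array of per-queue reply
+	 * descriptor base addresses (RDPQ array mode); with reset_devices
+	 * (e.g. a kdump kernel) a single contiguous descriptor area is
+	 * used instead.
+	 */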
+ if (!reset_devices) {
+ reply_post_free_ary_sz =
+ adapter->adapter_attr.rq_cnt * LEAPRAID_REP_RQ_CNT_SIZE;
+ memset(adapter->mem_desc.rep_desc_q_arr, 0,
+ reply_post_free_ary_sz);
+
+		for (i = 0; i < adapter->adapter_attr.rq_cnt; i++) {
+			struct leapraid_rep_desc_seg_maint *seg =
+				&adapter->mem_desc.rep_desc_seg_maint[i /
+					LEAPRAID_REP_DESC_CHUNK_SIZE];
+
+			adapter->mem_desc.rep_desc_q_arr[i].rep_desc_base_addr =
+				cpu_to_le64((u64)seg->rep_desc_maint[i %
+					LEAPRAID_REP_DESC_CHUNK_SIZE].rep_desc_dma);
+		}
+
+ init_req->msg_flg =
+ LEAPRAID_ADAPTER_INIT_MSGFLG_RDPQ_ARRAY_MODE;
+ init_req->rep_desc_q_arr_addr =
+ cpu_to_le64((u64)adapter->mem_desc.rep_desc_q_arr_dma);
+ } else {
+ init_req->rep_desc_q_arr_addr =
+ cpu_to_le64((u64)adapter->mem_desc
+ .rep_desc_seg_maint[0]
+ .rep_desc_maint[0]
+ .rep_desc_dma);
+ }
+ cur_time = ktime_get_real();
+ init_req->time_stamp = cpu_to_le64(ktime_to_ms(cur_time));
+}
+
+static int leapraid_send_adapter_init(struct leapraid_adapter *adapter)
+{
+ struct leapraid_adapter_init_req init_req;
+ struct leapraid_adapter_init_rep init_rep;
+ u16 adapter_status;
+ int rc = 0;
+
+ leapraid_prepare_adp_init_req(adapter, &init_req);
+
+ rc = leapraid_handshake_func(adapter,
+ sizeof(struct leapraid_adapter_init_req),
+ (u32 *)&init_req,
+ sizeof(struct leapraid_adapter_init_rep),
+ (u16 *)&init_rep);
+ if (rc != 0) {
+ dev_err(&adapter->pdev->dev, "%s: handshake failed, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ adapter_status =
+ le16_to_cpu(init_rep.adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (adapter_status != LEAPRAID_ADAPTER_STATUS_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"%s: adapter init failed, adapter_status=0x%04x\n",
+			__func__, adapter_status);
+ rc = -EIO;
+ }
+
+ adapter->timestamp_sync_cnt = 0;
+ return rc;
+}
+
+static int leapraid_cfg_pages(struct leapraid_adapter *adapter)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_io_unit_page1 *sas_io_unit_page1 = NULL;
+ struct leapraid_bios_page3 bios_page3;
+ struct leapraid_bios_page2 bios_page2;
+ int rc = 0;
+ int sz;
+
+ rc = leapraid_op_config_page(adapter, &bios_page3, cfgp1,
+ cfgp2, GET_BIOS_PG3);
+ if (rc)
+ return rc;
+
+ rc = leapraid_op_config_page(adapter, &bios_page2, cfgp1,
+ cfgp2, GET_BIOS_PG2);
+ if (rc)
+ return rc;
+
+ adapter->adapter_attr.bios_version =
+ le32_to_cpu(bios_page3.bios_version);
+ adapter->adapter_attr.wideport_max_queue_depth =
+ LEAPRAID_SAS_QUEUE_DEPTH;
+ adapter->adapter_attr.narrowport_max_queue_depth =
+ LEAPRAID_SAS_QUEUE_DEPTH;
+ adapter->adapter_attr.sata_max_queue_depth =
+ LEAPRAID_SATA_QUEUE_DEPTH;
+
+ adapter->boot_devs.requested_boot_dev.form =
+ bios_page2.requested_boot_dev_form;
+ memcpy((void *)adapter->boot_devs.requested_boot_dev.pg_dev,
+ (void *)&bios_page2.requested_boot_dev,
+ LEAPRAID_BOOT_DEV_SIZE);
+ adapter->boot_devs.requested_alt_boot_dev.form =
+ bios_page2.requested_alt_boot_dev_form;
+ memcpy((void *)adapter->boot_devs.requested_alt_boot_dev.pg_dev,
+ (void *)&bios_page2.requested_alt_boot_dev,
+ LEAPRAID_BOOT_DEV_SIZE);
+ adapter->boot_devs.current_boot_dev.form =
+ bios_page2.current_boot_dev_form;
+ memcpy((void *)adapter->boot_devs.current_boot_dev.pg_dev,
+ (void *)&bios_page2.current_boot_dev,
+ LEAPRAID_BOOT_DEV_SIZE);
+
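+	/*
+	 * Read just the fixed part of SAS IO unit page 1, up to the
+	 * trailing phy_info table.
+	 */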
+ sz = offsetof(struct leapraid_sas_io_unit_page1, phy_info);
+ sas_io_unit_page1 = kzalloc(sz, GFP_KERNEL);
+	if (!sas_io_unit_page1)
+		return -ENOMEM;
+
+ cfgp1.size = sz;
+
+ rc = leapraid_op_config_page(adapter, sas_io_unit_page1, cfgp1,
+ cfgp2, GET_SAS_IOUNIT_PG1);
+ if (rc)
+ goto out;
+
+ if (le16_to_cpu(sas_io_unit_page1->wideport_max_queue_depth))
+ adapter->adapter_attr.wideport_max_queue_depth =
+ le16_to_cpu(
+ sas_io_unit_page1->wideport_max_queue_depth);
+
+ if (le16_to_cpu(sas_io_unit_page1->narrowport_max_queue_depth))
+ adapter->adapter_attr.narrowport_max_queue_depth =
+ le16_to_cpu(
+ sas_io_unit_page1->narrowport_max_queue_depth);
+
+ if (sas_io_unit_page1->sata_max_queue_depth)
+ adapter->adapter_attr.sata_max_queue_depth =
+ sas_io_unit_page1->sata_max_queue_depth;
+
+out:
+ kfree(sas_io_unit_page1);
+ dev_info(&adapter->pdev->dev,
+ "max wp qd=%d, max np qd=%d, max sata qd=%d\n",
+ adapter->adapter_attr.wideport_max_queue_depth,
+ adapter->adapter_attr.narrowport_max_queue_depth,
+ adapter->adapter_attr.sata_max_queue_depth);
+ return rc;
+}
+
+static int leapraid_evt_notify(struct leapraid_adapter *adapter)
+{
+ struct leapraid_evt_notify_req *evt_notify_req;
+ int rc = 0;
+ int i;
+
+ mutex_lock(&adapter->driver_cmds.notify_event_cmd.mutex);
+ adapter->driver_cmds.notify_event_cmd.status = LEAPRAID_CMD_PENDING;
+ evt_notify_req =
+ leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.notify_event_cmd.inter_taskid);
+ memset(evt_notify_req, 0, sizeof(struct leapraid_evt_notify_req));
+ evt_notify_req->func = LEAPRAID_FUNC_EVENT_NOTIFY;
+ for (i = 0; i < LEAPRAID_EVT_MASK_COUNT; i++)
+ evt_notify_req->evt_masks[i] =
+ cpu_to_le32(adapter->fw_evt_s.leapraid_evt_masks[i]);
+ init_completion(&adapter->driver_cmds.notify_event_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.notify_event_cmd.inter_taskid);
+ wait_for_completion_timeout(
+ &adapter->driver_cmds.notify_event_cmd.done,
+ LEAPRAID_NOTIFY_EVENT_CMD_TIMEOUT * HZ);
+	if (!(adapter->driver_cmds.notify_event_cmd.status &
+	      LEAPRAID_CMD_DONE) &&
+	    (adapter->driver_cmds.notify_event_cmd.status &
+	     LEAPRAID_CMD_RESET))
+		rc = -EFAULT;
+ adapter->driver_cmds.notify_event_cmd.status = LEAPRAID_CMD_NOT_USED;
+ mutex_unlock(&adapter->driver_cmds.notify_event_cmd.mutex);
+
+ return rc;
+}
+
+int leapraid_scan_dev(struct leapraid_adapter *adapter, bool async_scan_dev)
+{
+ struct leapraid_scan_dev_req *scan_dev_req;
+ struct leapraid_scan_dev_rep *scan_dev_rep;
+ u16 adapter_status;
+ int rc = 0;
+
+ dev_info(&adapter->pdev->dev,
+ "send device scan, async_scan_dev=%d!\n", async_scan_dev);
+
+ adapter->driver_cmds.scan_dev_cmd.status = LEAPRAID_CMD_PENDING;
+ adapter->driver_cmds.scan_dev_cmd.async_scan_dev = async_scan_dev;
+ scan_dev_req = leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.scan_dev_cmd.inter_taskid);
+ memset(scan_dev_req, 0, sizeof(struct leapraid_scan_dev_req));
+ scan_dev_req->func = LEAPRAID_FUNC_SCAN_DEV;
+
+ if (async_scan_dev) {
+ adapter->scan_dev_desc.first_scan_dev_fired = true;
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.scan_dev_cmd.inter_taskid);
+ return 0;
+ }
+
+ init_completion(&adapter->driver_cmds.scan_dev_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.scan_dev_cmd.inter_taskid);
+ wait_for_completion_timeout(&adapter->driver_cmds.scan_dev_cmd.done,
+ LEAPRAID_SCAN_DEV_CMD_TIMEOUT * HZ);
+ if (!(adapter->driver_cmds.scan_dev_cmd.status & LEAPRAID_CMD_DONE)) {
+ dev_err(&adapter->pdev->dev, "device scan timeout!\n");
+ if (adapter->driver_cmds.scan_dev_cmd.status &
+ LEAPRAID_CMD_RESET)
+ rc = -EFAULT;
+ else
+ rc = -ETIME;
+ goto out;
+ }
+
+ scan_dev_rep = (void *)(&adapter->driver_cmds.scan_dev_cmd.reply);
+ adapter_status =
+ le16_to_cpu(scan_dev_rep->adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+ if (adapter_status != LEAPRAID_ADAPTER_STATUS_SUCCESS) {
+ dev_err(&adapter->pdev->dev, "device scan failure!\n");
+ rc = -EFAULT;
+ }
+
+out:
+ adapter->driver_cmds.scan_dev_cmd.status = LEAPRAID_CMD_NOT_USED;
+ dev_info(&adapter->pdev->dev,
+ "device scan %s\n", ((rc == 0) ? "SUCCESS" : "FAILED"));
+ return rc;
+}
+
+static void leapraid_init_task_tracker(struct leapraid_adapter *adapter)
+{
+ unsigned long flags;
+ u16 taskid;
+ int i;
+
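+	/* taskids start at 1; 0 is never handed out for SCSI I/O. */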
+ spin_lock_irqsave(&adapter->dynamic_task_desc.task_lock, flags);
+ taskid = 1;
+ for (i = 0; i < adapter->shost->can_queue; i++, taskid++) {
+ adapter->mem_desc.io_tracker[i].taskid = taskid;
+ adapter->mem_desc.io_tracker[i].scmd = NULL;
+ }
+
+ spin_unlock_irqrestore(&adapter->dynamic_task_desc.task_lock, flags);
+}
+
+static void leapraid_init_rep_msg_addr(struct leapraid_adapter *adapter)
+{
+ u32 reply_address;
+ unsigned int i;
+
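+	/*
+	 * Seed the reply free queue: entry i carries the low 32 bits of the
+	 * DMA address of reply frame i (frames are LEAPRAID_REPLY_SIEZ bytes
+	 * apart; the high 32 bits were passed once in the adapter init
+	 * request).
+	 */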
+ for (i = 0, reply_address = (u32)adapter->mem_desc.rep_msg_dma;
+ i < adapter->adapter_attr.rep_msg_qd;
+ i++, reply_address += LEAPRAID_REPLY_SIEZ) {
+ adapter->mem_desc.rep_msg_addr[i] = cpu_to_le32(reply_address);
+ }
+}
+
+static void init_rep_desc(struct leapraid_rq *rq, int index,
+ union leapraid_rep_desc_union *reply_post_free_contig)
+{
+ struct leapraid_adapter *adapter = rq->adapter;
+ unsigned int i;
+
+ if (!reset_devices)
+ rq->rep_desc =
+ adapter->mem_desc
+ .rep_desc_seg_maint[index /
+ LEAPRAID_REP_DESC_CHUNK_SIZE]
+ .rep_desc_maint[index %
+ LEAPRAID_REP_DESC_CHUNK_SIZE]
+ .rep_desc;
+ else
+ rq->rep_desc = reply_post_free_contig;
+
+ rq->rep_post_host_idx = 0;
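+	/* An all-ones descriptor is treated as empty/not yet posted. */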
+ for (i = 0; i < adapter->adapter_attr.rep_desc_qd; i++)
+ rq->rep_desc[i].words = cpu_to_le64(ULLONG_MAX);
+}
+
+static void leapraid_init_rep_desc(struct leapraid_adapter *adapter)
+{
+ union leapraid_rep_desc_union *reply_post_free_contig;
+ struct leapraid_int_rq *int_rq;
+ struct leapraid_blk_mq_poll_rq *blk_mq_poll_rq;
+ unsigned int i;
+ int index;
+
+ index = 0;
+ reply_post_free_contig = adapter->mem_desc
+ .rep_desc_seg_maint[0]
+ .rep_desc_maint[0]
+ .rep_desc;
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qdex; i++) {
+ int_rq = &adapter->notification_desc.int_rqs[i];
+ init_rep_desc(&int_rq->rq, index, reply_post_free_contig);
+ if (!reset_devices)
+ index++;
+ else
+ reply_post_free_contig +=
+ adapter->adapter_attr.rep_desc_qd;
+ }
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qcnt; i++) {
+ blk_mq_poll_rq = &adapter->notification_desc.blk_mq_poll_rqs[i];
+ init_rep_desc(&blk_mq_poll_rq->rq,
+ index, reply_post_free_contig);
+ if (!reset_devices)
+ index++;
+ else
+ reply_post_free_contig +=
+ adapter->adapter_attr.rep_desc_qd;
+ }
+}
+
+static void leapraid_init_bar_idx_regs(struct leapraid_adapter *adapter)
+{
+ struct leapraid_int_rq *int_rq;
+ struct leapraid_blk_mq_poll_rq *blk_mq_poll_rq;
+ unsigned int i, j;
+
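+	/*
+	 * Publish the initial reply-message host index, then program each
+	 * queue's MSI-X index (masked to its group of
+	 * LEAPRAID_MSIX_GROUP_SIZE) into the reply post host index registers.
+	 */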
+ adapter->rep_msg_host_idx = adapter->adapter_attr.rep_msg_qd - 1;
+ writel(adapter->rep_msg_host_idx,
+ &adapter->iomem_base->rep_msg_host_idx);
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qdex; i++) {
+ int_rq = &adapter->notification_desc.int_rqs[i];
+ for (j = 0; j < REP_POST_HOST_IDX_REG_CNT; j++)
+ writel((int_rq->rq.msix_idx & 7) <<
+ LEAPRAID_RPHI_MSIX_IDX_SHIFT,
+ &adapter->iomem_base->rep_post_reg_idx[j].idx);
+ }
+
+ for (i = 0; i < adapter->notification_desc.iopoll_qcnt; i++) {
+ blk_mq_poll_rq =
+ &adapter->notification_desc.blk_mq_poll_rqs[i];
+ for (j = 0; j < REP_POST_HOST_IDX_REG_CNT; j++)
+ writel((blk_mq_poll_rq->rq.msix_idx & 7) <<
+ LEAPRAID_RPHI_MSIX_IDX_SHIFT,
+ &adapter->iomem_base->rep_post_reg_idx[j].idx);
+ }
+}
+
+static int leapraid_make_adapter_available(struct leapraid_adapter *adapter)
+{
+ int rc = 0;
+
+ leapraid_init_task_tracker(adapter);
+ leapraid_init_rep_msg_addr(adapter);
+
+ if (adapter->scan_dev_desc.driver_loading)
+ leapraid_configure_reply_queue_affinity(adapter);
+
+ leapraid_init_rep_desc(adapter);
+ rc = leapraid_send_adapter_init(adapter);
+ if (rc)
+ return rc;
+
+ leapraid_init_bar_idx_regs(adapter);
+ leapraid_unmask_int(adapter);
+ rc = leapraid_cfg_pages(adapter);
+ if (rc)
+ return rc;
+
+ rc = leapraid_evt_notify(adapter);
+ if (rc)
+ return rc;
+
+ if (!adapter->access_ctrl.shost_recovering) {
+ adapter->scan_dev_desc.wait_scan_dev_done = true;
+ return 0;
+ }
+
+	return leapraid_scan_dev(adapter, false);
+}
+
+int leapraid_ctrl_init(struct leapraid_adapter *adapter)
+{
+ u32 cap;
+ int rc = 0;
+
+ rc = leapraid_set_pcie_and_notification(adapter);
+ if (rc)
+ goto out_free_resources;
+
+ pci_set_drvdata(adapter->pdev, adapter->shost);
+
+ pcie_capability_read_dword(adapter->pdev, PCI_EXP_DEVCAP, &cap);
+
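+	/*
+	 * Enable PCIe extended tags when the device advertises them,
+	 * allowing more outstanding requests in flight.
+	 */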
+ if (cap & PCI_EXP_DEVCAP_EXT_TAG) {
+ pcie_capability_set_word(adapter->pdev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_EXT_TAG);
+ }
+
+ rc = leapraid_make_adapter_ready(adapter, PART_RESET);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "make adapter ready failure\n");
+ goto out_free_resources;
+ }
+
+ rc = leapraid_get_adapter_features(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "get adapter feature failure\n");
+ goto out_free_resources;
+ }
+
+ rc = leapraid_fw_log_init(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "fw log init failure\n");
+ goto out_free_resources;
+ }
+
+ rc = leapraid_request_host_memory(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "request host memory failure\n");
+ goto out_free_resources;
+ }
+
+ init_waitqueue_head(&adapter->reset_desc.reset_wait_queue);
+
+ rc = leapraid_alloc_dev_topo_bitmaps(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "alloc topo bitmaps failure\n");
+ goto out_free_resources;
+ }
+
+ rc = leapraid_init_driver_cmds(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev, "init driver cmds failure\n");
+ goto out_free_resources;
+ }
+
+ leapraid_init_event_mask(adapter);
+
+ rc = leapraid_make_adapter_available(adapter);
+ if (rc) {
+ dev_err(&adapter->pdev->dev,
+ "make adapter available failure\n");
+ goto out_free_resources;
+ }
+ return 0;
+
+out_free_resources:
+ adapter->access_ctrl.host_removing = true;
+ leapraid_fw_log_exit(adapter);
+ leapraid_disable_controller(adapter);
+ leapraid_free_host_memory(adapter);
+ leapraid_free_dev_topo_bitmaps(adapter);
+ pci_set_drvdata(adapter->pdev, NULL);
+ return rc;
+}
+
+void leapraid_remove_ctrl(struct leapraid_adapter *adapter)
+{
+ leapraid_check_scheduled_fault_stop(adapter);
+ leapraid_fw_log_stop(adapter);
+ leapraid_fw_log_exit(adapter);
+ leapraid_disable_controller(adapter);
+ leapraid_free_host_memory(adapter);
+ leapraid_free_dev_topo_bitmaps(adapter);
+ leapraid_free_enc_list(adapter);
+ pci_set_drvdata(adapter->pdev, NULL);
+}
+
+void leapraid_free_internal_scsi_cmd(struct leapraid_adapter *adapter)
+{
+ mutex_lock(&adapter->driver_cmds.driver_scsiio_cmd.mutex);
+ kfree(adapter->driver_cmds.internal_scmd);
+ adapter->driver_cmds.internal_scmd = NULL;
+ mutex_unlock(&adapter->driver_cmds.driver_scsiio_cmd.mutex);
+}
diff --git a/drivers/scsi/leapraid/leapraid_func.h b/drivers/scsi/leapraid/leapraid_func.h
new file mode 100644
index 000000000000..9f42763bda72
--- /dev/null
+++ b/drivers/scsi/leapraid/leapraid_func.h
@@ -0,0 +1,1423 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 LeapIO Tech Inc.
+ *
+ * LeapRAID Storage and RAID Controller driver.
+ */
+
+#ifndef LEAPRAID_FUNC_H_INCLUDED
+#define LEAPRAID_FUNC_H_INCLUDED
+
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/poll.h>
+#include <linux/errno.h>
+#include <linux/ktime.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsicam.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_sas.h>
+
+#include "leapraid.h"
+
+/* request and reply buffer sizes */
+#define LEAPRAID_REQUEST_SIZE 128
+#define LEAPRAID_REPLY_SIEZ 128
+#define LEAPRAID_CHAIN_SEG_SIZE 128
+#define LEAPRAID_MAX_SGES_IN_CHAIN 7
+#define LEAPRAID_DEFAULT_CHAINS_PER_IO 19
+#define LEAPRAID_DEFAULT_DIX_CHAINS_PER_IO \
+ (2 * LEAPRAID_DEFAULT_CHAINS_PER_IO) /* TODO DIX */
+#define LEAPRAID_IEEE_SGE64_ENTRY_SIZE 16
+#define LEAPRAID_REP_DESC_CHUNK_SIZE 16
+#define LEAPRAID_REP_DESC_ENTRY_SIZE 8
+#define LEAPRAID_REP_MSG_ADDR_SIZE 4
+#define LEAPRAID_REP_RQ_CNT_SIZE 16
+
+#define LEAPRAID_SYS_LOG_BUF_SIZE 0x200000
+#define LEAPRAID_SYS_LOG_BUF_RESERVE 0x1000
+
+/* Driver version and name */
+#define LEAPRAID_DRIVER_NAME "LeapRaid"
+#define LEAPRAID_NAME_LENGTH 48
+#define LEAPRAID_AUTHOR "LeapIO Inc."
+#define LEAPRAID_DESCRIPTION "LeapRaid Driver"
+#define LEAPRAID_DRIVER_VERSION "2.00.00.05"
+#define LEAPRAID_MAJOR_VERSION 2
+#define LEAPRAID_MINOR_VERSION 00
+#define LEAPRAID_BUILD_VERSION 00
+#define LEAPRAID_RELEASE_VERSION 05
+
+/* Device ID */
+#define LEAPRAID_VENDOR_ID 0xD405
+#define LEAPRAID_DEVID_HBA 0x8200
+#define LEAPRAID_DEVID_RAID 0x8201
+
+#define LEAPRAID_PCI_VENDOR_ID_MASK 0xFFFF
+
+/* RAID virtual channel ID */
+#define RAID_CHANNEL 1
+
+/* Scatter/Gather (SG) segment limits */
+#define LEAPRAID_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE
+
+#define LEAPRAID_KDUMP_MIN_PHYS_SEGMENTS 32
+#define LEAPRAID_SG_DEPTH LEAPRAID_MAX_PHYS_SEGMENTS
+
+/* firmware / config page operations */
+#define LEAPRAID_SET_PARAMETER_SYNC_TIMESTAMP 0x81
+#define LEAPRAID_CFG_REQ_RETRY_TIMES 2
+
+/* Hardware access helpers */
+#define leapraid_readl(addr) readl(addr)
+#define leapraid_check_reset(status) \
+ (!((status) & LEAPRAID_CMD_RESET))
+
+/* Polling intervals */
+#define LEAPRAID_PCIE_LOG_POLLING_INTERVAL 1
+#define LEAPRAID_FAULT_POLLING_INTERVAL 1000
+#define LEAPRAID_TIMESTAMP_SYNC_INTERVAL 900
+#define LEAPRAID_SMART_POLLING_INTERVAL (300 * 1000)
+
+/* init mask */
+#define LEAPRAID_RESET_IRQ_MASK 0x40000000
+#define LEAPRAID_REPLY_INT_MASK 0x00000008
+#define LEAPRAID_TO_SYS_DB_MASK 0x00000001
+
+/* queue depth */
+#define LEAPRAID_SATA_QUEUE_DEPTH 32
+#define LEAPRAID_SAS_QUEUE_DEPTH 254
+#define LEAPRAID_RAID_QUEUE_DEPTH 128
+
+/* SCSI device and queue limits */
+#define LEAPRAID_MAX_SECTORS 8192
+#define LEAPRAID_DEF_MAX_SECTORS 32767
+#define LEAPRAID_MAX_CDB_LEN 32
+#define LEAPRAID_MAX_LUNS 16384
+#define LEAPRAID_CAN_QUEUE_MIN 1
+#define LEAPRAID_THIS_ID_NONE -1
+#define LEAPRAID_CMD_PER_LUN 128
+#define LEAPRAID_MAX_SEGMENT_SIZE 0xffffffff
+
+/* SCSI sense and ASC/ASCQ and disk geometry configuration */
+#define DESC_FORMAT_THRESHOLD 0x72
+#define SENSE_KEY_MASK 0x0F
+#define SCSI_SENSE_RESPONSE_CODE_MASK 0x7F
+#define ASC_FAILURE_PREDICTION_THRESHOLD_EXCEEDED 0x5D
+#define LEAPRAID_LARGE_DISK_THRESHOLD 0x200000UL /* in sectors, 1GB */
+#define LEAPRAID_LARGE_DISK_HEADS 255
+#define LEAPRAID_LARGE_DISK_SECTORS 63
+#define LEAPRAID_SMALL_DISK_HEADS 64
+#define LEAPRAID_SMALL_DISK_SECTORS 32
+
+/* SMP (Serial Management Protocol) */
+#define LEAPRAID_SMP_PT_FLAG_SGL_PTR 0x80
+#define LEAPRAID_SMP_FN_REPORT_PHY_ERR_LOG 0x91
+#define LEAPRAID_SMP_FRAME_HEADER_SIZE 4
+#define LEAPRAID_SCSI_HOST_SHIFT 16
+#define LEAPRAID_SCSI_DRIVER_SHIFT 24
+
+/* SCSI ASC/ASCQ definitions */
+#define LEAPRAID_SCSI_ASCQ_DEFAULT 0x00
+#define LEAPRAID_SCSI_ASC_POWER_ON_RESET 0x29
+#define LEAPRAID_SCSI_ASC_INVALID_CMD_CODE 0x20
+#define LEAPRAID_SCSI_ASCQ_POWER_ON_RESET 0x07
+
+/* ---- VPD Page 0x89 (ATA Information) ---- */
+#define LEAPRAID_VPD_PAGE_ATA_INFO 0x89
+#define LEAPRAID_VPD_PG89_MAX_LEN 255
+#define LEAPRAID_VPD_PG89_MIN_LEN 214
+
+/* Byte index for NCQ support flag in VPD Page 0x89 */
+#define LEAPRAID_VPD_PG89_NCQ_BYTE_IDX 213
+#define LEAPRAID_VPD_PG89_NCQ_BIT_SHIFT 4
+#define LEAPRAID_VPD_PG89_NCQ_BIT_MASK 0x1
+
+/* readiness polling: max retries, sleep µs between */
+#define LEAPRAID_ADAPTER_READY_MAX_RETRY 15000
+#define LEAPRAID_ADAPTER_READY_SLEEP_MIN_US 1000
+#define LEAPRAID_ADAPTER_READY_SLEEP_MAX_US 1100
+
+/* Doorbell wait parameters */
+#define LEAPRAID_DB_WAIT_MAX_RETRY 20000
+#define LEAPRAID_DB_WAIT_DELAY_US 500
+
+/* Basic data size definitions */
+#define LEAPRAID_DWORDS_BYTE_SIZE 4
+#define LEAPRAID_WORD_BYTE_SIZE 2
+
+/* SGL threshold and chain offset */
+#define LEAPRAID_SGL_INLINE_THRESHOLD 2
+#define LEAPRAID_CHAIN_OFFSET_DWORDS 7
+
+/* MSI-X group size and mask */
+#define LEAPRAID_MSIX_GROUP_SIZE 8
+#define LEAPRAID_MSIX_GROUP_MASK 7
+
+/* basic constants and limits */
+#define LEAPRAID_BUSY_LIMIT 1
+#define LEAPRAID_INDEX_FIRST 0
+#define LEAPRAID_BITS_PER_BYTE 8
+#define LEAPRAID_INVALID_HOST_DIAG_VAL 0xFFFFFFFF
+
+/* retry / sleep configuration */
+#define LEAPRAID_UNLOCK_RETRY_LIMIT 20
+#define LEAPRAID_UNLOCK_SLEEP_MS 100
+#define LEAPRAID_MSLEEP_SHORT_MS 50
+#define LEAPRAID_MSLEEP_NORMAL_MS 100
+#define LEAPRAID_MSLEEP_LONG_MS 256
+#define LEAPRAID_MSLEEP_EXTRA_LONG_MS 500
+#define LEAPRAID_IO_POLL_DELAY_US 500
+
+/* controller reset loop parameters */
+#define LEAPRAID_RESET_LOOP_COUNT_REF (300000 / 256)
+#define LEAPRAID_RESET_LOOP_COUNT_DEFAULT 10000
+#define LEAPRAID_RESET_POLL_INTERVAL_MS 500
+
+/* Device / Volume configuration */
+#define LEAPRAID_MAX_VOLUMES_DEFAULT 32
+#define LEAPRAID_MAX_DEV_HANDLE_DEFAULT 2048
+#define LEAPRAID_INVALID_DEV_HANDLE 0xFFFF
+
+/* cmd queue depth */
+#define LEAPRAID_COALESCING_DEPTH_MAX 256
+#define LEAPRAID_DEFAULT_CMD_QD_OFFSET 64
+#define LEAPRAID_REPLY_QD_ALIGNMENT 16
+/* task id offset */
+#define LEAPRAID_TASKID_OFFSET_CTRL_CMD 1
+#define LEAPRAID_TASKID_OFFSET_SCSIIO_CMD 2
+#define LEAPRAID_TASKID_OFFSET_CFG_OP_CMD 1
+#define LEAPRAID_TASKID_OFFSET_TRANSPORT_CMD 2
+#define LEAPRAID_TASKID_OFFSET_TIMESTAMP_SYNC_CMD 3
+#define LEAPRAID_TASKID_OFFSET_RAID_ACTION_CMD 4
+#define LEAPRAID_TASKID_OFFSET_ENC_CMD 5
+#define LEAPRAID_TASKID_OFFSET_NOTIFY_EVENT_CMD 6
+
+/* task id offset for high-priority */
+#define LEAPRAID_HP_TASKID_OFFSET_CTL_CMD 0
+#define LEAPRAID_HP_TASKID_OFFSET_TM_CMD 1
+
+/* Event / Boot configuration */
+#define LEAPRAID_EVT_MASK_COUNT 4
+#define LEAPRAID_BOOT_DEV_SIZE 24
+
+/* logsense command definitions */
+#define LEAPRAID_LOGSENSE_DATA_LENGTH 16
+#define LEAPRAID_LOGSENSE_CDB_LENGTH 10
+#define LEAPRAID_LOGSENSE_CDB_CODE 0x6F
+#define LEAPRAID_LOGSENSE_TIMEOUT 5
+#define LEAPRAID_LOGSENSE_SMART_CODE 0x5D
+
+/* command timeouts, in seconds (multiplied by HZ at the call sites) */
+#define LEAPRAID_DRIVER_SCSIIO_CMD_TIMEOUT LEAPRAID_LOGSENSE_TIMEOUT
+#define LEAPRAID_CFG_OP_TIMEOUT 15
+#define LEAPRAID_CTL_CMD_TIMEOUT 10
+#define LEAPRAID_SCAN_DEV_CMD_TIMEOUT 300
+#define LEAPRAID_TIMESTAMP_SYNC_CMD_TIMEOUT 10
+#define LEAPRAID_RAID_ACTION_CMD_TIMEOUT 10
+#define LEAPRAID_ENC_CMD_TIMEOUT 10
+#define LEAPRAID_NOTIFY_EVENT_CMD_TIMEOUT 30
+#define LEAPRAID_TM_CMD_TIMEOUT 30
+#define LEAPRAID_TRANSPORT_CMD_TIMEOUT 10
+
+/**
+ * struct leapraid_adapter_features - Features and
+ * capabilities of a LeapRAID adapter
+ *
+ * @req_slot: Number of request slots supported by the adapter
+ * @hp_slot: Number of high-priority slots supported by the adapter
+ * @adapter_caps: Adapter capabilities
+ * @fw_version: Firmware version of the adapter
+ * @max_volumes: Maximum number of RAID volumes supported by the adapter
+ * @max_dev_handle: Highest device handle supported by the adapter
+ * @min_dev_handle: Lowest device handle supported by the adapter
+ */
+struct leapraid_adapter_features {
+ u16 req_slot;
+ u16 hp_slot;
+ u32 adapter_caps;
+ u32 fw_version;
+ u8 max_volumes;
+ u16 max_dev_handle;
+ u16 min_dev_handle;
+};
+
+/**
+ * struct leapraid_adapter_attr - Adapter attributes and capabilities
+ *
+ * @id: Adapter identifier
+ * @raid_support: Indicates if RAID is supported
+ * @bios_version: Version of the adapter BIOS
+ * @enable_mp: Indicates if multipath (MP) support is enabled
+ * @wideport_max_queue_depth: Maximum queue depth for wide ports
+ * @narrowport_max_queue_depth: Maximum queue depth for narrow ports
+ * @sata_max_queue_depth: Maximum queue depth for SATA
+ * @features: Detailed features of the adapter
+ * @adapter_total_qd: Total queue depth available on the adapter
+ * @io_qd: Queue depth allocated for I/O operations
+ * @rep_msg_qd: Queue depth for reply messages
+ * @rep_desc_qd: Queue depth for reply descriptors
+ * @rep_desc_q_seg_cnt: Number of segments in a reply descriptor queue
+ * @rq_cnt: Number of request queues
+ * @task_desc_dma_size: Size of task descriptor DMA memory
+ * @use_32_dma_mask: Indicates if 32-bit DMA mask is used
+ * @name: Adapter name string
+ */
+struct leapraid_adapter_attr {
+ u8 id;
+ bool raid_support;
+ u32 bios_version;
+ bool enable_mp;
+ u32 wideport_max_queue_depth;
+ u32 narrowport_max_queue_depth;
+ u32 sata_max_queue_depth;
+ struct leapraid_adapter_features features;
+ u32 adapter_total_qd;
+ u32 io_qd;
+ u32 rep_msg_qd;
+ u32 rep_desc_qd;
+ u32 rep_desc_q_seg_cnt;
+ u16 rq_cnt;
+ u32 task_desc_dma_size;
+ bool use_32_dma_mask;
+ char name[LEAPRAID_NAME_LENGTH];
+};
+
+/**
+ * struct leapraid_io_req_tracker - Track a SCSI I/O request
+ * for the adapter
+ *
+ * @taskid: Unique task ID for this I/O request
+ * @scmd: Pointer to the associated SCSI command
+ * @chain_list: List of chain frames associated with this request
+ * @msix_io: MSI-X vector assigned to this I/O request
+ * @chain: Pointer to the chain memory for this request
+ * @chain_dma: DMA address of the chain memory
+ */
+struct leapraid_io_req_tracker {
+ u16 taskid;
+ struct scsi_cmnd *scmd;
+ struct list_head chain_list;
+ u16 msix_io;
+ void *chain;
+ dma_addr_t chain_dma;
+};
+
+/**
+ * struct leapraid_task_tracker - Tracks a task in the adapter
+ *
+ * @taskid: Unique task ID for this tracker
+ * @cb_idx: Callback index associated with this task
+ * @tracker_list: Linked list node to chain this tracker in lists
+ */
+struct leapraid_task_tracker {
+ u16 taskid;
+ u8 cb_idx;
+ struct list_head tracker_list;
+};
+
+/**
+ * struct leapraid_rep_desc_maint - Maintains reply descriptor
+ * memory
+ *
+ * @rep_desc: Pointer to the reply descriptor
+ * @rep_desc_dma: DMA address of the reply descriptor
+ */
+struct leapraid_rep_desc_maint {
+ union leapraid_rep_desc_union *rep_desc;
+ dma_addr_t rep_desc_dma;
+};
+
+/**
+ * struct leapraid_rep_desc_seg_maint - Maintains reply descriptor
+ * segment memory
+ *
+ * @rep_desc_seg: Pointer to the reply descriptor segment
+ * @rep_desc_seg_dma: DMA address of the reply descriptor segment
+ * @rep_desc_maint: Pointer to the main reply descriptor structure
+ */
+struct leapraid_rep_desc_seg_maint {
+ void *rep_desc_seg;
+ dma_addr_t rep_desc_seg_dma;
+ struct leapraid_rep_desc_maint *rep_desc_maint;
+};
+
+/**
+ * struct leapraid_mem_desc - Memory descriptor for LeapRaid adapter
+ *
+ * @task_desc: Pointer to task descriptor
+ * @task_desc_dma: DMA address of task descriptor
+ * @sg_chain_pool: DMA pool for SGL chain allocations
+ * @sg_chain_pool_size: Size of the sg_chain_pool
+ * @io_tracker: IO request tracker array
+ * @sense_data: Buffer for SCSI sense data
+ * @sense_data_dma: DMA address of sense_data buffer
+ * @rep_msg: Buffer for reply message
+ * @rep_msg_dma: DMA address of reply message buffer
+ * @rep_msg_addr: Pointer to reply message address
+ * @rep_msg_addr_dma: DMA address of reply message address
+ * @rep_desc_seg_maint: Pointer to reply descriptor segment
+ * @rep_desc_q_arr: Pointer to reply descriptor queue array
+ * @rep_desc_q_arr_dma: DMA address of reply descriptor queue array
+ */
+struct leapraid_mem_desc {
+ void *task_desc;
+ dma_addr_t task_desc_dma;
+ struct dma_pool *sg_chain_pool;
+ u16 sg_chain_pool_size;
+ struct leapraid_io_req_tracker *io_tracker;
+ u8 *sense_data;
+ dma_addr_t sense_data_dma;
+ u8 *rep_msg;
+ dma_addr_t rep_msg_dma;
+ __le32 *rep_msg_addr;
+ dma_addr_t rep_msg_addr_dma;
+ struct leapraid_rep_desc_seg_maint *rep_desc_seg_maint;
+ struct leapraid_rep_desc_q_arr *rep_desc_q_arr;
+ dma_addr_t rep_desc_q_arr_dma;
+};
+
+#define LEAPRAID_FIXED_INTER_CMDS 7
+#define LEAPRAID_FIXED_HP_CMDS 2
+#define LEAPRAID_INTER_HP_CMDS_DIF \
+ (LEAPRAID_FIXED_INTER_CMDS - LEAPRAID_FIXED_HP_CMDS)
+
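+/* driver command state flags, combined in leapraid_driver_cmd.status */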
+#define LEAPRAID_CMD_NOT_USED 0x8000
+#define LEAPRAID_CMD_DONE 0x0001
+#define LEAPRAID_CMD_PENDING 0x0002
+#define LEAPRAID_CMD_REPLY_VALID 0x0004
+#define LEAPRAID_CMD_RESET 0x0008
+
+/**
+ * enum LEAPRAID_CB_INDEX - Callback index for LeapRaid driver
+ *
+ * @LEAPRAID_SCAN_DEV_CB_IDX: Scan device callback index
+ * @LEAPRAID_CONFIG_CB_IDX: Configuration callback index
+ * @LEAPRAID_TRANSPORT_CB_IDX: Transport callback index
+ * @LEAPRAID_TIMESTAMP_SYNC_CB_IDX: Timestamp sync callback index
+ * @LEAPRAID_RAID_ACTION_CB_IDX: RAID action callback index
+ * @LEAPRAID_DRIVER_SCSIIO_CB_IDX: Driver SCSI I/O callback index
+ * @LEAPRAID_SAS_CTRL_CB_IDX: SAS controller callback index
+ * @LEAPRAID_ENC_CB_IDX: Enclosure callback index
+ * @LEAPRAID_NOTIFY_EVENT_CB_IDX: Notify event callback index
+ * @LEAPRAID_CTL_CB_IDX: Control callback index
+ * @LEAPRAID_TM_CB_IDX: Task management callback index
+ */
+enum LEAPRAID_CB_INDEX {
+ LEAPRAID_SCAN_DEV_CB_IDX = 0x1,
+ LEAPRAID_CONFIG_CB_IDX = 0x2,
+ LEAPRAID_TRANSPORT_CB_IDX = 0x3,
+ LEAPRAID_TIMESTAMP_SYNC_CB_IDX = 0x4,
+ LEAPRAID_RAID_ACTION_CB_IDX = 0x5,
+ LEAPRAID_DRIVER_SCSIIO_CB_IDX = 0x6,
+ LEAPRAID_SAS_CTRL_CB_IDX = 0x7,
+ LEAPRAID_ENC_CB_IDX = 0x8,
+ LEAPRAID_NOTIFY_EVENT_CB_IDX = 0x9,
+ LEAPRAID_CTL_CB_IDX = 0xA,
+ LEAPRAID_TM_CB_IDX = 0xB,
+ LEAPRAID_NUM_CB_IDXS
+};
+
+struct leapraid_default_reply {
+ u8 pad[LEAPRAID_REPLY_SIEZ];
+};
+
+struct leapraid_sense_buffer {
+ u8 pad[SCSI_SENSE_BUFFERSIZE];
+};
+
+/**
+ * struct leapraid_driver_cmd - Driver command tracking structure
+ *
+ * @reply: Default reply structure returned by the adapter
+ * @done: Completion object used to signal command completion
+ * @status: Status code returned by the firmware
+ * @taskid: Unique task identifier for this command
+ * @hp_taskid: Task identifier for high-priority commands
+ * @inter_taskid: Task identifier for internal commands
+ * @cb_idx: Callback index used to identify completion context
+ * @async_scan_dev: True if this command is for asynchronous device scan
+ * @sense: Sense buffer holding error information from device
+ * @mutex: Mutex to protect access to this command structure
+ * @list: List node for linking driver commands into lists
+ */
+struct leapraid_driver_cmd {
+ struct leapraid_default_reply reply;
+ struct completion done;
+ u16 status;
+ u16 taskid;
+ u16 hp_taskid;
+ u16 inter_taskid;
+ u8 cb_idx;
+ bool async_scan_dev;
+ struct leapraid_sense_buffer sense;
+ struct mutex mutex;
+ struct list_head list;
+};
+
+/**
+ * struct leapraid_driver_cmds - Collection of driver command objects
+ *
+ * @special_cmd_list: List head for tracking special driver commands
+ * @scan_dev_cmd: Command used for asynchronous device scan operations
+ * @cfg_op_cmd: Command for configuration operations
+ * @transport_cmd: Command for transport-level operations
+ * @timestamp_sync_cmd: Command for synchronizing timestamp with firmware
+ * @raid_action_cmd: Command for RAID-related management or action requests
+ * @driver_scsiio_cmd: Command used for internal SCSI I/O processing
+ * @enc_cmd: Command for enclosure management operations
+ * @notify_event_cmd: Command for asynchronous event notification handling
+ * @ctl_cmd: Command for generic control or maintenance operations
+ * @tm_cmd: Task management command
+ * @internal_scmd: Pointer to internal SCSI command used by the driver
+ */
+struct leapraid_driver_cmds {
+ struct list_head special_cmd_list;
+ struct leapraid_driver_cmd scan_dev_cmd;
+ struct leapraid_driver_cmd cfg_op_cmd;
+ struct leapraid_driver_cmd transport_cmd;
+ struct leapraid_driver_cmd timestamp_sync_cmd;
+ struct leapraid_driver_cmd raid_action_cmd;
+ struct leapraid_driver_cmd driver_scsiio_cmd;
+ struct leapraid_driver_cmd enc_cmd;
+ struct leapraid_driver_cmd notify_event_cmd;
+ struct leapraid_driver_cmd ctl_cmd;
+ struct leapraid_driver_cmd tm_cmd;
+ struct scsi_cmnd *internal_scmd;
+};
+
+/**
+ * struct leapraid_dynamic_task_desc - Dynamic task descriptor
+ *
+ * @task_lock: Spinlock to protect concurrent access
+ * @hp_taskid: Current high-priority task ID
+ * @hp_cmd_qd: Fixed command queue depth for high-priority tasks
+ * @inter_taskid: Current internal task ID
+ * @inter_cmd_qd: Fixed command queue depth for internal tasks
+ */
+struct leapraid_dynamic_task_desc {
+ spinlock_t task_lock;
+ u16 hp_taskid;
+ u16 hp_cmd_qd;
+ u16 inter_taskid;
+ u16 inter_cmd_qd;
+};
+
+/**
+ * struct leapraid_fw_evt_work - Firmware event work structure
+ *
+ * @list: Linked list node for queuing the work
+ * @adapter: Pointer to the associated LeapRaid adapter
+ * @work: Work structure used by the kernel workqueue
+ * @refcnt: Reference counter for managing the lifetime of this work
+ * @evt_data: Pointer to firmware event data
+ * @dev_handle: Device handle associated with the event
+ * @evt_type: Type of firmware event
+ * @ignore: Flag indicating whether the event should be ignored
+ */
+struct leapraid_fw_evt_work {
+ struct list_head list;
+ struct leapraid_adapter *adapter;
+ struct work_struct work;
+ struct kref refcnt;
+ void *evt_data;
+ u16 dev_handle;
+ u16 evt_type;
+ u8 ignore;
+};
+
+/**
+ * struct leapraid_fw_evt_struct - Firmware event handling structure
+ *
+ * @fw_evt_name: Name of the firmware event
+ * @fw_evt_thread: Workqueue used for processing firmware events
+ * @fw_evt_lock: Spinlock protecting access to the firmware event list
+ * @fw_evt_list: Linked list of pending firmware events
+ * @cur_evt: Pointer to the currently processing firmware event
+ * @fw_evt_cleanup: Flag indicating whether cleanup of events is in progress
+ * @leapraid_evt_masks: Array of event masks for filtering firmware events
+ */
+struct leapraid_fw_evt_struct {
+ char fw_evt_name[48];
+ struct workqueue_struct *fw_evt_thread;
+ spinlock_t fw_evt_lock;
+ struct list_head fw_evt_list;
+ struct leapraid_fw_evt_work *cur_evt;
+ int fw_evt_cleanup;
+	u32 leapraid_evt_masks[LEAPRAID_EVT_MASK_COUNT];
+};
+
+/**
+ * struct leapraid_rq - Represents a LeapRaid request queue
+ *
+ * @adapter: Pointer to the associated LeapRaid adapter
+ * @msix_idx: MSI-X vector index used by this queue
+ * @rep_post_host_idx: Index of the last processed reply descriptor
+ * @rep_desc: Pointer to the reply descriptor associated with this queue
+ * @name: Name of the request queue
+ * @busy: Atomic counter indicating if the queue is busy
+ */
+struct leapraid_rq {
+ struct leapraid_adapter *adapter;
+ u8 msix_idx;
+ u32 rep_post_host_idx;
+ union leapraid_rep_desc_union *rep_desc;
+ char name[LEAPRAID_NAME_LENGTH];
+ atomic_t busy;
+};
+
+/**
+ * struct leapraid_int_rq - Internal request queue for a CPU
+ *
+ * @affinity_hint: CPU affinity mask for the queue
+ * @rq: Underlying LeapRaid request queue structure
+ */
+struct leapraid_int_rq {
+ cpumask_var_t affinity_hint;
+ struct leapraid_rq rq;
+};
+
+/**
+ * struct leapraid_blk_mq_poll_rq - Polling request for LeapRaid blk-mq
+ *
+ * @busy: Atomic flag indicating request is being processed
+ * @pause: Atomic flag to temporarily suspend polling
+ * @rq: The underlying LeapRaid request structure
+ */
+struct leapraid_blk_mq_poll_rq {
+ atomic_t busy;
+ atomic_t pause;
+ struct leapraid_rq rq;
+};
+
+/**
+ * struct leapraid_notification_desc - Notification
+ * descriptor for LeapRaid
+ *
+ * @iopoll_qdex: Index of the first I/O polling queue (equal to the
+ *		number of interrupt-driven queues)
+ * @iopoll_qcnt: Count of I/O polling queues
+ * @msix_enable: Flag indicating MSI-X is enabled
+ * @msix_cpu_map: CPU map for MSI-X interrupts
+ * @msix_cpu_map_sz: Size of the MSI-X CPU map
+ * @int_rqs: Array of interrupt request queues
+ * @int_rqs_allocated: Count of allocated interrupt request queues
+ * @blk_mq_poll_rqs: Array of blk-mq polling requests
+ */
+struct leapraid_notification_desc {
+ u32 iopoll_qdex;
+ u32 iopoll_qcnt;
+ bool msix_enable;
+ u8 *msix_cpu_map;
+ u32 msix_cpu_map_sz;
+ struct leapraid_int_rq *int_rqs;
+ u32 int_rqs_allocated;
+ struct leapraid_blk_mq_poll_rq *blk_mq_poll_rqs;
+};
+
+/**
+ * struct leapraid_reset_desc - Reset descriptor for LeapRaid
+ *
+ * @fault_reset_wq: Workqueue for fault reset operations
+ * @fault_reset_work: Delayed work structure for fault reset
+ * @fault_reset_wq_name: Name of the fault reset workqueue
+ * @host_diag_mutex: Mutex for host diagnostic operations
+ * @adapter_reset_lock: Spinlock for adapter reset operations
+ * @adapter_reset_mutex: Mutex for adapter reset operations
+ * @adapter_link_resetting: Flag indicating if adapter link is resetting
+ * @adapter_reset_results: Results of the adapter reset operation
+ * @pending_io_cnt: Count of pending I/O operations
+ * @reset_wait_queue: Wait queue for reset operations
+ * @reset_cnt: Counter for reset operations
+ */
+struct leapraid_reset_desc {
+ struct workqueue_struct *fault_reset_wq;
+ struct delayed_work fault_reset_work;
+ char fault_reset_wq_name[48];
+ struct mutex host_diag_mutex;
+ spinlock_t adapter_reset_lock;
+ struct mutex adapter_reset_mutex;
+ bool adapter_link_resetting;
+ int adapter_reset_results;
+ int pending_io_cnt;
+ wait_queue_head_t reset_wait_queue;
+ u32 reset_cnt;
+};
+
+/**
+ * struct leapraid_scan_dev_desc - Scan device descriptor
+ * for LeapRaid
+ *
+ * @wait_scan_dev_done: Flag indicating if scan device operation is done
+ * @driver_loading: Flag indicating if driver is loading
+ * @first_scan_dev_fired: Flag indicating if first scan device operation fired
+ * @scan_dev_failed: Flag indicating if scan device operation failed
+ * @scan_start: Flag indicating if scan operation started
+ * @scan_start_failed: Count of failed scan start operations
+ */
+struct leapraid_scan_dev_desc {
+ bool wait_scan_dev_done;
+ bool driver_loading;
+ bool first_scan_dev_fired;
+ bool scan_dev_failed;
+ bool scan_start;
+ u16 scan_start_failed;
+};
+
+/**
+ * struct leapraid_access_ctrl - Access control structure for LeapRaid
+ *
+ * @pci_access_lock: Mutex for PCI access control
+ * @adapter_thermal_alert: Flag indicating if adapter thermal alert is active
+ * @shost_recovering: Flag indicating if host is recovering
+ * @host_removing: Flag indicating if host is being removed
+ * @pcie_recovering: Flag indicating if PCIe is recovering
+ */
+struct leapraid_access_ctrl {
+ struct mutex pci_access_lock;
+ bool adapter_thermal_alert;
+ bool shost_recovering;
+ bool host_removing;
+ bool pcie_recovering;
+};
+
+/**
+ * struct leapraid_fw_log_desc - Firmware log descriptor for LeapRaid
+ *
+ * @fw_log_buffer: Buffer for firmware log data
+ * @fw_log_buffer_dma: DMA address of the firmware log buffer
+ * @fw_log_wq_name: Name of the firmware log workqueue
+ * @fw_log_wq: Workqueue for firmware log operations
+ * @fw_log_work: Delayed work structure for firmware log
+ * @open_pcie_trace: Flag indicating if PCIe tracing is open
+ * @fw_log_init_flag: Flag indicating if firmware log is initialized
+ */
+struct leapraid_fw_log_desc {
+ u8 *fw_log_buffer;
+ dma_addr_t fw_log_buffer_dma;
+ char fw_log_wq_name[48];
+ struct workqueue_struct *fw_log_wq;
+ struct delayed_work fw_log_work;
+ int open_pcie_trace;
+ int fw_log_init_flag;
+};
+
+#define LEAPRAID_CARD_PORT_FLG_DIRTY 0x01
+#define LEAPRAID_CARD_PORT_FLG_NEW 0x02
+#define LEAPRAID_DISABLE_MP_PORT_ID 0xFF
+/**
+ * struct leapraid_card_port - Card port structure for LeapRaid
+ *
+ * @list: List head for card port
+ * @vphys_list: List head for virtual phy list
+ * @port_id: Port ID
+ * @sas_address: SAS address
+ * @phy_mask: Bitmask of phys belonging to this port
+ * @vphys_mask: Bitmask of virtual phys belonging to this port
+ * @flg: Flags for the port
+ */
+struct leapraid_card_port {
+ struct list_head list;
+ struct list_head vphys_list;
+ u8 port_id;
+ u64 sas_address;
+ u32 phy_mask;
+ u32 vphys_mask;
+ u8 flg;
+};
+
+/**
+ * struct leapraid_card_phy - Card phy structure for LeapRaid
+ *
+ * @port_siblings: List head for port siblings
+ * @card_port: Pointer to the card port
+ * @identify: SAS identify structure
+ * @remote_identify: Remote SAS identify structure
+ * @phy: SAS phy structure
+ * @phy_id: Phy ID
+ * @hdl: Device handle of this phy
+ * @attached_hdl: Device handle of the attached device
+ * @phy_is_assigned: Flag indicating if phy is assigned
+ * @vphy: Flag indicating if virtual phy
+ */
+struct leapraid_card_phy {
+ struct list_head port_siblings;
+ struct leapraid_card_port *card_port;
+ struct sas_identify identify;
+ struct sas_identify remote_identify;
+ struct sas_phy *phy;
+ u8 phy_id;
+ u16 hdl;
+ u16 attached_hdl;
+ bool phy_is_assigned;
+ bool vphy;
+};
+
+/**
+ * struct leapraid_topo_node - SAS topology node for LeapRaid
+ *
+ * @list: List head for linking nodes
+ * @sas_port_list: List of SAS ports
+ * @card_port: Associated card port
+ * @card_phy: Associated card PHY
+ * @rphy: SAS remote PHY device
+ * @parent_dev: Parent device pointer
+ * @sas_address: SAS address of this node
+ * @sas_address_parent: Parent node's SAS address
+ * @phys_num: Number of physical links
+ * @hdl: Handle identifier
+ * @enc_hdl: Enclosure handle
+ * @enc_lid: Enclosure logical identifier
+ * @resp: Response status flag
+ */
+struct leapraid_topo_node {
+ struct list_head list;
+ struct list_head sas_port_list;
+ struct leapraid_card_port *card_port;
+ struct leapraid_card_phy *card_phy;
+ struct sas_rphy *rphy;
+ struct device *parent_dev;
+ u64 sas_address;
+ u64 sas_address_parent;
+ u8 phys_num;
+ u16 hdl;
+ u16 enc_hdl;
+ u64 enc_lid;
+ bool resp;
+};
+
+/**
+ * struct leapraid_dev_topo - LeapRaid device topology management structure
+ *
+ * @topo_node_lock: Spinlock for protecting topology node operations
+ * @sas_dev_lock: Spinlock for SAS device list access
+ * @raid_volume_lock: Spinlock for RAID volume list access
+ * @sas_id: SAS domain identifier
+ * @card: Main card topology node
+ * @exp_list: List of expander devices
+ * @enc_list: List of enclosure devices
+ * @sas_dev_list: List of SAS devices
+ * @sas_dev_init_list: List of SAS devices being initialized
+ * @raid_volume_list: List of RAID volumes
+ * @card_port_list: List of card ports
+ * @pd_hdls_sz: Size in bytes of the physical disk handle bitmap
+ * @pd_hdls: Bitmap of physical disk handles
+ * @blocking_hdls: Bitmap of blocking handles
+ * @pending_dev_add_sz: Size in bytes of the pending-add bitmap
+ * @pending_dev_add: Bitmap of devices pending addition
+ * @dev_removing_sz: Size in bytes of the removing bitmap
+ * @dev_removing: Bitmap of devices being removed
+ */
+struct leapraid_dev_topo {
+ spinlock_t topo_node_lock;
+ spinlock_t sas_dev_lock;
+ spinlock_t raid_volume_lock;
+ int sas_id;
+ struct leapraid_topo_node card;
+ struct list_head exp_list;
+ struct list_head enc_list;
+ struct list_head sas_dev_list;
+ struct list_head sas_dev_init_list;
+ struct list_head raid_volume_list;
+ struct list_head card_port_list;
+ u16 pd_hdls_sz;
+ void *pd_hdls;
+ void *blocking_hdls;
+ u16 pending_dev_add_sz;
+ void *pending_dev_add;
+ u16 dev_removing_sz;
+ void *dev_removing;
+};
+
+/**
+ * struct leapraid_boot_dev - Boot device structure for LeapRaid
+ *
+ * @dev: Device pointer
+ * @chnl: Channel number
+ * @form: Form factor
+ * @pg_dev: Config page device content
+ */
+struct leapraid_boot_dev {
+ void *dev;
+ u8 chnl;
+ u8 form;
+ u8 pg_dev[24];
+};
+
+/**
+ * struct leapraid_boot_devs - Boot device management structure
+ * @requested_boot_dev: Requested primary boot device
+ * @requested_alt_boot_dev: Requested alternate boot device
+ * @current_boot_dev: Currently active boot device
+ */
+struct leapraid_boot_devs {
+ struct leapraid_boot_dev requested_boot_dev;
+ struct leapraid_boot_dev requested_alt_boot_dev;
+ struct leapraid_boot_dev current_boot_dev;
+};
+
+/**
+ * struct leapraid_smart_poll_desc - SMART polling descriptor
+ * @smart_poll_wq: Workqueue for SMART polling tasks
+ * @smart_poll_work: Delayed work for SMART polling operations
+ * @smart_poll_wq_name: Workqueue name string
+ */
+struct leapraid_smart_poll_desc {
+ struct workqueue_struct *smart_poll_wq;
+ struct delayed_work smart_poll_work;
+ char smart_poll_wq_name[48];
+};
+
+/**
+ * struct leapraid_adapter - Main LeapRaid adapter structure
+ * @list: List head for adapter management
+ * @shost: SCSI host structure
+ * @pdev: PCI device structure
+ * @iomem_base: I/O memory mapped base address
+ * @rep_msg_host_idx: Host index for reply messages
+ * @mask_int: Interrupt masking flag
+ * @timestamp_sync_cnt: Timestamp synchronization counter
+ * @adapter_attr: Adapter attributes
+ * @mem_desc: Memory descriptor
+ * @driver_cmds: Driver commands
+ * @dynamic_task_desc: Dynamic task descriptor
+ * @fw_evt_s: Firmware event structure
+ * @notification_desc: Notification descriptor
+ * @reset_desc: Reset descriptor
+ * @scan_dev_desc: Device scan descriptor
+ * @access_ctrl: Access control
+ * @fw_log_desc: Firmware log descriptor
+ * @dev_topo: Device topology
+ * @boot_devs: Boot devices
+ * @smart_poll_desc: SMART polling descriptor
+ */
+struct leapraid_adapter {
+ struct list_head list;
+ struct Scsi_Host *shost;
+ struct pci_dev *pdev;
+ struct leapraid_reg_base __iomem *iomem_base;
+ u32 rep_msg_host_idx;
+ bool mask_int;
+ u32 timestamp_sync_cnt;
+
+ struct leapraid_adapter_attr adapter_attr;
+ struct leapraid_mem_desc mem_desc;
+ struct leapraid_driver_cmds driver_cmds;
+ struct leapraid_dynamic_task_desc dynamic_task_desc;
+ struct leapraid_fw_evt_struct fw_evt_s;
+ struct leapraid_notification_desc notification_desc;
+ struct leapraid_reset_desc reset_desc;
+ struct leapraid_scan_dev_desc scan_dev_desc;
+ struct leapraid_access_ctrl access_ctrl;
+ struct leapraid_fw_log_desc fw_log_desc;
+ struct leapraid_dev_topo dev_topo;
+ struct leapraid_boot_devs boot_devs;
+ struct leapraid_smart_poll_desc smart_poll_desc;
+};
+
+union cfg_param_1 {
+ u32 form;
+ u32 size;
+ u32 phy_number;
+};
+
+union cfg_param_2 {
+ u32 handle;
+ u32 form_specific;
+};
+
+enum config_page_action {
+ GET_BIOS_PG2,
+ GET_BIOS_PG3,
+ GET_SAS_DEVICE_PG0,
+ GET_SAS_IOUNIT_PG0,
+ GET_SAS_IOUNIT_PG1,
+ GET_SAS_EXPANDER_PG0,
+ GET_SAS_EXPANDER_PG1,
+ GET_SAS_ENCLOSURE_PG0,
+ GET_PHY_PG0,
+ GET_RAID_VOLUME_PG0,
+ GET_RAID_VOLUME_PG1,
+ GET_PHY_DISK_PG0,
+};
+
+/**
+ * struct leapraid_enc_node - Enclosure node structure
+ * @list: List head for enclosure management
+ * @pg0: Enclosure page 0 data
+ */
+struct leapraid_enc_node {
+ struct list_head list;
+ struct leapraid_enc_p0 pg0;
+};
+
+/**
+ * struct leapraid_raid_volume - RAID volume structure
+ * @list: List head for volume management
+ * @starget: SCSI target structure
+ * @sdev: SCSI device structure
+ * @id: Volume ID
+ * @channel: SCSI channel
+ * @wwid: World Wide Identifier
+ * @hdl: Volume handle
+ * @vol_type: Volume type
+ * @pd_num: Number of physical disks
+ * @resp: Response status
+ * @dev_info: Device information
+ */
+struct leapraid_raid_volume {
+ struct list_head list;
+ struct scsi_target *starget;
+ struct scsi_device *sdev;
+ unsigned int id;
+ unsigned int channel;
+ u64 wwid;
+ u16 hdl;
+ u8 vol_type;
+ u8 pd_num;
+ u8 resp;
+ u32 dev_info;
+};
+
+#define LEAPRAID_TGT_FLG_RAID_MEMBER 0x01
+#define LEAPRAID_TGT_FLG_VOLUME 0x02
+#define LEAPRAID_NO_ULD_ATTACH 1
+/**
+ * struct leapraid_starget_priv - SCSI target private data
+ * @starget: SCSI target structure
+ * @sas_address: SAS address
+ * @hdl: Device handle
+ * @num_luns: Number of LUNs
+ * @flg: Flags
+ * @deleted: Deletion flag
+ * @tm_busy: Task management busy flag
+ * @card_port: Associated card port
+ * @sas_dev: SAS device structure
+ */
+struct leapraid_starget_priv {
+ struct scsi_target *starget;
+ u64 sas_address;
+ u16 hdl;
+ int num_luns;
+ u32 flg;
+ bool deleted;
+ bool tm_busy;
+ struct leapraid_card_port *card_port;
+ struct leapraid_sas_dev *sas_dev;
+};
+
+#define LEAPRAID_DEVICE_FLG_INIT 0x01
+/**
+ * struct leapraid_sdev_priv - SCSI device private data
+ * @starget_priv: Associated target private data
+ * @lun: Logical Unit Number
+ * @flg: Flags
+ * @ncq: NCQ (Native Command Queuing) support flag
+ * @block: Block flag
+ * @deleted: Deletion flag
+ * @sep: SEP flag
+ */
+struct leapraid_sdev_priv {
+ struct leapraid_starget_priv *starget_priv;
+ unsigned int lun;
+ u32 flg;
+ bool ncq;
+ bool block;
+ bool deleted;
+ bool sep;
+};
+
+/**
+ * struct leapraid_sas_dev - SAS device structure
+ * @list: List head for device management
+ * @starget: SCSI target structure
+ * @card_port: Associated card port
+ * @rphy: SAS remote PHY
+ * @refcnt: Reference count
+ * @id: Device ID
+ * @channel: SCSI channel
+ * @slot: Slot number
+ * @phy: PHY identifier
+ * @resp: Response status
+ * @led_on: LED state
+ * @sas_addr: SAS address
+ * @dev_name: Device name
+ * @hdl: Device handle
+ * @parent_sas_addr: Parent SAS address
+ * @enc_hdl: Enclosure handle
+ * @enc_lid: Enclosure logical ID
+ * @volume_hdl: Volume handle
+ * @volume_wwid: Volume WWID
+ * @dev_info: Device information
+ * @pend_sas_rphy_add: Pending SAS rphy addition flag
+ * @enc_level: Enclosure level
+ * @port_type: Port type
+ * @connector_name: Connector name
+ * @support_smart: SMART support flag
+ */
+struct leapraid_sas_dev {
+ struct list_head list;
+ struct scsi_target *starget;
+ struct leapraid_card_port *card_port;
+ struct sas_rphy *rphy;
+ struct kref refcnt;
+ unsigned int id;
+ unsigned int channel;
+ u16 slot;
+ u8 phy;
+ bool resp;
+ bool led_on;
+ u64 sas_addr;
+ u64 dev_name;
+ u16 hdl;
+ u64 parent_sas_addr;
+ u16 enc_hdl;
+ u64 enc_lid;
+ u16 volume_hdl;
+ u64 volume_wwid;
+ u32 dev_info;
+ u8 pend_sas_rphy_add;
+ u8 enc_level;
+ u8 port_type;
+ u8 connector_name[5];
+ bool support_smart;
+};
+
+static inline void leapraid_sdev_free(struct kref *ref)
+{
+ kfree(container_of(ref, struct leapraid_sas_dev, refcnt));
+}
+
+#define leapraid_sdev_get(sdev) kref_get(&(sdev)->refcnt)
+#define leapraid_sdev_put(sdev) kref_put(&(sdev)->refcnt, leapraid_sdev_free)
+
+/**
+ * struct leapraid_sas_port - SAS port structure
+ * @port_list: List head for port management
+ * @phy_list: List of PHYs in this port
+ * @port: SAS port structure
+ * @card_port: Associated card port
+ * @remote_identify: Remote device identification
+ * @rphy: SAS remote PHY
+ * @phys_num: Number of PHYs in this port
+ */
+struct leapraid_sas_port {
+ struct list_head port_list;
+ struct list_head phy_list;
+ struct sas_port *port;
+ struct leapraid_card_port *card_port;
+ struct sas_identify remote_identify;
+ struct sas_rphy *rphy;
+ u8 phys_num;
+};
+
+#define LEAPRAID_VPHY_FLG_DIRTY 0x01
+/**
+ * struct leapraid_vphy - Virtual PHY structure
+ * @list: List head for PHY management
+ * @sas_address: SAS address
+ * @phy_mask: PHY mask
+ * @flg: Flags
+ */
+struct leapraid_vphy {
+ struct list_head list;
+ u64 sas_address;
+ u32 phy_mask;
+ u8 flg;
+};
+
+struct leapraid_tgt_rst_list {
+ struct list_head list;
+ u16 handle;
+ u16 state;
+};
+
+struct leapraid_sc_list {
+ struct list_head list;
+ u16 handle;
+};
+
+struct sense_info {
+ u8 sense_key;
+ u8 asc;
+ u8 ascq;
+};
+
+struct leapraid_fw_log_info {
+ u32 user_position;
+ u32 adapter_position;
+};
+
+/**
+ * enum reset_type - Reset type enumeration
+ * @FULL_RESET: Full hardware reset
+ * @PART_RESET: Partial reset
+ */
+enum reset_type {
+ FULL_RESET,
+ PART_RESET,
+};
+
+enum leapraid_card_port_checking_flg {
+ CARD_PORT_FURTHER_CHECKING_NEEDED = 0,
+ CARD_PORT_SKIP_CHECKING,
+};
+
+enum leapraid_port_checking_state {
+ NEW_CARD_PORT = 0,
+ SAME_PORT_WITH_NOTHING_CHANGED,
+ SAME_PORT_WITH_PARTIALLY_CHANGED_PHYS,
+ SAME_ADDR_WITH_PARTIALLY_CHANGED_PHYS,
+ SAME_ADDR_ONLY,
+};
+
+/**
+ * struct leapraid_card_port_feature - Card port feature
+ * @dirty_flg: Dirty flag indicator
+ * @same_addr: Same address flag
+ * @exact_phy: Exact PHY match flag
+ * @phy_overlap: PHY overlap bitmap
+ * @same_port: Same port flag
+ * @cur_chking_old_port: Current checking old port
+ * @expected_old_port: Expected old port
+ * @same_addr_port_count: Same address port count
+ * @checking_state: Port checking state
+ */
+struct leapraid_card_port_feature {
+ u8 dirty_flg;
+ bool same_addr;
+ bool exact_phy;
+ u32 phy_overlap;
+ bool same_port;
+ struct leapraid_card_port *cur_chking_old_port;
+ struct leapraid_card_port *expected_old_port;
+ int same_addr_port_count;
+ enum leapraid_port_checking_state checking_state;
+};
+
+#define SMP_REPORT_MANUFACTURER_INFORMATION_FRAME_TYPE 0x40
+#define SMP_REPORT_MANUFACTURER_INFORMATION_FUNC 0x01
+
+/*
+ * SMP REPORT MANUFACTURER INFORMATION request,
+ * ref: SAS-2 (INCITS 457-2010) 10.4.3.5
+ */
+struct leapraid_rep_manu_request {
+ u8 smp_frame_type;
+ u8 function;
+ u8 allocated_response_length;
+ u8 request_length;
+};
+
+/*
+ * SMP REPORT MANUFACTURER INFORMATION reply,
+ * ref: SAS-2 (INCITS 457-2010) 10.4.3.5
+ */
+struct leapraid_rep_manu_reply {
+ u8 smp_frame_type;
+ u8 function;
+ u8 function_result;
+ u8 response_length;
+ u16 expander_change_count;
+ u8 r1[2];
+ u8 sas_format;
+ u8 r2[3];
+ u8 vendor_identification[SAS_EXPANDER_VENDOR_ID_LEN];
+ u8 product_identification[SAS_EXPANDER_PRODUCT_ID_LEN];
+ u8 product_revision_level[SAS_EXPANDER_PRODUCT_REV_LEN];
+ u8 component_vendor_identification[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u16 component_id;
+ u8 component_revision_level;
+ u8 r3;
+ u8 vendor_specific[8];
+};
+
+/**
+ * struct leapraid_scsi_cmd_desc - SCSI command descriptor
+ * @hdl: Device handle
+ * @lun: Logical Unit Number
+ * @raid_member: RAID member flag
+ * @dir: DMA data direction
+ * @data_length: Data transfer length
+ * @data_buffer: Data buffer pointer
+ * @cdb_length: CDB length
+ * @cdb: Command Descriptor Block
+ * @time_out: Timeout
+ */
+struct leapraid_scsi_cmd_desc {
+ u16 hdl;
+ u32 lun;
+ bool raid_member;
+ enum dma_data_direction dir;
+ u32 data_length;
+ void *data_buffer;
+ u8 cdb_length;
+ u8 cdb[32];
+ u8 time_out;
+};
+
+extern struct list_head leapraid_adapter_list;
+extern spinlock_t leapraid_adapter_lock;
+extern char driver_name[LEAPRAID_NAME_LENGTH];
+
+int leapraid_ctrl_init(struct leapraid_adapter *adapter);
+void leapraid_remove_ctrl(struct leapraid_adapter *adapter);
+void leapraid_check_scheduled_fault_start(struct leapraid_adapter *adapter);
+void leapraid_check_scheduled_fault_stop(struct leapraid_adapter *adapter);
+void leapraid_fw_log_start(struct leapraid_adapter *adapter);
+void leapraid_fw_log_stop(struct leapraid_adapter *adapter);
+int leapraid_set_pcie_and_notification(struct leapraid_adapter *adapter);
+void leapraid_disable_controller(struct leapraid_adapter *adapter);
+int leapraid_hard_reset_handler(struct leapraid_adapter *adapter,
+ enum reset_type type);
+void leapraid_mask_int(struct leapraid_adapter *adapter);
+void leapraid_unmask_int(struct leapraid_adapter *adapter);
+u32 leapraid_get_adapter_state(struct leapraid_adapter *adapter);
+bool leapraid_pci_removed(struct leapraid_adapter *adapter);
+int leapraid_check_adapter_is_op(struct leapraid_adapter *adapter);
+void *leapraid_get_task_desc(struct leapraid_adapter *adapter, u16 taskid);
+void *leapraid_get_sense_buffer(struct leapraid_adapter *adapter, u16 taskid);
+__le32 leapraid_get_sense_buffer_dma(struct leapraid_adapter *adapter,
+ u16 taskid);
+void *leapraid_get_reply_vaddr(struct leapraid_adapter *adapter,
+ u32 phys_addr);
+u16 leapraid_alloc_scsiio_taskid(struct leapraid_adapter *adapter,
+ struct scsi_cmnd *scmd);
+void leapraid_free_taskid(struct leapraid_adapter *adapter, u16 taskid);
+struct leapraid_io_req_tracker *leapraid_get_io_tracker_from_taskid(
+ struct leapraid_adapter *adapter, u16 taskid);
+struct leapraid_io_req_tracker *leapraid_get_scmd_priv(struct scsi_cmnd *scmd);
+struct scsi_cmnd *leapraid_get_scmd_from_taskid(
+ struct leapraid_adapter *adapter, u16 taskid);
+int leapraid_scan_dev(struct leapraid_adapter *adapter, bool async_scan_dev);
+void leapraid_scan_dev_done(struct leapraid_adapter *adapter);
+void leapraid_wait_cmds_done(struct leapraid_adapter *adapter);
+void leapraid_clean_active_scsi_cmds(struct leapraid_adapter *adapter);
+void leapraid_sync_irqs(struct leapraid_adapter *adapter, bool poll);
+int leapraid_rep_queue_handler(struct leapraid_rq *rq);
+void leapraid_mq_polling_pause(struct leapraid_adapter *adapter);
+void leapraid_mq_polling_resume(struct leapraid_adapter *adapter);
+void leapraid_set_tm_flg(struct leapraid_adapter *adapter, u16 handle);
+void leapraid_clear_tm_flg(struct leapraid_adapter *adapter, u16 handle);
+void leapraid_async_turn_on_led(struct leapraid_adapter *adapter, u16 handle);
+int leapraid_issue_locked_tm(struct leapraid_adapter *adapter, u16 handle,
+ uint channel, uint id, uint lun, u8 type,
+ u16 taskid_task, u8 tr_method);
+int leapraid_issue_tm(struct leapraid_adapter *adapter, u16 handle,
+ uint channel, uint id, uint lun, u8 type,
+ u16 taskid_task, u8 tr_method);
+u8 leapraid_scsiio_done(struct leapraid_adapter *adapter, u16 taskid,
+ u8 msix_index, u32 rep);
+int leapraid_get_volume_cap(struct leapraid_adapter *adapter,
+ struct leapraid_raid_volume *raid_volume);
+int leapraid_internal_init_cmd_priv(struct leapraid_adapter *adapter,
+ struct leapraid_io_req_tracker *io_tracker);
+int leapraid_internal_exit_cmd_priv(struct leapraid_adapter *adapter,
+ struct leapraid_io_req_tracker *io_tracker);
+void leapraid_clean_active_fw_evt(struct leapraid_adapter *adapter);
+bool leapraid_scmd_find_by_lun(struct leapraid_adapter *adapter,
+ uint id, unsigned int lun, uint channel);
+bool leapraid_scmd_find_by_tgt(struct leapraid_adapter *adapter,
+ uint id, uint channel);
+struct leapraid_vphy *leapraid_get_vphy_by_phy(struct leapraid_card_port *port,
+ u32 phy);
+struct leapraid_raid_volume *leapraid_raid_volume_find_by_id(
+ struct leapraid_adapter *adapter, uint id, uint channel);
+struct leapraid_raid_volume *leapraid_raid_volume_find_by_hdl(
+ struct leapraid_adapter *adapter, u16 handle);
+struct leapraid_topo_node *leapraid_exp_find_by_sas_address(
+ struct leapraid_adapter *adapter, u64 sas_address,
+ struct leapraid_card_port *port);
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_by_addr_and_rphy(
+ struct leapraid_adapter *adapter,
+ u64 sas_address, struct sas_rphy *rphy);
+struct leapraid_sas_dev *leapraid_get_sas_dev_by_addr(
+ struct leapraid_adapter *adapter, u64 sas_address,
+ struct leapraid_card_port *port);
+struct leapraid_sas_dev *leapraid_get_sas_dev_by_hdl(
+ struct leapraid_adapter *adapter, u16 handle);
+struct leapraid_sas_dev *leapraid_get_sas_dev_from_tgt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_starget_priv *tgt_priv);
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_from_tgt(
+ struct leapraid_adapter *adapter,
+ struct leapraid_starget_priv *tgt_priv);
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_by_hdl(
+ struct leapraid_adapter *adapter, u16 handle);
+struct leapraid_sas_dev *leapraid_hold_lock_get_sas_dev_by_addr(
+ struct leapraid_adapter *adapter, u64 sas_address,
+ struct leapraid_card_port *port);
+struct leapraid_sas_dev *leapraid_get_next_sas_dev_from_init_list(
+ struct leapraid_adapter *adapter);
+void leapraid_sas_dev_remove_by_sas_address(
+ struct leapraid_adapter *adapter,
+ u64 sas_address, struct leapraid_card_port *port);
+void leapraid_sas_dev_remove(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev);
+void leapraid_raid_volume_remove(struct leapraid_adapter *adapter,
+ struct leapraid_raid_volume *raid_volume);
+void leapraid_exp_rm(struct leapraid_adapter *adapter,
+ u64 sas_address, struct leapraid_card_port *port);
+void leapraid_build_mpi_sg(struct leapraid_adapter *adapter,
+ void *sge, dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size);
+void leapraid_build_ieee_nodata_sg(struct leapraid_adapter *adapter,
+ void *sge);
+void leapraid_build_ieee_sg(struct leapraid_adapter *adapter,
+ void *psge, dma_addr_t h2c_dma_addr,
+ size_t h2c_size, dma_addr_t c2h_dma_addr,
+ size_t c2h_size);
+int leapraid_build_scmd_ieee_sg(struct leapraid_adapter *adapter,
+ struct scsi_cmnd *scmd, u16 taskid);
+void leapraid_fire_scsi_io(struct leapraid_adapter *adapter,
+ u16 taskid, u16 handle);
+void leapraid_fire_hpr_task(struct leapraid_adapter *adapter, u16 taskid,
+ u16 msix_task);
+void leapraid_fire_task(struct leapraid_adapter *adapter, u16 taskid);
+int leapraid_cfg_get_volume_hdl(struct leapraid_adapter *adapter,
+ u16 pd_handle, u16 *volume_handle);
+int leapraid_cfg_get_volume_wwid(struct leapraid_adapter *adapter,
+ u16 volume_handle, u64 *wwid);
+int leapraid_op_config_page(struct leapraid_adapter *adapter,
+ void *cfgp, union cfg_param_1 cfgp1,
+ union cfg_param_2 cfgp2,
+ enum config_page_action cfg_op);
+void leapraid_adjust_sdev_queue_depth(struct scsi_device *sdev, int qdepth);
+
+int leapraid_ctl_release(struct inode *inode, struct file *filep);
+void leapraid_ctl_init(void);
+void leapraid_ctl_exit(void);
+
+extern struct sas_function_template leapraid_transport_functions;
+extern struct scsi_transport_template *leapraid_transport_template;
+struct leapraid_sas_port *leapraid_transport_port_add(
+ struct leapraid_adapter *adapter, u16 handle, u64 sas_address,
+ struct leapraid_card_port *card_port);
+void leapraid_transport_port_remove(struct leapraid_adapter *adapter,
+ u64 sas_address, u64 sas_address_parent,
+ struct leapraid_card_port *card_port);
+void leapraid_transport_add_card_phy(struct leapraid_adapter *adapter,
+ struct leapraid_card_phy *card_phy,
+ struct leapraid_sas_phy_p0 *phy_pg0,
+ struct device *parent_dev);
+int leapraid_transport_add_exp_phy(struct leapraid_adapter *adapter,
+ struct leapraid_card_phy *card_phy,
+ struct leapraid_exp_p1 *exp_pg1,
+ struct device *parent_dev);
+void leapraid_transport_update_links(struct leapraid_adapter *adapter,
+ u64 sas_address, u16 handle,
+ u8 phy_number, u8 link_rate,
+ struct leapraid_card_port *card_port);
+void leapraid_transport_detach_phy_to_port(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node,
+ struct leapraid_card_phy *card_phy);
+void leapraid_transport_attach_phy_to_port(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *sas_node,
+ struct leapraid_card_phy *card_phy,
+ u64 sas_address,
+ struct leapraid_card_port *card_port);
+int leapraid_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmd);
+void leapraid_smart_polling_start(struct leapraid_adapter *adapter);
+void leapraid_smart_polling_stop(struct leapraid_adapter *adapter);
+void leapraid_smart_fault_detect(struct leapraid_adapter *adapter, u16 hdl);
+void leapraid_free_internal_scsi_cmd(struct leapraid_adapter *adapter);
+
+#endif /* LEAPRAID_FUNC_H_INCLUDED */
diff --git a/drivers/scsi/leapraid/leapraid_os.c b/drivers/scsi/leapraid/leapraid_os.c
new file mode 100644
index 000000000000..44ec2615648f
--- /dev/null
+++ b/drivers/scsi/leapraid/leapraid_os.c
@@ -0,0 +1,2271 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 LeapIO Tech Inc.
+ *
+ * LeapRAID Storage and RAID Controller driver.
+ */
+
+#include <linux/module.h>
+
+#include "leapraid_func.h"
+#include "leapraid.h"
+
+LIST_HEAD(leapraid_adapter_list);
+DEFINE_SPINLOCK(leapraid_adapter_lock);
+
+MODULE_AUTHOR(LEAPRAID_AUTHOR);
+MODULE_DESCRIPTION(LEAPRAID_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(LEAPRAID_DRIVER_VERSION);
+
+static int leapraid_ids;
+
+static int open_pcie_trace = 1;
+module_param(open_pcie_trace, int, 0644);
+MODULE_PARM_DESC(open_pcie_trace, "enable PCIe tracing: 1=enabled (default), 0=disabled");
+
+static int enable_mp = 1;
+module_param(enable_mp, int, 0444);
+MODULE_PARM_DESC(enable_mp,
+ "enable multipath on target device. default=1(enable)");
+
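+/*
+ * Pull key/asc/ascq out of the sense buffer, handling both descriptor
+ * format (fields at bytes 1-3) and fixed format (fields at bytes 2, 12
+ * and 13).
+ */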
+static inline void leapraid_get_sense_data(char *sense,
+ struct sense_info *data)
+{
+ bool desc_format = (sense[0] & SCSI_SENSE_RESPONSE_CODE_MASK) >=
+ DESC_FORMAT_THRESHOLD;
+
+ if (desc_format) {
+ data->sense_key = sense[1] & SENSE_KEY_MASK;
+ data->asc = sense[2];
+ data->ascq = sense[3];
+ } else {
+ data->sense_key = sense[2] & SENSE_KEY_MASK;
+ data->asc = sense[12];
+ data->ascq = sense[13];
+ }
+}
+
+static struct Scsi_Host *pdev_to_shost(struct pci_dev *pdev)
+{
+ return pci_get_drvdata(pdev);
+}
+
+static struct leapraid_adapter *pdev_to_adapter(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+
+ if (!shost)
+ return NULL;
+
+ return shost_priv(shost);
+}
+
+struct leapraid_io_req_tracker *leapraid_get_scmd_priv(struct scsi_cmnd *scmd)
+{
+ return (struct leapraid_io_req_tracker *)scmd->host_scribble;
+}
+
+void leapraid_set_tm_flg(struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct scsi_device *sdev;
+ bool skip = false;
+
+ /*
+ * Do not break out early: shost_for_each_device() takes a reference
+ * on each device that is only dropped by running the loop to the end.
+ */
+ shost_for_each_device(sdev, adapter->shost) {
+ if (skip)
+ continue;
+
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv)
+ continue;
+
+ if (sdev_priv->starget_priv->hdl == hdl) {
+ sdev_priv->starget_priv->tm_busy = true;
+ skip = true;
+ }
+ }
+}
+
+void leapraid_clear_tm_flg(struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_sdev_priv *sdev_priv;
+ struct scsi_device *sdev;
+ bool skip = false;
+
+ /*
+ * Do not break out early: shost_for_each_device() takes a reference
+ * on each device that is only dropped by running the loop to the end.
+ */
+ shost_for_each_device(sdev, adapter->shost) {
+ if (skip)
+ continue;
+
+ sdev_priv = sdev->hostdata;
+ if (!sdev_priv)
+ continue;
+
+ if (sdev_priv->starget_priv->hdl == hdl) {
+ sdev_priv->starget_priv->tm_busy = false;
+ skip = true;
+ }
+ }
+}
+
+static int leapraid_tm_cmd_map_status(struct leapraid_adapter *adapter,
+ uint channel,
+ uint id,
+ uint lun,
+ u8 type,
+ u16 taskid_task)
+{
+ int rc = FAILED;
+
+ if (taskid_task <= adapter->shost->can_queue) {
+ switch (type) {
+ case LEAPRAID_TM_TASKTYPE_ABRT_TASK_SET:
+ case LEAPRAID_TM_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (!leapraid_scmd_find_by_lun(adapter, id, lun,
+ channel))
+ rc = SUCCESS;
+ break;
+ case LEAPRAID_TM_TASKTYPE_TARGET_RESET:
+ if (!leapraid_scmd_find_by_tgt(adapter, id, channel))
+ rc = SUCCESS;
+ break;
+ default:
+ rc = SUCCESS;
+ }
+ }
+
+ if (taskid_task == adapter->driver_cmds.driver_scsiio_cmd.taskid) {
+ if ((adapter->driver_cmds.driver_scsiio_cmd.status &
+ LEAPRAID_CMD_DONE) ||
+ (adapter->driver_cmds.driver_scsiio_cmd.status &
+ LEAPRAID_CMD_NOT_USED))
+ rc = SUCCESS;
+ }
+
+ if (taskid_task == adapter->driver_cmds.ctl_cmd.hp_taskid) {
+ if ((adapter->driver_cmds.ctl_cmd.status &
+ LEAPRAID_CMD_DONE) ||
+ (adapter->driver_cmds.ctl_cmd.status &
+ LEAPRAID_CMD_NOT_USED))
+ rc = SUCCESS;
+ }
+
+ return rc;
+}
+
+static int leapraid_tm_post_processing(struct leapraid_adapter *adapter,
+ u16 hdl, uint channel, uint id,
+ uint lun, u8 type, u16 taskid_task)
+{
+ int rc;
+
+ rc = leapraid_tm_cmd_map_status(adapter, channel, id, lun,
+ type, taskid_task);
+ if (rc == SUCCESS)
+ return rc;
+
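+ /*
+ * The completion may still be sitting unprocessed in a reply queue;
+ * drain the queues once with interrupts masked and re-check.
+ */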
+ leapraid_mask_int(adapter);
+ leapraid_sync_irqs(adapter, true);
+ leapraid_unmask_int(adapter);
+
+ rc = leapraid_tm_cmd_map_status(adapter, channel, id, lun, type,
+ taskid_task);
+ return rc;
+}
+
+static void leapraid_build_tm_req(struct leapraid_scsi_tm_req *scsi_tm_req,
+ u16 hdl, uint lun, u8 type, u8 tr_method,
+ u16 target_taskid)
+{
+ memset(scsi_tm_req, 0, sizeof(*scsi_tm_req));
+ scsi_tm_req->func = LEAPRAID_FUNC_SCSI_TMF;
+ scsi_tm_req->dev_hdl = cpu_to_le16(hdl);
+ scsi_tm_req->task_type = type;
+ scsi_tm_req->msg_flg = tr_method;
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK ||
+ type == LEAPRAID_TM_TASKTYPE_QUERY_TASK)
+ scsi_tm_req->task_mid = cpu_to_le16(target_taskid);
+ int_to_scsilun(lun, (struct scsi_lun *)scsi_tm_req->lun);
+}
+
+int leapraid_issue_tm(struct leapraid_adapter *adapter, u16 hdl, uint channel,
+ uint id, uint lun, u8 type,
+ u16 target_taskid, u8 tr_method)
+{
+ struct leapraid_scsi_tm_req *scsi_tm_req;
+ struct leapraid_scsiio_req *scsiio_req;
+ struct leapraid_io_req_tracker *io_req_tracker = NULL;
+ u16 msix_task = 0;
+ bool issue_reset = false;
+ u32 db;
+ int rc;
+
+ lockdep_assert_held(&adapter->driver_cmds.tm_cmd.mutex);
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.host_removing ||
+ adapter->access_ctrl.pcie_recovering) {
+ dev_info(&adapter->pdev->dev,
+ "%s %s: host is recovering, skip tm command!\n",
+ __func__, adapter->adapter_attr.name);
+ return FAILED;
+ }
+
+ db = leapraid_readl(&adapter->iomem_base->db);
+ if (db & LEAPRAID_DB_USED) {
+ dev_info(&adapter->pdev->dev,
+ "%s unexpected db status, issuing hard reset!\n",
+ adapter->adapter_attr.name);
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ rc = leapraid_hard_reset_handler(adapter, FULL_RESET);
+ return (!rc) ? SUCCESS : FAILED;
+ }
+
+ if ((db & LEAPRAID_DB_MASK) == LEAPRAID_DB_FAULT) {
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ rc = leapraid_hard_reset_handler(adapter, FULL_RESET);
+ return (!rc) ? SUCCESS : FAILED;
+ }
+
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK)
+ io_req_tracker = leapraid_get_io_tracker_from_taskid(adapter,
+ target_taskid);
+
+ adapter->driver_cmds.tm_cmd.status = LEAPRAID_CMD_PENDING;
+ scsi_tm_req =
+ leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.tm_cmd.hp_taskid);
+ leapraid_build_tm_req(scsi_tm_req, hdl, lun, type, tr_method,
+ target_taskid);
+ memset((void *)(&adapter->driver_cmds.tm_cmd.reply), 0,
+ sizeof(struct leapraid_scsi_tm_rep));
+ leapraid_set_tm_flg(adapter, hdl);
+ init_completion(&adapter->driver_cmds.tm_cmd.done);
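+ /*
+ * Post ABORT TASK on the reply queue that served the original I/O
+ * when it is valid; fall back to queue 0 otherwise.
+ */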
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK &&
+ io_req_tracker &&
+ io_req_tracker->msix_io < adapter->adapter_attr.rq_cnt)
+ msix_task = io_req_tracker->msix_io;
+ else
+ msix_task = 0;
+ leapraid_fire_hpr_task(adapter,
+ adapter->driver_cmds.tm_cmd.hp_taskid,
+ msix_task);
+ wait_for_completion_timeout(&adapter->driver_cmds.tm_cmd.done,
+ LEAPRAID_TM_CMD_TIMEOUT * HZ);
+ if (!(adapter->driver_cmds.tm_cmd.status & LEAPRAID_CMD_DONE)) {
+ issue_reset =
+ leapraid_check_reset(
+ adapter->driver_cmds.tm_cmd.status);
+ if (issue_reset) {
+ dev_info(&adapter->pdev->dev,
+ "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ rc = leapraid_hard_reset_handler(adapter, FULL_RESET);
+ rc = (!rc) ? SUCCESS : FAILED;
+ goto out;
+ }
+ }
+
+ leapraid_sync_irqs(adapter, false);
+
+ switch (type) {
+ case LEAPRAID_TM_TASKTYPE_TARGET_RESET:
+ case LEAPRAID_TM_TASKTYPE_ABRT_TASK_SET:
+ case LEAPRAID_TM_TASKTYPE_LOGICAL_UNIT_RESET:
+ rc = leapraid_tm_post_processing(adapter, hdl, channel, id, lun,
+ type, target_taskid);
+ break;
+ case LEAPRAID_TM_TASKTYPE_ABORT_TASK:
+ rc = SUCCESS;
+ scsiio_req = leapraid_get_task_desc(adapter, target_taskid);
+ if (le16_to_cpu(scsiio_req->dev_hdl) != hdl)
+ break;
+ dev_err(&adapter->pdev->dev, "%s abort failed, hdl=0x%04x\n",
+ adapter->adapter_attr.name, hdl);
+ rc = FAILED;
+ break;
+ case LEAPRAID_TM_TASKTYPE_QUERY_TASK:
+ rc = SUCCESS;
+ break;
+ default:
+ rc = FAILED;
+ break;
+ }
+
+out:
+ leapraid_clear_tm_flg(adapter, hdl);
+ adapter->driver_cmds.tm_cmd.status = LEAPRAID_CMD_NOT_USED;
+ return rc;
+}
+
+int leapraid_issue_locked_tm(struct leapraid_adapter *adapter, u16 hdl,
+ uint channel, uint id, uint lun, u8 type,
+ u16 target_taskid, u8 tr_method)
+{
+ int rc;
+
+ mutex_lock(&adapter->driver_cmds.tm_cmd.mutex);
+ rc = leapraid_issue_tm(adapter, hdl, channel, id, lun, type,
+ target_taskid, tr_method);
+ mutex_unlock(&adapter->driver_cmds.tm_cmd.mutex);
+
+ return rc;
+}
+
+void leapraid_smart_fault_detect(struct leapraid_adapter *adapter, u16 hdl)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_sas_dev *sas_dev;
+ struct scsi_target *starget;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_hdl(adapter, hdl);
+ if (!sas_dev) {
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ goto out;
+ }
+
+ starget = sas_dev->starget;
+ starget_priv = starget->hostdata;
+ if ((starget_priv->flg & LEAPRAID_TGT_FLG_RAID_MEMBER) ||
+ (starget_priv->flg & LEAPRAID_TGT_FLG_VOLUME)) {
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ goto out;
+ }
+
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ leapraid_async_turn_on_led(adapter, hdl);
+out:
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+}
+
+static void leapraid_process_sense_data(struct leapraid_adapter *adapter,
+ struct leapraid_scsiio_rep *scsiio_rep,
+ struct scsi_cmnd *scmd, u16 taskid)
+{
+ struct sense_info data;
+ const void *sense_data;
+ u32 sz;
+
+ if (!(scsiio_rep->scsi_state & LEAPRAID_SCSI_STATE_AUTOSENSE_VALID))
+ return;
+
+ sense_data = leapraid_get_sense_buffer(adapter, taskid);
+ sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(scsiio_rep->sense_count));
+
+ memcpy(scmd->sense_buffer, sense_data, sz);
+ leapraid_get_sense_data(scmd->sense_buffer, &data);
+ if (data.asc == ASC_FAILURE_PREDICTION_THRESHOLD_EXCEEDED)
+ leapraid_smart_fault_detect(adapter,
+ le16_to_cpu(scsiio_rep->dev_hdl));
+}
+
+static void leapraid_handle_data_underrun(
+ struct leapraid_scsiio_rep *scsiio_rep,
+ struct scsi_cmnd *scmd, u32 xfer_cnt)
+{
+ u8 scsi_status = scsiio_rep->scsi_status;
+ u8 scsi_state = scsiio_rep->scsi_state;
+
+ scmd->result = (DID_OK << LEAPRAID_SCSI_HOST_SHIFT) | scsi_status;
+
+ if (scsi_state & LEAPRAID_SCSI_STATE_AUTOSENSE_VALID)
+ return;
+
+ if (xfer_cnt < scmd->underflow) {
+ if (scsi_status == SAM_STAT_BUSY)
+ scmd->result = SAM_STAT_BUSY;
+ else
+ scmd->result = DID_SOFT_ERROR <<
+ LEAPRAID_SCSI_HOST_SHIFT;
+ } else if (scsi_state & (LEAPRAID_SCSI_STATE_AUTOSENSE_FAILED |
+ LEAPRAID_SCSI_STATE_NO_SCSI_STATUS)) {
+ scmd->result = DID_SOFT_ERROR << LEAPRAID_SCSI_HOST_SHIFT;
+ } else if (scsi_state & LEAPRAID_SCSI_STATE_TERMINATED) {
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ } else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
+ scsiio_rep->scsi_state = LEAPRAID_SCSI_STATE_AUTOSENSE_VALID;
+ scsiio_rep->scsi_status = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ LEAPRAID_SCSI_ASC_INVALID_CMD_CODE,
+ LEAPRAID_SCSI_ASCQ_DEFAULT);
+ scmd->result = (DRIVER_SENSE << LEAPRAID_SCSI_DRIVER_SHIFT) |
+ (DID_OK << LEAPRAID_SCSI_HOST_SHIFT) |
+ SAM_STAT_CHECK_CONDITION;
+ }
+}
+
+static void leapraid_handle_success_status(
+ struct leapraid_scsiio_rep *scsiio_rep,
+ struct scsi_cmnd *scmd,
+ u32 response_code)
+{
+ u8 scsi_status = scsiio_rep->scsi_status;
+ u8 scsi_state = scsiio_rep->scsi_state;
+
+ scmd->result = (DID_OK << LEAPRAID_SCSI_HOST_SHIFT) | scsi_status;
+
+ if (response_code == LEAPRAID_TM_RSP_INVALID_FRAME ||
+ (scsi_state & (LEAPRAID_SCSI_STATE_AUTOSENSE_FAILED |
+ LEAPRAID_SCSI_STATE_NO_SCSI_STATUS)))
+ scmd->result = DID_SOFT_ERROR << LEAPRAID_SCSI_HOST_SHIFT;
+ else if (scsi_state & LEAPRAID_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+}
+
+static void leapraid_scsiio_done_dispatch(struct leapraid_adapter *adapter,
+ struct leapraid_scsiio_rep *scsiio_rep,
+ struct leapraid_sdev_priv *sdev_priv,
+ struct scsi_cmnd *scmd,
+ u16 taskid, u32 response_code)
+{
+ u8 scsi_status = scsiio_rep->scsi_status;
+ u8 scsi_state = scsiio_rep->scsi_state;
+ u16 adapter_status;
+ u32 xfer_cnt;
+ u32 sz;
+
+ adapter_status = le16_to_cpu(scsiio_rep->adapter_status) &
+ LEAPRAID_ADAPTER_STATUS_MASK;
+
+ xfer_cnt = le32_to_cpu(scsiio_rep->transfer_count);
+ scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
+
+ if (adapter_status == LEAPRAID_ADAPTER_STATUS_SCSI_DATA_UNDERRUN &&
+ xfer_cnt == 0 &&
+ (scsi_status == LEAPRAID_SCSI_STATUS_BUSY ||
+ scsi_status == LEAPRAID_SCSI_STATUS_RESERVATION_CONFLICT ||
+ scsi_status == LEAPRAID_SCSI_STATUS_TASK_SET_FULL)) {
+ adapter_status = LEAPRAID_ADAPTER_STATUS_SUCCESS;
+ }
+
+ switch (adapter_status) {
+ case LEAPRAID_ADAPTER_STATUS_SCSI_DEVICE_NOT_THERE:
+ scmd->result = DID_NO_CONNECT << LEAPRAID_SCSI_HOST_SHIFT;
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_BUSY:
+ case LEAPRAID_ADAPTER_STATUS_INSUFFICIENT_RESOURCES:
+ scmd->result = SAM_STAT_BUSY;
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_SCSI_RESIDUAL_MISMATCH:
+ if (xfer_cnt == 0 || scmd->underflow > xfer_cnt)
+ scmd->result = DID_SOFT_ERROR <<
+ LEAPRAID_SCSI_HOST_SHIFT;
+ else
+ scmd->result = (DID_OK << LEAPRAID_SCSI_HOST_SHIFT) |
+ scsi_status;
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_SCSI_ADAPTER_TERMINATED:
+ if (sdev_priv->block) {
+ scmd->result = DID_TRANSPORT_DISRUPTED <<
+ LEAPRAID_SCSI_HOST_SHIFT;
+ return;
+ }
+
+ if (scmd->device->channel == RAID_CHANNEL &&
+ scsi_state == (LEAPRAID_SCSI_STATE_TERMINATED |
+ LEAPRAID_SCSI_STATE_NO_SCSI_STATUS)) {
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ break;
+ }
+
+ scmd->result = DID_SOFT_ERROR << LEAPRAID_SCSI_HOST_SHIFT;
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_SCSI_TASK_TERMINATED:
+ case LEAPRAID_ADAPTER_STATUS_SCSI_EXT_TERMINATED:
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_SCSI_DATA_UNDERRUN:
+ leapraid_handle_data_underrun(scsiio_rep, scmd, xfer_cnt);
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_SCSI_DATA_OVERRUN:
+ scsi_set_resid(scmd, 0);
+ leapraid_handle_success_status(scsiio_rep, scmd,
+ response_code);
+ break;
+ case LEAPRAID_ADAPTER_STATUS_SCSI_RECOVERED_ERROR:
+ case LEAPRAID_ADAPTER_STATUS_SUCCESS:
+ leapraid_handle_success_status(scsiio_rep, scmd,
+ response_code);
+ break;
+
+ case LEAPRAID_ADAPTER_STATUS_SCSI_PROTOCOL_ERROR:
+ case LEAPRAID_ADAPTER_STATUS_INTERNAL_ERROR:
+ case LEAPRAID_ADAPTER_STATUS_SCSI_IO_DATA_ERROR:
+ case LEAPRAID_ADAPTER_STATUS_SCSI_TASK_MGMT_FAILED:
+ default:
+ scmd->result = DID_SOFT_ERROR << LEAPRAID_SCSI_HOST_SHIFT;
+ break;
+ }
+
+ if (!scmd->result)
+ return;
+
+ scsi_print_command(scmd);
+ dev_warn(&adapter->pdev->dev,
+ "scsiio warn: hdl=0x%x, status are: 0x%x, 0x%x, 0x%x\n",
+ le16_to_cpu(scsiio_rep->dev_hdl), adapter_status,
+ scsi_status, scsi_state);
+
+ if (scsi_state & LEAPRAID_SCSI_STATE_AUTOSENSE_VALID) {
+ struct scsi_sense_hdr sshdr;
+
+ sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(scsiio_rep->sense_count));
+ if (scsi_normalize_sense(scmd->sense_buffer, sz,
+ &sshdr)) {
+ dev_warn(&adapter->pdev->dev,
+ "sense: key=0x%x asc=0x%x ascq=0x%x\n",
+ sshdr.sense_key, sshdr.asc,
+ sshdr.ascq);
+ } else {
+ dev_warn(&adapter->pdev->dev,
+ "sense: invalid sense data\n");
+ }
+ }
+}
+
+u8 leapraid_scsiio_done(struct leapraid_adapter *adapter, u16 taskid,
+ u8 msix_index, u32 rep)
+{
+ struct leapraid_scsiio_rep *scsiio_rep = NULL;
+ struct leapraid_sdev_priv *sdev_priv = NULL;
+ struct scsi_cmnd *scmd = NULL;
+ u32 response_code = 0;
+
+ if (likely(taskid != adapter->driver_cmds.driver_scsiio_cmd.taskid))
+ scmd = leapraid_get_scmd_from_taskid(adapter, taskid);
+ else
+ scmd = adapter->driver_cmds.internal_scmd;
+ if (!scmd)
+ return 1;
+
+ scsiio_rep = leapraid_get_reply_vaddr(adapter, rep);
+ if (!scsiio_rep) {
+ scmd->result = DID_OK << LEAPRAID_SCSI_HOST_SHIFT;
+ goto out;
+ }
+
+ sdev_priv = scmd->device->hostdata;
+ if (!sdev_priv ||
+ !sdev_priv->starget_priv ||
+ sdev_priv->starget_priv->deleted) {
+ scmd->result = DID_NO_CONNECT << LEAPRAID_SCSI_HOST_SHIFT;
+ goto out;
+ }
+
+ if (scsiio_rep->scsi_state & LEAPRAID_SCSI_STATE_RESPONSE_INFO_VALID)
+ response_code = le32_to_cpu(scsiio_rep->resp_info) & 0xFF;
+
+ leapraid_process_sense_data(adapter, scsiio_rep, scmd, taskid);
+ leapraid_scsiio_done_dispatch(adapter, scsiio_rep, sdev_priv, scmd,
+ taskid, response_code);
+
+out:
+ scsi_dma_unmap(scmd);
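+ /* internal driver I/O completes via driver_scsiio_cmd, not scsi_done() */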
+ if (unlikely(taskid == adapter->driver_cmds.driver_scsiio_cmd.taskid)) {
+ adapter->driver_cmds.driver_scsiio_cmd.status =
+ LEAPRAID_CMD_DONE;
+ complete(&adapter->driver_cmds.driver_scsiio_cmd.done);
+ return 0;
+ }
+ leapraid_free_taskid(adapter, taskid);
+ scmd->scsi_done(scmd);
+ return 0;
+}
+
+static void leapraid_probe_raid(struct leapraid_adapter *adapter)
+{
+ struct leapraid_raid_volume *raid_volume, *raid_volume_next;
+ int rc;
+
+ list_for_each_entry_safe(raid_volume, raid_volume_next,
+ &adapter->dev_topo.raid_volume_list, list) {
+ if (raid_volume->starget)
+ continue;
+
+ rc = scsi_add_device(adapter->shost, RAID_CHANNEL,
+ raid_volume->id, 0);
+ if (rc)
+ leapraid_raid_volume_remove(adapter, raid_volume);
+ }
+}
+
+static void leapraid_sas_dev_make_active(struct leapraid_adapter *adapter,
+ struct leapraid_sas_dev *sas_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ if (!list_empty(&sas_dev->list)) {
+ list_del_init(&sas_dev->list);
+ leapraid_sdev_put(sas_dev);
+ }
+
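+ /* the active sas_dev_list holds its own reference to the device */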
+ leapraid_sdev_get(sas_dev);
+ list_add_tail(&sas_dev->list, &adapter->dev_topo.sas_dev_list);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+}
+
+static void leapraid_probe_sas(struct leapraid_adapter *adapter)
+{
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_sas_port *sas_port;
+
+ for (;;) {
+ sas_dev = leapraid_get_next_sas_dev_from_init_list(adapter);
+ if (!sas_dev)
+ break;
+
+ sas_port = leapraid_transport_port_add(adapter,
+ sas_dev->hdl,
+ sas_dev->parent_sas_addr,
+ sas_dev->card_port);
+
+ if (!sas_port)
+ goto remove_dev;
+
+ if (!sas_dev->starget &&
+ !adapter->scan_dev_desc.driver_loading) {
+ leapraid_transport_port_remove(adapter,
+ sas_dev->sas_addr,
+ sas_dev->parent_sas_addr,
+ sas_dev->card_port);
+ goto remove_dev;
+ }
+
+ leapraid_sas_dev_make_active(adapter, sas_dev);
+ leapraid_sdev_put(sas_dev);
+ continue;
+
+remove_dev:
+ leapraid_sas_dev_remove(adapter, sas_dev);
+ leapraid_sdev_put(sas_dev);
+ }
+}
+
+static bool leapraid_get_boot_dev(struct leapraid_boot_dev *boot_dev,
+ void **pdev, u32 *pchnl)
+{
+ if (boot_dev->dev) {
+ *pdev = boot_dev->dev;
+ *pchnl = boot_dev->chnl;
+ return true;
+ }
+ return false;
+}
+
+static void leapraid_probe_boot_dev(struct leapraid_adapter *adapter)
+{
+ void *dev = NULL;
+ u32 chnl;
+
+ if (leapraid_get_boot_dev(&adapter->boot_devs.requested_boot_dev, &dev,
+ &chnl))
+ goto boot_dev_found;
+
+ if (leapraid_get_boot_dev(&adapter->boot_devs.requested_alt_boot_dev,
+ &dev, &chnl))
+ goto boot_dev_found;
+
+ if (leapraid_get_boot_dev(&adapter->boot_devs.current_boot_dev, &dev,
+ &chnl))
+ goto boot_dev_found;
+
+ return;
+
+boot_dev_found:
+ switch (chnl) {
+ case RAID_CHANNEL:
+ {
+ struct leapraid_raid_volume *raid_volume =
+ (struct leapraid_raid_volume *)dev;
+
+ if (raid_volume->starget)
+ return;
+
+ /* TODO eedp */
+
+ if (scsi_add_device(adapter->shost, RAID_CHANNEL,
+ raid_volume->id, 0))
+ leapraid_raid_volume_remove(adapter, raid_volume);
+ break;
+ }
+ default:
+ {
+ struct leapraid_sas_dev *sas_dev =
+ (struct leapraid_sas_dev *)dev;
+ struct leapraid_sas_port *sas_port;
+ unsigned long flags;
+
+ if (sas_dev->starget)
+ return;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ list_move_tail(&sas_dev->list,
+ &adapter->dev_topo.sas_dev_list);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ if (!sas_dev->card_port)
+ return;
+
+ sas_port = leapraid_transport_port_add(adapter, sas_dev->hdl,
+ sas_dev->parent_sas_addr,
+ sas_dev->card_port);
+ if (!sas_port)
+ leapraid_sas_dev_remove(adapter, sas_dev);
+ break;
+ }
+ }
+}
+
+static void leapraid_probe_devices(struct leapraid_adapter *adapter)
+{
+ leapraid_probe_boot_dev(adapter);
+
+ if (adapter->adapter_attr.raid_support) {
+ leapraid_probe_raid(adapter);
+ leapraid_probe_sas(adapter);
+ } else {
+ leapraid_probe_sas(adapter);
+ }
+}
+
+void leapraid_scan_dev_done(struct leapraid_adapter *adapter)
+{
+ if (adapter->scan_dev_desc.wait_scan_dev_done) {
+ adapter->scan_dev_desc.wait_scan_dev_done = false;
+ leapraid_probe_devices(adapter);
+ }
+
+ leapraid_check_scheduled_fault_start(adapter);
+ leapraid_fw_log_start(adapter);
+ adapter->scan_dev_desc.driver_loading = false;
+ leapraid_smart_polling_start(adapter);
+}
+
+static void leapraid_ir_shutdown(struct leapraid_adapter *adapter)
+{
+ struct leapraid_raid_act_req *raid_act_req;
+ struct leapraid_raid_act_rep *raid_act_rep;
+ struct leapraid_driver_cmd *raid_action_cmd;
+
+ if (!adapter || !adapter->adapter_attr.raid_support)
+ return;
+
+ if (list_empty(&adapter->dev_topo.raid_volume_list))
+ return;
+
+ if (leapraid_pci_removed(adapter))
+ return;
+
+ raid_action_cmd = &adapter->driver_cmds.raid_action_cmd;
+
+ mutex_lock(&raid_action_cmd->mutex);
+ raid_action_cmd->status = LEAPRAID_CMD_PENDING;
+
+ raid_act_req = leapraid_get_task_desc(adapter,
+ raid_action_cmd->inter_taskid);
+ memset(raid_act_req, 0, sizeof(struct leapraid_raid_act_req));
+ raid_act_req->func = LEAPRAID_FUNC_RAID_ACTION;
+ raid_act_req->act = LEAPRAID_RAID_ACT_SYSTEM_SHUTDOWN_INITIATED;
+
+ dev_info(&adapter->pdev->dev, "ir shutdown start\n");
+ init_completion(&raid_action_cmd->done);
+ leapraid_fire_task(adapter, raid_action_cmd->inter_taskid);
+ wait_for_completion_timeout(&raid_action_cmd->done,
+ LEAPRAID_RAID_ACTION_CMD_TIMEOUT * HZ);
+
+ if (!(raid_action_cmd->status & LEAPRAID_CMD_DONE)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: timeout waiting for ir shutdown\n", __func__);
+ goto out;
+ }
+
+ if (raid_action_cmd->status & LEAPRAID_CMD_REPLY_VALID) {
+ raid_act_rep = (void *)(&raid_action_cmd->reply);
+ dev_info(&adapter->pdev->dev,
+ "ir shutdown done, adapter status=0x%04x\n",
+ le16_to_cpu(raid_act_rep->adapter_status));
+ }
+
+out:
+ raid_action_cmd->status = LEAPRAID_CMD_NOT_USED;
+ mutex_unlock(&raid_action_cmd->mutex);
+}
+
+static const struct pci_device_id leapraid_pci_table[] = {
+ { PCI_DEVICE(LEAPRAID_VENDOR_ID, LEAPRAID_DEVID_HBA) },
+ { PCI_DEVICE(LEAPRAID_VENDOR_ID, LEAPRAID_DEVID_RAID) },
+ { 0, }
+};
+
+static inline bool leapraid_is_scmd_permitted(struct leapraid_adapter *adapter,
+ struct scsi_cmnd *scmd)
+{
+ u8 opcode;
+
+ if (adapter->access_ctrl.pcie_recovering ||
+ adapter->access_ctrl.adapter_thermal_alert)
+ return false;
+
+ if (adapter->access_ctrl.host_removing) {
+ if (leapraid_pci_removed(adapter))
+ return false;
+
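+ /* while removing, only let cache flush and start/stop commands through */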
+ opcode = scmd->cmnd[0];
+ if (opcode == SYNCHRONIZE_CACHE || opcode == START_STOP)
+ return true;
+ else
+ return false;
+ }
+ return true;
+}
+
+static bool leapraid_should_queuecommand(struct leapraid_adapter *adapter,
+ struct leapraid_sdev_priv *sdev_priv,
+ struct scsi_cmnd *scmd, int *rc)
+{
+ struct leapraid_starget_priv *starget_priv;
+
+ if (!sdev_priv || !sdev_priv->starget_priv)
+ goto no_connect;
+
+ if (!leapraid_is_scmd_permitted(adapter, scmd))
+ goto no_connect;
+
+ starget_priv = sdev_priv->starget_priv;
+ if (starget_priv->hdl == LEAPRAID_INVALID_DEV_HANDLE)
+ goto no_connect;
+
+ if (sdev_priv->block &&
+ scmd->device->host->shost_state == SHOST_RECOVERY &&
+ scmd->cmnd[0] == TEST_UNIT_READY) {
+ scsi_build_sense_buffer(0, scmd->sense_buffer, UNIT_ATTENTION,
+ LEAPRAID_SCSI_ASC_POWER_ON_RESET,
+ LEAPRAID_SCSI_ASCQ_POWER_ON_RESET);
+ scmd->result = (DRIVER_SENSE << LEAPRAID_SCSI_DRIVER_SHIFT) |
+ (DID_OK << LEAPRAID_SCSI_HOST_SHIFT) |
+ SAM_STAT_CHECK_CONDITION;
+ goto done_out;
+ }
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->reset_desc.adapter_link_resetting) {
+ *rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ } else if (starget_priv->deleted || sdev_priv->deleted) {
+ goto no_connect;
+ } else if (starget_priv->tm_busy || sdev_priv->block) {
+ *rc = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto out;
+ }
+
+ return true;
+
+no_connect:
+ scmd->result = DID_NO_CONNECT << LEAPRAID_SCSI_HOST_SHIFT;
+done_out:
+ if (likely(scmd != adapter->driver_cmds.internal_scmd))
+ scmd->scsi_done(scmd);
+out:
+ return false;
+}
+
+static u32 build_scsiio_req_control(struct scsi_cmnd *scmd,
+ struct leapraid_sdev_priv *sdev_priv)
+{
+ u32 control;
+
+ switch (scmd->sc_data_direction) {
+ case DMA_FROM_DEVICE:
+ control = LEAPRAID_SCSIIO_CTRL_READ;
+ break;
+ case DMA_TO_DEVICE:
+ control = LEAPRAID_SCSIIO_CTRL_WRITE;
+ break;
+ default:
+ control = LEAPRAID_SCSIIO_CTRL_NODATATRANSFER;
+ break;
+ }
+
+ control |= LEAPRAID_SCSIIO_CTRL_SIMPLEQ;
+
+ if (sdev_priv->ncq &&
+ (IOPRIO_PRIO_CLASS(req_get_ioprio(scmd->request)) ==
+ IOPRIO_CLASS_RT))
+ control |= LEAPRAID_SCSIIO_CTRL_CMDPRI;
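+ /* a 32-byte CDB adds (32 - 16) / 4 = 4 extra CDB dwords */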
+ if (scmd->cmd_len == 32)
+ control |= 4 << LEAPRAID_SCSIIO_CTRL_ADDCDBLEN_SHIFT;
+
+ return control;
+}
+
+int leapraid_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
+ struct leapraid_adapter *adapter = shost_priv(scmd->device->host);
+ struct leapraid_sdev_priv *sdev_priv = scmd->device->hostdata;
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_scsiio_req *scsiio_req;
+ u32 control;
+ u16 taskid;
+ u16 hdl;
+ int rc = 0;
+
+ if (!leapraid_should_queuecommand(adapter, sdev_priv, scmd, &rc))
+ goto out;
+
+ starget_priv = sdev_priv->starget_priv;
+ hdl = starget_priv->hdl;
+ control = build_scsiio_req_control(scmd, sdev_priv);
+
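+ /* the driver's internal scmd uses a pre-reserved taskid */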
+ if (unlikely(scmd == adapter->driver_cmds.internal_scmd))
+ taskid = adapter->driver_cmds.driver_scsiio_cmd.taskid;
+ else
+ taskid = leapraid_alloc_scsiio_taskid(adapter, scmd);
+ scsiio_req = leapraid_get_task_desc(adapter, taskid);
+
+ if (sdev_priv->starget_priv->flg & LEAPRAID_TGT_FLG_RAID_MEMBER)
+ scsiio_req->func = LEAPRAID_FUNC_RAID_SCSIIO_PASSTHROUGH;
+ else
+ scsiio_req->func = LEAPRAID_FUNC_SCSIIO_REQ;
+
+ scsiio_req->dev_hdl = cpu_to_le16(hdl);
+ scsiio_req->data_len = cpu_to_le32(scsi_bufflen(scmd));
+ scsiio_req->ctrl = cpu_to_le32(control);
+ scsiio_req->io_flg = cpu_to_le16(scmd->cmd_len);
+ scsiio_req->msg_flg = 0;
+ scsiio_req->sense_buffer_len = SCSI_SENSE_BUFFERSIZE;
+ scsiio_req->sense_buffer_low_add =
+ leapraid_get_sense_buffer_dma(adapter, taskid);
+ scsiio_req->sgl_offset0 =
+ offsetof(struct leapraid_scsiio_req, sgl) /
+ LEAPRAID_DWORDS_BYTE_SIZE;
+ int_to_scsilun(sdev_priv->lun, (struct scsi_lun *)scsiio_req->lun);
+ memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
+ if (scsiio_req->data_len) {
+ if (leapraid_build_scmd_ieee_sg(adapter, scmd, taskid)) {
+ leapraid_free_taskid(adapter, taskid);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+ } else {
+ leapraid_build_ieee_nodata_sg(adapter, &scsiio_req->sgl);
+ }
+
+ if (likely(scsiio_req->func == LEAPRAID_FUNC_SCSIIO_REQ)) {
+ leapraid_fire_scsi_io(adapter, taskid,
+ le16_to_cpu(scsiio_req->dev_hdl));
+ } else {
+ leapraid_fire_task(adapter, taskid);
+ }
+ dev_dbg(&adapter->pdev->dev,
+ "LEAPRAID_SCSIIO: Send Descriptor taskid %d, req type 0x%x\n",
+ taskid, scsiio_req->func);
+out:
+ return rc;
+}
+
+static int leapraid_error_handler(struct scsi_cmnd *scmd,
+ const char *str, u8 type)
+{
+ struct leapraid_adapter *adapter = shost_priv(scmd->device->host);
+ struct scsi_target *starget = scmd->device->sdev_target;
+ struct leapraid_starget_priv *starget_priv = starget->hostdata;
+ struct leapraid_io_req_tracker *io_req_tracker = NULL;
+ struct leapraid_sdev_priv *sdev_priv;
+ struct leapraid_sas_dev *sas_dev = NULL;
+ u16 hdl;
+ int rc;
+
+ dev_info(&adapter->pdev->dev,
+ "EH enter: type=%s, scmd=0x%p, req tag=%d\n", str, scmd,
+ scmd->request->tag);
+ scsi_print_command(scmd);
+
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK) {
+ io_req_tracker = leapraid_get_scmd_priv(scmd);
+ dev_info(&adapter->pdev->dev,
+ "EH ABORT: scmd=0x%p, pending=%u ms, tout=%u ms, req tag=%d\n",
+ scmd,
+ jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
+ (scmd->request->timeout / HZ) * 1000,
+ scmd->request->tag);
+ }
+
+ if (leapraid_pci_removed(adapter) ||
+ adapter->access_ctrl.host_removing) {
+ dev_err(&adapter->pdev->dev,
+ "EH %s failed: %s scmd=0x%p\n", str,
+ (adapter->access_ctrl.host_removing ?
+ "shost removing!" : "pci_dev removed!"), scmd);
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK)
+ if (io_req_tracker && io_req_tracker->taskid)
+ leapraid_free_taskid(adapter,
+ io_req_tracker->taskid);
+ scmd->result = DID_NO_CONNECT << LEAPRAID_SCSI_HOST_SHIFT;
+#ifdef FAST_IO_FAIL
+ rc = FAST_IO_FAIL;
+#else
+ rc = FAILED;
+#endif
+ goto out;
+ }
+
+ sdev_priv = scmd->device->hostdata;
+ if (!sdev_priv || !sdev_priv->starget_priv) {
+ dev_warn(&adapter->pdev->dev,
+ "EH %s: sdev or starget gone, scmd=0x%p\n",
+ str, scmd);
+ scmd->result = DID_NO_CONNECT << LEAPRAID_SCSI_HOST_SHIFT;
+ scmd->scsi_done(scmd);
+ rc = SUCCESS;
+ goto out;
+ }
+
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK) {
+ if (!io_req_tracker) {
+ dev_warn(&adapter->pdev->dev,
+ "EH ABORT: no io tracker, scmd 0x%p\n", scmd);
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ rc = SUCCESS;
+ goto out;
+ }
+
+ if (sdev_priv->starget_priv->flg &
+ LEAPRAID_TGT_FLG_RAID_MEMBER ||
+ sdev_priv->starget_priv->flg & LEAPRAID_TGT_FLG_VOLUME) {
+ dev_err(&adapter->pdev->dev,
+ "EH ABORT: skip RAID/VOLUME target, scmd=0x%p\n",
+ scmd);
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ rc = FAILED;
+ goto out;
+ }
+
+ hdl = sdev_priv->starget_priv->hdl;
+ } else {
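+ /* non-abort TMs on a RAID member are addressed to the owning volume */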
+ hdl = 0;
+ if (sdev_priv->starget_priv->flg &
+ LEAPRAID_TGT_FLG_RAID_MEMBER) {
+ sas_dev = leapraid_get_sas_dev_from_tgt(adapter,
+ starget_priv);
+ if (sas_dev)
+ hdl = sas_dev->volume_hdl;
+ } else {
+ hdl = sdev_priv->starget_priv->hdl;
+ }
+
+ if (!hdl) {
+ dev_err(&adapter->pdev->dev,
+ "EH %s failed: target handle is 0, scmd=0x%p\n",
+ str, scmd);
+ scmd->result = DID_RESET << LEAPRAID_SCSI_HOST_SHIFT;
+ rc = FAILED;
+ goto out;
+ }
+ }
+
+ dev_info(&adapter->pdev->dev,
+ "EH issue TM: type=%s, scmd=0x%p, hdl=0x%x\n",
+ str, scmd, hdl);
+
+ rc = leapraid_issue_locked_tm(adapter, hdl, scmd->device->channel,
+ scmd->device->id,
+ (type == LEAPRAID_TM_TASKTYPE_TARGET_RESET ?
+ 0 : scmd->device->lun),
+ type,
+ (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK ?
+ io_req_tracker->taskid : 0),
+ LEAPRAID_TM_MSGFLAGS_LINK_RESET);
+
+out:
+ if (type == LEAPRAID_TM_TASKTYPE_ABORT_TASK) {
+ dev_info(&adapter->pdev->dev,
+ "EH ABORT result: %s, scmd=0x%p\n",
+ ((rc == SUCCESS) ? "success" : "failed"), scmd);
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "EH %s result: %s, scmd=0x%p\n",
+ str, ((rc == SUCCESS) ? "success" : "failed"), scmd);
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+ }
+ return rc;
+}
+
+static int leapraid_eh_abort_handler(struct scsi_cmnd *scmd)
+{
+ return leapraid_error_handler(scmd, "ABORT TASK",
+ LEAPRAID_TM_TASKTYPE_ABORT_TASK);
+}
+
+static int leapraid_eh_device_reset_handler(struct scsi_cmnd *scmd)
+{
+ return leapraid_error_handler(scmd, "UNIT RESET",
+ LEAPRAID_TM_TASKTYPE_LOGICAL_UNIT_RESET);
+}
+
+static int leapraid_eh_target_reset_handler(struct scsi_cmnd *scmd)
+{
+ return leapraid_error_handler(scmd, "TARGET RESET",
+ LEAPRAID_TM_TASKTYPE_TARGET_RESET);
+}
+
+static int leapraid_eh_host_reset_handler(struct scsi_cmnd *scmd)
+{
+ struct leapraid_adapter *adapter = shost_priv(scmd->device->host);
+ int rc;
+
+ dev_info(&adapter->pdev->dev,
+ "EH HOST RESET enter: scmd=%p, req tag=%d\n",
+ scmd,
+ scmd->request->tag);
+ scsi_print_command(scmd);
+
+ if (adapter->scan_dev_desc.driver_loading ||
+ adapter->access_ctrl.host_removing) {
+ dev_err(&adapter->pdev->dev,
+ "EH HOST RESET failed: %s scmd=0x%p\n",
+ (adapter->access_ctrl.host_removing ?
+ "shost removing!" : "driver loading!"), scmd);
+ rc = FAILED;
+ goto out;
+ }
+
+ dev_info(&adapter->pdev->dev, "%s:%d issuing hard reset\n",
+ __func__, __LINE__);
+ if (leapraid_hard_reset_handler(adapter, FULL_RESET) < 0)
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+
+out:
+ dev_info(&adapter->pdev->dev, "EH HOST RESET result: %s, scmd=0x%p\n",
+ ((rc == SUCCESS) ? "success" : "failed"), scmd);
+ return rc;
+}
+
+static int leapraid_slave_alloc(struct scsi_device *sdev)
+{
+ struct leapraid_raid_volume *raid_volume;
+ struct leapraid_starget_priv *stgt_priv;
+ struct leapraid_sdev_priv *sdev_priv;
+ struct leapraid_adapter *adapter;
+ struct leapraid_sas_dev *sas_dev;
+ struct scsi_target *tgt;
+ struct Scsi_Host *shost;
+ unsigned long flags;
+
+ sdev_priv = kzalloc(sizeof(*sdev_priv), GFP_KERNEL);
+ if (!sdev_priv)
+ return -ENOMEM;
+
+ sdev_priv->lun = sdev->lun;
+ sdev_priv->flg = LEAPRAID_DEVICE_FLG_INIT;
+ tgt = scsi_target(sdev);
+ stgt_priv = tgt->hostdata;
+ stgt_priv->num_luns++;
+ sdev_priv->starget_priv = stgt_priv;
+ sdev->hostdata = sdev_priv;
+ if ((stgt_priv->flg & LEAPRAID_TGT_FLG_RAID_MEMBER))
+ sdev->no_uld_attach = LEAPRAID_NO_ULD_ATTACH;
+
+ shost = dev_to_shost(&tgt->dev);
+ adapter = shost_priv(shost);
+ if (tgt->channel == RAID_CHANNEL) {
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_id(adapter,
+ tgt->id,
+ tgt->channel);
+ if (raid_volume)
+ raid_volume->sdev = sdev;
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock,
+ flags);
+ }
+
+ if (!(stgt_priv->flg & LEAPRAID_TGT_FLG_VOLUME)) {
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(adapter,
+ stgt_priv->sas_address,
+ stgt_priv->card_port);
+ if (sas_dev && !sas_dev->starget) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: assign starget to sas_dev\n", __func__);
+ sas_dev->starget = tgt;
+ }
+
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ }
+ return 0;
+}
+
+static int leapraid_slave_cfg_volume(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ struct leapraid_raid_volume *raid_volume;
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_sdev_priv *sdev_priv;
+ unsigned long flags;
+ int qd;
+ u16 hdl;
+
+ sdev_priv = sdev->hostdata;
+ starget_priv = sdev_priv->starget_priv;
+ hdl = starget_priv->hdl;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_hdl(adapter, hdl);
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+ if (!raid_volume) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: raid_volume not found, hdl=0x%x\n",
+ __func__, hdl);
+ return 1;
+ }
+
+ if (leapraid_get_volume_cap(adapter, raid_volume)) {
+ sdev_printk(KERN_ERR, sdev,
+ "%s: failed to get volume cap, hdl=0x%x\n",
+ __func__, hdl);
+ return 1;
+ }
+
+ qd = (raid_volume->dev_info & LEAPRAID_DEVTYP_SSP_TGT) ?
+ LEAPRAID_SAS_QUEUE_DEPTH : LEAPRAID_SATA_QUEUE_DEPTH;
+ if (raid_volume->vol_type != LEAPRAID_VOL_TYPE_RAID0)
+ qd = LEAPRAID_RAID_QUEUE_DEPTH;
+
+ sdev_printk(KERN_INFO, sdev,
+ "raid volume: hdl=0x%04x, wwid=0x%016llx\n",
+ raid_volume->hdl, (unsigned long long)raid_volume->wwid);
+
+ if (shost->max_sectors > LEAPRAID_MAX_SECTORS)
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ LEAPRAID_MAX_SECTORS);
+
+ leapraid_adjust_sdev_queue_depth(sdev, qd);
+ return 0;
+}
+
+static int leapraid_slave_configure_extra(struct scsi_device *sdev,
+ struct leapraid_sas_dev **psas_dev,
+ u16 vol_hdl, u64 volume_wwid,
+ bool *is_target_ssp, int *qd)
+{
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_sdev_priv *sdev_priv;
+ struct Scsi_Host *shost = sdev->host;
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ unsigned long flags;
+
+ sdev_priv = sdev->hostdata;
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ *is_target_ssp = false;
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr(adapter,
+ sdev_priv->starget_priv->sas_address,
+ sdev_priv->starget_priv->card_port);
+ if (!sas_dev) {
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: sas_dev not found, sas=0x%llx\n",
+ __func__, sdev_priv->starget_priv->sas_address);
+ return 1;
+ }
+
+ *psas_dev = sas_dev;
+ sas_dev->volume_hdl = vol_hdl;
+ sas_dev->volume_wwid = volume_wwid;
+ if (sas_dev->dev_info & LEAPRAID_DEVTYP_SSP_TGT) {
+ *qd = (sas_dev->port_type > 1) ?
+ adapter->adapter_attr.wideport_max_queue_depth :
+ adapter->adapter_attr.narrowport_max_queue_depth;
+ *is_target_ssp = true;
+ if (sas_dev->dev_info & LEAPRAID_DEVTYP_SEP)
+ sdev_priv->sep = true;
+ } else {
+ *qd = adapter->adapter_attr.sata_max_queue_depth;
+ }
+
+ sdev_printk(KERN_INFO, sdev,
+ "sdev: dev name=0x%016llx, sas addr=0x%016llx\n",
+ (unsigned long long)sas_dev->dev_name,
+ (unsigned long long)sas_dev->sas_addr);
+ leapraid_sdev_put(sas_dev);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ return 0;
+}
+
+static int leapraid_slave_configure(struct scsi_device *sdev)
+{
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_sdev_priv *sdev_priv;
+ struct Scsi_Host *shost = sdev->host;
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_adapter *adapter;
+ u16 hdl, vol_hdl = 0;
+ bool is_target_ssp = false;
+ u64 volume_wwid = 0;
+ int qd = 1;
+
+ adapter = shost_priv(shost);
+ sdev_priv = sdev->hostdata;
+ sdev_priv->flg &= ~LEAPRAID_DEVICE_FLG_INIT;
+ starget_priv = sdev_priv->starget_priv;
+ hdl = starget_priv->hdl;
+ if (starget_priv->flg & LEAPRAID_TGT_FLG_VOLUME)
+ return leapraid_slave_cfg_volume(sdev);
+
+ if (starget_priv->flg & LEAPRAID_TGT_FLG_RAID_MEMBER) {
+ if (leapraid_cfg_get_volume_hdl(adapter, hdl, &vol_hdl)) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: get volume hdl failed, hdl=0x%x\n",
+ __func__, hdl);
+ return 1;
+ }
+
+ if (vol_hdl && leapraid_cfg_get_volume_wwid(adapter, vol_hdl,
+ &volume_wwid)) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: get wwid failed, volume_hdl=0x%x\n",
+ __func__, vol_hdl);
+ return 1;
+ }
+ }
+
+ if (leapraid_slave_configure_extra(sdev, &sas_dev, vol_hdl,
+ volume_wwid, &is_target_ssp, &qd)) {
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: slave_configure_extra failed\n", __func__);
+ return 1;
+ }
+
+ leapraid_adjust_sdev_queue_depth(sdev, qd);
+ if (is_target_ssp)
+ sas_read_port_mode_page(sdev);
+
+ return 0;
+}
+
+static void leapraid_slave_destroy(struct scsi_device *sdev)
+{
+ struct leapraid_adapter *adapter;
+ struct Scsi_Host *shost;
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_starget_priv *starget_priv;
+ struct scsi_target *stgt;
+ unsigned long flags;
+
+ if (!sdev->hostdata)
+ return;
+
+ stgt = scsi_target(sdev);
+ starget_priv = stgt->hostdata;
+ starget_priv->num_luns--;
+ shost = dev_to_shost(&stgt->dev);
+ adapter = shost_priv(shost);
+ if (!(starget_priv->flg & LEAPRAID_TGT_FLG_VOLUME)) {
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_from_tgt(adapter,
+ starget_priv);
+ if (sas_dev && !starget_priv->num_luns)
+ sas_dev->starget = NULL;
+ if (sas_dev)
+ leapraid_sdev_put(sas_dev);
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+ }
+
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+static int leapraid_target_alloc_raid(struct scsi_target *tgt)
+{
+ struct leapraid_starget_priv *starget_priv;
+ struct leapraid_raid_volume *raid_volume;
+ struct Scsi_Host *shost = dev_to_shost(&tgt->dev);
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ unsigned long flags;
+
+ starget_priv = (struct leapraid_starget_priv *)tgt->hostdata;
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_id(adapter, tgt->id,
+ tgt->channel);
+ if (raid_volume) {
+ starget_priv->hdl = raid_volume->hdl;
+ starget_priv->sas_address = raid_volume->wwid;
+ starget_priv->flg |= LEAPRAID_TGT_FLG_VOLUME;
+ raid_volume->starget = tgt;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+ return 0;
+}
+
+static int leapraid_target_alloc_sas(struct scsi_target *tgt)
+{
+ struct sas_rphy *rphy;
+ struct Scsi_Host *shost;
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_adapter *adapter;
+ struct leapraid_starget_priv *starget_priv;
+ unsigned long flags;
+
+ shost = dev_to_shost(&tgt->dev);
+ adapter = shost_priv(shost);
+ starget_priv = (struct leapraid_starget_priv *)tgt->hostdata;
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ rphy = dev_to_rphy(tgt->dev.parent);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr_and_rphy(adapter,
+ rphy->identify.sas_address,
+ rphy);
+ if (sas_dev) {
+ starget_priv->sas_dev = sas_dev;
+ starget_priv->card_port = sas_dev->card_port;
+ starget_priv->sas_address = sas_dev->sas_addr;
+ starget_priv->hdl = sas_dev->hdl;
+ sas_dev->channel = tgt->channel;
+ sas_dev->id = tgt->id;
+ sas_dev->starget = tgt;
+ if (test_bit(sas_dev->hdl,
+ (unsigned long *)adapter->dev_topo.pd_hdls))
+ starget_priv->flg |= LEAPRAID_TGT_FLG_RAID_MEMBER;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ return 0;
+}
+
+static int leapraid_target_alloc(struct scsi_target *tgt)
+{
+ struct leapraid_starget_priv *starget_priv;
+
+ starget_priv = kzalloc(sizeof(*starget_priv), GFP_KERNEL);
+ if (!starget_priv)
+ return -ENOMEM;
+
+ tgt->hostdata = starget_priv;
+ starget_priv->starget = tgt;
+ starget_priv->hdl = LEAPRAID_INVALID_DEV_HANDLE;
+ if (tgt->channel == RAID_CHANNEL)
+ return leapraid_target_alloc_raid(tgt);
+
+ return leapraid_target_alloc_sas(tgt);
+}
+
+static void leapraid_target_destroy_raid(struct scsi_target *tgt)
+{
+ struct leapraid_raid_volume *raid_volume;
+ struct Scsi_Host *shost = dev_to_shost(&tgt->dev);
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->dev_topo.raid_volume_lock, flags);
+ raid_volume = leapraid_raid_volume_find_by_id(adapter, tgt->id,
+ tgt->channel);
+ if (raid_volume) {
+ raid_volume->starget = NULL;
+ raid_volume->sdev = NULL;
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.raid_volume_lock, flags);
+}
+
+static void leapraid_target_destroy_sas(struct scsi_target *tgt)
+{
+ struct leapraid_adapter *adapter;
+ struct leapraid_sas_dev *sas_dev;
+ struct leapraid_starget_priv *starget_priv;
+ struct Scsi_Host *shost;
+ unsigned long flags;
+
+ shost = dev_to_shost(&tgt->dev);
+ adapter = shost_priv(shost);
+ starget_priv = tgt->hostdata;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_from_tgt(adapter,
+ starget_priv);
+ if (sas_dev &&
+ sas_dev->starget == tgt &&
+ sas_dev->id == tgt->id &&
+ sas_dev->channel == tgt->channel)
+ sas_dev->starget = NULL;
+
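+ /*
+ * Drop both the lookup reference taken above and the long-lived
+ * reference stored in starget_priv->sas_dev at target_alloc time.
+ */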
+ if (sas_dev) {
+ starget_priv->sas_dev = NULL;
+ leapraid_sdev_put(sas_dev);
+ leapraid_sdev_put(sas_dev);
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+}
+
+static void leapraid_target_destroy(struct scsi_target *tgt)
+{
+ struct leapraid_starget_priv *starget_priv;
+
+ starget_priv = tgt->hostdata;
+ if (!starget_priv)
+ return;
+
+ if (tgt->channel == RAID_CHANNEL) {
+ leapraid_target_destroy_raid(tgt);
+ goto out;
+ }
+
+ leapraid_target_destroy_sas(tgt);
+
+out:
+ kfree(starget_priv);
+ tgt->hostdata = NULL;
+}
+
+static bool leapraid_scan_check_status(struct leapraid_adapter *adapter,
+ bool *need_hard_reset)
+{
+ u32 adapter_state;
+
+ if (adapter->scan_dev_desc.scan_start) {
+ adapter_state = leapraid_get_adapter_state(adapter);
+ if (adapter_state == LEAPRAID_DB_FAULT) {
+ *need_hard_reset = true;
+ return true;
+ }
+ return false;
+ }
+
+ if (adapter->driver_cmds.scan_dev_cmd.status & LEAPRAID_CMD_RESET) {
+ dev_err(&adapter->pdev->dev,
+ "device scan: aborted due to reset\n");
+ adapter->driver_cmds.scan_dev_cmd.status =
+ LEAPRAID_CMD_NOT_USED;
+ adapter->scan_dev_desc.driver_loading = false;
+ return true;
+ }
+
+ if (adapter->scan_dev_desc.scan_start_failed) {
+ dev_err(&adapter->pdev->dev,
+ "device scan: failed with adapter_status=0x%08x\n",
+ adapter->scan_dev_desc.scan_start_failed);
+ adapter->scan_dev_desc.driver_loading = false;
+ adapter->scan_dev_desc.wait_scan_dev_done = false;
+ adapter->access_ctrl.host_removing = true;
+ return true;
+ }
+
+ dev_info(&adapter->pdev->dev, "device scan: SUCCESS\n");
+ adapter->driver_cmds.scan_dev_cmd.status = LEAPRAID_CMD_NOT_USED;
+ leapraid_scan_dev_done(adapter);
+ return true;
+}
+
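+/* polled by the SCSI midlayer during async scan until it returns nonzero */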
+static int leapraid_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ bool need_hard_reset = false;
+
+ if (time >= (LEAPRAID_SCAN_DEV_CMD_TIMEOUT * HZ)) {
+ adapter->driver_cmds.scan_dev_cmd.status =
+ LEAPRAID_CMD_NOT_USED;
+ dev_err(&adapter->pdev->dev,
+ "device scan: failed with timeout 300s\n");
+ adapter->scan_dev_desc.driver_loading = false;
+ return 1;
+ }
+
+ if (!leapraid_scan_check_status(adapter, &need_hard_reset))
+ return 0;
+
+ if (need_hard_reset) {
+ adapter->driver_cmds.scan_dev_cmd.status =
+ LEAPRAID_CMD_NOT_USED;
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ if (leapraid_hard_reset_handler(adapter, PART_RESET))
+ adapter->scan_dev_desc.driver_loading = false;
+ }
+
+ return 1;
+}
+
+static void leapraid_scan_start(struct Scsi_Host *shost)
+{
+ struct leapraid_adapter *adapter = shost_priv(shost);
+
+ adapter->scan_dev_desc.scan_start = true;
+ leapraid_scan_dev(adapter, true);
+}
+
+static int leapraid_calc_max_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ struct Scsi_Host *shost;
+ int max_depth;
+
+ shost = sdev->host;
+ max_depth = shost->can_queue;
+
+ if (!sdev->tagged_supported)
+ max_depth = 1;
+
+ if (qdepth > max_depth)
+ qdepth = max_depth;
+
+ return qdepth;
+}
+
+static int leapraid_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ qdepth = leapraid_calc_max_queue_depth(sdev, qdepth);
+ scsi_change_queue_depth(sdev, qdepth);
+ return sdev->queue_depth;
+}
+
+void leapraid_adjust_sdev_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ leapraid_change_queue_depth(sdev, qdepth);
+}
+
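+/*
+ * Legacy BIOS geometry: if no geometry can be derived from the partition
+ * table, synthesize heads/sectors based on the disk capacity.
+ */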
+static int leapraid_bios_param(struct scsi_device *sdev,
+ struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ int heads = 0;
+ int sectors = 0;
+ sector_t cylinders;
+
+ if (scsi_partsize(bdev, capacity, geom))
+ return 0;
+
+ if ((ulong)capacity >= LEAPRAID_LARGE_DISK_THRESHOLD) {
+ heads = LEAPRAID_LARGE_DISK_HEADS;
+ sectors = LEAPRAID_LARGE_DISK_SECTORS;
+ } else {
+ heads = LEAPRAID_SMALL_DISK_HEADS;
+ sectors = LEAPRAID_SMALL_DISK_SECTORS;
+ }
+
+ cylinders = capacity;
+ sector_div(cylinders, heads * sectors);
+
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+ return 0;
+}
+
+static ssize_t fw_queue_depth_show(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct leapraid_adapter *adapter = shost_priv(shost);
+
+ return scnprintf(buf, PAGE_SIZE, "%02d\n",
+ adapter->adapter_attr.features.req_slot);
+}
+
+static ssize_t host_sas_address_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct leapraid_adapter *adapter = shost_priv(shost);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%016llx\n",
+ (unsigned long long)adapter->dev_topo.card.sas_address);
+}
+
+static DEVICE_ATTR_RO(fw_queue_depth);
+static DEVICE_ATTR_RO(host_sas_address);
+
+static struct device_attribute *leapraid_shost_attrs[] = {
+ &dev_attr_fw_queue_depth,
+ &dev_attr_host_sas_address,
+ NULL,
+};
+
+static ssize_t sas_address_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct leapraid_sdev_priv *sas_device_priv_data = sdev->hostdata;
+
+ return scnprintf(buf, PAGE_SIZE, "0x%016llx\n",
+ (unsigned long long)sas_device_priv_data->starget_priv->sas_address);
+}
+
+static ssize_t sas_device_handle_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct leapraid_sdev_priv *sas_device_priv_data = sdev->hostdata;
+
+ return scnprintf(buf, PAGE_SIZE, "0x%04x\n",
+ sas_device_priv_data->starget_priv->hdl);
+}
+
+static ssize_t sas_ncq_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct leapraid_sdev_priv *sas_device_priv_data = sdev->hostdata;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sas_device_priv_data->ncq);
+}
+
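+/*
+ * Apply an NCQ on/off request only after confirming NCQ support via the
+ * ATA Information VPD page (0x89).
+ */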
+static ssize_t sas_ncq_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct leapraid_sdev_priv *sas_device_priv_data = sdev->hostdata;
+ unsigned char *vpd_pg89;
+ int ncq_op = 0;
+ bool ncq_supported = false;
+
+ if (kstrtoint(buf, 0, &ncq_op))
+ goto out;
+
+	vpd_pg89 = kmalloc(LEAPRAID_VPD_PG89_MAX_LEN, GFP_KERNEL);
+	if (!vpd_pg89)
+		return -ENOMEM;
+
+ if (!scsi_device_supports_vpd(sdev) ||
+ scsi_get_vpd_page(sdev, LEAPRAID_VPD_PAGE_ATA_INFO,
+ vpd_pg89, LEAPRAID_VPD_PG89_MAX_LEN)) {
+ kfree(vpd_pg89);
+ goto out;
+ }
+
+ ncq_supported = (vpd_pg89[LEAPRAID_VPD_PG89_NCQ_BYTE_IDX] >>
+ LEAPRAID_VPD_PG89_NCQ_BIT_SHIFT) &
+ LEAPRAID_VPD_PG89_NCQ_BIT_MASK;
+ kfree(vpd_pg89);
+ if (ncq_supported)
+ sas_device_priv_data->ncq = ncq_op;
+	return count;
+out:
+ return -EINVAL;
+}
+
+static DEVICE_ATTR_RO(sas_address);
+static DEVICE_ATTR_RO(sas_device_handle);
+
+static DEVICE_ATTR_RW(sas_ncq);
+
+static struct device_attribute *leapraid_sdev_attrs[] = {
+ &dev_attr_sas_address,
+ &dev_attr_sas_device_handle,
+ &dev_attr_sas_ncq,
+ NULL,
+};
+
+static struct scsi_host_template leapraid_driver_template = {
+ .module = THIS_MODULE,
+ .name = "LEAPIO RAID Host",
+ .proc_name = LEAPRAID_DRIVER_NAME,
+ .queuecommand = leapraid_queuecommand,
+ .eh_abort_handler = leapraid_eh_abort_handler,
+ .eh_device_reset_handler = leapraid_eh_device_reset_handler,
+ .eh_target_reset_handler = leapraid_eh_target_reset_handler,
+ .eh_host_reset_handler = leapraid_eh_host_reset_handler,
+ .slave_alloc = leapraid_slave_alloc,
+ .slave_destroy = leapraid_slave_destroy,
+ .slave_configure = leapraid_slave_configure,
+ .target_alloc = leapraid_target_alloc,
+ .target_destroy = leapraid_target_destroy,
+ .scan_finished = leapraid_scan_finished,
+ .scan_start = leapraid_scan_start,
+ .change_queue_depth = leapraid_change_queue_depth,
+ .bios_param = leapraid_bios_param,
+ .can_queue = LEAPRAID_CAN_QUEUE_MIN,
+ .this_id = LEAPRAID_THIS_ID_NONE,
+ .sg_tablesize = LEAPRAID_SG_DEPTH,
+ .max_sectors = LEAPRAID_DEF_MAX_SECTORS,
+ .max_segment_size = LEAPRAID_MAX_SEGMENT_SIZE,
+ .cmd_per_lun = LEAPRAID_CMD_PER_LUN,
+ .shost_attrs = leapraid_shost_attrs,
+ .sdev_attrs = leapraid_sdev_attrs,
+ .track_queue_depth = 1,
+};
+
+static void leapraid_lock_init(struct leapraid_adapter *adapter)
+{
+ mutex_init(&adapter->reset_desc.adapter_reset_mutex);
+ mutex_init(&adapter->reset_desc.host_diag_mutex);
+ mutex_init(&adapter->access_ctrl.pci_access_lock);
+
+ spin_lock_init(&adapter->reset_desc.adapter_reset_lock);
+ spin_lock_init(&adapter->dynamic_task_desc.task_lock);
+ spin_lock_init(&adapter->dev_topo.sas_dev_lock);
+ spin_lock_init(&adapter->dev_topo.topo_node_lock);
+ spin_lock_init(&adapter->fw_evt_s.fw_evt_lock);
+ spin_lock_init(&adapter->dev_topo.raid_volume_lock);
+}
+
+static void leapraid_list_init(struct leapraid_adapter *adapter)
+{
+ INIT_LIST_HEAD(&adapter->dev_topo.sas_dev_list);
+ INIT_LIST_HEAD(&adapter->dev_topo.card_port_list);
+ INIT_LIST_HEAD(&adapter->dev_topo.sas_dev_init_list);
+ INIT_LIST_HEAD(&adapter->dev_topo.exp_list);
+ INIT_LIST_HEAD(&adapter->dev_topo.enc_list);
+ INIT_LIST_HEAD(&adapter->fw_evt_s.fw_evt_list);
+ INIT_LIST_HEAD(&adapter->dev_topo.raid_volume_list);
+ INIT_LIST_HEAD(&adapter->dev_topo.card.sas_port_list);
+}
+
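+/*
+ * PCI probe: allocate the Scsi_Host, register the adapter on the global
+ * list, create the ordered firmware-event workqueue, bring up the
+ * controller and kick off the initial device scan.
+ */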
+static int leapraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct leapraid_adapter *adapter = NULL;
+ struct Scsi_Host *shost = NULL;
+ int rc;
+
+ shost = scsi_host_alloc(&leapraid_driver_template,
+ sizeof(struct leapraid_adapter));
+ if (!shost)
+ return -ENODEV;
+
+ adapter = shost_priv(shost);
+ memset(adapter, 0, sizeof(struct leapraid_adapter));
+ adapter->adapter_attr.id = leapraid_ids++;
+
+ adapter->adapter_attr.enable_mp = enable_mp;
+
+ INIT_LIST_HEAD(&adapter->list);
+ spin_lock(&leapraid_adapter_lock);
+ list_add_tail(&adapter->list, &leapraid_adapter_list);
+ spin_unlock(&leapraid_adapter_lock);
+
+ adapter->shost = shost;
+ adapter->pdev = pdev;
+ adapter->fw_log_desc.open_pcie_trace = open_pcie_trace;
+ leapraid_lock_init(adapter);
+ leapraid_list_init(adapter);
+ sprintf(adapter->adapter_attr.name, "%s%d",
+ LEAPRAID_DRIVER_NAME, adapter->adapter_attr.id);
+
+ shost->max_cmd_len = LEAPRAID_MAX_CDB_LEN;
+ shost->max_lun = LEAPRAID_MAX_LUNS;
+ shost->transportt = leapraid_transport_template;
+ shost->unique_id = adapter->adapter_attr.id;
+
+ snprintf(adapter->fw_evt_s.fw_evt_name,
+ sizeof(adapter->fw_evt_s.fw_evt_name),
+ "fw_event_%s%d", LEAPRAID_DRIVER_NAME,
+ adapter->adapter_attr.id);
+ adapter->fw_evt_s.fw_evt_thread =
+ alloc_ordered_workqueue(adapter->fw_evt_s.fw_evt_name, 0);
+ if (!adapter->fw_evt_s.fw_evt_thread) {
+ rc = -ENODEV;
+ goto evt_wq_fail;
+ }
+
+ adapter->scan_dev_desc.driver_loading = true;
+ if ((leapraid_ctrl_init(adapter))) {
+ rc = -ENODEV;
+ goto ctrl_init_fail;
+ }
+
+	rc = scsi_add_host(shost, &pdev->dev);
+	if (rc)
+		goto scsi_add_shost_fail;
+
+ scsi_scan_host(shost);
+ return 0;
+
+scsi_add_shost_fail:
+ leapraid_remove_ctrl(adapter);
+ctrl_init_fail:
+ destroy_workqueue(adapter->fw_evt_s.fw_evt_thread);
+evt_wq_fail:
+ spin_lock(&leapraid_adapter_lock);
+ list_del(&adapter->list);
+ spin_unlock(&leapraid_adapter_lock);
+ scsi_host_put(shost);
+ return rc;
+}
+
+static void leapraid_cleanup_lists(struct leapraid_adapter *adapter)
+{
+ struct leapraid_raid_volume *raid_volume, *next_raid_volume;
+ struct leapraid_starget_priv *starget_priv_data;
+ struct leapraid_sas_port *leapraid_port, *next_port;
+ struct leapraid_card_port *port, *port_next;
+ struct leapraid_vphy *vphy, *vphy_next;
+
+ list_for_each_entry_safe(raid_volume, next_raid_volume,
+ &adapter->dev_topo.raid_volume_list, list) {
+ if (raid_volume->starget) {
+ starget_priv_data = raid_volume->starget->hostdata;
+ starget_priv_data->deleted = true;
+ scsi_remove_target(&raid_volume->starget->dev);
+ }
+ pr_info("removing hdl=0x%04x, wwid=0x%016llx\n",
+ raid_volume->hdl,
+ (unsigned long long)raid_volume->wwid);
+ leapraid_raid_volume_remove(adapter, raid_volume);
+ }
+
+ list_for_each_entry_safe(leapraid_port, next_port,
+ &adapter->dev_topo.card.sas_port_list,
+ port_list) {
+ if (leapraid_port->remote_identify.device_type ==
+ SAS_END_DEVICE)
+ leapraid_sas_dev_remove_by_sas_address(adapter,
+ leapraid_port->remote_identify.sas_address,
+ leapraid_port->card_port);
+ else if (leapraid_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ leapraid_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ leapraid_exp_rm(adapter,
+ leapraid_port->remote_identify.sas_address,
+ leapraid_port->card_port);
+ }
+
+ list_for_each_entry_safe(port, port_next,
+ &adapter->dev_topo.card_port_list, list) {
+ if (port->vphys_mask)
+ list_for_each_entry_safe(vphy, vphy_next,
+ &port->vphys_list, list) {
+ list_del(&vphy->list);
+ kfree(vphy);
+ }
+ list_del(&port->list);
+ kfree(port);
+ }
+
+ if (adapter->dev_topo.card.phys_num) {
+ kfree(adapter->dev_topo.card.card_phy);
+ adapter->dev_topo.card.card_phy = NULL;
+ adapter->dev_topo.card.phys_num = 0;
+ }
+}
+
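+/*
+ * PCI remove: wait for scan and recovery to settle, drain outstanding
+ * commands and firmware events, then tear down transport objects and
+ * controller resources.
+ */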
+static void leapraid_remove(struct pci_dev *pdev)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev, "unable to remove!\n");
+ return;
+ }
+
+ while (adapter->scan_dev_desc.driver_loading)
+ ssleep(1);
+
+ while (adapter->access_ctrl.shost_recovering)
+ ssleep(1);
+
+ adapter->access_ctrl.host_removing = true;
+
+ leapraid_wait_cmds_done(adapter);
+
+ leapraid_smart_polling_stop(adapter);
+ leapraid_free_internal_scsi_cmd(adapter);
+
+ if (leapraid_pci_removed(adapter)) {
+ leapraid_mq_polling_pause(adapter);
+ leapraid_clean_active_scsi_cmds(adapter);
+ }
+ leapraid_clean_active_fw_evt(adapter);
+
+ spin_lock_irqsave(&adapter->fw_evt_s.fw_evt_lock, flags);
+ wq = adapter->fw_evt_s.fw_evt_thread;
+ adapter->fw_evt_s.fw_evt_thread = NULL;
+ spin_unlock_irqrestore(&adapter->fw_evt_s.fw_evt_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ leapraid_ir_shutdown(adapter);
+ sas_remove_host(shost);
+ leapraid_cleanup_lists(adapter);
+ leapraid_remove_ctrl(adapter);
+ spin_lock(&leapraid_adapter_lock);
+ list_del(&adapter->list);
+ spin_unlock(&leapraid_adapter_lock);
+ scsi_host_put(shost);
+}
+
+static void leapraid_shutdown(struct pci_dev *pdev)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev, "unable to shutdown!\n");
+ return;
+ }
+
+ adapter->access_ctrl.host_removing = true;
+ leapraid_wait_cmds_done(adapter);
+ leapraid_clean_active_fw_evt(adapter);
+
+ spin_lock_irqsave(&adapter->fw_evt_s.fw_evt_lock, flags);
+ wq = adapter->fw_evt_s.fw_evt_thread;
+ adapter->fw_evt_s.fw_evt_thread = NULL;
+ spin_unlock_irqrestore(&adapter->fw_evt_s.fw_evt_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ leapraid_ir_shutdown(adapter);
+ leapraid_disable_controller(adapter);
+}
+
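+/*
+ * AER error_detected(): quiesce the adapter and request a slot reset for
+ * a frozen channel; disconnect on permanent failure.
+ */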
+static pci_ers_result_t leapraid_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+
+ if (!shost || !adapter) {
+		dev_err(&pdev->dev,
+			"pci error detected: invalid host or adapter\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pr_err("%s: pci error detected, state=%d\n",
+ adapter->adapter_attr.name, state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ adapter->access_ctrl.pcie_recovering = true;
+ scsi_block_requests(adapter->shost);
+ leapraid_smart_polling_stop(adapter);
+ leapraid_check_scheduled_fault_stop(adapter);
+ leapraid_fw_log_stop(adapter);
+ leapraid_disable_controller(adapter);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ adapter->access_ctrl.pcie_recovering = true;
+ leapraid_smart_polling_stop(adapter);
+ leapraid_check_scheduled_fault_stop(adapter);
+ leapraid_fw_log_stop(adapter);
+ leapraid_mq_polling_pause(adapter);
+ leapraid_clean_active_scsi_cmds(adapter);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t leapraid_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+
+ if (!shost || !adapter) {
+		dev_err(&pdev->dev,
+			"pci mmio enabled: invalid host or adapter\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ dev_info(&pdev->dev, "%s: pci error mmio enabled\n",
+ adapter->adapter_attr.name);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t leapraid_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+ int rc;
+
+ if (!shost || !adapter) {
+		dev_err(&pdev->dev,
+			"pci slot reset: invalid host or adapter\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ dev_err(&pdev->dev, "%s pci error slot reset\n",
+ adapter->adapter_attr.name);
+
+ adapter->access_ctrl.pcie_recovering = false;
+ adapter->pdev = pdev;
+ pci_restore_state(pdev);
+ if (leapraid_set_pcie_and_notification(adapter))
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ dev_info(&pdev->dev, "%s: hard reset triggered by pci slot reset\n",
+ adapter->adapter_attr.name);
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ rc = leapraid_hard_reset_handler(adapter, FULL_RESET);
+ dev_info(&pdev->dev, "%s hard reset: %s\n",
+ adapter->adapter_attr.name, (rc == 0) ? "success" : "failed");
+
+ return (rc == 0) ? PCI_ERS_RESULT_RECOVERED :
+ PCI_ERS_RESULT_DISCONNECT;
+}
+
+static void leapraid_pci_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev, "failed to resume\n");
+ return;
+ }
+
+	dev_info(&pdev->dev, "pci error resume\n");
+ pci_aer_clear_nonfatal_status(pdev);
+ leapraid_check_scheduled_fault_start(adapter);
+ leapraid_fw_log_start(adapter);
+ scsi_unblock_requests(adapter->shost);
+ leapraid_smart_polling_start(adapter);
+}
+
+MODULE_DEVICE_TABLE(pci, leapraid_pci_table);
+static struct pci_error_handlers leapraid_err_handler = {
+ .error_detected = leapraid_pci_error_detected,
+ .mmio_enabled = leapraid_pci_mmio_enabled,
+ .slot_reset = leapraid_pci_slot_reset,
+ .resume = leapraid_pci_resume,
+};
+
+#ifdef CONFIG_PM
+static int leapraid_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+ pci_power_t device_state;
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev,
+ "suspend failed, invalid host or adapter\n");
+ return -ENXIO;
+ }
+
+ leapraid_smart_polling_stop(adapter);
+ leapraid_check_scheduled_fault_stop(adapter);
+ leapraid_fw_log_stop(adapter);
+ flush_scheduled_work();
+ scsi_block_requests(shost);
+ device_state = pci_choose_state(pdev, state);
+ leapraid_ir_shutdown(adapter);
+
+ dev_info(&pdev->dev, "entering PCI power state D%d, (slot=%s)\n",
+ device_state, pci_name(pdev));
+
+ pci_save_state(pdev);
+ leapraid_disable_controller(adapter);
+ pci_set_power_state(pdev, device_state);
+ return 0;
+}
+
+static int leapraid_resume(struct pci_dev *pdev)
+{
+ struct leapraid_adapter *adapter = pdev_to_adapter(pdev);
+ struct Scsi_Host *shost = pdev_to_shost(pdev);
+ pci_power_t device_state = pdev->current_state;
+ int rc;
+
+ if (!shost || !adapter) {
+ dev_err(&pdev->dev,
+ "resume failed, invalid host or adapter\n");
+ return -ENXIO;
+ }
+
+ dev_info(&pdev->dev,
+ "resuming device %s, previous state D%d\n",
+ pci_name(pdev), device_state);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ adapter->pdev = pdev;
+ rc = leapraid_set_pcie_and_notification(adapter);
+ if (rc)
+ return rc;
+
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, PART_RESET);
+ scsi_unblock_requests(shost);
+ leapraid_check_scheduled_fault_start(adapter);
+ leapraid_fw_log_start(adapter);
+ leapraid_smart_polling_start(adapter);
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct pci_driver leapraid_driver = {
+ .name = LEAPRAID_DRIVER_NAME,
+ .id_table = leapraid_pci_table,
+ .probe = leapraid_probe,
+ .remove = leapraid_remove,
+ .shutdown = leapraid_shutdown,
+ .err_handler = &leapraid_err_handler,
+#ifdef CONFIG_PM
+ .suspend = leapraid_suspend,
+ .resume = leapraid_resume,
+#endif /* CONFIG_PM */
+};
+
+static int __init leapraid_init(void)
+{
+ int error;
+
+ pr_info("%s version %s loaded\n", LEAPRAID_DRIVER_NAME,
+ LEAPRAID_DRIVER_VERSION);
+
+ leapraid_transport_template =
+ sas_attach_transport(&leapraid_transport_functions);
+ if (!leapraid_transport_template)
+ return -ENODEV;
+
+ leapraid_ids = 0;
+
+ leapraid_ctl_init();
+
+ error = pci_register_driver(&leapraid_driver);
+ if (error)
+ sas_release_transport(leapraid_transport_template);
+
+ return error;
+}
+
+static void __exit leapraid_exit(void)
+{
+ pr_info("leapraid version %s unloading\n",
+ LEAPRAID_DRIVER_VERSION);
+
+ leapraid_ctl_exit();
+ pci_unregister_driver(&leapraid_driver);
+ sas_release_transport(leapraid_transport_template);
+}
+
+module_init(leapraid_init);
+module_exit(leapraid_exit);
diff --git a/drivers/scsi/leapraid/leapraid_transport.c b/drivers/scsi/leapraid/leapraid_transport.c
new file mode 100644
index 000000000000..d224449732a3
--- /dev/null
+++ b/drivers/scsi/leapraid/leapraid_transport.c
@@ -0,0 +1,1256 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 LeapIO Tech Inc.
+ *
+ * LeapRAID Storage and RAID Controller driver.
+ */
+
+#include <scsi/scsi_host.h>
+
+#include "leapraid_func.h"
+
+static struct leapraid_topo_node *leapraid_transport_topo_node_by_sas_addr(
+ struct leapraid_adapter *adapter,
+ u64 sas_addr,
+ struct leapraid_card_port *card_port)
+{
+ if (adapter->dev_topo.card.sas_address == sas_addr)
+ return &adapter->dev_topo.card;
+ else
+ return leapraid_exp_find_by_sas_address(adapter,
+ sas_addr,
+ card_port);
+}
+
+static u8 leapraid_get_port_id_by_expander(struct leapraid_adapter *adapter,
+ struct sas_rphy *rphy)
+{
+ struct leapraid_topo_node *topo_node_exp;
+ unsigned long flags;
+ u8 port_id = 0xFF;
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ list_for_each_entry(topo_node_exp, &adapter->dev_topo.exp_list, list) {
+ if (topo_node_exp->rphy == rphy) {
+ port_id = topo_node_exp->card_port->port_id;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ return port_id;
+}
+
+static u8 leapraid_get_port_id_by_end_dev(struct leapraid_adapter *adapter,
+ struct sas_rphy *rphy)
+{
+ struct leapraid_sas_dev *sas_dev;
+ unsigned long flags;
+ u8 port_id = 0xFF;
+
+ spin_lock_irqsave(&adapter->dev_topo.sas_dev_lock, flags);
+ sas_dev = leapraid_hold_lock_get_sas_dev_by_addr_and_rphy(adapter,
+ rphy->identify.sas_address,
+ rphy);
+ if (sas_dev) {
+ port_id = sas_dev->card_port->port_id;
+ leapraid_sdev_put(sas_dev);
+ }
+ spin_unlock_irqrestore(&adapter->dev_topo.sas_dev_lock, flags);
+
+ return port_id;
+}
+
+static u8 leapraid_transport_get_port_id_by_rphy(
+ struct leapraid_adapter *adapter,
+ struct sas_rphy *rphy)
+{
+ if (!rphy)
+ return 0xFF;
+
+ switch (rphy->identify.device_type) {
+ case SAS_EDGE_EXPANDER_DEVICE:
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ return leapraid_get_port_id_by_expander(adapter, rphy);
+ case SAS_END_DEVICE:
+ return leapraid_get_port_id_by_end_dev(adapter, rphy);
+ default:
+ return 0xFF;
+ }
+}
+
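+/* Map firmware negotiated-link-rate codes onto SAS transport enums. */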
+static enum sas_linkrate leapraid_transport_convert_phy_link_rate(u8 link_rate)
+{
+ unsigned int i;
+
+ const struct linkrate_map {
+ u8 in;
+ enum sas_linkrate out;
+ } linkrate_table[] = {
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_1_5,
+ SAS_LINK_RATE_1_5_GBPS
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_3_0,
+ SAS_LINK_RATE_3_0_GBPS
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_6_0,
+ SAS_LINK_RATE_6_0_GBPS
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_12_0,
+			SAS_LINK_RATE_12_0_GBPS
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_PHY_DISABLED,
+ SAS_PHY_DISABLED
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED,
+ SAS_LINK_RATE_FAILED
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_PORT_SELECTOR,
+ SAS_SATA_PORT_SELECTOR
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_SMP_RESETTING,
+ SAS_LINK_RATE_UNKNOWN
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE,
+ SAS_LINK_RATE_UNKNOWN
+ },
+ {
+ LEAPRAID_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE,
+ SAS_LINK_RATE_UNKNOWN
+ },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(linkrate_table); i++) {
+ if (linkrate_table[i].in == link_rate)
+ return linkrate_table[i].out;
+ }
+
+ return SAS_LINK_RATE_UNKNOWN;
+}
+
+static void leapraid_set_identify_protocol_flags(u32 dev_info,
+ struct sas_identify *identify)
+{
+ unsigned int i;
+
+ const struct protocol_mapping {
+ u32 mask;
+ u32 *target;
+ u32 protocol;
+ } mappings[] = {
+ {
+ LEAPRAID_DEVTYP_SSP_INIT,
+ &identify->initiator_port_protocols,
+ SAS_PROTOCOL_SSP
+ },
+ {
+ LEAPRAID_DEVTYP_STP_INIT,
+ &identify->initiator_port_protocols,
+ SAS_PROTOCOL_STP
+ },
+ {
+ LEAPRAID_DEVTYP_SMP_INIT,
+ &identify->initiator_port_protocols,
+ SAS_PROTOCOL_SMP
+ },
+ {
+ LEAPRAID_DEVTYP_SATA_HOST,
+ &identify->initiator_port_protocols,
+ SAS_PROTOCOL_SATA
+ },
+ {
+ LEAPRAID_DEVTYP_SSP_TGT,
+ &identify->target_port_protocols,
+ SAS_PROTOCOL_SSP
+ },
+ {
+ LEAPRAID_DEVTYP_STP_TGT,
+ &identify->target_port_protocols,
+ SAS_PROTOCOL_STP
+ },
+ {
+ LEAPRAID_DEVTYP_SMP_TGT,
+ &identify->target_port_protocols,
+ SAS_PROTOCOL_SMP
+ },
+ {
+ LEAPRAID_DEVTYP_SATA_DEV,
+ &identify->target_port_protocols,
+ SAS_PROTOCOL_SATA
+ },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(mappings); i++)
+ if ((dev_info & mappings[i].mask) && mappings[i].target)
+ *mappings[i].target |= mappings[i].protocol;
+}
+
+static int leapraid_transport_set_identify(struct leapraid_adapter *adapter,
+ u16 hdl,
+ struct sas_identify *identify)
+{
+ union cfg_param_1 cfgp1 = {0};
+ union cfg_param_2 cfgp2 = {0};
+ struct leapraid_sas_dev_p0 sas_dev_pg0;
+ u32 dev_info;
+
+ if ((adapter->access_ctrl.shost_recovering &&
+ !adapter->scan_dev_desc.driver_loading) ||
+ adapter->access_ctrl.pcie_recovering)
+ return -EFAULT;
+
+ cfgp1.form = LEAPRAID_SAS_DEV_CFG_PGAD_HDL;
+ cfgp2.handle = hdl;
+ if ((leapraid_op_config_page(adapter, &sas_dev_pg0, cfgp1,
+ cfgp2, GET_SAS_DEVICE_PG0)))
+ return -ENXIO;
+
+ memset(identify, 0, sizeof(struct sas_identify));
+ dev_info = le32_to_cpu(sas_dev_pg0.dev_info);
+ identify->sas_address = le64_to_cpu(sas_dev_pg0.sas_address);
+ identify->phy_identifier = sas_dev_pg0.phy_num;
+
+ switch (dev_info & LEAPRAID_DEVTYP_MASK_DEV_TYPE) {
+ case LEAPRAID_DEVTYP_NO_DEV:
+ identify->device_type = SAS_PHY_UNUSED;
+ break;
+ case LEAPRAID_DEVTYP_END_DEV:
+ identify->device_type = SAS_END_DEVICE;
+ break;
+ case LEAPRAID_DEVTYP_EDGE_EXPANDER:
+ identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
+ break;
+ case LEAPRAID_DEVTYP_FANOUT_EXPANDER:
+ identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
+ break;
+ }
+
+ leapraid_set_identify_protocol_flags(dev_info, identify);
+
+ return 0;
+}
+
+static void leapraid_transport_exp_set_edev(struct leapraid_adapter *adapter,
+ void *data_out,
+ struct sas_expander_device *edev)
+{
+ struct leapraid_smp_passthrough_rep *smp_passthrough_rep;
+ struct leapraid_rep_manu_reply *rep_manu_reply;
+ u8 *component_id;
+ ssize_t __maybe_unused ret;
+
+ smp_passthrough_rep =
+ (void *)(&adapter->driver_cmds.transport_cmd.reply);
+ if (le16_to_cpu(smp_passthrough_rep->resp_data_len) !=
+ sizeof(struct leapraid_rep_manu_reply))
+ return;
+
+ rep_manu_reply = data_out + sizeof(struct leapraid_rep_manu_request);
+ ret = strscpy(edev->vendor_id, rep_manu_reply->vendor_identification,
+ SAS_EXPANDER_VENDOR_ID_LEN);
+ ret = strscpy(edev->product_id, rep_manu_reply->product_identification,
+ SAS_EXPANDER_PRODUCT_ID_LEN);
+ ret = strscpy(edev->product_rev,
+ rep_manu_reply->product_revision_level,
+ SAS_EXPANDER_PRODUCT_REV_LEN);
+ edev->level = rep_manu_reply->sas_format & 1;
+ if (edev->level) {
+ ret = strscpy(edev->component_vendor_id,
+ rep_manu_reply->component_vendor_identification,
+ SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
+
+ component_id = (u8 *)&rep_manu_reply->component_id;
+ edev->component_id = component_id[0] << 8 | component_id[1];
+ edev->component_revision_id =
+ rep_manu_reply->component_revision_level;
+ }
+}
+
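+/*
+ * Send an SMP REPORT MANUFACTURER INFORMATION request to an expander via
+ * the firmware passthrough; request and reply share a single coherent
+ * DMA buffer.
+ */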
+static int leapraid_transport_exp_report_manu(struct leapraid_adapter *adapter,
+ u64 sas_address,
+ struct sas_expander_device *edev,
+ u8 port_id)
+{
+ struct leapraid_smp_passthrough_req *smp_passthrough_req;
+ struct leapraid_rep_manu_request *rep_manu_request;
+ dma_addr_t h2c_dma_addr;
+ dma_addr_t c2h_dma_addr;
+ bool issue_reset = false;
+ void *data_out = NULL;
+ size_t c2h_size;
+ size_t h2c_size;
+ void *psge;
+ int rc = 0;
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering) {
+ return -EFAULT;
+ }
+
+ mutex_lock(&adapter->driver_cmds.transport_cmd.mutex);
+ adapter->driver_cmds.transport_cmd.status = LEAPRAID_CMD_PENDING;
+ rc = leapraid_check_adapter_is_op(adapter);
+ if (rc)
+ goto out;
+
+ h2c_size = sizeof(struct leapraid_rep_manu_request);
+ c2h_size = sizeof(struct leapraid_rep_manu_reply);
+ data_out = dma_alloc_coherent(&adapter->pdev->dev,
+ h2c_size + c2h_size,
+ &h2c_dma_addr,
+ GFP_ATOMIC);
+ if (!data_out) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rep_manu_request = data_out;
+ rep_manu_request->smp_frame_type =
+ SMP_REPORT_MANUFACTURER_INFORMATION_FRAME_TYPE;
+ rep_manu_request->function = SMP_REPORT_MANUFACTURER_INFORMATION_FUNC;
+ rep_manu_request->allocated_response_length = 0;
+ rep_manu_request->request_length = 0;
+
+ smp_passthrough_req =
+ leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.transport_cmd.inter_taskid);
+ memset(smp_passthrough_req, 0,
+ sizeof(struct leapraid_smp_passthrough_req));
+ smp_passthrough_req->func = LEAPRAID_FUNC_SMP_PASSTHROUGH;
+ smp_passthrough_req->physical_port = port_id;
+ smp_passthrough_req->sas_address = cpu_to_le64(sas_address);
+ smp_passthrough_req->req_data_len = cpu_to_le16(h2c_size);
+ psge = &smp_passthrough_req->sgl;
+ c2h_dma_addr = h2c_dma_addr + sizeof(struct leapraid_rep_manu_request);
+ leapraid_build_ieee_sg(adapter, psge, h2c_dma_addr, h2c_size,
+ c2h_dma_addr, c2h_size);
+
+ init_completion(&adapter->driver_cmds.transport_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.transport_cmd.inter_taskid);
+ wait_for_completion_timeout(&adapter->driver_cmds.transport_cmd.done,
+ LEAPRAID_TRANSPORT_CMD_TIMEOUT * HZ);
+ if (!(adapter->driver_cmds.transport_cmd.status & LEAPRAID_CMD_DONE)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: smp passthrough to exp timeout\n",
+ __func__);
+ if (!(adapter->driver_cmds.transport_cmd.status &
+ LEAPRAID_CMD_RESET))
+ issue_reset = true;
+
+ goto hard_reset;
+ }
+
+ if (adapter->driver_cmds.transport_cmd.status &
+ LEAPRAID_CMD_REPLY_VALID)
+ leapraid_transport_exp_set_edev(adapter, data_out, edev);
+
+hard_reset:
+ if (issue_reset) {
+ dev_info(&adapter->pdev->dev, "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, FULL_RESET);
+ }
+out:
+ adapter->driver_cmds.transport_cmd.status = LEAPRAID_CMD_NOT_USED;
+ if (data_out)
+ dma_free_coherent(&adapter->pdev->dev, h2c_size + c2h_size,
+ data_out, h2c_dma_addr);
+
+ mutex_unlock(&adapter->driver_cmds.transport_cmd.mutex);
+ return rc;
+}
+
+static void leapraid_transport_del_port(struct leapraid_adapter *adapter,
+ struct leapraid_sas_port *sas_port)
+{
+ dev_info(&sas_port->port->dev,
+ "remove port: sas addr=0x%016llx\n",
+ (unsigned long long)sas_port->remote_identify.sas_address);
+ switch (sas_port->remote_identify.device_type) {
+ case SAS_END_DEVICE:
+ leapraid_sas_dev_remove_by_sas_address(adapter,
+ sas_port->remote_identify.sas_address,
+ sas_port->card_port);
+ break;
+ case SAS_EDGE_EXPANDER_DEVICE:
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ leapraid_exp_rm(adapter, sas_port->remote_identify.sas_address,
+ sas_port->card_port);
+ break;
+ default:
+ break;
+ }
+}
+
+static void leapraid_transport_del_phy(struct leapraid_adapter *adapter,
+ struct leapraid_sas_port *sas_port,
+ struct leapraid_card_phy *card_phy)
+{
+ dev_info(&card_phy->phy->dev,
+ "remove phy: sas addr=0x%016llx, phy=%d\n",
+ (unsigned long long)sas_port->remote_identify.sas_address,
+ card_phy->phy_id);
+ list_del(&card_phy->port_siblings);
+ sas_port->phys_num--;
+ sas_port_delete_phy(sas_port->port, card_phy->phy);
+ card_phy->phy_is_assigned = false;
+}
+
+static void leapraid_transport_add_phy(struct leapraid_adapter *adapter,
+ struct leapraid_sas_port *sas_port,
+ struct leapraid_card_phy *card_phy)
+{
+ dev_info(&card_phy->phy->dev,
+ "add phy: sas addr=0x%016llx, phy=%d\n",
+ (unsigned long long)sas_port->remote_identify.sas_address,
+ card_phy->phy_id);
+ list_add_tail(&card_phy->port_siblings, &sas_port->phy_list);
+ sas_port->phys_num++;
+ sas_port_add_phy(sas_port->port, card_phy->phy);
+ card_phy->phy_is_assigned = true;
+}
+
+void leapraid_transport_attach_phy_to_port(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node,
+ struct leapraid_card_phy *card_phy,
+ u64 sas_address,
+ struct leapraid_card_port *card_port)
+{
+ struct leapraid_sas_port *sas_port;
+ struct leapraid_card_phy *card_phy_srch;
+
+ if (card_phy->phy_is_assigned)
+ return;
+
+ if (!card_port)
+ return;
+
+ list_for_each_entry(sas_port, &topo_node->sas_port_list, port_list) {
+ if (sas_port->remote_identify.sas_address != sas_address)
+ continue;
+
+ if (sas_port->card_port != card_port)
+ continue;
+
+ list_for_each_entry(card_phy_srch, &sas_port->phy_list,
+ port_siblings) {
+ if (card_phy_srch == card_phy)
+ return;
+ }
+ leapraid_transport_add_phy(adapter, sas_port, card_phy);
+ return;
+ }
+}
+
+void leapraid_transport_detach_phy_to_port(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node,
+ struct leapraid_card_phy *target_card_phy)
+{
+ struct leapraid_sas_port *sas_port, *sas_port_next;
+ struct leapraid_card_phy *cur_card_phy;
+
+ if (!target_card_phy->phy_is_assigned)
+ return;
+
+ list_for_each_entry_safe(sas_port, sas_port_next,
+ &topo_node->sas_port_list, port_list) {
+ list_for_each_entry(cur_card_phy, &sas_port->phy_list,
+ port_siblings) {
+ if (cur_card_phy != target_card_phy)
+ continue;
+
+ if (sas_port->phys_num == 1 &&
+ !adapter->access_ctrl.shost_recovering)
+ leapraid_transport_del_port(adapter, sas_port);
+ else
+ leapraid_transport_del_phy(adapter, sas_port,
+ target_card_phy);
+ return;
+ }
+ }
+}
+
+static void leapraid_detach_phy_from_old_port(struct leapraid_adapter *adapter,
+ struct leapraid_topo_node *topo_node,
+ u64 sas_address,
+ struct leapraid_card_port *card_port)
+{
+ int i;
+
+ for (i = 0; i < topo_node->phys_num; i++) {
+ if (topo_node->card_phy[i].remote_identify.sas_address !=
+ sas_address ||
+ topo_node->card_phy[i].card_port != card_port)
+ continue;
+ if (topo_node->card_phy[i].phy_is_assigned)
+ leapraid_transport_detach_phy_to_port(adapter,
+ topo_node,
+ &topo_node->card_phy[i]);
+ }
+}
+
+static struct leapraid_sas_port *leapraid_prepare_sas_port(
+ struct leapraid_adapter *adapter,
+ u16 handle, u64 sas_address,
+ struct leapraid_card_port *card_port,
+ struct leapraid_topo_node **out_topo_node)
+{
+ struct leapraid_topo_node *topo_node;
+ struct leapraid_sas_port *sas_port;
+ unsigned long flags;
+
+ sas_port = kzalloc(sizeof(*sas_port), GFP_KERNEL);
+ if (!sas_port)
+ return NULL;
+
+ INIT_LIST_HEAD(&sas_port->port_list);
+ INIT_LIST_HEAD(&sas_port->phy_list);
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ topo_node = leapraid_transport_topo_node_by_sas_addr(adapter,
+ sas_address,
+ card_port);
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ if (!topo_node) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to find parent node for sas addr 0x%016llx!\n",
+ __func__, sas_address);
+ kfree(sas_port);
+ return NULL;
+ }
+
+ if (leapraid_transport_set_identify(adapter, handle,
+ &sas_port->remote_identify)) {
+ kfree(sas_port);
+ return NULL;
+ }
+
+ if (sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
+ kfree(sas_port);
+ return NULL;
+ }
+
+ sas_port->card_port = card_port;
+ *out_topo_node = topo_node;
+
+ return sas_port;
+}
+
+static int leapraid_bind_phys_and_vphy(struct leapraid_adapter *adapter,
+ struct leapraid_sas_port *sas_port,
+ struct leapraid_topo_node *topo_node,
+ struct leapraid_card_port *card_port,
+ struct leapraid_vphy **out_vphy)
+{
+ struct leapraid_vphy *vphy = NULL;
+ int i;
+
+ for (i = 0; i < topo_node->phys_num; i++) {
+ if (topo_node->card_phy[i].remote_identify.sas_address !=
+ sas_port->remote_identify.sas_address ||
+ topo_node->card_phy[i].card_port != card_port)
+ continue;
+
+ list_add_tail(&topo_node->card_phy[i].port_siblings,
+ &sas_port->phy_list);
+ sas_port->phys_num++;
+
+ if (topo_node->hdl <= adapter->dev_topo.card.phys_num) {
+ if (!topo_node->card_phy[i].vphy) {
+ card_port->phy_mask |= BIT(i);
+ continue;
+ }
+
+ vphy = leapraid_get_vphy_by_phy(card_port, i);
+ if (!vphy)
+ return -1;
+ }
+ }
+
+ *out_vphy = vphy;
+ return sas_port->phys_num ? 0 : -1;
+}
+
+static struct sas_rphy *leapraid_create_and_register_rphy(
+ struct leapraid_adapter *adapter,
+ struct leapraid_sas_port *sas_port,
+ struct leapraid_topo_node *topo_node,
+ struct leapraid_card_port *card_port,
+ struct leapraid_vphy *vphy)
+{
+ struct leapraid_sas_dev *sas_dev = NULL;
+ struct leapraid_card_phy *card_phy;
+ struct sas_port *port;
+ struct sas_rphy *rphy;
+
+ if (!topo_node->parent_dev)
+ return NULL;
+
+	port = sas_port_alloc_num(topo_node->parent_dev);
+	if (!port || sas_port_add(port))
+		return NULL;
+
+ list_for_each_entry(card_phy, &sas_port->phy_list, port_siblings) {
+ sas_port_add_phy(port, card_phy->phy);
+ card_phy->phy_is_assigned = true;
+ card_phy->card_port = card_port;
+ }
+
+ if (sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ sas_dev = leapraid_get_sas_dev_by_addr(adapter,
+ sas_port->remote_identify.sas_address,
+ card_port);
+ if (!sas_dev)
+ return NULL;
+ sas_dev->pend_sas_rphy_add = 1;
+ rphy = sas_end_device_alloc(port);
+ sas_dev->rphy = rphy;
+
+ if (topo_node->hdl <= adapter->dev_topo.card.phys_num) {
+ if (!vphy)
+ card_port->sas_address = sas_dev->sas_addr;
+ else
+ vphy->sas_address = sas_dev->sas_addr;
+ }
+
+ } else {
+ rphy = sas_expander_alloc(port,
+ sas_port->remote_identify.device_type);
+ if (topo_node->hdl <= adapter->dev_topo.card.phys_num)
+ card_port->sas_address =
+ sas_port->remote_identify.sas_address;
+ }
+
+ rphy->identify = sas_port->remote_identify;
+
+ if (sas_rphy_add(rphy))
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to add rphy\n", __func__);
+
+ if (sas_dev) {
+ sas_dev->pend_sas_rphy_add = 0;
+ leapraid_sdev_put(sas_dev);
+ }
+
+ sas_port->port = port;
+ return rphy;
+}
+
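+/*
+ * Create and register a sas_port (and rphy) for the device at @hdl,
+ * binding every phy of the parent node that is attached to @sas_address.
+ */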
+struct leapraid_sas_port *leapraid_transport_port_add(
+ struct leapraid_adapter *adapter,
+ u16 hdl, u64 sas_address,
+ struct leapraid_card_port *card_port)
+{
+ struct leapraid_card_phy *card_phy, *card_phy_next;
+ struct leapraid_topo_node *topo_node = NULL;
+ struct leapraid_sas_port *sas_port = NULL;
+ struct leapraid_vphy *vphy = NULL;
+ struct sas_rphy *rphy = NULL;
+ unsigned long flags;
+
+ if (!card_port)
+ return NULL;
+
+ sas_port = leapraid_prepare_sas_port(adapter, hdl, sas_address,
+ card_port, &topo_node);
+ if (!sas_port)
+ return NULL;
+
+ leapraid_detach_phy_from_old_port(adapter,
+ topo_node,
+ sas_port->remote_identify.sas_address,
+ card_port);
+
+ if (leapraid_bind_phys_and_vphy(adapter, sas_port, topo_node,
+ card_port, &vphy))
+ goto out_fail;
+
+ rphy = leapraid_create_and_register_rphy(adapter, sas_port, topo_node,
+ card_port, vphy);
+ if (!rphy)
+ goto out_fail;
+
+ dev_info(&rphy->dev,
+ "%s: added dev: hdl=0x%04x, sas addr=0x%016llx\n",
+ __func__, hdl,
+ (unsigned long long)sas_port->remote_identify.sas_address);
+
+ sas_port->rphy = rphy;
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ list_add_tail(&sas_port->port_list, &topo_node->sas_port_list);
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+	if (sas_port->remote_identify.device_type ==
+	    SAS_EDGE_EXPANDER_DEVICE ||
+	    sas_port->remote_identify.device_type ==
+	    SAS_FANOUT_EXPANDER_DEVICE)
+ leapraid_transport_exp_report_manu(adapter,
+ sas_port->remote_identify.sas_address,
+ rphy_to_expander_device(rphy),
+ card_port->port_id);
+
+ return sas_port;
+
+out_fail:
+ list_for_each_entry_safe(card_phy, card_phy_next,
+ &sas_port->phy_list, port_siblings)
+ list_del(&card_phy->port_siblings);
+ kfree(sas_port);
+ return NULL;
+}
+
+static struct leapraid_sas_port *leapraid_find_and_remove_sas_port(
+ struct leapraid_topo_node *topo_node,
+ u64 sas_address,
+ struct leapraid_card_port *remove_card_port,
+ bool *found)
+{
+ struct leapraid_sas_port *sas_port, *sas_port_next;
+
+ list_for_each_entry_safe(sas_port, sas_port_next,
+ &topo_node->sas_port_list, port_list) {
+ if (sas_port->remote_identify.sas_address != sas_address)
+ continue;
+
+ if (sas_port->card_port != remove_card_port)
+ continue;
+
+ *found = true;
+ list_del(&sas_port->port_list);
+ return sas_port;
+ }
+ return NULL;
+}
+
+static void leapraid_cleanup_card_port_and_vphys(
+ struct leapraid_adapter *adapter,
+ u64 sas_address,
+ struct leapraid_card_port *remove_card_port)
+{
+ struct leapraid_card_port *card_port, *card_port_next;
+ struct leapraid_vphy *vphy, *vphy_next;
+
+ if (remove_card_port->vphys_mask) {
+ list_for_each_entry_safe(vphy, vphy_next,
+ &remove_card_port->vphys_list, list) {
+ if (vphy->sas_address != sas_address)
+ continue;
+
+ dev_info(&adapter->pdev->dev,
+ "%s: remove vphy: %p from port: %p, port_id=%d\n",
+ __func__, vphy, remove_card_port,
+ remove_card_port->port_id);
+
+ remove_card_port->vphys_mask &= ~vphy->phy_mask;
+ list_del(&vphy->list);
+ kfree(vphy);
+ }
+
+ if (!remove_card_port->vphys_mask &&
+ !remove_card_port->sas_address) {
+ dev_info(&adapter->pdev->dev,
+ "%s: remove empty hba_port: %p, port_id=%d\n",
+ __func__,
+ remove_card_port,
+ remove_card_port->port_id);
+ list_del(&remove_card_port->list);
+ kfree(remove_card_port);
+ remove_card_port = NULL;
+ }
+ }
+
+ list_for_each_entry_safe(card_port, card_port_next,
+ &adapter->dev_topo.card_port_list, list) {
+ if (card_port != remove_card_port)
+ continue;
+
+ if (card_port->sas_address != sas_address)
+ continue;
+
+ if (!remove_card_port->vphys_mask) {
+ dev_info(&adapter->pdev->dev,
+ "%s: remove hba_port: %p, port_id=%d\n",
+ __func__, card_port, card_port->port_id);
+ list_del(&card_port->list);
+ kfree(card_port);
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: clear sas_address of hba_port: %p, port_id=%d\n",
+ __func__, card_port, card_port->port_id);
+ remove_card_port->sas_address = 0;
+ }
+ break;
+ }
+}
+
+static void leapraid_clear_topo_node_phys(struct leapraid_topo_node *topo_node,
+ u64 sas_address)
+{
+ int i;
+
+ for (i = 0; i < topo_node->phys_num; i++) {
+ if (topo_node->card_phy[i].remote_identify.sas_address ==
+ sas_address) {
+ memset(&topo_node->card_phy[i].remote_identify, 0,
+ sizeof(struct sas_identify));
+ topo_node->card_phy[i].vphy = false;
+ }
+ }
+}
+
+void leapraid_transport_port_remove(struct leapraid_adapter *adapter,
+ u64 sas_address, u64 sas_address_parent,
+ struct leapraid_card_port *remove_card_port)
+{
+ struct leapraid_card_phy *card_phy, *card_phy_next;
+ struct leapraid_sas_port *sas_port = NULL;
+ struct leapraid_topo_node *topo_node;
+ unsigned long flags;
+ bool found = false;
+
+ if (!remove_card_port)
+ return;
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+
+ topo_node = leapraid_transport_topo_node_by_sas_addr(adapter,
+ sas_address_parent,
+ remove_card_port);
+ if (!topo_node) {
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock,
+ flags);
+ return;
+ }
+
+ sas_port = leapraid_find_and_remove_sas_port(topo_node, sas_address,
+ remove_card_port, &found);
+
+ if (!found) {
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock,
+ flags);
+ return;
+ }
+
+ if (topo_node->hdl <= adapter->dev_topo.card.phys_num &&
+ adapter->adapter_attr.enable_mp)
+ leapraid_cleanup_card_port_and_vphys(adapter, sas_address,
+ remove_card_port);
+
+ leapraid_clear_topo_node_phys(topo_node, sas_address);
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ list_for_each_entry_safe(card_phy, card_phy_next,
+ &sas_port->phy_list, port_siblings) {
+ card_phy->phy_is_assigned = false;
+ if (!adapter->access_ctrl.host_removing)
+ sas_port_delete_phy(sas_port->port, card_phy->phy);
+
+ list_del(&card_phy->port_siblings);
+ }
+
+ if (!adapter->access_ctrl.host_removing)
+ sas_port_delete(sas_port->port);
+
+ dev_info(&adapter->pdev->dev,
+ "%s: removed sas_port for sas addr=0x%016llx\n",
+ __func__, (unsigned long long)sas_address);
+
+ kfree(sas_port);
+}
+
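+/*
+ * Fill in sas_phy link rates from exactly one of a host phy page 0 or an
+ * expander page 1; bail out if both or neither are supplied.
+ */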
+static void leapraid_init_sas_or_exp_phy(struct leapraid_adapter *adapter,
+ struct leapraid_card_phy *card_phy,
+ struct sas_phy *phy,
+ struct leapraid_sas_phy_p0 *phy_pg0,
+ struct leapraid_exp_p1 *exp_pg1)
+{
+ if (exp_pg1 && phy_pg0)
+ return;
+
+ if (!exp_pg1 && !phy_pg0)
+ return;
+
+ phy->identify = card_phy->identify;
+ phy->identify.phy_identifier = card_phy->phy_id;
+ phy->negotiated_linkrate = phy_pg0 ?
+ leapraid_transport_convert_phy_link_rate(
+ phy_pg0->neg_link_rate &
+ LEAPRAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL) :
+ leapraid_transport_convert_phy_link_rate(
+ exp_pg1->neg_link_rate &
+ LEAPRAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ phy->minimum_linkrate_hw = phy_pg0 ?
+ leapraid_transport_convert_phy_link_rate(
+ phy_pg0->hw_link_rate &
+ LEAPRAID_SAS_HWRATE_MIN_RATE_MASK) :
+ leapraid_transport_convert_phy_link_rate(
+ exp_pg1->hw_link_rate &
+ LEAPRAID_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = phy_pg0 ?
+ leapraid_transport_convert_phy_link_rate(
+ phy_pg0->hw_link_rate >> 4) :
+ leapraid_transport_convert_phy_link_rate(
+ exp_pg1->hw_link_rate >> 4);
+ phy->minimum_linkrate = phy_pg0 ?
+ leapraid_transport_convert_phy_link_rate(
+ phy_pg0->p_link_rate &
+ LEAPRAID_SAS_PRATE_MIN_RATE_MASK) :
+ leapraid_transport_convert_phy_link_rate(
+ exp_pg1->p_link_rate &
+ LEAPRAID_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = phy_pg0 ?
+ leapraid_transport_convert_phy_link_rate(
+ phy_pg0->p_link_rate >> 4) :
+ leapraid_transport_convert_phy_link_rate(
+ exp_pg1->p_link_rate >> 4);
+ phy->hostdata = card_phy->card_port;
+}
+
+void leapraid_transport_add_card_phy(struct leapraid_adapter *adapter,
+ struct leapraid_card_phy *card_phy,
+ struct leapraid_sas_phy_p0 *phy_pg0,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+
+ INIT_LIST_HEAD(&card_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, card_phy->phy_id);
+ if (!phy) {
+ dev_err(&adapter->pdev->dev,
+ "%s sas_phy_alloc failed!\n", __func__);
+ return;
+ }
+
+ if ((leapraid_transport_set_identify(adapter, card_phy->hdl,
+ &card_phy->identify))) {
+ dev_err(&adapter->pdev->dev,
+ "%s set phy handle identify failed!\n", __func__);
+ sas_phy_free(phy);
+ return;
+ }
+
+ card_phy->attached_hdl = le16_to_cpu(phy_pg0->attached_dev_hdl);
+ if (card_phy->attached_hdl) {
+ if (leapraid_transport_set_identify(adapter,
+ card_phy->attached_hdl,
+ &card_phy->remote_identify)) {
+ dev_err(&adapter->pdev->dev,
+ "%s set phy attached handle identify failed!\n",
+ __func__);
+ sas_phy_free(phy);
+ return;
+ }
+ }
+
+ leapraid_init_sas_or_exp_phy(adapter, card_phy, phy, phy_pg0, NULL);
+
+ if ((sas_phy_add(phy))) {
+ sas_phy_free(phy);
+ return;
+ }
+
+ card_phy->phy = phy;
+}
+
+int leapraid_transport_add_exp_phy(struct leapraid_adapter *adapter,
+ struct leapraid_card_phy *card_phy,
+ struct leapraid_exp_p1 *exp_pg1,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+
+ INIT_LIST_HEAD(&card_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, card_phy->phy_id);
+ if (!phy) {
+ dev_err(&adapter->pdev->dev,
+ "%s sas_phy_alloc failed!\n", __func__);
+ return -EFAULT;
+ }
+
+ if ((leapraid_transport_set_identify(adapter, card_phy->hdl,
+ &card_phy->identify))) {
+ dev_err(&adapter->pdev->dev,
+ "%s set phy hdl identify failed!\n", __func__);
+ sas_phy_free(phy);
+ return -EFAULT;
+ }
+
+ card_phy->attached_hdl = le16_to_cpu(exp_pg1->attached_dev_hdl);
+ if (card_phy->attached_hdl) {
+ if (leapraid_transport_set_identify(adapter,
+ card_phy->attached_hdl,
+ &card_phy->remote_identify)) {
+ dev_err(&adapter->pdev->dev,
+ "%s set phy attached hdl identify failed!\n",
+ __func__);
+			sas_phy_free(phy);
+			return -EFAULT;
+		}
+ }
+
+ leapraid_init_sas_or_exp_phy(adapter, card_phy, phy, NULL, exp_pg1);
+
+ if ((sas_phy_add(phy))) {
+ sas_phy_free(phy);
+ return -EFAULT;
+ }
+
+ card_phy->phy = phy;
+ return 0;
+}
+
+void leapraid_transport_update_links(struct leapraid_adapter *adapter,
+ u64 sas_address, u16 hdl, u8 phy_index,
+ u8 link_rate, struct leapraid_card_port *target_card_port)
+{
+ struct leapraid_topo_node *topo_node;
+ struct leapraid_card_phy *card_phy;
+ struct leapraid_card_port *card_port = NULL;
+ unsigned long flags;
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering)
+ return;
+
+ spin_lock_irqsave(&adapter->dev_topo.topo_node_lock, flags);
+ topo_node = leapraid_transport_topo_node_by_sas_addr(adapter,
+ sas_address,
+ target_card_port);
+ if (!topo_node) {
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock,
+ flags);
+ return;
+ }
+
+ card_phy = &topo_node->card_phy[phy_index];
+ card_phy->attached_hdl = hdl;
+ spin_unlock_irqrestore(&adapter->dev_topo.topo_node_lock, flags);
+
+ if (hdl && link_rate >= LEAPRAID_SAS_NEG_LINK_RATE_1_5) {
+ leapraid_transport_set_identify(adapter, hdl,
+ &card_phy->remote_identify);
+ if (topo_node->hdl <= adapter->dev_topo.card.phys_num &&
+ adapter->adapter_attr.enable_mp) {
+ list_for_each_entry(card_port,
+ &adapter->dev_topo.card_port_list,
+ list) {
+ if (card_port->sas_address == sas_address &&
+ card_port == target_card_port)
+ card_port->phy_mask |=
+ BIT(card_phy->phy_id);
+ }
+ }
+ leapraid_transport_attach_phy_to_port(adapter, topo_node,
+ card_phy,
+ card_phy->remote_identify.sas_address,
+ target_card_port);
+ } else {
+ memset(&card_phy->remote_identify, 0,
+ sizeof(struct sas_identify));
+ }
+
+ if (card_phy->phy)
+ card_phy->phy->negotiated_linkrate =
+ leapraid_transport_convert_phy_link_rate(link_rate);
+}
+
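+/*
+ * Map a bsg buffer for DMA: multi-entry scatterlists are bounced through
+ * a single coherent allocation, a single entry is mapped directly.
+ */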
+static int leapraid_dma_map_buffer(struct device *dev, struct bsg_buffer *buf,
+ dma_addr_t *dma_addr,
+ size_t *dma_len, void **p)
+{
+ if (buf->sg_cnt > 1) {
+ *p = dma_alloc_coherent(dev, buf->payload_len, dma_addr,
+ GFP_KERNEL);
+ if (!*p)
+ return -ENOMEM;
+
+ *dma_len = buf->payload_len;
+ } else {
+ if (!dma_map_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL))
+ return -ENOMEM;
+
+ *dma_addr = sg_dma_address(buf->sg_list);
+ *dma_len = sg_dma_len(buf->sg_list);
+ *p = NULL;
+ }
+ return 0;
+}
+
+static void leapraid_dma_unmap_buffer(struct device *dev,
+ struct bsg_buffer *buf,
+ dma_addr_t dma_addr,
+ void *p)
+{
+ if (p)
+ dma_free_coherent(dev, buf->payload_len, p, dma_addr);
+ else
+ dma_unmap_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL);
+}
+
+static void leapraid_build_smp_task(struct leapraid_adapter *adapter,
+ struct sas_rphy *rphy,
+ dma_addr_t h2c_dma_addr, size_t h2c_size,
+ dma_addr_t c2h_dma_addr, size_t c2h_size)
+{
+ struct leapraid_smp_passthrough_req *smp_passthrough_req;
+ void *psge;
+
+ smp_passthrough_req =
+ leapraid_get_task_desc(adapter,
+ adapter->driver_cmds.transport_cmd.inter_taskid);
+ memset(smp_passthrough_req, 0, sizeof(*smp_passthrough_req));
+
+ smp_passthrough_req->func = LEAPRAID_FUNC_SMP_PASSTHROUGH;
+ smp_passthrough_req->physical_port =
+ leapraid_transport_get_port_id_by_rphy(adapter, rphy);
+ smp_passthrough_req->sas_address = (rphy) ?
+ cpu_to_le64(rphy->identify.sas_address) :
+ cpu_to_le64(adapter->dev_topo.card.sas_address);
+ smp_passthrough_req->req_data_len =
+ cpu_to_le16(h2c_size - LEAPRAID_SMP_FRAME_HEADER_SIZE);
+ psge = &smp_passthrough_req->sgl;
+ leapraid_build_ieee_sg(adapter, psge, h2c_dma_addr,
+ h2c_size - LEAPRAID_SMP_FRAME_HEADER_SIZE,
+ c2h_dma_addr,
+ c2h_size - LEAPRAID_SMP_FRAME_HEADER_SIZE);
+}
+
+static int leapraid_send_smp_req(struct leapraid_adapter *adapter)
+{
+ dev_info(&adapter->pdev->dev,
+ "%s: sending smp request\n", __func__);
+ init_completion(&adapter->driver_cmds.transport_cmd.done);
+ leapraid_fire_task(adapter,
+ adapter->driver_cmds.transport_cmd.inter_taskid);
+ wait_for_completion_timeout(&adapter->driver_cmds.transport_cmd.done,
+ LEAPRAID_TRANSPORT_CMD_TIMEOUT * HZ);
+ if (!(adapter->driver_cmds.transport_cmd.status & LEAPRAID_CMD_DONE)) {
+ dev_err(&adapter->pdev->dev, "%s: timeout\n", __func__);
+ if (!(adapter->driver_cmds.transport_cmd.status &
+ LEAPRAID_CMD_RESET)) {
+ dev_info(&adapter->pdev->dev,
+ "%s:%d call hard_reset\n",
+ __func__, __LINE__);
+ leapraid_hard_reset_handler(adapter, FULL_RESET);
+ return -ETIMEDOUT;
+ }
+ }
+
+ dev_info(&adapter->pdev->dev, "%s: smp request complete\n", __func__);
+ if (!(adapter->driver_cmds.transport_cmd.status &
+ LEAPRAID_CMD_REPLY_VALID)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: smp request no reply\n", __func__);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static void leapraid_handle_smp_rep(struct leapraid_adapter *adapter,
+ struct bsg_job *job, void *addr_in,
+ unsigned int *reslen)
+{
+ struct leapraid_smp_passthrough_rep *smp_passthrough_rep;
+
+ smp_passthrough_rep =
+ (void *)(&adapter->driver_cmds.transport_cmd.reply);
+
+ dev_info(&adapter->pdev->dev, "%s: response data len=%d\n",
+ __func__, le16_to_cpu(smp_passthrough_rep->resp_data_len));
+
+ memcpy(job->reply, smp_passthrough_rep, sizeof(*smp_passthrough_rep));
+ job->reply_len = sizeof(*smp_passthrough_rep);
+ *reslen = le16_to_cpu(smp_passthrough_rep->resp_data_len);
+
+ if (addr_in)
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, addr_in,
+ job->reply_payload.payload_len);
+}
+
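+/*
+ * BSG smp_handler: forward an SMP frame from user space through the
+ * firmware passthrough and copy the response into the bsg reply payload.
+ */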
+static void leapraid_transport_smp_handler(struct bsg_job *job,
+ struct Scsi_Host *shost,
+ struct sas_rphy *rphy)
+{
+ struct leapraid_adapter *adapter = shost_priv(shost);
+ dma_addr_t c2h_dma_addr;
+ dma_addr_t h2c_dma_addr;
+ void *addr_in = NULL;
+ void *addr_out = NULL;
+ size_t c2h_size;
+ size_t h2c_size;
+ int rc;
+ unsigned int reslen = 0;
+
+ if (adapter->access_ctrl.shost_recovering ||
+ adapter->access_ctrl.pcie_recovering) {
+ rc = -EFAULT;
+ goto done;
+ }
+
+ rc = mutex_lock_interruptible(&adapter->driver_cmds.transport_cmd.mutex);
+ if (rc)
+ goto done;
+
+ adapter->driver_cmds.transport_cmd.status = LEAPRAID_CMD_PENDING;
+ rc = leapraid_dma_map_buffer(&adapter->pdev->dev,
+ &job->request_payload,
+ &h2c_dma_addr, &h2c_size, &addr_out);
+ if (rc)
+ goto release_lock;
+
+ if (addr_out)
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, addr_out,
+ job->request_payload.payload_len);
+
+ rc = leapraid_dma_map_buffer(&adapter->pdev->dev, &job->reply_payload,
+ &c2h_dma_addr, &c2h_size, &addr_in);
+ if (rc)
+ goto free_req_buf;
+
+ rc = leapraid_check_adapter_is_op(adapter);
+ if (rc)
+ goto free_rep_buf;
+
+ leapraid_build_smp_task(adapter, rphy, h2c_dma_addr,
+ h2c_size, c2h_dma_addr, c2h_size);
+
+ rc = leapraid_send_smp_req(adapter);
+ if (rc)
+ goto free_rep_buf;
+
+ leapraid_handle_smp_rep(adapter, job, addr_in, &reslen);
+
+free_rep_buf:
+ leapraid_dma_unmap_buffer(&adapter->pdev->dev, &job->reply_payload,
+ c2h_dma_addr, addr_in);
+free_req_buf:
+ leapraid_dma_unmap_buffer(&adapter->pdev->dev, &job->request_payload,
+ h2c_dma_addr, addr_out);
+release_lock:
+ adapter->driver_cmds.transport_cmd.status = LEAPRAID_CMD_NOT_USED;
+ mutex_unlock(&adapter->driver_cmds.transport_cmd.mutex);
+done:
+ bsg_job_done(job, rc, reslen);
+}
+
+struct sas_function_template leapraid_transport_functions = {
+ .smp_handler = leapraid_transport_smp_handler,
+};
+
+struct scsi_transport_template *leapraid_transport_template;
--
2.25.1