Kernel
December 2021: 27 participants, 170 discussions

[PATCH openEuler-5.10 01/64] arm64: Revert feature: Add memmap parameter and register pmem
by Zheng Zengkai, 29 Dec '21
From: Zhuling <zhuling8(a)huawei.com>
euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4O31I?from=project-issue
CVE: NA
--------------------------------
The reserved memory for PMEM conflicts with
"add memmap interface to reserved memory for mremap syscall usage",
so this feature needs to be rolled back; it will be resubmitted after
adaptation.
Related feature commits:
1.PMEM function commit: 94dc364f5eda10f49449ba573dc3322e1ea92280
2.PMEM feature config commit: 36d7a831e15ceb84e937122c87d01c14242dc377
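For reference, the reverted feature was driven by a kernel command
line parameter of the form memmap=nn[KMG]!ss[KMG] (see the removed
Kconfig help text below). An illustrative invocation, reserving 100K
of memory at physical address 0x1a0000000 for persistent storage:

    memmap=100K!0x1a0000000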
Signed-off-by: Zhuling <zhuling8(a)huawei.com>
Reviewed-by: Sang Yan <sangyan(a)huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang(a)huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai(a)huawei.com>
---
arch/arm64/Kconfig | 21 ------
arch/arm64/configs/openeuler_defconfig | 3 -
arch/arm64/kernel/Makefile | 1 -
arch/arm64/kernel/pmem.c | 35 ----------
arch/arm64/kernel/setup.c | 10 ---
arch/arm64/mm/init.c | 94 --------------------------
drivers/nvdimm/Kconfig | 5 --
drivers/nvdimm/Makefile | 1 -
8 files changed, 170 deletions(-)
delete mode 100644 arch/arm64/kernel/pmem.c
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index df90a6e05ad2..2df4b310eb23 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1321,27 +1321,6 @@ config RODATA_FULL_DEFAULT_ENABLED
This requires the linear region to be mapped down to pages,
which may adversely affect performance in some cases.
-config ARM64_PMEM_RESERVE
- bool "Reserve memory for persistent storage"
- default n
- help
- Use memmap=nn[KMG]!ss[KMG](memmap=100K!0x1a0000000) reserve
- memory for persistent storage.
-
- Say y here to enable this feature.
-
-config ARM64_PMEM_LEGACY_DEVICE
- bool "Create persistent storage"
- depends on BLK_DEV
- depends on LIBNVDIMM
- select ARM64_PMEM_RESERVE
- help
- Use reserved memory for persistent storage when the kernel
- restart or update. the data in PMEM will not be lost and
- can be loaded faster.
-
- Say y if unsure.
-
config ARM64_SW_TTBR0_PAN
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
help
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 17bc8750bba7..b5fc851f1949 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -416,8 +416,6 @@ CONFIG_ARM64_CPU_PARK=y
CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_UNMAP_KERNEL_AT_EL0=y
CONFIG_RODATA_FULL_DEFAULT_ENABLED=y
-CONFIG_ARM64_PMEM_RESERVE=y
-CONFIG_ARM64_PMEM_LEGACY_DEVICE=y
# CONFIG_ARM64_SW_TTBR0_PAN is not set
CONFIG_ARM64_TAGGED_ADDR_ABI=y
CONFIG_ARM64_ILP32=y
@@ -6026,7 +6024,6 @@ CONFIG_ND_BTT=m
CONFIG_BTT=y
CONFIG_OF_PMEM=m
CONFIG_NVDIMM_KEYS=y
-CONFIG_PMEM_LEGACY=m
CONFIG_DAX_DRIVER=y
CONFIG_DAX=y
CONFIG_DEV_DAX=m
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index f6153250b631..169d90f11cf5 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -68,7 +68,6 @@ obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
obj-$(CONFIG_ARM64_MTE) += mte.o
obj-$(CONFIG_MPAM) += mpam/
-obj-$(CONFIG_ARM64_PMEM_LEGACY_DEVICE) += pmem.o
obj-y += vdso/ probes/
obj-$(CONFIG_COMPAT_VDSO) += vdso32/
diff --git a/arch/arm64/kernel/pmem.c b/arch/arm64/kernel/pmem.c
deleted file mode 100644
index 16eaf706f671..000000000000
--- a/arch/arm64/kernel/pmem.c
+++ /dev/null
@@ -1,35 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright(c) 2021 Huawei Technologies Co., Ltd
- *
- * Derived from x86 and arm64 implement PMEM.
- */
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-
-static int found(struct resource *res, void *data)
-{
- return 1;
-}
-
-static int __init register_e820_pmem(void)
-{
- struct platform_device *pdev;
- int rc;
-
- rc = walk_iomem_res_desc(IORES_DESC_PERSISTENT_MEMORY_LEGACY,
- IORESOURCE_MEM, 0, -1, NULL, found);
- if (rc <= 0)
- return 0;
-
- /*
- * See drivers/nvdimm/e820.c for the implementation, this is
- * simply here to trigger the module to load on demand.
- */
- pdev = platform_device_alloc("e820_pmem", -1);
-
- return platform_device_add(pdev);
-}
-device_initcall(register_e820_pmem);
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 88ec49504001..7cd042536d3b 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -70,10 +70,6 @@ static int __init arm64_enable_cpu0_hotplug(char *str)
__setup("arm64_cpu0_hotplug", arm64_enable_cpu0_hotplug);
#endif
-#ifdef CONFIG_ARM64_PMEM_RESERVE
-extern struct resource pmem_res;
-#endif
-
phys_addr_t __fdt_pointer __initdata;
/*
@@ -288,12 +284,6 @@ static void __init request_standard_resources(void)
request_resource(res, &pin_memory_resource);
#endif
}
-
-#ifdef CONFIG_ARM64_PMEM_RESERVE
- if (pmem_res.end && pmem_res.start)
- request_resource(&iomem_resource, &pmem_res);
-#endif
-
}
static int __init reserve_memblock_reserved_regions(void)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 3b9401ee9c58..e8d446164c76 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -55,7 +55,6 @@
*/
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);
-phys_addr_t start_at, mem_size;
#ifdef CONFIG_PIN_MEMORY
struct resource pin_memory_resource = {
@@ -112,18 +111,6 @@ static void __init reserve_pin_memory_res(void)
*/
phys_addr_t arm64_dma_phys_limit __ro_after_init;
-static unsigned long long pmem_size, pmem_start;
-
-#ifdef CONFIG_ARM64_PMEM_RESERVE
-struct resource pmem_res = {
- .name = "Persistent Memory (legacy)",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_MEM,
- .desc = IORES_DESC_PERSISTENT_MEMORY_LEGACY
-};
-#endif
-
#ifndef CONFIG_KEXEC_CORE
static void __init reserve_crashkernel(void)
{
@@ -417,83 +404,6 @@ static int __init reserve_park_mem(void)
}
#endif
-static bool __init is_mem_valid(unsigned long long mem_size, unsigned long long mem_start)
-{
- if (!memblock_is_region_memory(mem_start, mem_size)) {
- pr_warn("cannot reserve mem: region is not memory!\n");
- return false;
- }
-
- if (memblock_is_region_reserved(mem_start, mem_size)) {
- pr_warn("cannot reserve mem: region overlaps reserved memory!\n");
- return false;
- }
-
- if (!IS_ALIGNED(mem_start, SZ_2M)) {
- pr_warn("cannot reserve mem: base address is not 2MB aligned!\n");
- return false;
- }
-
- return true;
-}
-
-static int __init parse_memmap_one(char *p)
-{
- char *oldp;
-
- if (!p)
- return -EINVAL;
-
- oldp = p;
- mem_size = memparse(p, &p);
- if (p == oldp)
- return -EINVAL;
-
- if (!mem_size)
- return -EINVAL;
-
- mem_size = PAGE_ALIGN(mem_size);
-
- if (*p == '!') {
- start_at = memparse(p+1, &p);
-
- pmem_start = start_at;
- pmem_size = mem_size;
- } else
- pr_info("Unrecognized memmap option, please check the parameter.\n");
-
- return *p == '\0' ? 0 : -EINVAL;
-}
-
-static int __init parse_memmap_opt(char *str)
-{
- while (str) {
- char *k = strchr(str, ',');
-
- if (k)
- *k++ = 0;
- parse_memmap_one(str);
- str = k;
- }
-
- return 0;
-}
-early_param("memmap", parse_memmap_opt);
-
-#ifdef CONFIG_ARM64_PMEM_RESERVE
-static void __init reserve_pmem(void)
-{
- if (!is_mem_valid(mem_size, start_at))
- return;
-
- memblock_remove(pmem_start, pmem_size);
- pr_info("pmem reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
- pmem_start, pmem_start + pmem_size, pmem_size >> 20);
- pmem_res.start = pmem_start;
- pmem_res.end = pmem_start + pmem_size - 1;
-}
-#endif
-
void __init arm64_memblock_init(void)
{
const s64 linear_region_size = BIT(vabits_actual - 1);
@@ -668,10 +578,6 @@ void __init bootmem_init(void)
reserve_quick_kexec();
#endif
-#ifdef CONFIG_ARM64_PMEM_RESERVE
- reserve_pmem();
-#endif
-
reserve_pin_memory_res();
memblock_dump_all();
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index ce4de75262b9..b7d1eb38b27d 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -132,8 +132,3 @@ config NVDIMM_TEST_BUILD
infrastructure.
endif
-
-config PMEM_LEGACY
- tristate "Pmem_legacy"
- select X86_PMEM_LEGACY if X86
- select ARM64_PMEM_LEGACY_DEVICE if ARM64
diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile
index 6f8dc9242a81..04077532f7ed 100644
--- a/drivers/nvdimm/Makefile
+++ b/drivers/nvdimm/Makefile
@@ -3,7 +3,6 @@ obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o
obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
obj-$(CONFIG_ND_BTT) += nd_btt.o
obj-$(CONFIG_ND_BLK) += nd_blk.o
-obj-$(CONFIG_PMEM_LEGACY) += nd_e820.o
obj-$(CONFIG_OF_PMEM) += of_pmem.o
obj-$(CONFIG_VIRTIO_PMEM) += virtio_pmem.o nd_virtio.o
--
2.20.1

[PATCH openEuler-1.0-LTS 00/93] Enabling AMD Milan series processor support
by Laibin Qiu, 28 Dec '21
bugzilla: https://gitee.com/openeuler/kernel/issues/I4MKP4
Babu Moger (1):
KVM: SVM: Clear the CR4 register on reset
David Edmondson (1):
KVM: x86: clflushopt should be treated as a no-op by emulation
Fenghua Yu (2):
x86/cpufeatures: Enumerate MOVDIRI instruction
x86/cpufeatures: Enumerate MOVDIR64B instruction
Haiyan Song (2):
perf vendor events intel: Add Icelake V1.00 event file
perf vendor events intel: Add Tremontx event file v1.02
Isaac Vaughn (1):
EDAC/amd64: Add PCI device IDs for family 17h, model 70h
Jan H. Schönherr (1):
x86/mce: Fix use of uninitialized MCE message string
Jim Mattson (1):
kvm: x86: Expose RDPID in KVM_GET_SUPPORTED_CPUID
John Allen (2):
kvm/svm: PKU not currently supported
x86/microcode/AMD: Increase microcode PATCH_MAX_SIZE
John Garry (4):
perf jevents: Add support for Hisi hip08 DDRC PMU aliasing
perf jevents: Add support for Hisi hip08 HHA PMU aliasing
perf jevents: Add support for Hisi hip08 L3C PMU aliasing
perf vendor events arm64: Fix Hisi hip08 DDRC PMU eventname
Kai Huang (3):
kvm: x86: Move kvm_set_mmio_spte_mask() from x86.c to mmu.c
kvm: x86: Fix reserved bits related calculation errors caused by MKTME
kvm: x86: Fix L1TF mitigation for shadow MMU
Kan Liang (1):
perf vendor events intel: Add uncore_upi JSON support
Kim Phillips (17):
perf/amd/uncore: Prepare L3 thread mask code for Family 19h
perf/amd/uncore: Make L3 thread mask code more readable
perf/amd/uncore: Add support for Family 19h L3 PMU
arch/x86/amd/ibs: Fix re-arming IBS Fetch
perf/x86/amd/ibs: Fix raw sample data accumulation
perf/amd/uncore: Set all slices and threads to restore perf stat -a
behaviour
perf/x86/amd/ibs: Don't include randomized bits in get_ibs_op_count()
perf/amd/uncore: Prepare to scale for more attributes that vary per
family
perf/amd/uncore: Allow F17h user threadmask and slicemask
specification
perf/amd/uncore: Allow F19h user coreid, threadmask, and sliceid
specification
perf vendor events amd: Add L3 cache events for Family 17h
perf vendor events amd: Remove redundant '['
perf vendor events amd: Enable Family 19h users by matching Zen2
events
x86/cpu/amd: Call init_amd_zn() om Family 19h processors too
perf/x86/amd/ibs: Support 27-bit extended Op/cycle counter
tools/power turbostat: Support AMD Family 19h
perf vendor events amd: Add L2 Prefetch events for zen1
Krish Sadhukhan (1):
KVM: SVM: Replace hard-coded value with #define
Like Xu (1):
perf/x86/amd: Don't touch the AMD64_EVENTSEL_HOSTONLY bit inside the
guest
Liu Jingqi (2):
KVM: x86: expose MOVDIRI CPU feature into VM.
KVM: x86: expose MOVDIR64B CPU feature into VM.
Maciej S. Szmigiero (1):
KVM: mmu: Fix SPTE encoding of MMIO generation upper half
Marcel Bocu (1):
x86/amd_nb: Add PCI device IDs for family 17h, model 70h
Martin Liška (1):
perf vendor events amd: perf PMU events for AMD Family 17h
Nathan Chancellor (2):
crypto: ccp - Remove forward declaration
perf/amd/uncore: Fix sysfs type mismatch
Paolo Bonzini (3):
KVM: x86: only do L1TF workaround on affected processors
KVM: x86: assign two bits to track SPTE kinds
KVM: x86: fix overlap between SPTE_MMIO_MASK and generation
Rasmus Villemoes (1):
build_bug.h: add wrapper for _Static_assert
Sean Christopherson (13):
KVM: nVMX: Allocate and configure VM{READ,WRITE} bitmaps iff
enable_shadow_vmcs
KVM: x86: Add requisite includes to kvm_cache_regs.h
KVM: x86: Add requisite includes to hyperv.h
KVM: x86: Use a u64 when passing the MMIO gen around
KVM: Explicitly define the "memslot update in-progress" bit
KVM: x86: Refactor the MMIO SPTE generation handling
KVM: x86: Rename access permissions cache member in struct
kvm_vcpu_arch
KVM: x86/mmu: Add explicit access mask for MMIO SPTEs
KVM: x86/mmu: Consolidate "is MMIO SPTE" code
KVM: x86/mmu: Apply max PA check for MMIO sptes to 32-bit KVM
KVM: x86/mmu: Set mmio_value to '0' if reserved #PF can't be generated
KVM: Remove the hack to trigger memslot generation wraparound
KVM: Move the memslot update in-progress flag to bit 63
Sebastian Andrzej Siewior (1):
x86/pkeys: Don't check if PKRU is zero before writing it
Tom Lendacky (1):
KVM: SVM: Override default MMIO mask if memory encryption is enabled
Vijay Thakkar (3):
perf vendor events amd: Restrict model detection for zen1 based
processors
perf vendor events amd: Add Zen2 events
perf vendor events amd: Update Zen1 events to V2
Woods, Brian (2):
hwmon/k10temp, x86/amd_nb: Consolidate shared device IDs
x86/amd_nb: Add PCI device IDs for family 17h, model 30h
Yazen Ghannam (24):
EDAC/amd64: Drop some family checks for newer systems
x86/amd_nb: Add Family 19h PCI IDs
EDAC/mce_amd: Always load on SMCA systems
x86/MCE/AMD, EDAC/mce_amd: Add new MP5, NBIO, and PCIE SMCA bank types
x86/MCE/AMD, EDAC/mce_amd: Add new McaTypes for CS, PSP, and SMU units
x86/MCE/AMD, EDAC/mce_amd: Add new error descriptions for some SMCA
bank types
x86/MCE/AMD, EDAC/mce_amd: Add new Load Store unit McaType
EDAC/amd64: Use a macro for iterating over Unified Memory Controllers
EDAC/amd64: Support more than two controllers for chip selects
handling
EDAC/amd64: Initialize DIMM info for systems with more than two
channels
EDAC/amd64: Add Family 17h Model 30h PCI IDs
EDAC/amd64: Support more than two Unified Memory Controllers
EDAC/amd64: Set maximum channel layer size depending on family
EDAC/amd64: Recognize x16 symbol size
EDAC/amd64: Adjust printed chip select sizes when interleaved
EDAC/amd64: Find Chip Select memory size using Address Mask
EDAC/amd64: Cache secondary Chip Select registers
EDAC/amd64: Support asymmetric dual-rank DIMMs
EDAC/amd64: Set grain per DIMM
EDAC/amd64: Make struct amd64_family_type global
EDAC/amd64: Gather hardware information early
EDAC/amd64: Save max number of controllers to family type
EDAC/amd64: Add family ops for Family 19h Models 00h-0Fh
EDAC/amd64: Handle three rank interleaving mode
Documentation/virtual/kvm/mmu.txt | 13 +-
arch/x86/events/amd/ibs.c | 93 +-
arch/x86/events/amd/uncore.c | 179 ++--
arch/x86/events/perf_event.h | 3 +-
arch/x86/include/asm/cpufeatures.h | 4 +-
arch/x86/include/asm/kvm_host.h | 10 +-
arch/x86/include/asm/mce.h | 7 +
arch/x86/include/asm/microcode_amd.h | 2 +-
arch/x86/include/asm/msr-index.h | 1 +
arch/x86/include/asm/perf_event.h | 16 +-
arch/x86/kernel/amd_nb.c | 15 +-
arch/x86/kernel/cpu/amd.c | 3 +-
arch/x86/kernel/cpu/mce/amd.c | 28 +-
arch/x86/kernel/cpu/mce/core.c | 4 +-
arch/x86/kvm/cpuid.c | 6 +-
arch/x86/kvm/emulate.c | 8 +-
arch/x86/kvm/hyperv.h | 2 +
arch/x86/kvm/kvm_cache_regs.h | 2 +
arch/x86/kvm/mmu.c | 215 +++--
arch/x86/kvm/mmu.h | 2 +-
arch/x86/kvm/svm.c | 53 +-
arch/x86/kvm/vmx.c | 51 +-
arch/x86/kvm/x86.c | 33 +-
arch/x86/kvm/x86.h | 4 +-
arch/x86/mm/pkeys.c | 7 -
drivers/crypto/ccp/sp-platform.c | 53 +-
drivers/edac/amd64_edac.c | 609 ++++++++----
drivers/edac/amd64_edac.h | 31 +-
drivers/edac/mce_amd.c | 134 ++-
drivers/hwmon/k10temp.c | 9 +-
include/linux/build_bug.h | 19 +
include/linux/kvm_host.h | 21 +
include/linux/pci_ids.h | 5 +
.../arm64/hisilicon/hip08/uncore-ddrc.json | 44 +
.../arm64/hisilicon/hip08/uncore-hha.json | 51 +
.../arm64/hisilicon/hip08/uncore-l3c.json | 37 +
.../pmu-events/arch/x86/amdzen1/branch.json | 23 +
.../pmu-events/arch/x86/amdzen1/cache.json | 312 ++++++
.../pmu-events/arch/x86/amdzen1/core.json | 125 +++
.../arch/x86/amdzen1/floating-point.json | 224 +++++
.../pmu-events/arch/x86/amdzen1/memory.json | 184 ++++
.../pmu-events/arch/x86/amdzen1/other.json | 56 ++
.../pmu-events/arch/x86/amdzen2/branch.json | 52 +
.../pmu-events/arch/x86/amdzen2/cache.json | 338 +++++++
.../pmu-events/arch/x86/amdzen2/core.json | 130 +++
.../arch/x86/amdzen2/floating-point.json | 140 +++
.../pmu-events/arch/x86/amdzen2/memory.json | 341 +++++++
.../pmu-events/arch/x86/amdzen2/other.json | 115 +++
.../pmu-events/arch/x86/icelake/cache.json | 552 +++++++++++
.../arch/x86/icelake/floating-point.json | 102 ++
.../pmu-events/arch/x86/icelake/frontend.json | 424 +++++++++
.../pmu-events/arch/x86/icelake/memory.json | 410 ++++++++
.../pmu-events/arch/x86/icelake/other.json | 121 +++
.../pmu-events/arch/x86/icelake/pipeline.json | 892 ++++++++++++++++++
.../arch/x86/icelake/virtual-memory.json | 236 +++++
tools/perf/pmu-events/arch/x86/mapfile.csv | 6 +
.../pmu-events/arch/x86/tremontx/cache.json | 111 +++
.../arch/x86/tremontx/frontend.json | 26 +
.../pmu-events/arch/x86/tremontx/memory.json | 26 +
.../pmu-events/arch/x86/tremontx/other.json | 26 +
.../arch/x86/tremontx/pipeline.json | 111 +++
.../arch/x86/tremontx/uncore-memory.json | 73 ++
.../arch/x86/tremontx/uncore-other.json | 431 +++++++++
.../arch/x86/tremontx/uncore-power.json | 11 +
.../arch/x86/tremontx/virtual-memory.json | 86 ++
tools/perf/pmu-events/jevents.c | 5 +
tools/power/x86/turbostat/turbostat.c | 34 +-
virt/kvm/kvm_main.c | 36 +-
68 files changed, 6981 insertions(+), 552 deletions(-)
create mode 100644 tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json
create mode 100644 tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json
create mode 100644 tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen1/branch.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen1/cache.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen1/core.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen1/floating-point.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen1/memory.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen1/other.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen2/branch.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen2/cache.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen2/core.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen2/floating-point.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen2/memory.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen2/other.json
create mode 100644 tools/perf/pmu-events/arch/x86/icelake/cache.json
create mode 100644 tools/perf/pmu-events/arch/x86/icelake/floating-point.json
create mode 100644 tools/perf/pmu-events/arch/x86/icelake/frontend.json
create mode 100644 tools/perf/pmu-events/arch/x86/icelake/memory.json
create mode 100644 tools/perf/pmu-events/arch/x86/icelake/other.json
create mode 100644 tools/perf/pmu-events/arch/x86/icelake/pipeline.json
create mode 100644 tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json
create mode 100644 tools/perf/pmu-events/arch/x86/tremontx/cache.json
create mode 100644 tools/perf/pmu-events/arch/x86/tremontx/frontend.json
create mode 100644 tools/perf/pmu-events/arch/x86/tremontx/memory.json
create mode 100644 tools/perf/pmu-events/arch/x86/tremontx/other.json
create mode 100644 tools/perf/pmu-events/arch/x86/tremontx/pipeline.json
create mode 100644 tools/perf/pmu-events/arch/x86/tremontx/uncore-memory.json
create mode 100644 tools/perf/pmu-events/arch/x86/tremontx/uncore-other.json
create mode 100644 tools/perf/pmu-events/arch/x86/tremontx/uncore-power.json
create mode 100644 tools/perf/pmu-events/arch/x86/tremontx/virtual-memory.json
--
2.22.0

[PATCH openEuler-1.0-LTS] netfilter: fix regression in looped (broad|multi)cast's MAC handling
by Yang Yingliang, 28 Dec '21
From: Ignacy Gawędzki <ignacy.gawedzki(a)green-communications.fr>
mainline inclusion
from mainline-v5.16-rc7
commit ebb966d3bdfed581ecccbb4a7432341baf7619b4
category: bugfix
bugzilla: NA
CVE: NA
--------------------------------
In commit 5648b5e1169f ("netfilter: nfnetlink_queue: fix OOB when mac
header was cleared"), the test for non-empty MAC header introduced in
commit 2c38de4c1f8da7 ("netfilter: fix looped (broad|multi)cast's MAC
handling") has been replaced with a test for a set MAC header.
This breaks the case when the MAC header has been reset (using
skb_reset_mac_header), as is the case with looped-back multicast
packets. As a result, the packets ending up in NFQUEUE get a bogus
hwaddr interpreted from the first bytes of the IP header.
This patch adds a test for a non-empty MAC header in addition to the
test for a set MAC header. The same two tests are also implemented in
nfnetlink_log.c, where the initial code of commit 2c38de4c1f8da7
("netfilter: fix looped (broad|multi)cast's MAC handling") has not been
touched, but where supposedly the same situation may happen.
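A minimal sketch of the distinction the two checks draw (annotation
based on the description above, assuming the usual skb helper
semantics):

    /* After skb_reset_mac_header(skb), as with looped-back multicast:
     *   skb_mac_header_was_set(skb)  -> true (offset is valid)
     *   skb_mac_header_len(skb)      -> 0    (mac == network header)
     * Testing only "was set" would copy a bogus hwaddr out of the
     * first bytes of the IP header, hence the added length check.
     */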
Fixes: 5648b5e1169f ("netfilter: nfnetlink_queue: fix OOB when mac header was cleared")
Signed-off-by: Ignacy Gawędzki <ignacy.gawedzki(a)green-communications.fr>
Reviewed-by: Florian Westphal <fw(a)strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo(a)netfilter.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Reviewed-by: Yue Haibing <yuehaibing(a)huawei.com>
Reviewed-by: Wei Yongjun <weiyongjun1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
net/netfilter/nfnetlink_log.c | 3 ++-
net/netfilter/nfnetlink_queue.c | 3 ++-
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 25298b3eb8546..17ca9a681d47b 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -509,7 +509,8 @@ __build_packet_message(struct nfnl_log_net *log,
goto nla_put_failure;
if (indev && skb->dev &&
- skb->mac_header != skb->network_header) {
+ skb_mac_header_was_set(skb) &&
+ skb_mac_header_len(skb) != 0) {
struct nfulnl_msg_packet_hw phw;
int len;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index eb5a052d3b252..8955431f2ab26 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -566,7 +566,8 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
goto nla_put_failure;
if (indev && entskb->dev &&
- skb_mac_header_was_set(entskb)) {
+ skb_mac_header_was_set(entskb) &&
+ skb_mac_header_len(entskb) != 0) {
struct nfqnl_msg_packet_hw phw;
int len;
--
2.25.1
euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4OBDE
CVE: NA
-------------------------------------------------
This config enables dangerous old drivers. It is selected by
CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT, which the upstream community
suggests "modern distros should consider turning off".
CONFIG_DRM_VM was selected by CONFIG_DRM_LEGACY and can now be
turned off as well.
Other distros have already disabled this config:
https://src.fedoraproject.org/rpms/kernel/blob/rawhide/f/kernel-aarch64-rhe…
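The same change can be made to an existing .config with the
scripts/config helper from the kernel source tree (illustrative
commands, not part of this patch):

    ./scripts/config --disable DRM_LEGACY \
                     --disable NOUVEAU_LEGACY_CTX_SUPPORT
    make olddefconfig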
Signed-off-by: Liu Zixian <liuzixian4(a)huawei.com>
---
arch/arm64/configs/openeuler_defconfig | 10 ++--------
arch/x86/configs/openeuler_defconfig | 10 ++--------
2 files changed, 4 insertions(+), 16 deletions(-)
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 17bc8750b..6949824e8 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -4673,7 +4673,6 @@ CONFIG_DRM_TTM_DMA_PAGE_POOL=y
CONFIG_DRM_VRAM_HELPER=y
CONFIG_DRM_TTM_HELPER=y
CONFIG_DRM_GEM_SHMEM_HELPER=y
-CONFIG_DRM_VM=y
CONFIG_DRM_SCHED=m
#
@@ -4719,7 +4718,7 @@ CONFIG_DRM_AMD_DC_DCN=y
# CONFIG_HSA_AMD is not set
CONFIG_DRM_NOUVEAU=m
-CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT=y
+# CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT is not set
CONFIG_NOUVEAU_DEBUG=5
CONFIG_NOUVEAU_DEBUG_DEFAULT=3
# CONFIG_NOUVEAU_DEBUG_MMU is not set
@@ -4817,12 +4816,7 @@ CONFIG_DRM_CIRRUS_QEMU=m
# CONFIG_DRM_LIMA is not set
# CONFIG_DRM_PANFROST is not set
# CONFIG_DRM_TIDSS is not set
-CONFIG_DRM_LEGACY=y
-# CONFIG_DRM_TDFX is not set
-# CONFIG_DRM_R128 is not set
-# CONFIG_DRM_MGA is not set
-# CONFIG_DRM_VIA is not set
-# CONFIG_DRM_SAVAGE is not set
+# CONFIG_DRM_LEGACY is not set
CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
#
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index 7b6083018..dbae04231 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -5040,7 +5040,6 @@ CONFIG_DRM_TTM_DMA_PAGE_POOL=y
CONFIG_DRM_VRAM_HELPER=m
CONFIG_DRM_TTM_HELPER=m
CONFIG_DRM_GEM_SHMEM_HELPER=y
-CONFIG_DRM_VM=y
CONFIG_DRM_SCHED=m
#
@@ -5085,7 +5084,7 @@ CONFIG_DRM_AMD_DC_DCN=y
# CONFIG_HSA_AMD is not set
CONFIG_DRM_NOUVEAU=m
-CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT=y
+# CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT is not set
CONFIG_NOUVEAU_DEBUG=5
CONFIG_NOUVEAU_DEBUG_DEFAULT=3
# CONFIG_NOUVEAU_DEBUG_MMU is not set
@@ -5226,12 +5225,7 @@ CONFIG_DRM_CIRRUS_QEMU=m
# CONFIG_TINYDRM_ST7735R is not set
# CONFIG_DRM_XEN is not set
# CONFIG_DRM_VBOXVIDEO is not set
-CONFIG_DRM_LEGACY=y
-# CONFIG_DRM_TDFX is not set
-# CONFIG_DRM_R128 is not set
-# CONFIG_DRM_MGA is not set
-# CONFIG_DRM_VIA is not set
-# CONFIG_DRM_SAVAGE is not set
+# CONFIG_DRM_LEGACY is not set
CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
#
--
2.27.0
Dear Madam/Sir:
Hello!
Thank you for choosing to cooperate with Huawei and for your support
throughout our cooperation!
To better achieve mutual growth and development, we are currently
conducting the "2021 Huawei Ecosystem Health Survey". For this
purpose, we have commissioned an independent third-party research
firm, Guangzhou Nielsen Market Research Co., Ltd. ("Nielsen"), to
carry out the survey, in order to understand your assessment of and
suggestions for the Huawei ecosystem and to analyze improvements. The
survey content is for project research only and will be kept strictly
confidential.
The questionnaire takes about 5-10 minutes. To participate, click the
survey link:
https://csurveys.nielseniq.cn/wix/p1611532.aspx
If you have any questions about this survey or need any help, you can
reach us at Ecosurvey(a)huawei.com; Huawei staff will answer your
questions.
Thank you for your participation!
2021 Huawei Ecosystem Health Survey Project Team
From: sdlzx <hdu_sdlzx(a)163.com>
redhat inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4OBDE
CVE: NA
-------------------------------------------------
This config enables dangerous old drivers. It is selected by
CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT, which the upstream community
suggests "modern distros should consider turning off".
CONFIG_DRM_VM was selected by CONFIG_DRM_LEGACY and can now be
turned off as well.
Other distros have already disabled this config:
https://src.fedoraproject.org/rpms/kernel/blob/rawhide/f/kernel-aarch64-rhe…
Signed-off-by: sdlzx <hdu_sdlzx(a)163.com>
---
arch/arm64/configs/openeuler_defconfig | 10 ++--------
arch/x86/configs/openeuler_defconfig | 10 ++--------
2 files changed, 4 insertions(+), 16 deletions(-)
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 17bc8750b..6949824e8 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -4673,7 +4673,6 @@ CONFIG_DRM_TTM_DMA_PAGE_POOL=y
CONFIG_DRM_VRAM_HELPER=y
CONFIG_DRM_TTM_HELPER=y
CONFIG_DRM_GEM_SHMEM_HELPER=y
-CONFIG_DRM_VM=y
CONFIG_DRM_SCHED=m
#
@@ -4719,7 +4718,7 @@ CONFIG_DRM_AMD_DC_DCN=y
# CONFIG_HSA_AMD is not set
CONFIG_DRM_NOUVEAU=m
-CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT=y
+# CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT is not set
CONFIG_NOUVEAU_DEBUG=5
CONFIG_NOUVEAU_DEBUG_DEFAULT=3
# CONFIG_NOUVEAU_DEBUG_MMU is not set
@@ -4817,12 +4816,7 @@ CONFIG_DRM_CIRRUS_QEMU=m
# CONFIG_DRM_LIMA is not set
# CONFIG_DRM_PANFROST is not set
# CONFIG_DRM_TIDSS is not set
-CONFIG_DRM_LEGACY=y
-# CONFIG_DRM_TDFX is not set
-# CONFIG_DRM_R128 is not set
-# CONFIG_DRM_MGA is not set
-# CONFIG_DRM_VIA is not set
-# CONFIG_DRM_SAVAGE is not set
+# CONFIG_DRM_LEGACY is not set
CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
#
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index 7b6083018..dbae04231 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -5040,7 +5040,6 @@ CONFIG_DRM_TTM_DMA_PAGE_POOL=y
CONFIG_DRM_VRAM_HELPER=m
CONFIG_DRM_TTM_HELPER=m
CONFIG_DRM_GEM_SHMEM_HELPER=y
-CONFIG_DRM_VM=y
CONFIG_DRM_SCHED=m
#
@@ -5085,7 +5084,7 @@ CONFIG_DRM_AMD_DC_DCN=y
# CONFIG_HSA_AMD is not set
CONFIG_DRM_NOUVEAU=m
-CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT=y
+# CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT is not set
CONFIG_NOUVEAU_DEBUG=5
CONFIG_NOUVEAU_DEBUG_DEFAULT=3
# CONFIG_NOUVEAU_DEBUG_MMU is not set
@@ -5226,12 +5225,7 @@ CONFIG_DRM_CIRRUS_QEMU=m
# CONFIG_TINYDRM_ST7735R is not set
# CONFIG_DRM_XEN is not set
# CONFIG_DRM_VBOXVIDEO is not set
-CONFIG_DRM_LEGACY=y
-# CONFIG_DRM_TDFX is not set
-# CONFIG_DRM_R128 is not set
-# CONFIG_DRM_MGA is not set
-# CONFIG_DRM_VIA is not set
-# CONFIG_DRM_SAVAGE is not set
+# CONFIG_DRM_LEGACY is not set
CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
#
--
2.27.0

[PATCH OLK-5.10 00/13] Introduce AESNI/AVX and AESNI/AVX2 accelerated implementation for SM4 algorithm
by shenzijun, 28 Dec '21
From: 沈子俊 <shenzijun(a)kylinos.cn>
This patchset adds GCM/CCM mode tests for SM4. The GCM/CCM modes of
SM4 are defined in the RFC 8998 specification:
https://datatracker.ietf.org/doc/html/rfc8998
This patchset extracts the common SM4 algorithm into a separate
library; at the same time, the arm64 SM4 acceleration was adjusted to
use this library. It then introduces AESNI/AVX and AESNI/AVX2
accelerated implementations on x86_64, where the AESNI/AVX2
implementation reuses functions from the AESNI/AVX one.
The optimization supports four SM4 modes: ECB, CBC, CFB, and CTR.
Since CBC and CFB do not support parallel encryption of multiple
blocks, the optimization effect there is limited. All selftests pass.
The main algorithm implementation comes from SM4 AES-NI work by
libgcrypt and Markku-Juhani O. Saarinen at:
https://github.com/mjosaarinen/sm4ni
Finally, add the configuration options for SM4 acceleration and
introduce some bugfixes for the AESNI/AVX2 accelerated implementation.
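As a rough sketch of what extracting SM4 into a library means for
callers (names follow the mainline sm4 library this work is based on;
treat the exact signatures here as illustrative):

    #include <crypto/sm4.h>

    /* Encrypt a single 16-byte block with the shared SM4 library. */
    static void sm4_encrypt_one(const u8 key[16], const u8 in[16],
                                u8 out[16])
    {
        struct sm4_ctx ctx;

        sm4_expandkey(&ctx, key, 16);           /* derive round keys */
        sm4_crypt_block(ctx.rkey_enc, out, in); /* one-block encrypt */
    }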
Benchmark on an Intel i5-6200U @ 2.30GHz, comparing three
implementations: pure software sm4-generic, aesni/avx acceleration,
and aesni/avx2 acceleration. The data comes from tcrypt modes 218 and
518; columns are blocks of different lengths, and values are in Mb/s:
block-size | 16 64 128 256 1024 1420 4096
sm4-generic
ECB enc | 60.94 70.41 72.27 73.02 73.87 73.58 73.59
ECB dec | 61.87 70.53 72.15 73.09 73.89 73.92 73.86
CBC enc | 56.71 66.31 68.05 69.84 70.02 70.12 70.24
CBC dec | 54.54 65.91 68.22 69.51 70.63 70.79 70.82
CFB enc | 57.21 67.24 69.10 70.25 70.73 70.52 71.42
CFB dec | 57.22 64.74 66.31 67.24 67.40 67.64 67.58
CTR enc | 59.47 68.64 69.91 71.02 71.86 71.61 71.95
CTR dec | 59.94 68.77 69.95 71.00 71.84 71.55 71.95
sm4-aesni-avx
ECB enc | 44.95 177.35 292.06 316.98 339.48 322.27 330.59
ECB dec | 45.28 178.66 292.31 317.52 339.59 322.52 331.16
CBC enc | 57.75 67.68 69.72 70.60 71.48 71.63 71.74
CBC dec | 44.32 176.83 284.32 307.24 328.61 312.61 325.82
CFB enc | 57.81 67.64 69.63 70.55 71.40 71.35 71.70
CFB dec | 43.14 167.78 282.03 307.20 328.35 318.24 325.95
CTR enc | 42.35 163.32 279.11 302.93 320.86 310.56 317.93
CTR dec | 42.39 162.81 278.49 302.37 321.11 310.33 318.37
sm4-aesni-avx2
ECB enc | 45.19 177.41 292.42 316.12 339.90 322.53 330.54
ECB dec | 44.83 178.90 291.45 317.31 339.85 322.55 331.07
CBC enc | 57.66 67.62 69.73 70.55 71.58 71.66 71.77
CBC dec | 44.34 176.86 286.10 501.68 559.58 483.87 527.46
CFB enc | 57.43 67.60 69.61 70.52 71.43 71.28 71.65
CFB dec | 43.12 167.75 268.09 499.33 558.35 490.36 524.73
CTR enc | 42.42 163.39 256.17 493.95 552.45 481.58 517.19
CTR dec | 42.49 163.11 256.36 493.34 552.62 481.49 516.83
From the benchmark data, at a block size of 1024 the AVX2
implementation is about 70% faster than the AVX implementation and
achieves 7.7 times the throughput of the pure software sm4-generic
implementation.
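For reference, the tcrypt modes cited above are run by loading the
tcrypt test module (illustrative invocation; the module intentionally
fails to stay loaded after the tests run, and results land in the
kernel log):

    modprobe tcrypt mode=218 sec=1   # SM4 speed tests
    modprobe tcrypt mode=518 sec=1   # async SM4 speed tests
    dmesg | tail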
沈子俊 (13):
crypto: tcrypt - Fix missing return value check
crypto: testmgr - Add GCM/CCM mode test of SM4 algorithm
crypto: tcrypt - add GCM/CCM mode test for SM4 algorithm
crypto: sm4 - create SM4 library based on sm4 generic code
crypto: arm64/sm4-ce - Make dependent on sm4 library instead of
sm4-generic
crypto: x86/sm4 - add AES-NI/AVX/x86_64 implementation
crypto: tcrypt - add the asynchronous speed test for SM4
crypto: x86/sm4 - export reusable AESNI/AVX functions
crypto: x86/sm4 - add AES-NI/AVX2/x86_64 implementation
Add the configuration for accelerated of SM4
crypto: x86/sm4 - Fix frame pointer stack corruption
crypto: sm4 - Do not change section of ck and sbox
crypto: x86/sm4 - Fix invalid section entry size
arch/arm64/crypto/Kconfig | 2 +-
arch/arm64/crypto/sm4-ce-glue.c | 20 +-
arch/x86/configs/openeuler_defconfig | 2 +
arch/x86/crypto/Makefile | 6 +
arch/x86/crypto/sm4-aesni-avx-asm_64.S | 594 ++++++++++++++++++++++++
arch/x86/crypto/sm4-aesni-avx2-asm_64.S | 501 ++++++++++++++++++++
arch/x86/crypto/sm4-avx.h | 24 +
arch/x86/crypto/sm4_aesni_avx2_glue.c | 169 +++++++
arch/x86/crypto/sm4_aesni_avx_glue.c | 487 +++++++++++++++++++
crypto/Kconfig | 44 ++
crypto/sm4_generic.c | 180 +------
crypto/tcrypt.c | 99 +++-
crypto/testmgr.c | 29 ++
crypto/testmgr.h | 148 ++++++
include/crypto/sm4.h | 25 +-
lib/crypto/Kconfig | 3 +
lib/crypto/Makefile | 3 +
lib/crypto/sm4.c | 176 +++++++
18 files changed, 2325 insertions(+), 187 deletions(-)
create mode 100644 arch/x86/crypto/sm4-aesni-avx-asm_64.S
create mode 100644 arch/x86/crypto/sm4-aesni-avx2-asm_64.S
create mode 100644 arch/x86/crypto/sm4-avx.h
create mode 100644 arch/x86/crypto/sm4_aesni_avx2_glue.c
create mode 100644 arch/x86/crypto/sm4_aesni_avx_glue.c
create mode 100644 lib/crypto/sm4.c
--
2.30.0

28 Dec '21
Ramaxel inclusion
category: features
bugzilla: https://gitee.com/openeuler/kernel/issues/I4JXCG
CVE: NA
Changes from v2:
1. Split scmd_tmout_nonpt into two parameters:
scmd_tmout_vd/scmd_tmout_rawdisk
2. Return -ETIME instead of -EINVAL when a command times out.
3. Add one module parameter: max_io_force.
Changes from v1:
1. Add more debug info.
2. Remove some unnecessary module parameters.
3. Report disks in order of channel/target id.
4. Add host_reset handler.
5. Use get_unaligned_be24.
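For reference, the parameters touched in this revision can be set at
module load time in the usual way (illustrative values):

    modprobe spraid scmd_tmout_vd=180 scmd_tmout_rawdisk=180 \
                    max_io_force=0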
Signed-off-by: Yanling Song <songyl(a)ramaxel.com>
Reviewed-by: Jiang Yu<yujiang(a)ramaxel.com>
---
drivers/scsi/spraid/Kconfig | 6 +-
drivers/scsi/spraid/spraid.h | 142 ++-
drivers/scsi/spraid/spraid_main.c | 1633 +++++++++++++++--------------
3 files changed, 968 insertions(+), 813 deletions(-)
diff --git a/drivers/scsi/spraid/Kconfig b/drivers/scsi/spraid/Kconfig
index 83962efaab07..bfbba3db8db0 100644
--- a/drivers/scsi/spraid/Kconfig
+++ b/drivers/scsi/spraid/Kconfig
@@ -5,7 +5,9 @@
config RAMAXEL_SPRAID
tristate "Ramaxel spraid Adapter"
depends on PCI && SCSI
+ select BLK_DEV_BSGLIB
depends on ARM64 || X86_64
- default m
help
- This driver supports Ramaxel spraid driver.
+ This driver supports the Ramaxel SPRxxx series
+ raid controller, which has a PCIe Gen4 host
+ interface and supports SAS/SATA HDDs/SSDs.
diff --git a/drivers/scsi/spraid/spraid.h b/drivers/scsi/spraid/spraid.h
index da46d8e1b4b6..983d7af2faa8 100644
--- a/drivers/scsi/spraid/spraid.h
+++ b/drivers/scsi/spraid/spraid.h
@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
#ifndef __SPRAID_H_
#define __SPRAID_H_
@@ -24,7 +25,7 @@
#define SENSE_SIZE(depth) ((depth) * SCSI_SENSE_BUFFERSIZE)
#define SPRAID_AQ_DEPTH 128
-#define SPRAID_NR_AEN_COMMANDS 1
+#define SPRAID_NR_AEN_COMMANDS 16
#define SPRAID_AQ_BLK_MQ_DEPTH (SPRAID_AQ_DEPTH - SPRAID_NR_AEN_COMMANDS)
#define SPRAID_AQ_MQ_TAG_DEPTH (SPRAID_AQ_BLK_MQ_DEPTH - 1)
@@ -44,7 +45,7 @@
#define SMALL_POOL_SIZE 256
#define MAX_SMALL_POOL_NUM 16
-#define MAX_CMD_PER_DEV 32
+#define MAX_CMD_PER_DEV 64
#define MAX_CDB_LEN 32
#define SPRAID_UP_TO_MULTY4(x) (((x) + 4) & (~0x03))
@@ -53,7 +54,7 @@
#define PCI_VENDOR_ID_RAMAXEL_LOGIC 0x1E81
-#define SPRAID_SERVER_DEVICE_HAB_DID 0x2100
+#define SPRAID_SERVER_DEVICE_HBA_DID 0x2100
#define SPRAID_SERVER_DEVICE_RAID_DID 0x2200
#define IO_6_DEFAULT_TX_LEN 256
@@ -142,11 +143,15 @@ enum {
enum {
SPRAID_AEN_DEV_CHANGED = 0x00,
+ SPRAID_AEN_FW_ACT_START = 0x01,
SPRAID_AEN_HOST_PROBING = 0x10,
};
enum {
- SPRAID_AEN_TIMESYN = 0x07
+ SPRAID_AEN_TIMESYN = 0x00,
+ SPRAID_AEN_FW_ACT_FINISH = 0x02,
+ SPRAID_AEN_EVENT_MIN = 0x80,
+ SPRAID_AEN_EVENT_MAX = 0xff,
};
enum {
@@ -175,6 +180,16 @@ enum spraid_state {
SPRAID_DEAD,
};
+enum {
+ SPRAID_CARD_HBA,
+ SPRAID_CARD_RAID,
+};
+
+enum spraid_cmd_type {
+ SPRAID_CMD_ADM,
+ SPRAID_CMD_IOPT,
+};
+
struct spraid_completion {
__le32 result;
union {
@@ -217,8 +232,6 @@ struct spraid_dev {
struct dma_pool *prp_page_pool;
struct dma_pool *prp_small_pool[MAX_SMALL_POOL_NUM];
mempool_t *iod_mempool;
- struct blk_mq_tag_set admin_tagset;
- struct request_queue *admin_q;
void __iomem *bar;
u32 max_qid;
u32 num_vecs;
@@ -232,23 +245,27 @@ struct spraid_dev {
u32 ctrl_config;
u32 online_queues;
u64 cap;
- struct device ctrl_device;
- struct cdev cdev;
int instance;
struct spraid_ctrl_info *ctrl_info;
struct spraid_dev_info *devices;
- struct spraid_ioq_ptcmd *ioq_ptcmds;
+ struct spraid_cmd *adm_cmds;
+ struct list_head adm_cmd_list;
+ spinlock_t adm_cmd_lock;
+
+ struct spraid_cmd *ioq_ptcmds;
struct list_head ioq_pt_list;
spinlock_t ioq_pt_lock;
- struct work_struct aen_work;
struct work_struct scan_work;
struct work_struct timesyn_work;
struct work_struct reset_work;
+ struct work_struct fw_act_work;
enum spraid_state state;
spinlock_t state_lock;
+
+ struct request_queue *bsg_queue;
};
struct spraid_sgl_desc {
@@ -347,6 +364,35 @@ struct spraid_get_info {
__u32 rsvd12[4];
};
+struct spraid_usr_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ union {
+ struct {
+ __le16 subopcode;
+ __le16 rsvd1;
+ } info_0;
+ __le32 cdw2;
+ };
+ union {
+ struct {
+ __le16 data_len;
+ __le16 param_len;
+ } info_1;
+ __le32 cdw3;
+ };
+ __u64 metadata;
+ union spraid_data_ptr dptr;
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
+};
+
enum {
SPRAID_CMD_FLAG_SGL_METABUF = (1 << 6),
SPRAID_CMD_FLAG_SGL_METASEG = (1 << 7),
@@ -393,6 +439,7 @@ struct spraid_admin_command {
struct spraid_get_info get_info;
struct spraid_abort_cmd abort;
struct spraid_reset_cmd reset;
+ struct spraid_usr_cmd usr_cmd;
};
};
@@ -456,9 +503,6 @@ struct spraid_ioq_command {
};
};
-#define SPRAID_IOCTL_RESET_CMD _IOWR('N', 0x80, struct spraid_passthru_common_cmd)
-#define SPRAID_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct spraid_passthru_common_cmd)
-
struct spraid_passthru_common_cmd {
__u8 opcode;
__u8 flags;
@@ -494,8 +538,6 @@ struct spraid_passthru_common_cmd {
__u32 result1;
};
-#define SPRAID_IOCTL_IOQ_CMD _IOWR('N', 0x42, struct spraid_ioq_passthru_cmd)
-
struct spraid_ioq_passthru_cmd {
__u8 opcode;
__u8 flags;
@@ -560,7 +602,21 @@ struct spraid_ioq_passthru_cmd {
__u32 result1;
};
-struct spraid_ioq_ptcmd {
+struct spraid_bsg_request {
+ u32 msgcode;
+ u32 control;
+ union {
+ struct spraid_passthru_common_cmd admcmd;
+ struct spraid_ioq_passthru_cmd ioqcmd;
+ };
+};
+
+enum {
+ SPRAID_BSG_ADM,
+ SPRAID_BSG_IOQ,
+};
+
+struct spraid_cmd {
int qid;
int cid;
u32 result0;
@@ -572,14 +628,6 @@ struct spraid_ioq_ptcmd {
struct list_head list;
};
-struct spraid_admin_request {
- struct spraid_admin_command *cmd;
- u32 result0;
- u32 result1;
- u16 flags;
- u16 status;
-};
-
struct spraid_queue {
struct spraid_dev *hdev;
spinlock_t sq_lock; /* spinlock for lock handling */
@@ -607,7 +655,6 @@ struct spraid_queue {
};
struct spraid_iod {
- struct spraid_admin_request req;
struct spraid_queue *spraidq;
enum spraid_cmd_state state;
int npages;
@@ -623,13 +670,51 @@ struct spraid_iod {
};
#define SPRAID_DEV_INFO_ATTR_BOOT(attr) ((attr) & 0x01)
-#define SPRAID_DEV_INFO_ATTR_HDD(attr) ((attr) & 0x02)
+#define SPRAID_DEV_INFO_ATTR_VD(attr) (((attr) & 0x02) == 0x0)
#define SPRAID_DEV_INFO_ATTR_PT(attr) (((attr) & 0x22) == 0x02)
#define SPRAID_DEV_INFO_ATTR_RAWDISK(attr) ((attr) & 0x20)
#define SPRAID_DEV_INFO_FLAG_VALID(flag) ((flag) & 0x01)
#define SPRAID_DEV_INFO_FLAG_CHANGE(flag) ((flag) & 0x02)
+#define BGTASK_TYPE_REBUILD 4
+#define USR_CMD_READ 0xc2
+#define USR_CMD_RDLEN 0x1000
+#define USR_CMD_VDINFO 0x704
+#define USR_CMD_BGTASK 0x504
+#define VDINFO_PARAM_LEN 0x04
+
+struct spraid_vd_info {
+ __u8 name[32];
+ __le16 id;
+ __u8 rg_id;
+ __u8 rg_level;
+ __u8 sg_num;
+ __u8 sg_disk_num;
+ __u8 vd_status;
+ __u8 vd_type;
+ __u8 rsvd1[4056];
+};
+
+#define MAX_REALTIME_BGTASK_NUM 32
+
+struct bgtask_info {
+ __u8 type;
+ __u8 progress;
+ __u8 rate;
+ __u8 rsvd0;
+ __le16 vd_id;
+ __le16 time_left;
+ __u8 rsvd1[4];
+};
+
+struct spraid_bgtask {
+ __u8 sw;
+ __u8 task_num;
+ __u8 rsvd[6];
+ struct bgtask_info bgtask[MAX_REALTIME_BGTASK_NUM];
+};
+
struct spraid_dev_info {
__le32 hdid;
__le16 target;
@@ -649,6 +734,11 @@ struct spraid_dev_list {
struct spraid_sdev_hostdata {
u32 hdid;
+ u16 max_io_kb;
+ u8 attr;
+ u8 flag;
+ u8 rg_id;
+ u8 rsvd[3];
};
#endif
diff --git a/drivers/scsi/spraid/spraid_main.c b/drivers/scsi/spraid/spraid_main.c
index a0a75ecb0027..c6d2a0b8e35e 100644
--- a/drivers/scsi/spraid/spraid_main.c
+++ b/drivers/scsi/spraid/spraid_main.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/*
- * Linux spraid device driver
- * Copyright(c) 2021 Ramaxel Memory Technology, Ltd
- */
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+/* Ramaxel Raid SPXXX Series Linux Driver */
+
#define pr_fmt(fmt) "spraid: " fmt
#include <linux/sched/signal.h>
@@ -23,6 +23,9 @@
#include <linux/debugfs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/blkdev.h>
+#include <linux/bsg-lib.h>
+#include <asm/unaligned.h>
+#include <linux/sort.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -31,27 +34,24 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_dbg.h>
+
#include "spraid.h"
static u32 admin_tmout = 60;
module_param(admin_tmout, uint, 0644);
MODULE_PARM_DESC(admin_tmout, "admin commands timeout (seconds)");
-static u32 scmd_tmout_pt = 30;
-module_param(scmd_tmout_pt, uint, 0644);
-MODULE_PARM_DESC(scmd_tmout_pt, "scsi commands timeout for passthrough(seconds)");
+static u32 scmd_tmout_rawdisk = 180;
+module_param(scmd_tmout_rawdisk, uint, 0644);
+MODULE_PARM_DESC(scmd_tmout_rawdisk, "scsi commands timeout for rawdisk(seconds)");
-static u32 scmd_tmout_nonpt = 180;
-module_param(scmd_tmout_nonpt, uint, 0644);
-MODULE_PARM_DESC(scmd_tmout_nonpt, "scsi commands timeout for rawdisk&raid(seconds)");
+static u32 scmd_tmout_vd = 180;
+module_param(scmd_tmout_vd, uint, 0644);
+MODULE_PARM_DESC(scmd_tmout_vd, "scsi commands timeout for vd(seconds)");
-static u32 wait_abl_tmout = 3;
-module_param(wait_abl_tmout, uint, 0644);
-MODULE_PARM_DESC(wait_abl_tmout, "wait abnormal io timeout(seconds)");
-
-static bool use_sgl_force;
-module_param(use_sgl_force, bool, 0644);
-MODULE_PARM_DESC(use_sgl_force, "force IO use sgl format, default false");
+static bool max_io_force;
+module_param(max_io_force, bool, 0644);
+MODULE_PARM_DESC(max_io_force, "force max_hw_sectors_kb = 1024, default false(performance first)");
static int ioq_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops ioq_depth_ops = {
@@ -106,16 +106,20 @@ static const struct kernel_param_ops small_pool_num_ops = {
.get = param_get_byte,
};
+/* It was found that the spinlock of a single pool conflicts
+ * a lot with multiple CPUs. So multiple pools are introduced
+ * to reduce the contention.
+ */
static unsigned char small_pool_num = 4;
module_param_cb(small_pool_num, &small_pool_num_ops, &small_pool_num, 0644);
MODULE_PARM_DESC(small_pool_num, "set prp small pool num, default 4, MAX 16");
static void spraid_free_queue(struct spraid_queue *spraidq);
static void spraid_handle_aen_notice(struct spraid_dev *hdev, u32 result);
-static void spraid_handle_aen_vs(struct spraid_dev *hdev, u32 result);
+static void spraid_handle_aen_vs(struct spraid_dev *hdev, u32 result, u32 result1);
static DEFINE_IDA(spraid_instance_ida);
-static dev_t spraid_chr_devt;
+
static struct class *spraid_class;
#define SPRAID_CAP_TIMEOUT_UNIT_MS (HZ / 2)
@@ -131,9 +135,8 @@ static struct workqueue_struct *spraid_wq;
#define SPRAID_DRV_VERSION "1.0.0.0"
#define ADMIN_TIMEOUT (admin_tmout * HZ)
-#define ADMIN_ERR_TIMEOUT 32757
-#define SPRAID_WAIT_ABNL_CMD_TIMEOUT (wait_abl_tmout * 2)
+#define SPRAID_WAIT_ABNL_CMD_TIMEOUT (3 * 2)
#define SPRAID_DMA_MSK_BIT_MAX 64
@@ -147,6 +150,13 @@ enum FW_STAT_CODE {
FW_STAT_NEED_RETRY
};
+static const char * const raid_levels[] = {"0", "1", "5", "6", "10", "50", "60", "NA"};
+
+static const char * const raid_states[] = {
+ "NA", "NORMAL", "FAULT", "DEGRADE", "NOT_FORMATTED", "FORMATTING", "SANITIZING",
+ "INITIALIZING", "INITIALIZE_FAIL", "DELETING", "DELETE_FAIL", "WRITE_PROTECT"
+};
+
static int ioq_depth_set(const char *val, const struct kernel_param *kp)
{
int n = 0;
@@ -231,12 +241,6 @@ static int spraid_pci_enable(struct spraid_dev *hdev)
goto disable;
}
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
- if (ret < 0) {
- dev_err(hdev->dev, "Allocate one IRQ for setup admin channel failed\n");
- goto disable;
- }
-
hdev->cap = lo_hi_readq(hdev->bar + SPRAID_REG_CAP);
hdev->ioq_depth = min_t(u32, SPRAID_CAP_MQES(hdev->cap) + 1, io_queue_depth);
hdev->db_stride = 1 << SPRAID_CAP_STRIDE(hdev->cap);
@@ -246,13 +250,20 @@ static int spraid_pci_enable(struct spraid_dev *hdev)
dev_err(hdev->dev, "err, dma mask invalid[%llu], set to default\n", maskbit);
maskbit = SPRAID_DMA_MSK_BIT_MAX;
}
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(maskbit))) {
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(maskbit)) &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
dev_err(hdev->dev, "set dma mask and coherent failed\n");
goto disable;
}
dev_info(hdev->dev, "set dma mask[%llu] success\n", maskbit);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ dev_err(hdev->dev, "Allocate one IRQ for setup admin channel failed\n");
+ goto disable;
+ }
+
pci_enable_pcie_error_reporting(pdev);
pci_save_state(pdev);
@@ -263,12 +274,6 @@ static int spraid_pci_enable(struct spraid_dev *hdev)
return ret;
}
-static inline
-struct spraid_admin_request *spraid_admin_req(struct request *req)
-{
- return blk_mq_rq_to_pdu(req);
-}
-
static int spraid_npages_prp(u32 size, struct spraid_dev *hdev)
{
u32 nprps = DIV_ROUND_UP(size + hdev->page_size, hdev->page_size);
@@ -419,7 +424,7 @@ static void spraid_submit_cmd(struct spraid_queue *spraidq, const void *cmd)
writel(spraidq->sq_tail, spraidq->q_db);
spin_unlock_irqrestore(&spraidq->sq_lock, flags);
- dev_log_dbg(spraidq->hdev->dev, "cid[%d], qid[%d], opcode[0x%x], flags[0x%x], hdid[%u]\n",
+ dev_log_dbg(spraidq->hdev->dev, "cid[%d] qid[%d], opcode[0x%x], flags[0x%x], hdid[%u]\n",
acd->command_id, spraidq->qid, acd->opcode, acd->flags, le32_to_cpu(acd->hdid));
}
@@ -605,18 +610,15 @@ static void spraid_setup_rw_cmd(struct spraid_dev *hdev,
if (scmd->cmd_len == 6) {
datalength = (u32)(scmd->cmnd[4] == 0 ?
IO_6_DEFAULT_TX_LEN : scmd->cmnd[4]);
- start_lba_lo = ((u32)scmd->cmnd[1] << 16) |
- ((u32)scmd->cmnd[2] << 8) | (u32)scmd->cmnd[3];
+ start_lba_lo = (u32)get_unaligned_be24(&scmd->cmnd[1]);
start_lba_lo &= 0x1FFFFF;
}
/* 10-byte READ(0x28) or WRITE(0x2A) cdb */
else if (scmd->cmd_len == 10) {
- datalength = (u32)scmd->cmnd[8] | ((u32)scmd->cmnd[7] << 8);
- start_lba_lo = ((u32)scmd->cmnd[2] << 24) |
- ((u32)scmd->cmnd[3] << 16) |
- ((u32)scmd->cmnd[4] << 8) | (u32)scmd->cmnd[5];
+ datalength = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
if (scmd->cmnd[1] & FUA_MASK)
control |= SPRAID_RW_FUA;
@@ -624,42 +626,26 @@ static void spraid_setup_rw_cmd(struct spraid_dev *hdev,
/* 12-byte READ(0xA8) or WRITE(0xAA) cdb */
else if (scmd->cmd_len == 12) {
- datalength = ((u32)scmd->cmnd[6] << 24) |
- ((u32)scmd->cmnd[7] << 16) |
- ((u32)scmd->cmnd[8] << 8) | (u32)scmd->cmnd[9];
- start_lba_lo = ((u32)scmd->cmnd[2] << 24) |
- ((u32)scmd->cmnd[3] << 16) |
- ((u32)scmd->cmnd[4] << 8) | (u32)scmd->cmnd[5];
+ datalength = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
if (scmd->cmnd[1] & FUA_MASK)
control |= SPRAID_RW_FUA;
}
/* 16-byte READ(0x88) or WRITE(0x8A) cdb */
else if (scmd->cmd_len == 16) {
- datalength = ((u32)scmd->cmnd[10] << 24) |
- ((u32)scmd->cmnd[11] << 16) |
- ((u32)scmd->cmnd[12] << 8) | (u32)scmd->cmnd[13];
- start_lba_lo = ((u32)scmd->cmnd[6] << 24) |
- ((u32)scmd->cmnd[7] << 16) |
- ((u32)scmd->cmnd[8] << 8) | (u32)scmd->cmnd[9];
- start_lba_hi = ((u32)scmd->cmnd[2] << 24) |
- ((u32)scmd->cmnd[3] << 16) |
- ((u32)scmd->cmnd[4] << 8) | (u32)scmd->cmnd[5];
+ datalength = get_unaligned_be32(&scmd->cmnd[10]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_hi = get_unaligned_be32(&scmd->cmnd[2]);
if (scmd->cmnd[1] & FUA_MASK)
control |= SPRAID_RW_FUA;
}
/* 32-byte READ(0x88) or WRITE(0x8A) cdb */
else if (scmd->cmd_len == 32) {
- datalength = ((u32)scmd->cmnd[28] << 24) |
- ((u32)scmd->cmnd[29] << 16) |
- ((u32)scmd->cmnd[30] << 8) | (u32)scmd->cmnd[31];
- start_lba_lo = ((u32)scmd->cmnd[16] << 24) |
- ((u32)scmd->cmnd[17] << 16) |
- ((u32)scmd->cmnd[18] << 8) | (u32)scmd->cmnd[19];
- start_lba_hi = ((u32)scmd->cmnd[12] << 24) |
- ((u32)scmd->cmnd[13] << 16) |
- ((u32)scmd->cmnd[14] << 8) | (u32)scmd->cmnd[15];
+ datalength = get_unaligned_be32(&scmd->cmnd[28]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[16]);
+ start_lba_hi = get_unaligned_be32(&scmd->cmnd[12]);
if (scmd->cmnd[10] & FUA_MASK)
control |= SPRAID_RW_FUA;
@@ -814,7 +800,7 @@ static void spraid_map_status(struct spraid_iod *iod, struct scsi_cmnd *scmd,
if (scmd->result & SAM_STAT_CHECK_CONDITION) {
memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
memcpy(scmd->sense_buffer, iod->sense, SCSI_SENSE_BUFFERSIZE);
- set_driver_byte(scmd, DRIVER_SENSE);
+ scmd->result = (scmd->result & 0x00ffffff) | (DRIVER_SENSE << 24);
}
break;
case FW_STAT_ABORTED:
@@ -825,6 +811,8 @@ static void spraid_map_status(struct spraid_iod *iod, struct scsi_cmnd *scmd,
break;
default:
set_host_byte(scmd, DID_BAD_TARGET);
+ dev_warn(iod->spraidq->hdev->dev, "[%s] cid[%d] qid[%d] bad status[0x%x]\n",
+ __func__, cqe->cmd_id, le16_to_cpu(cqe->sq_id), le16_to_cpu(cqe->status));
break;
}
}
@@ -850,14 +838,13 @@ static int spraid_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
int ret;
if (unlikely(!scmd)) {
- dev_err(hdev->dev, "err, scmd is null, return 0\n");
+ dev_err(hdev->dev, "err, scmd is null\n");
return 0;
}
if (unlikely(hdev->state != SPRAID_LIVE)) {
set_host_byte(scmd, DID_NO_CONNECT);
scmd->scsi_done(scmd);
- dev_err(hdev->dev, "[%s] err, hdev state is not live\n", __func__);
return 0;
}
@@ -894,7 +881,7 @@ static int spraid_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
WRITE_ONCE(iod->state, SPRAID_CMD_IN_FLIGHT);
spraid_submit_cmd(ioq, &ioq_cmd);
elapsed = jiffies - scmd->jiffies_at_alloc;
- dev_log_dbg(hdev->dev, "cid[%d], qid[%d] submit IO cost %3ld.%3ld seconds\n",
+ dev_log_dbg(hdev->dev, "cid[%d] qid[%d] submit IO cost %3ld.%3ld seconds\n",
cid, hwq, elapsed / HZ, elapsed % HZ);
return 0;
@@ -945,6 +932,10 @@ static int spraid_slave_alloc(struct scsi_device *sdev)
scan_host:
hostdata->hdid = le32_to_cpu(hdev->devices[idx].hdid);
+ hostdata->max_io_kb = le16_to_cpu(hdev->devices[idx].max_io_kb);
+ hostdata->attr = hdev->devices[idx].attr;
+ hostdata->flag = hdev->devices[idx].flag;
+ hostdata->rg_id = 0xff;
sdev->hostdata = hostdata;
up_read(&hdev->devices_rwsem);
return 0;
@@ -958,30 +949,17 @@ static void spraid_slave_destroy(struct scsi_device *sdev)
static int spraid_slave_configure(struct scsi_device *sdev)
{
- u16 idx;
- unsigned int timeout = scmd_tmout_nonpt * HZ;
+ unsigned int timeout = scmd_tmout_rawdisk * HZ;
struct spraid_dev *hdev = shost_priv(sdev->host);
struct spraid_sdev_hostdata *hostdata = sdev->hostdata;
u32 max_sec = sdev->host->max_sectors;
- if (!hostdata) {
- idx = hostdata->hdid - 1;
- if (sdev->channel == hdev->devices[idx].channel &&
- sdev->id == le16_to_cpu(hdev->devices[idx].target) &&
- sdev->lun < hdev->devices[idx].lun) {
- if (SPRAID_DEV_INFO_ATTR_PT(hdev->devices[idx].attr))
- timeout = scmd_tmout_pt * HZ;
- else
- timeout = scmd_tmout_nonpt * HZ;
- max_sec = le16_to_cpu(hdev->devices[idx].max_io_kb) << 1;
- } else {
- dev_err(hdev->dev, "[%s] err, sdev->channel:id:lun[%d:%d:%lld];"
- "devices[%d], channel:target:lun[%d:%d:%d]\n",
- __func__, sdev->channel, sdev->id, sdev->lun,
- idx, hdev->devices[idx].channel,
- hdev->devices[idx].target,
- hdev->devices[idx].lun);
- }
+ if (hostdata) {
+ if (SPRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ timeout = scmd_tmout_vd * HZ;
+ else if (SPRAID_DEV_INFO_ATTR_RAWDISK(hostdata->attr))
+ timeout = scmd_tmout_rawdisk * HZ;
+ max_sec = hostdata->max_io_kb << 1;
} else {
dev_err(hdev->dev, "[%s] err, sdev->hostdata is null\n", __func__);
}
@@ -991,7 +969,9 @@ static int spraid_slave_configure(struct scsi_device *sdev)
if ((max_sec == 0) || (max_sec > sdev->host->max_sectors))
max_sec = sdev->host->max_sectors;
- blk_queue_max_hw_sectors(sdev->request_queue, max_sec);
+
+ if (!max_io_force)
+ blk_queue_max_hw_sectors(sdev->request_queue, max_sec);
dev_info(hdev->dev, "[%s] sdev->channel:id:lun[%d:%d:%lld], scmd_timeout[%d]s, maxsec[%d]\n",
__func__, sdev->channel, sdev->id, sdev->lun, timeout / HZ, max_sec);
@@ -1176,6 +1156,75 @@ static inline bool spraid_cqe_pending(struct spraid_queue *spraidq)
spraidq->cq_phase;
}
+static void spraid_sata_report_zone_handle(struct scsi_cmnd *scmd, struct spraid_iod *iod)
+{
+ int i = 0;
+ unsigned int bytes = 0;
+ struct scatterlist *sg = scsi_sglist(scmd);
+
+ scsi_for_each_sg(scmd, sg, iod->nsge, i) {
+ unsigned int offset = 0;
+
+ if (bytes == 0) {
+ char *hdr;
+ u32 list_length;
+ u64 max_lba, opt_lba;
+ u16 same;
+
+ hdr = sg_virt(sg);
+
+ list_length = get_unaligned_le32(&hdr[0]);
+ same = get_unaligned_le16(&hdr[4]);
+ max_lba = get_unaligned_le64(&hdr[8]);
+ opt_lba = get_unaligned_le64(&hdr[16]);
+ put_unaligned_be32(list_length, &hdr[0]);
+ hdr[4] = same & 0xf;
+ put_unaligned_be64(max_lba, &hdr[8]);
+ put_unaligned_be64(opt_lba, &hdr[16]);
+ offset += 64;
+ bytes += 64;
+ }
+ while (offset < sg_dma_len(sg)) {
+ char *rec;
+ u8 cond, type, non_seq, reset;
+ u64 size, start, wp;
+
+ rec = sg_virt(sg) + offset;
+ type = rec[0] & 0xf;
+ cond = (rec[1] >> 4) & 0xf;
+ non_seq = (rec[1] & 2);
+ reset = (rec[1] & 1);
+ size = get_unaligned_le64(&rec[8]);
+ start = get_unaligned_le64(&rec[16]);
+ wp = get_unaligned_le64(&rec[24]);
+ rec[0] = type;
+ rec[1] = (cond << 4) | non_seq | reset;
+ put_unaligned_be64(size, &rec[8]);
+ put_unaligned_be64(start, &rec[16]);
+ put_unaligned_be64(wp, &rec[24]);
+ WARN_ON(offset + 64 > sg_dma_len(sg));
+ offset += 64;
+ bytes += 64;
+ }
+ }
+}
+
+static inline void spraid_handle_ata_cmd(struct spraid_dev *hdev, struct scsi_cmnd *scmd,
+ struct spraid_iod *iod)
+{
+ if (hdev->ctrl_info->card_type != SPRAID_CARD_HBA)
+ return;
+
+ switch (scmd->cmnd[0]) {
+ case ZBC_IN:
+ dev_info(hdev->dev, "[%s] process report zone\n", __func__);
+ spraid_sata_report_zone_handle(scmd, iod);
+ break;
+ default:
+ break;
+ }
+}
+
static void spraid_complete_ioq_cmnd(struct spraid_queue *ioq, struct spraid_completion *cqe)
{
struct spraid_dev *hdev = ioq->hdev;
@@ -1197,12 +1246,12 @@ static void spraid_complete_ioq_cmnd(struct spraid_queue *ioq, struct spraid_com
iod = scsi_cmd_priv(scmd);
elapsed = jiffies - scmd->jiffies_at_alloc;
- dev_log_dbg(hdev->dev, "cid[%d], qid[%d] finish IO cost %3ld.%3ld seconds\n",
+ dev_log_dbg(hdev->dev, "cid[%d] qid[%d] finish IO cost %3ld.%3ld seconds\n",
cqe->cmd_id, ioq->qid, elapsed / HZ, elapsed % HZ);
if (cmpxchg(&iod->state, SPRAID_CMD_IN_FLIGHT, SPRAID_CMD_COMPLETE) !=
SPRAID_CMD_IN_FLIGHT) {
- dev_warn(hdev->dev, "cid[%d], qid[%d] enters abnormal handler, cost %3ld.%3ld seconds\n",
+ dev_warn(hdev->dev, "cid[%d] qid[%d] enters abnormal handler, cost %3ld.%3ld seconds\n",
cqe->cmd_id, ioq->qid, elapsed / HZ, elapsed % HZ);
WRITE_ONCE(iod->state, SPRAID_CMD_TMO_COMPLETE);
@@ -1215,6 +1264,8 @@ static void spraid_complete_ioq_cmnd(struct spraid_queue *ioq, struct spraid_com
return;
}
+ spraid_handle_ata_cmd(hdev, scmd, iod);
+
spraid_map_status(iod, scmd, cqe);
if (iod->nsge) {
iod->nsge = 0;
@@ -1224,38 +1275,36 @@ static void spraid_complete_ioq_cmnd(struct spraid_queue *ioq, struct spraid_com
scmd->scsi_done(scmd);
}
-static inline void spraid_end_admin_request(struct request *req, __le16 status,
- __le32 result0, __le32 result1)
-{
- struct spraid_admin_request *rq = spraid_admin_req(req);
-
- rq->status = le16_to_cpu(status) >> 1;
- rq->result0 = le32_to_cpu(result0);
- rq->result1 = le32_to_cpu(result1);
- blk_mq_complete_request(req);
-}
-
static void spraid_complete_adminq_cmnd(struct spraid_queue *adminq, struct spraid_completion *cqe)
{
- struct blk_mq_tags *tags = adminq->hdev->admin_tagset.tags[0];
- struct request *req;
+ struct spraid_dev *hdev = adminq->hdev;
+ struct spraid_cmd *adm_cmd;
- req = blk_mq_tag_to_rq(tags, cqe->cmd_id);
- if (unlikely(!req)) {
+ adm_cmd = hdev->adm_cmds + cqe->cmd_id;
+ if (unlikely(adm_cmd->state == SPRAID_CMD_IDLE)) {
dev_warn(adminq->hdev->dev, "Invalid id %d completed on queue %d\n",
cqe->cmd_id, le16_to_cpu(cqe->sq_id));
return;
}
- spraid_end_admin_request(req, cqe->status, cqe->result, cqe->result1);
+
+ adm_cmd->status = le16_to_cpu(cqe->status) >> 1;
+ adm_cmd->result0 = le32_to_cpu(cqe->result);
+ adm_cmd->result1 = le32_to_cpu(cqe->result1);
+
+ complete(&adm_cmd->cmd_done);
}
+static void spraid_send_aen(struct spraid_dev *hdev, u16 cid);
+
static void spraid_complete_aen(struct spraid_queue *spraidq, struct spraid_completion *cqe)
{
struct spraid_dev *hdev = spraidq->hdev;
u32 result = le32_to_cpu(cqe->result);
- dev_info(hdev->dev, "rcv aen, status[%x], result[%x]\n",
- le16_to_cpu(cqe->status) >> 1, result);
+ dev_info(hdev->dev, "rcv aen, cid[%d], status[0x%x], result[0x%x]\n",
+ cqe->cmd_id, le16_to_cpu(cqe->status) >> 1, result);
+
+ spraid_send_aen(hdev, cqe->cmd_id);
if ((le16_to_cpu(cqe->status) >> 1) != SPRAID_SC_SUCCESS)
return;
@@ -1264,22 +1313,19 @@ static void spraid_complete_aen(struct spraid_queue *spraidq, struct spraid_comp
spraid_handle_aen_notice(hdev, result);
break;
case SPRAID_AEN_VS:
- spraid_handle_aen_vs(hdev, result);
+ spraid_handle_aen_vs(hdev, result, le32_to_cpu(cqe->result1));
break;
default:
dev_warn(hdev->dev, "Unsupported async event type: %u\n",
result & 0x7);
break;
}
- queue_work(spraid_wq, &hdev->aen_work);
}
-static void spraid_put_ioq_ptcmd(struct spraid_dev *hdev, struct spraid_ioq_ptcmd *cmd);
-
static void spraid_complete_ioq_sync_cmnd(struct spraid_queue *ioq, struct spraid_completion *cqe)
{
struct spraid_dev *hdev = ioq->hdev;
- struct spraid_ioq_ptcmd *ptcmd;
+ struct spraid_cmd *ptcmd;
ptcmd = hdev->ioq_ptcmds + (ioq->qid - 1) * SPRAID_PTCMDS_PERQ +
cqe->cmd_id - SPRAID_IO_BLK_MQ_DEPTH;
@@ -1289,8 +1335,6 @@ static void spraid_complete_ioq_sync_cmnd(struct spraid_queue *ioq, struct sprai
ptcmd->result1 = le32_to_cpu(cqe->result1);
complete(&ptcmd->cmd_done);
-
- spraid_put_ioq_ptcmd(hdev, ptcmd);
}
static inline void spraid_handle_cqe(struct spraid_queue *spraidq, u16 idx)
@@ -1304,7 +1348,7 @@ static inline void spraid_handle_cqe(struct spraid_queue *spraidq, u16 idx)
return;
}
- dev_log_dbg(hdev->dev, "cid[%d], qid[%d], result[0x%x], sq_id[%d], status[0x%x]\n",
+ dev_log_dbg(hdev->dev, "cid[%d] qid[%d], result[0x%x], sq_id[%d], status[0x%x]\n",
cqe->cmd_id, spraidq->qid, le32_to_cpu(cqe->result),
le16_to_cpu(cqe->sq_id), le16_to_cpu(cqe->status));
@@ -1452,62 +1496,119 @@ static u32 spraid_bar_size(struct spraid_dev *hdev, u32 nr_ioqs)
return (SPRAID_REG_DBS + ((nr_ioqs + 1) * 8 * hdev->db_stride));
}
-static inline void spraid_clear_spraid_request(struct request *req)
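+/*
+ * Admin commands are now tracked in a driver-private pool threaded on
+ * a free list, replacing the blk-mq admin tag set removed by this
+ * patch.
+ */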
+static int spraid_alloc_admin_cmds(struct spraid_dev *hdev)
{
- if (!(req->rq_flags & RQF_DONTPREP)) {
- spraid_admin_req(req)->flags = 0;
- req->rq_flags |= RQF_DONTPREP;
+ int i;
+
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+ spin_lock_init(&hdev->adm_cmd_lock);
+
+ hdev->adm_cmds = kcalloc_node(SPRAID_AQ_BLK_MQ_DEPTH, sizeof(struct spraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
+
+ if (!hdev->adm_cmds) {
+ dev_err(hdev->dev, "Alloc admin cmds failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < SPRAID_AQ_BLK_MQ_DEPTH; i++) {
+ hdev->adm_cmds[i].qid = 0;
+ hdev->adm_cmds[i].cid = i;
+ list_add_tail(&(hdev->adm_cmds[i].list), &hdev->adm_cmd_list);
}
+
+ dev_info(hdev->dev, "Alloc admin cmds success, num[%d]\n", SPRAID_AQ_BLK_MQ_DEPTH);
+
+ return 0;
}
-static struct request *spraid_alloc_admin_request(struct request_queue *q,
- struct spraid_admin_command *cmd,
- blk_mq_req_flags_t flags)
+static void spraid_free_admin_cmds(struct spraid_dev *hdev)
{
- u32 op = COMMAND_IS_WRITE(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
- struct request *req;
+ kfree(hdev->adm_cmds);
+ hdev->adm_cmds = NULL;
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+}
+
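+/*
+ * Grab an idle command slot from the admin or I/O-passthrough free
+ * list (selected by @type) and mark it in flight; returns NULL when
+ * the list is exhausted.
+ */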
+static struct spraid_cmd *spraid_get_cmd(struct spraid_dev *hdev, enum spraid_cmd_type type)
+{
+ struct spraid_cmd *cmd = NULL;
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
+
+ if (type == SPRAID_CMD_IOPT) {
+ head = &hdev->ioq_pt_list;
+ slock = &hdev->ioq_pt_lock;
+ }
+
+ spin_lock_irqsave(slock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(slock, flags);
+ dev_err(hdev->dev, "err, cmd[%d] list empty\n", type);
+ return NULL;
+ }
+ cmd = list_entry(head->next, struct spraid_cmd, list);
+ list_del_init(&cmd->list);
+ spin_unlock_irqrestore(slock, flags);
- req = blk_mq_alloc_request(q, op, flags);
- if (IS_ERR(req))
- return req;
- req->cmd_flags |= REQ_FAILFAST_DRIVER;
- spraid_clear_spraid_request(req);
- spraid_admin_req(req)->cmd = cmd;
+ WRITE_ONCE(cmd->state, SPRAID_CMD_IN_FLIGHT);
- return req;
+ return cmd;
}
-static int spraid_submit_admin_sync_cmd(struct request_queue *q,
- struct spraid_admin_command *cmd,
- u32 *result, void *buffer,
- u32 bufflen, u32 timeout, int at_head, blk_mq_req_flags_t flags)
+static void spraid_put_cmd(struct spraid_dev *hdev, struct spraid_cmd *cmd,
+ enum spraid_cmd_type type)
{
- struct request *req;
- int ret;
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
- req = spraid_alloc_admin_request(q, cmd, flags);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ if (type == SPRAID_CMD_IOPT) {
+ head = &hdev->ioq_pt_list;
+ slock = &hdev->ioq_pt_lock;
+ }
- req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
- if (buffer && bufflen) {
- ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
- if (ret)
- goto out;
+ spin_lock_irqsave(slock, flags);
+ WRITE_ONCE(cmd->state, SPRAID_CMD_IDLE);
+ list_add_tail(&cmd->list, head);
+ spin_unlock_irqrestore(slock, flags);
+}
+
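+/*
+ * Issue an admin command and wait on a per-command completion until
+ * the matching CQE arrives. On timeout the slot is marked
+ * SPRAID_CMD_TIMEOUT, returned to the pool, and -ETIME is reported.
+ */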
+static int spraid_submit_admin_sync_cmd(struct spraid_dev *hdev, struct spraid_admin_command *cmd,
+ u32 *result0, u32 *result1, u32 timeout)
+{
+ struct spraid_cmd *adm_cmd = spraid_get_cmd(hdev, SPRAID_CMD_ADM);
+
+ if (!adm_cmd) {
+ dev_err(hdev->dev, "err, get admin cmd failed\n");
+ return -EFAULT;
}
- blk_execute_rq(req->q, NULL, req, at_head);
- if (result)
- *result = spraid_admin_req(req)->result0;
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
- if (spraid_admin_req(req)->flags & SPRAID_REQ_CANCELLED)
- ret = -EINTR;
- else
- ret = spraid_admin_req(req)->status;
+ init_completion(&adm_cmd->cmd_done);
-out:
- blk_mq_free_request(req);
- return ret;
+ cmd->common.command_id = adm_cmd->cid;
+ spraid_submit_cmd(&hdev->queues[0], cmd);
+
+ if (!wait_for_completion_timeout(&adm_cmd->cmd_done, timeout)) {
+ dev_err(hdev->dev, "[%s] cid[%d] qid[%d] timeout, opcode[0x%x] subopcode[0x%x]\n",
+ __func__, adm_cmd->cid, adm_cmd->qid, cmd->usr_cmd.opcode,
+ cmd->usr_cmd.info_0.subopcode);
+ WRITE_ONCE(adm_cmd->state, SPRAID_CMD_TIMEOUT);
+ spraid_put_cmd(hdev, adm_cmd, SPRAID_CMD_ADM);
+ return -ETIME;
+ }
+
+ if (result0)
+ *result0 = adm_cmd->result0;
+ if (result1)
+ *result1 = adm_cmd->result1;
+
+ spraid_put_cmd(hdev, adm_cmd, SPRAID_CMD_ADM);
+
+ return adm_cmd->status;
}
static int spraid_create_cq(struct spraid_dev *hdev, u16 qid,
@@ -1524,8 +1625,7 @@ static int spraid_create_cq(struct spraid_dev *hdev, u16 qid,
admin_cmd.create_cq.cq_flags = cpu_to_le16(flags);
admin_cmd.create_cq.irq_vector = cpu_to_le16(cq_vector);
- return spraid_submit_admin_sync_cmd(hdev->admin_q, &admin_cmd, NULL,
- NULL, 0, 0, 0, 0);
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
}
static int spraid_create_sq(struct spraid_dev *hdev, u16 qid,
@@ -1542,8 +1642,7 @@ static int spraid_create_sq(struct spraid_dev *hdev, u16 qid,
admin_cmd.create_sq.sq_flags = cpu_to_le16(flags);
admin_cmd.create_sq.cqid = cpu_to_le16(qid);
- return spraid_submit_admin_sync_cmd(hdev->admin_q, &admin_cmd, NULL,
- NULL, 0, 0, 0, 0);
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
}
static void spraid_free_queue(struct spraid_queue *spraidq)
@@ -1581,8 +1680,7 @@ static int spraid_delete_queue(struct spraid_dev *hdev, u8 op, u16 id)
admin_cmd.delete_queue.opcode = op;
admin_cmd.delete_queue.qid = cpu_to_le16(id);
- ret = spraid_submit_admin_sync_cmd(hdev->admin_q, &admin_cmd, NULL,
- NULL, 0, 0, 0, 0);
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
if (ret)
dev_err(hdev->dev, "Delete %s:[%d] failed\n",
@@ -1663,19 +1761,28 @@ static int spraid_set_features(struct spraid_dev *hdev, u32 fid, u32 dword11, vo
size_t buflen, u32 *result)
{
struct spraid_admin_command admin_cmd;
- u32 res;
int ret;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+
+ if (buffer && buflen) {
+ data_ptr = dma_alloc_coherent(hdev->dev, buflen, &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memcpy(data_ptr, buffer, buflen);
+ }
memset(&admin_cmd, 0, sizeof(admin_cmd));
admin_cmd.features.opcode = SPRAID_ADMIN_SET_FEATURES;
admin_cmd.features.fid = cpu_to_le32(fid);
admin_cmd.features.dword11 = cpu_to_le32(dword11);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
- ret = spraid_submit_admin_sync_cmd(hdev->admin_q, &admin_cmd, &res,
- buffer, buflen, 0, 0, 0);
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, result, NULL, 0);
- if (!ret && result)
- *result = res;
+ if (data_ptr)
+ dma_free_coherent(hdev->dev, buflen, data_ptr, data_dma);
return ret;
}
@@ -1764,8 +1871,7 @@ static int spraid_setup_io_queues(struct spraid_dev *hdev)
break;
}
dev_info(hdev->dev, "[%s] max_qid: %d, queue_count: %d, online_queue: %d, ioq_depth: %d\n",
- __func__, hdev->max_qid, hdev->queue_count,
- hdev->online_queues, hdev->ioq_depth);
+ __func__, hdev->max_qid, hdev->queue_count, hdev->online_queues, hdev->ioq_depth);
return spraid_create_io_queues(hdev);
}
@@ -1889,10 +1995,11 @@ static int spraid_get_dev_list(struct spraid_dev *hdev, struct spraid_dev_info *
u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
struct spraid_admin_command admin_cmd;
struct spraid_dev_list *list_buf;
+ dma_addr_t data_dma = 0;
u32 i, idx, hdid, ndev;
int ret = 0;
- list_buf = kmalloc(sizeof(*list_buf), GFP_KERNEL);
+ list_buf = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &data_dma, GFP_KERNEL);
if (!list_buf)
return -ENOMEM;
@@ -1901,9 +2008,9 @@ static int spraid_get_dev_list(struct spraid_dev *hdev, struct spraid_dev_info *
admin_cmd.get_info.opcode = SPRAID_ADMIN_GET_INFO;
admin_cmd.get_info.type = SPRAID_GET_INFO_DEV_LIST;
admin_cmd.get_info.cdw11 = cpu_to_le32(idx);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
- ret = spraid_submit_admin_sync_cmd(hdev->admin_q, &admin_cmd, NULL, list_buf,
- sizeof(*list_buf), 0, 0, 0);
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
if (ret) {
dev_err(hdev->dev, "Get device list failed, nd: %u, idx: %u, ret: %d\n",
@@ -1916,12 +2023,11 @@ static int spraid_get_dev_list(struct spraid_dev *hdev, struct spraid_dev_info *
for (i = 0; i < ndev; i++) {
hdid = le32_to_cpu(list_buf->devices[i].hdid);
- dev_info(hdev->dev, "list_buf->devices[%d], hdid: %u target: %d, channel: %d, lun: %d, attr[%x]\n",
- i, hdid,
- le16_to_cpu(list_buf->devices[i].target),
- list_buf->devices[i].channel,
- list_buf->devices[i].lun,
- list_buf->devices[i].attr);
+ dev_info(hdev->dev, "list_buf->devices[%d], hdid: %u target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ i, hdid, le16_to_cpu(list_buf->devices[i].target),
+ list_buf->devices[i].channel,
+ list_buf->devices[i].lun,
+ list_buf->devices[i].attr);
if (hdid > nd || hdid == 0) {
dev_err(hdev->dev, "err, hdid[%d] invalid\n", hdid);
continue;
@@ -1936,21 +2042,29 @@ static int spraid_get_dev_list(struct spraid_dev *hdev, struct spraid_dev_info *
}
out:
- kfree(list_buf);
+ dma_free_coherent(hdev->dev, PAGE_SIZE, list_buf, data_dma);
return ret;
}
-static void spraid_send_aen(struct spraid_dev *hdev)
+static void spraid_send_aen(struct spraid_dev *hdev, u16 cid)
{
struct spraid_queue *adminq = &hdev->queues[0];
struct spraid_admin_command admin_cmd;
memset(&admin_cmd, 0, sizeof(admin_cmd));
admin_cmd.common.opcode = SPRAID_ADMIN_ASYNC_EVENT;
- admin_cmd.common.command_id = SPRAID_AQ_BLK_MQ_DEPTH;
+ admin_cmd.common.command_id = cid;
spraid_submit_cmd(adminq, &admin_cmd);
- dev_info(hdev->dev, "send aen, cid[%d]\n", SPRAID_AQ_BLK_MQ_DEPTH);
+ dev_info(hdev->dev, "send aen, cid[%d]\n", cid);
+}
+
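+/*
+ * Post one asynchronous event request per AERL slot advertised by the
+ * controller; AEN command ids start at SPRAID_AQ_BLK_MQ_DEPTH so they
+ * never collide with regular admin command ids.
+ */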
+static inline void spraid_send_all_aen(struct spraid_dev *hdev)
+{
+ u16 i;
+
+ for (i = 0; i < hdev->ctrl_info->aerl; i++)
+ spraid_send_aen(hdev, i + SPRAID_AQ_BLK_MQ_DEPTH);
}
static int spraid_add_device(struct spraid_dev *hdev, struct spraid_dev_info *device)
@@ -1958,6 +2072,10 @@ static int spraid_add_device(struct spraid_dev *hdev, struct spraid_dev_info *de
struct Scsi_Host *shost = hdev->shost;
struct scsi_device *sdev;
+ dev_info(hdev->dev, "add device, hdid: %u target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+ device->channel, device->lun, device->attr);
+
sdev = scsi_device_lookup(shost, device->channel, le16_to_cpu(device->target), 0);
if (sdev) {
dev_warn(hdev->dev, "Device is already exist, channel: %d, target_id: %d, lun: %d\n",
@@ -1974,9 +2092,13 @@ static int spraid_rescan_device(struct spraid_dev *hdev, struct spraid_dev_info
struct Scsi_Host *shost = hdev->shost;
struct scsi_device *sdev;
+ dev_info(hdev->dev, "rescan device, hdid: %u target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+ device->channel, device->lun, device->attr);
+
sdev = scsi_device_lookup(shost, device->channel, le16_to_cpu(device->target), 0);
if (!sdev) {
- dev_warn(hdev->dev, "Device is not exit, channel: %d, target_id: %d, lun: %d\n",
+ dev_warn(hdev->dev, "device is not exit rescan it, channel: %d, target_id: %d, lun: %d\n",
device->channel, le16_to_cpu(device->target), 0);
return -ENODEV;
}
@@ -1991,9 +2113,13 @@ static int spraid_remove_device(struct spraid_dev *hdev, struct spraid_dev_info
struct Scsi_Host *shost = hdev->shost;
struct scsi_device *sdev;
+ dev_info(hdev->dev, "remove device, hdid: %u target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ le32_to_cpu(org_device->hdid), le16_to_cpu(org_device->target),
+ org_device->channel, org_device->lun, org_device->attr);
+
sdev = scsi_device_lookup(shost, org_device->channel, le16_to_cpu(org_device->target), 0);
if (!sdev) {
- dev_warn(hdev->dev, "Device is not exit, channel: %d, target_id: %d, lun: %d\n",
+ dev_warn(hdev->dev, "device is not exit remove it, channel: %d, target_id: %d, lun: %d\n",
org_device->channel, le16_to_cpu(org_device->target), 0);
return -ENODEV;
}
@@ -2029,36 +2155,54 @@ static int spraid_dev_list_init(struct spraid_dev *hdev)
return 0;
}
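+/*
+ * Sort newly discovered devices by channel, then target, so scan work
+ * registers them with the SCSI midlayer in a stable order.
+ */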
+static int luntarget_cmp_func(const void *l, const void *r)
+{
+ const struct spraid_dev_info *ln = l;
+ const struct spraid_dev_info *rn = r;
+
+ if (ln->channel == rn->channel)
+ return le16_to_cpu(ln->target) - le16_to_cpu(rn->target);
+
+ return ln->channel - rn->channel;
+}
+
static void spraid_scan_work(struct work_struct *work)
{
struct spraid_dev *hdev =
container_of(work, struct spraid_dev, scan_work);
struct spraid_dev_info *devices, *org_devices;
+ struct spraid_dev_info *sortdevice;
u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
u8 flag, org_flag;
int i, ret;
+ int count = 0;
devices = kcalloc(nd, sizeof(struct spraid_dev_info), GFP_KERNEL);
if (!devices)
return;
+
+ sortdevice = kcalloc(nd, sizeof(struct spraid_dev_info), GFP_KERNEL);
+ if (!sortdevice)
+ goto free_list;
+
ret = spraid_get_dev_list(hdev, devices);
if (ret)
- goto free_list;
+ goto free_all;
org_devices = hdev->devices;
for (i = 0; i < nd; i++) {
org_flag = org_devices[i].flag;
flag = devices[i].flag;
- dev_log_dbg(hdev->dev, "i: %d, org_flag: 0x%x, flag: 0x%x\n",
- i, org_flag, flag);
+ dev_log_dbg(hdev->dev, "i: %d, org_flag: 0x%x, flag: 0x%x\n", i, org_flag, flag);
if (SPRAID_DEV_INFO_FLAG_VALID(flag)) {
if (!SPRAID_DEV_INFO_FLAG_VALID(org_flag)) {
down_write(&hdev->devices_rwsem);
memcpy(&org_devices[i], &devices[i],
- sizeof(struct spraid_dev_info));
+ sizeof(struct spraid_dev_info));
+ memcpy(&sortdevice[count++], &devices[i],
+ sizeof(struct spraid_dev_info));
up_write(&hdev->devices_rwsem);
- spraid_add_device(hdev, &devices[i]);
} else if (SPRAID_DEV_INFO_FLAG_CHANGE(flag)) {
spraid_rescan_device(hdev, &devices[i]);
}
@@ -2071,6 +2215,16 @@ static void spraid_scan_work(struct work_struct *work)
}
}
}
+
+ dev_info(hdev->dev, "scan work add device count = %d\n", count);
+
+ sort(sortdevice, count, sizeof(sortdevice[0]), luntarget_cmp_func, NULL);
+
+ for (i = 0; i < count; i++)
+ spraid_add_device(hdev, &sortdevice[i]);
+
+free_all:
+ kfree(sortdevice);
free_list:
kfree(devices);
}
@@ -2083,6 +2237,15 @@ static void spraid_timesyn_work(struct work_struct *work)
spraid_configure_timestamp(hdev);
}
+static int spraid_init_ctrl_info(struct spraid_dev *hdev);
+static void spraid_fw_act_work(struct work_struct *work)
+{
+ struct spraid_dev *hdev = container_of(work, struct spraid_dev, fw_act_work);
+
+ if (spraid_init_ctrl_info(hdev))
+ dev_err(hdev->dev, "get ctrl info failed after fw act\n");
+}
+
static void spraid_queue_scan(struct spraid_dev *hdev)
{
queue_work(spraid_wq, &hdev->scan_work);
@@ -2094,6 +2257,9 @@ static void spraid_handle_aen_notice(struct spraid_dev *hdev, u32 result)
case SPRAID_AEN_DEV_CHANGED:
spraid_queue_scan(hdev);
break;
+ case SPRAID_AEN_FW_ACT_START:
+ dev_info(hdev->dev, "fw activation starting\n");
+ break;
case SPRAID_AEN_HOST_PROBING:
break;
default:
@@ -2101,25 +2267,25 @@ static void spraid_handle_aen_notice(struct spraid_dev *hdev, u32 result)
}
}
-static void spraid_handle_aen_vs(struct spraid_dev *hdev, u32 result)
+static void spraid_handle_aen_vs(struct spraid_dev *hdev, u32 result, u32 result1)
{
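+ /* the vendor-specific event code is carried in bits 15:8 of result */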
- switch (result) {
+ switch ((result & 0xff00) >> 8) {
case SPRAID_AEN_TIMESYN:
queue_work(spraid_wq, &hdev->timesyn_work);
break;
+ case SPRAID_AEN_FW_ACT_FINISH:
+ dev_info(hdev->dev, "fw activation finish\n");
+ queue_work(spraid_wq, &hdev->fw_act_work);
+ break;
+ case SPRAID_AEN_EVENT_MIN ... SPRAID_AEN_EVENT_MAX:
+ dev_info(hdev->dev, "rcv card event[%d], param1[0x%x] param2[0x%x]\n",
+ (result & 0xff00) >> 8, result, result1);
+ break;
default:
- dev_warn(hdev->dev, "async event result: %x\n", result);
+ dev_warn(hdev->dev, "async event result: 0x%x\n", result);
}
}
-static void spraid_async_event_work(struct work_struct *work)
-{
- struct spraid_dev *hdev =
- container_of(work, struct spraid_dev, aen_work);
-
- spraid_send_aen(hdev);
-}
-
static int spraid_alloc_resources(struct spraid_dev *hdev)
{
int ret, nqueue;
@@ -2149,10 +2315,16 @@ static int spraid_alloc_resources(struct spraid_dev *hdev)
goto destroy_dma_pools;
}
+ ret = spraid_alloc_admin_cmds(hdev);
+ if (ret)
+ goto free_queues;
+
dev_info(hdev->dev, "[%s] queues num: %d\n", __func__, nqueue);
return 0;
+free_queues:
+ kfree(hdev->queues);
destroy_dma_pools:
spraid_destroy_dma_pools(hdev);
free_ctrl_info:
@@ -2164,50 +2336,18 @@ static int spraid_alloc_resources(struct spraid_dev *hdev)
static void spraid_free_resources(struct spraid_dev *hdev)
{
+ spraid_free_admin_cmds(hdev);
kfree(hdev->queues);
spraid_destroy_dma_pools(hdev);
kfree(hdev->ctrl_info);
ida_free(&spraid_instance_ida, hdev->instance);
}
-static void spraid_setup_passthrough(struct request *req, struct spraid_admin_command *cmd)
-{
- memcpy(cmd, spraid_admin_req(req)->cmd, sizeof(*cmd));
- cmd->common.flags &= ~SPRAID_CMD_FLAG_SGL_ALL;
-}
-
-static inline void spraid_clear_hreq(struct request *req)
-{
- if (!(req->rq_flags & RQF_DONTPREP)) {
- spraid_admin_req(req)->flags = 0;
- req->rq_flags |= RQF_DONTPREP;
- }
-}
-
-static blk_status_t spraid_setup_admin_cmd(struct request *req, struct spraid_admin_command *cmd)
+static void spraid_bsg_unmap_data(struct spraid_dev *hdev, struct bsg_job *job)
{
- spraid_clear_hreq(req);
-
- memset(cmd, 0, sizeof(*cmd));
- switch (req_op(req)) {
- case REQ_OP_DRV_IN:
- case REQ_OP_DRV_OUT:
- spraid_setup_passthrough(req, cmd);
- break;
- default:
- WARN_ON_ONCE(1);
- return BLK_STS_IOERR;
- }
-
- cmd->common.command_id = req->tag;
- return BLK_STS_OK;
-}
-
-static void spraid_unmap_data(struct spraid_dev *hdev, struct request *req)
-{
- struct spraid_iod *iod = blk_mq_rq_to_pdu(req);
- enum dma_data_direction dma_dir = rq_data_dir(req) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct spraid_iod *iod = job->dd_data;
+ enum dma_data_direction dma_dir = rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
if (iod->nsge)
dma_unmap_sg(hdev->dev, iod->sg, iod->nsge, dma_dir);
@@ -2215,36 +2355,36 @@ static void spraid_unmap_data(struct spraid_dev *hdev, struct request *req)
spraid_free_iod_res(hdev, iod);
}
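+/*
+ * Map the bsg payload scatterlist for DMA and build the PRP entries
+ * the firmware expects; prp1/prp2 in @cmd are filled in on success.
+ */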
-static blk_status_t spraid_admin_map_data(struct spraid_dev *hdev, struct request *req,
- struct spraid_admin_command *cmd)
+static int spraid_bsg_map_data(struct spraid_dev *hdev, struct bsg_job *job,
+ struct spraid_admin_command *cmd)
{
- struct spraid_iod *iod = blk_mq_rq_to_pdu(req);
- struct request_queue *admin_q = req->q;
- enum dma_data_direction dma_dir = rq_data_dir(req) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
- blk_status_t ret = BLK_STS_IOERR;
- int nr_mapped;
- int res;
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct spraid_iod *iod = job->dd_data;
+ enum dma_data_direction dma_dir = rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ int ret = 0;
+
+ iod->sg = job->request_payload.sg_list;
+ iod->nsge = job->request_payload.sg_cnt;
+ iod->length = job->request_payload.payload_len;
+ iod->use_sgl = false;
+ iod->npages = -1;
+ iod->sg_drv_mgmt = false;
- sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
- iod->nsge = blk_rq_map_sg(admin_q, req, iod->sg);
if (!iod->nsge)
goto out;
- dev_info(hdev->dev, "nseg: %u, nsge: %u\n",
- blk_rq_nr_phys_segments(req), iod->nsge);
-
- ret = BLK_STS_RESOURCE;
- nr_mapped = dma_map_sg_attrs(hdev->dev, iod->sg, iod->nsge, dma_dir, DMA_ATTR_NO_WARN);
- if (!nr_mapped)
+ ret = dma_map_sg_attrs(hdev->dev, iod->sg, iod->nsge, dma_dir, DMA_ATTR_NO_WARN);
+ if (!ret)
goto out;
- res = spraid_setup_prps(hdev, iod);
- if (res)
+ ret = spraid_setup_prps(hdev, iod);
+ if (ret)
goto unmap;
+
cmd->common.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma);
- return BLK_STS_OK;
+
+ return 0;
unmap:
dma_unmap_sg(hdev->dev, iod->sg, iod->nsge, dma_dir);
@@ -2252,137 +2392,29 @@ static blk_status_t spraid_admin_map_data(struct spraid_dev *hdev, struct reques
return ret;
}
-static blk_status_t spraid_init_admin_iod(struct request *rq, struct spraid_dev *hdev)
-{
- struct spraid_iod *iod = blk_mq_rq_to_pdu(rq);
- int nents = blk_rq_nr_phys_segments(rq);
- unsigned int size = blk_rq_payload_bytes(rq);
-
- if (nents > SPRAID_INT_PAGES || size > SPRAID_INT_BYTES(hdev)) {
- iod->sg = mempool_alloc(hdev->iod_mempool, GFP_ATOMIC);
- if (!iod->sg)
- return BLK_STS_RESOURCE;
- } else {
- iod->sg = iod->inline_sg;
- }
-
- iod->nsge = 0;
- iod->use_sgl = false;
- iod->npages = -1;
- iod->length = size;
- iod->sg_drv_mgmt = true;
-
- return BLK_STS_OK;
-}
-
-static blk_status_t spraid_queue_admin_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
-{
- struct spraid_queue *adminq = hctx->driver_data;
- struct spraid_dev *hdev = adminq->hdev;
- struct request *req = bd->rq;
- struct spraid_iod *iod = blk_mq_rq_to_pdu(req);
- struct spraid_admin_command cmd;
- blk_status_t ret;
-
- ret = spraid_setup_admin_cmd(req, &cmd);
- if (ret)
- goto out;
-
- ret = spraid_init_admin_iod(req, hdev);
- if (ret)
- goto out;
-
- if (blk_rq_nr_phys_segments(req)) {
- ret = spraid_admin_map_data(hdev, req, &cmd);
- if (ret)
- goto cleanup_iod;
- }
-
- blk_mq_start_request(req);
- spraid_submit_cmd(adminq, &cmd);
- return BLK_STS_OK;
-
-cleanup_iod:
- spraid_free_iod_res(hdev, iod);
-out:
- return ret;
-}
-
-static blk_status_t spraid_error_status(struct request *req)
-{
- switch (spraid_admin_req(req)->status & 0x7ff) {
- case SPRAID_SC_SUCCESS:
- return BLK_STS_OK;
- default:
- return BLK_STS_IOERR;
- }
-}
-
-static void spraid_complete_admin_rq(struct request *req)
-{
- struct spraid_iod *iod = blk_mq_rq_to_pdu(req);
- struct spraid_dev *hdev = iod->spraidq->hdev;
-
- if (blk_rq_nr_phys_segments(req))
- spraid_unmap_data(hdev, req);
- blk_mq_end_request(req, spraid_error_status(req));
-}
-
-static int spraid_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx)
-{
- struct spraid_dev *hdev = data;
- struct spraid_queue *adminq = &hdev->queues[0];
-
- WARN_ON(hctx_idx != 0);
- WARN_ON(hdev->admin_tagset.tags[0] != hctx->tags);
-
- hctx->driver_data = adminq;
- return 0;
-}
-
-static int spraid_admin_init_request(struct blk_mq_tag_set *set, struct request *req,
- unsigned int hctx_idx, unsigned int numa_node)
-{
- struct spraid_dev *hdev = set->driver_data;
- struct spraid_iod *iod = blk_mq_rq_to_pdu(req);
- struct spraid_queue *adminq = &hdev->queues[0];
-
- WARN_ON(!adminq);
- iod->spraidq = adminq;
- return 0;
-}
-
-static enum blk_eh_timer_return
-spraid_admin_timeout(struct request *req, bool reserved)
-{
- struct spraid_iod *iod = blk_mq_rq_to_pdu(req);
- struct spraid_queue *spraidq = iod->spraidq;
- struct spraid_dev *hdev = spraidq->hdev;
-
- dev_err(hdev->dev, "Admin cid[%d] qid[%d] timeout\n",
- req->tag, spraidq->qid);
-
- if (spraid_poll_cq(spraidq, req->tag)) {
- dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, completion polled\n",
- req->tag, spraidq->qid);
- return BLK_EH_DONE;
- }
-
- spraid_end_admin_request(req, cpu_to_le16(-EINVAL), 0, 0);
- return BLK_EH_DONE;
-}
-
static int spraid_get_ctrl_info(struct spraid_dev *hdev, struct spraid_ctrl_info *ctrl_info)
{
struct spraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
memset(&admin_cmd, 0, sizeof(admin_cmd));
admin_cmd.get_info.opcode = SPRAID_ADMIN_GET_INFO;
admin_cmd.get_info.type = SPRAID_GET_INFO_CTRL;
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(ctrl_info, data_ptr, sizeof(struct spraid_ctrl_info));
- return spraid_submit_admin_sync_cmd(hdev->admin_q, &admin_cmd, NULL,
- ctrl_info, sizeof(struct spraid_ctrl_info), 0, 0, 0);
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
}
static int spraid_init_ctrl_info(struct spraid_dev *hdev)
@@ -2416,6 +2448,11 @@ static int spraid_init_ctrl_info(struct spraid_dev *hdev)
dev_info(hdev->dev, "[%s]sn = %s\n", __func__, hdev->ctrl_info->sn);
dev_info(hdev->dev, "[%s]fr = %s\n", __func__, hdev->ctrl_info->fr);
+ if (!hdev->ctrl_info->aerl)
+ hdev->ctrl_info->aerl = 1;
+ if (hdev->ctrl_info->aerl > SPRAID_NR_AEN_COMMANDS)
+ hdev->ctrl_info->aerl = SPRAID_NR_AEN_COMMANDS;
+
return 0;
}
@@ -2444,98 +2481,51 @@ static void spraid_free_iod_ext_mem_pool(struct spraid_dev *hdev)
mempool_destroy(hdev->iod_mempool);
}
-static int spraid_submit_user_cmd(struct request_queue *q, struct spraid_admin_command *cmd,
- void __user *ubuffer, unsigned int bufflen, u32 *result,
- unsigned int timeout)
+static int spraid_user_admin_cmd(struct spraid_dev *hdev, struct bsg_job *job)
{
- struct request *req;
- struct bio *bio = NULL;
- int ret;
-
- req = spraid_alloc_admin_request(q, cmd, 0);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ struct spraid_bsg_request *bsg_req = job->request;
+ struct spraid_passthru_common_cmd *cmd = &(bsg_req->admcmd);
+ struct spraid_admin_command admin_cmd;
+ u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+ u32 result[2] = {0};
+ int status;
- req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
- spraid_admin_req(req)->flags |= SPRAID_REQ_USERCMD;
+ if (hdev->state >= SPRAID_RESETTING) {
+ dev_err(hdev->dev, "[%s] err, host state:[%d] is not right\n",
+ __func__, hdev->state);
+ return -EBUSY;
+ }
- if (ubuffer && bufflen) {
- ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, GFP_KERNEL);
- if (ret)
- goto out;
- bio = req->bio;
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.common.opcode = cmd->opcode;
+ admin_cmd.common.flags = cmd->flags;
+ admin_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+ admin_cmd.common.cdw2[0] = cpu_to_le32(cmd->cdw2);
+ admin_cmd.common.cdw2[1] = cpu_to_le32(cmd->cdw3);
+ admin_cmd.common.cdw10 = cpu_to_le32(cmd->cdw10);
+ admin_cmd.common.cdw11 = cpu_to_le32(cmd->cdw11);
+ admin_cmd.common.cdw12 = cpu_to_le32(cmd->cdw12);
+ admin_cmd.common.cdw13 = cpu_to_le32(cmd->cdw13);
+ admin_cmd.common.cdw14 = cpu_to_le32(cmd->cdw14);
+ admin_cmd.common.cdw15 = cpu_to_le32(cmd->cdw15);
+
+ status = spraid_bsg_map_data(hdev, job, &admin_cmd);
+ if (status) {
+ dev_err(hdev->dev, "[%s] err, map data failed\n", __func__);
+ return status;
}
- blk_execute_rq(req->q, NULL, req, 0);
- if (spraid_admin_req(req)->flags & SPRAID_REQ_CANCELLED)
- ret = -EINTR;
- else
- ret = spraid_admin_req(req)->status;
- if (result) {
- result[0] = spraid_admin_req(req)->result0;
- result[1] = spraid_admin_req(req)->result1;
- }
- if (bio)
- blk_rq_unmap_user(bio);
-out:
- blk_mq_free_request(req);
- return ret;
-}
-static int spraid_user_admin_cmd(struct spraid_dev *hdev,
- struct spraid_passthru_common_cmd __user *ucmd)
-{
- struct spraid_passthru_common_cmd cmd;
- struct spraid_admin_command admin_cmd;
- u32 timeout = 0;
- int status;
-
- if (!capable(CAP_SYS_ADMIN)) {
- dev_err(hdev->dev, "Current user hasn't administrator right, reject service\n");
- return -EACCES;
- }
-
- if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
- dev_err(hdev->dev, "Copy command from user space to kernel space failed\n");
- return -EFAULT;
- }
-
- if (cmd.flags) {
- dev_err(hdev->dev, "Invalid flags in user command\n");
- return -EINVAL;
+ status = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, &result[0], &result[1], timeout);
+ if (status >= 0) {
+ job->reply_len = sizeof(result);
+ memcpy(job->reply, result, sizeof(result));
}
- dev_info(hdev->dev, "user_admin_cmd opcode: 0x%x, subopcode: 0x%x\n",
- cmd.opcode, cmd.cdw2 & 0x7ff);
+ if (status)
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x], status[0x%x] result0[0x%x] result1[0x%x]\n",
+ __func__, cmd->opcode, cmd->info_0.subopcode, status, result[0], result[1]);
- memset(&admin_cmd, 0, sizeof(admin_cmd));
- admin_cmd.common.opcode = cmd.opcode;
- admin_cmd.common.flags = cmd.flags;
- admin_cmd.common.hdid = cpu_to_le32(cmd.nsid);
- admin_cmd.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
- admin_cmd.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
- admin_cmd.common.cdw10 = cpu_to_le32(cmd.cdw10);
- admin_cmd.common.cdw11 = cpu_to_le32(cmd.cdw11);
- admin_cmd.common.cdw12 = cpu_to_le32(cmd.cdw12);
- admin_cmd.common.cdw13 = cpu_to_le32(cmd.cdw13);
- admin_cmd.common.cdw14 = cpu_to_le32(cmd.cdw14);
- admin_cmd.common.cdw15 = cpu_to_le32(cmd.cdw15);
-
- if (cmd.timeout_ms)
- timeout = msecs_to_jiffies(cmd.timeout_ms);
-
- status = spraid_submit_user_cmd(hdev->admin_q, &admin_cmd,
- (void __user *)(uintptr_t)cmd.addr, cmd.info_1.data_len,
- &cmd.result0, timeout);
-
- dev_info(hdev->dev, "user_admin_cmd status: 0x%x, result0: 0x%x, result1: 0x%x\n",
- status, cmd.result0, cmd.result1);
-
- if (status >= 0) {
- if (put_user(cmd.result0, &ucmd->result0))
- return -EFAULT;
- if (put_user(cmd.result1, &ucmd->result1))
- return -EFAULT;
- }
+ spraid_bsg_unmap_data(hdev, job);
return status;
}
@@ -2548,8 +2538,8 @@ static int spraid_alloc_ioq_ptcmds(struct spraid_dev *hdev)
INIT_LIST_HEAD(&hdev->ioq_pt_list);
spin_lock_init(&hdev->ioq_pt_lock);
- hdev->ioq_ptcmds = kcalloc_node(ptnum, sizeof(struct spraid_ioq_ptcmd),
- GFP_KERNEL, hdev->numa_node);
+ hdev->ioq_ptcmds = kcalloc_node(ptnum, sizeof(struct spraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
if (!hdev->ioq_ptcmds) {
dev_err(hdev->dev, "Alloc ioq_ptcmds failed\n");
@@ -2567,55 +2557,35 @@ static int spraid_alloc_ioq_ptcmds(struct spraid_dev *hdev)
return 0;
}
-static struct spraid_ioq_ptcmd *spraid_get_ioq_ptcmd(struct spraid_dev *hdev)
-{
- struct spraid_ioq_ptcmd *cmd = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&hdev->ioq_pt_lock, flags);
- if (list_empty(&hdev->ioq_pt_list)) {
- spin_unlock_irqrestore(&hdev->ioq_pt_lock, flags);
- dev_err(hdev->dev, "err, ioq ptcmd list empty\n");
- return NULL;
- }
- cmd = list_entry((&hdev->ioq_pt_list)->next, struct spraid_ioq_ptcmd, list);
- list_del_init(&cmd->list);
- spin_unlock_irqrestore(&hdev->ioq_pt_lock, flags);
-
- WRITE_ONCE(cmd->state, SPRAID_CMD_IDLE);
-
- return cmd;
-}
-
-static void spraid_put_ioq_ptcmd(struct spraid_dev *hdev, struct spraid_ioq_ptcmd *cmd)
+static void spraid_free_ioq_ptcmds(struct spraid_dev *hdev)
{
- unsigned long flags;
+ kfree(hdev->ioq_ptcmds);
+ hdev->ioq_ptcmds = NULL;
- spin_lock_irqsave(&hdev->ioq_pt_lock, flags);
- list_add(&cmd->list, (&hdev->ioq_pt_list)->next);
- spin_unlock_irqrestore(&hdev->ioq_pt_lock, flags);
+ INIT_LIST_HEAD(&hdev->ioq_pt_list);
}
static int spraid_submit_ioq_sync_cmd(struct spraid_dev *hdev, struct spraid_ioq_command *cmd,
- u32 *result, void **sense, u32 timeout)
+ u32 *result, u32 *reslen, u32 timeout)
{
- struct spraid_queue *ioq;
int ret;
dma_addr_t sense_dma;
- struct spraid_ioq_ptcmd *pt_cmd = spraid_get_ioq_ptcmd(hdev);
-
- *sense = NULL;
+ struct spraid_queue *ioq;
+ void *sense_addr = NULL;
+ struct spraid_cmd *pt_cmd = spraid_get_cmd(hdev, SPRAID_CMD_IOPT);
- if (!pt_cmd)
+ if (!pt_cmd) {
+ dev_err(hdev->dev, "err, get ioq cmd failed\n");
return -EFAULT;
+ }
- dev_info(hdev->dev, "[%s] ptcmd, cid[%d], qid[%d]\n", __func__, pt_cmd->cid, pt_cmd->qid);
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
init_completion(&pt_cmd->cmd_done);
ioq = &hdev->queues[pt_cmd->qid];
ret = pt_cmd->cid * SCSI_SENSE_BUFFERSIZE;
- pt_cmd->priv = ioq->sense + ret;
+ sense_addr = ioq->sense + ret;
sense_dma = ioq->sense_dma_addr + ret;
cmd->common.sense_addr = cpu_to_le64(sense_dma);
@@ -2625,260 +2595,87 @@ static int spraid_submit_ioq_sync_cmd(struct spraid_dev *hdev, struct spraid_ioq
spraid_submit_cmd(ioq, cmd);
if (!wait_for_completion_timeout(&pt_cmd->cmd_done, timeout)) {
- dev_err(hdev->dev, "[%s] cid[%d], qid[%d] timeout\n",
- __func__, pt_cmd->cid, pt_cmd->qid);
+ dev_err(hdev->dev, "[%s] cid[%d] qid[%d] timeout, opcode[0x%x] subopcode[0x%x]\n",
+ __func__, pt_cmd->cid, pt_cmd->qid, cmd->common.opcode,
+ (le32_to_cpu(cmd->common.cdw3[0]) & 0xffff));
WRITE_ONCE(pt_cmd->state, SPRAID_CMD_TIMEOUT);
- return -EINVAL;
+ spraid_put_cmd(hdev, pt_cmd, SPRAID_CMD_IOPT);
+ return -ETIME;
}
- if (result) {
- result[0] = pt_cmd->result0;
- result[1] = pt_cmd->result1;
+ if (result && reslen) {
+ if ((pt_cmd->status & 0x17f) == 0x101) {
+ memcpy(result, sense_addr, SCSI_SENSE_BUFFERSIZE);
+ *reslen = SCSI_SENSE_BUFFERSIZE;
+ }
}
- if ((pt_cmd->status & 0x17f) == 0x101)
- *sense = pt_cmd->priv;
+ spraid_put_cmd(hdev, pt_cmd, SPRAID_CMD_IOPT);
return pt_cmd->status;
}
-static int spraid_user_ioq_cmd(struct spraid_dev *hdev,
- struct spraid_ioq_passthru_cmd __user *ucmd)
+static int spraid_user_ioq_cmd(struct spraid_dev *hdev, struct bsg_job *job)
{
- struct spraid_ioq_passthru_cmd cmd;
+ struct spraid_bsg_request *bsg_req = (struct spraid_bsg_request *)(job->request);
+ struct spraid_ioq_passthru_cmd *cmd = &(bsg_req->ioqcmd);
struct spraid_ioq_command ioq_cmd;
- u32 timeout = 0;
int status = 0;
- u8 *data_ptr = NULL;
- dma_addr_t data_dma;
- enum dma_data_direction dma_dir = DMA_NONE;
- void *sense = NULL;
-
- if (!capable(CAP_SYS_ADMIN)) {
- dev_err(hdev->dev, "Current user hasn't administrator right, reject service\n");
- return -EACCES;
- }
+ u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
- if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
- dev_err(hdev->dev, "Copy command from user space to kernel space failed\n");
- return -EFAULT;
- }
-
- if (cmd.data_len > PAGE_SIZE) {
+ if (cmd->data_len > PAGE_SIZE) {
dev_err(hdev->dev, "[%s] data len bigger than 4k\n", __func__);
return -EFAULT;
}
- dev_info(hdev->dev, "[%s] opcode: 0x%x, subopcode: 0x%x, datalen: %d\n",
- __func__, cmd.opcode, cmd.info_1.subopcode, cmd.data_len);
-
- if (cmd.addr && cmd.data_len) {
- data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &data_dma, GFP_KERNEL);
- if (!data_ptr)
- return -ENOMEM;
-
- dma_dir = (cmd.opcode & 1) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ if (hdev->state != SPRAID_LIVE) {
+ dev_err(hdev->dev, "[%s] err, host state:[%d] is not live\n",
+ __func__, hdev->state);
+ return -EBUSY;
}
- if (dma_dir == DMA_TO_DEVICE) {
- if (copy_from_user(data_ptr, (void __user *)(uintptr_t)cmd.addr, cmd.data_len)) {
- dev_err(hdev->dev, "[%s] copy user data failed\n", __func__);
- status = -EFAULT;
- goto free_dma_mem;
- }
- }
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x] init, datalen[%d]\n",
+ __func__, cmd->opcode, cmd->info_1.subopcode, cmd->data_len);
memset(&ioq_cmd, 0, sizeof(ioq_cmd));
- ioq_cmd.common.opcode = cmd.opcode;
- ioq_cmd.common.flags = cmd.flags;
- ioq_cmd.common.hdid = cpu_to_le32(cmd.nsid);
- ioq_cmd.common.sense_len = cpu_to_le16(cmd.info_0.res_sense_len);
- ioq_cmd.common.cdb_len = cmd.info_0.cdb_len;
- ioq_cmd.common.rsvd2 = cmd.info_0.rsvd0;
- ioq_cmd.common.cdw3[0] = cpu_to_le32(cmd.cdw3);
- ioq_cmd.common.cdw3[1] = cpu_to_le32(cmd.cdw4);
- ioq_cmd.common.cdw3[2] = cpu_to_le32(cmd.cdw5);
- ioq_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
-
- ioq_cmd.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
- ioq_cmd.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
- ioq_cmd.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
- ioq_cmd.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
- ioq_cmd.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
- ioq_cmd.common.cdw10[5] = cpu_to_le32(cmd.data_len);
-
- memcpy(ioq_cmd.common.cdb, &cmd.cdw16, cmd.info_0.cdb_len);
-
- ioq_cmd.common.cdw26[0] = cpu_to_le32(cmd.cdw26[0]);
- ioq_cmd.common.cdw26[1] = cpu_to_le32(cmd.cdw26[1]);
- ioq_cmd.common.cdw26[2] = cpu_to_le32(cmd.cdw26[2]);
- ioq_cmd.common.cdw26[3] = cpu_to_le32(cmd.cdw26[3]);
-
- if (cmd.timeout_ms)
- timeout = msecs_to_jiffies(cmd.timeout_ms);
- timeout = timeout ? timeout : ADMIN_TIMEOUT;
-
- status = spraid_submit_ioq_sync_cmd(hdev, &ioq_cmd, &cmd.result0, &sense, timeout);
-
- if (status >= 0) {
- if (put_user(cmd.result0, &ucmd->result0)) {
- status = -EFAULT;
- goto free_dma_mem;
- }
- if (put_user(cmd.result1, &ucmd->result1)) {
- status = -EFAULT;
- goto free_dma_mem;
- }
- if (dma_dir == DMA_FROM_DEVICE &&
- copy_to_user((void __user *)(uintptr_t)cmd.addr, data_ptr, cmd.data_len)) {
- status = -EFAULT;
- goto free_dma_mem;
- }
- }
-
- if (sense) {
- if (copy_to_user((void *__user *)(uintptr_t)cmd.sense_addr,
- sense, cmd.info_0.res_sense_len)) {
- status = -EFAULT;
- goto free_dma_mem;
- }
- }
-
-free_dma_mem:
- if (data_ptr)
- dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
-
- return status;
-
-}
-
-static int spraid_reset_work_sync(struct spraid_dev *hdev);
-
-static int spraid_user_reset_cmd(struct spraid_dev *hdev)
-{
- int ret;
-
- dev_info(hdev->dev, "[%s] start user reset cmd\n", __func__);
- ret = spraid_reset_work_sync(hdev);
- dev_info(hdev->dev, "[%s] stop user reset cmd[%d]\n", __func__, ret);
-
- return ret;
-}
-
-static int hdev_open(struct inode *inode, struct file *file)
-{
- struct spraid_dev *hdev =
- container_of(inode->i_cdev, struct spraid_dev, cdev);
- file->private_data = hdev;
- return 0;
-}
-
-static long hdev_ioctl(struct file *file, u32 cmd, unsigned long arg)
-{
- struct spraid_dev *hdev = file->private_data;
- void __user *argp = (void __user *)arg;
-
- switch (cmd) {
- case SPRAID_IOCTL_ADMIN_CMD:
- return spraid_user_admin_cmd(hdev, argp);
- case SPRAID_IOCTL_IOQ_CMD:
- return spraid_user_ioq_cmd(hdev, argp);
- case SPRAID_IOCTL_RESET_CMD:
- return spraid_user_reset_cmd(hdev);
- default:
- return -ENOTTY;
- }
-}
-
-static const struct file_operations spraid_dev_fops = {
- .owner = THIS_MODULE,
- .open = hdev_open,
- .unlocked_ioctl = hdev_ioctl,
- .compat_ioctl = hdev_ioctl,
-};
-
-static int spraid_create_cdev(struct spraid_dev *hdev)
-{
- int ret;
-
- device_initialize(&hdev->ctrl_device);
- hdev->ctrl_device.devt = MKDEV(MAJOR(spraid_chr_devt), hdev->instance);
- hdev->ctrl_device.class = spraid_class;
- hdev->ctrl_device.parent = hdev->dev;
- dev_set_drvdata(&hdev->ctrl_device, hdev);
- ret = dev_set_name(&hdev->ctrl_device, "spraid%d", hdev->instance);
- if (ret)
- return ret;
- cdev_init(&hdev->cdev, &spraid_dev_fops);
- hdev->cdev.owner = THIS_MODULE;
- ret = cdev_device_add(&hdev->cdev, &hdev->ctrl_device);
- if (ret) {
- dev_err(hdev->dev, "Add cdev failed, ret: %d", ret);
- put_device(&hdev->ctrl_device);
- kfree_const(hdev->ctrl_device.kobj.name);
- return ret;
- }
-
- return 0;
-}
-
-static inline void spraid_remove_cdev(struct spraid_dev *hdev)
-{
- cdev_device_del(&hdev->cdev, &hdev->ctrl_device);
-}
-
-static const struct blk_mq_ops spraid_admin_mq_ops = {
- .queue_rq = spraid_queue_admin_rq,
- .complete = spraid_complete_admin_rq,
- .init_hctx = spraid_admin_init_hctx,
- .init_request = spraid_admin_init_request,
- .timeout = spraid_admin_timeout,
-};
-
-static void spraid_remove_admin_tagset(struct spraid_dev *hdev)
-{
- if (hdev->admin_q && !blk_queue_dying(hdev->admin_q)) {
- blk_mq_unquiesce_queue(hdev->admin_q);
- blk_cleanup_queue(hdev->admin_q);
- blk_mq_free_tag_set(&hdev->admin_tagset);
+ ioq_cmd.common.opcode = cmd->opcode;
+ ioq_cmd.common.flags = cmd->flags;
+ ioq_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+ ioq_cmd.common.sense_len = cpu_to_le16(cmd->info_0.res_sense_len);
+ ioq_cmd.common.cdb_len = cmd->info_0.cdb_len;
+ ioq_cmd.common.rsvd2 = cmd->info_0.rsvd0;
+ ioq_cmd.common.cdw3[0] = cpu_to_le32(cmd->cdw3);
+ ioq_cmd.common.cdw3[1] = cpu_to_le32(cmd->cdw4);
+ ioq_cmd.common.cdw3[2] = cpu_to_le32(cmd->cdw5);
+
+ ioq_cmd.common.cdw10[0] = cpu_to_le32(cmd->cdw10);
+ ioq_cmd.common.cdw10[1] = cpu_to_le32(cmd->cdw11);
+ ioq_cmd.common.cdw10[2] = cpu_to_le32(cmd->cdw12);
+ ioq_cmd.common.cdw10[3] = cpu_to_le32(cmd->cdw13);
+ ioq_cmd.common.cdw10[4] = cpu_to_le32(cmd->cdw14);
+ ioq_cmd.common.cdw10[5] = cpu_to_le32(cmd->data_len);
+
+ memcpy(ioq_cmd.common.cdb, &cmd->cdw16, cmd->info_0.cdb_len);
+
+ ioq_cmd.common.cdw26[0] = cpu_to_le32(cmd->cdw26[0]);
+ ioq_cmd.common.cdw26[1] = cpu_to_le32(cmd->cdw26[1]);
+ ioq_cmd.common.cdw26[2] = cpu_to_le32(cmd->cdw26[2]);
+ ioq_cmd.common.cdw26[3] = cpu_to_le32(cmd->cdw26[3]);
+
+ status = spraid_bsg_map_data(hdev, job, (struct spraid_admin_command *)&ioq_cmd);
+ if (status) {
+ dev_err(hdev->dev, "[%s] err, map data failed\n", __func__);
+ return status;
}
-}
-static int spraid_alloc_admin_tags(struct spraid_dev *hdev)
-{
- if (!hdev->admin_q) {
- hdev->admin_tagset.ops = &spraid_admin_mq_ops;
- hdev->admin_tagset.nr_hw_queues = 1;
+ status = spraid_submit_ioq_sync_cmd(hdev, &ioq_cmd, job->reply, &job->reply_len, timeout);
- hdev->admin_tagset.queue_depth = SPRAID_AQ_MQ_TAG_DEPTH;
- hdev->admin_tagset.timeout = ADMIN_TIMEOUT;
- hdev->admin_tagset.numa_node = hdev->numa_node;
- hdev->admin_tagset.cmd_size =
- spraid_cmd_size(hdev, true, false);
- hdev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
- hdev->admin_tagset.driver_data = hdev;
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x], status[0x%x], reply_len[%d]\n",
+ __func__, cmd->opcode, cmd->info_1.subopcode, status, job->reply_len);
- if (blk_mq_alloc_tag_set(&hdev->admin_tagset)) {
- dev_err(hdev->dev, "Allocate admin tagset failed\n");
- return -ENOMEM;
- }
+ spraid_bsg_unmap_data(hdev, job);
- hdev->admin_q = blk_mq_init_queue(&hdev->admin_tagset);
- if (IS_ERR(hdev->admin_q)) {
- dev_err(hdev->dev, "Initialize admin request queue failed\n");
- blk_mq_free_tag_set(&hdev->admin_tagset);
- return -ENOMEM;
- }
- if (!blk_get_queue(hdev->admin_q)) {
- dev_err(hdev->dev, "Get admin request queue failed\n");
- spraid_remove_admin_tagset(hdev);
- hdev->admin_q = NULL;
- return -ENODEV;
- }
- } else {
- blk_mq_unquiesce_queue(hdev->admin_q);
- }
- return 0;
+ return status;
}
static bool spraid_check_scmd_completed(struct scsi_cmnd *scmd)
@@ -2891,7 +2688,7 @@ static bool spraid_check_scmd_completed(struct scsi_cmnd *scmd)
spraid_get_tag_from_scmd(scmd, &hwq, &cid);
spraidq = &hdev->queues[hwq];
if (READ_ONCE(iod->state) == SPRAID_CMD_COMPLETE || spraid_poll_cq(spraidq, cid)) {
- dev_warn(hdev->dev, "cid[%d], qid[%d] has been completed\n",
+ dev_warn(hdev->dev, "cid[%d] qid[%d] has been completed\n",
cid, spraidq->qid);
return true;
}
@@ -2927,8 +2724,7 @@ static int spraid_send_abort_cmd(struct spraid_dev *hdev, u32 hdid, u16 qid, u16
admin_cmd.abort.sqid = cpu_to_le16(qid);
admin_cmd.abort.cid = cpu_to_le16(cid);
- return spraid_submit_admin_sync_cmd(hdev->admin_q, &admin_cmd, NULL,
- NULL, 0, 0, 0, 0);
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
}
/* temporarily send the reset command via the admin queue */
@@ -2941,8 +2737,7 @@ static int spraid_send_reset_cmd(struct spraid_dev *hdev, int type, u32 hdid)
admin_cmd.reset.hdid = cpu_to_le32(hdid);
admin_cmd.reset.type = type;
- return spraid_submit_admin_sync_cmd(hdev->admin_q, &admin_cmd, NULL,
- NULL, 0, 0, 0, 0);
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
}
static bool spraid_change_host_state(struct spraid_dev *hdev, enum spraid_state newstate)
@@ -3022,7 +2817,7 @@ static void spraid_back_fault_cqe(struct spraid_queue *ioq, struct spraid_comple
scsi_dma_unmap(scmd);
spraid_free_iod_res(hdev, iod);
scmd->scsi_done(scmd);
- dev_warn(hdev->dev, "Back fault CQE, cid[%d], qid[%d]\n",
+ dev_warn(hdev->dev, "Back fault CQE, cid[%d] qid[%d]\n",
cqe->cmd_id, ioq->qid);
}
@@ -3032,6 +2827,8 @@ static void spraid_back_all_io(struct spraid_dev *hdev)
struct spraid_queue *ioq;
struct spraid_completion cqe = { 0 };
+ scsi_block_requests(hdev->shost);
+
for (i = 1; i <= hdev->shost->nr_hw_queues; i++) {
ioq = &hdev->queues[i];
for (j = 0; j < hdev->shost->can_queue; j++) {
@@ -3039,6 +2836,8 @@ static void spraid_back_all_io(struct spraid_dev *hdev)
spraid_back_fault_cqe(ioq, &cqe);
}
}
+
+ scsi_unblock_requests(hdev->shost);
}
static void spraid_dev_disable(struct spraid_dev *hdev, bool shutdown)
@@ -3106,17 +2905,13 @@ static void spraid_reset_work(struct work_struct *work)
if (ret)
goto pci_disable;
- ret = spraid_alloc_admin_tags(hdev);
- if (ret)
- goto pci_disable;
-
ret = spraid_setup_io_queues(hdev);
if (ret || hdev->online_queues <= hdev->shost->nr_hw_queues)
goto pci_disable;
spraid_change_host_state(hdev, SPRAID_LIVE);
- spraid_send_aen(hdev);
+ spraid_send_all_aen(hdev);
return;
@@ -3174,8 +2969,8 @@ static int spraid_abort_handler(struct scsi_cmnd *scmd)
scsi_print_command(scmd);
- if (!spraid_wait_abnl_cmd_done(iod) || spraid_check_scmd_completed(scmd) ||
- hdev->state != SPRAID_LIVE)
+ if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+ spraid_check_scmd_completed(scmd))
return SUCCESS;
hostdata = scmd->device->hostdata;
@@ -3183,7 +2978,7 @@ static int spraid_abort_handler(struct scsi_cmnd *scmd)
dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, aborting\n", cid, hwq);
ret = spraid_send_abort_cmd(hdev, hostdata->hdid, hwq, cid);
- if (ret != ADMIN_ERR_TIMEOUT) {
+ if (ret != -ETIME) {
ret = spraid_wait_abnl_cmd_done(iod);
if (ret) {
dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed, not found\n", cid, hwq);
@@ -3206,8 +3001,8 @@ static int spraid_tgt_reset_handler(struct scsi_cmnd *scmd)
scsi_print_command(scmd);
- if (!spraid_wait_abnl_cmd_done(iod) || spraid_check_scmd_completed(scmd) ||
- hdev->state != SPRAID_LIVE)
+ if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+ spraid_check_scmd_completed(scmd))
return SUCCESS;
hostdata = scmd->device->hostdata;
@@ -3241,8 +3036,8 @@ static int spraid_bus_reset_handler(struct scsi_cmnd *scmd)
scsi_print_command(scmd);
- if (!spraid_wait_abnl_cmd_done(iod) || spraid_check_scmd_completed(scmd) ||
- hdev->state != SPRAID_LIVE)
+ if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+ spraid_check_scmd_completed(scmd))
return SUCCESS;
hostdata = scmd->device->hostdata;
@@ -3272,7 +3067,7 @@ static int spraid_shost_reset_handler(struct scsi_cmnd *scmd)
struct spraid_dev *hdev = shost_priv(scmd->device->host);
scsi_print_command(scmd);
- if (spraid_check_scmd_completed(scmd) || hdev->state != SPRAID_LIVE)
+ if (hdev->state != SPRAID_LIVE || spraid_check_scmd_completed(scmd))
return SUCCESS;
spraid_get_tag_from_scmd(scmd, &hwq, &cid);
@@ -3288,6 +3083,62 @@ static int spraid_shost_reset_handler(struct scsi_cmnd *scmd)
return SUCCESS;
}
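+/*
+ * PCI error handling: a frozen channel blocks SCSI requests and moves
+ * the host to SPRAID_RESETTING so that slot reset can rebuild the
+ * controller state via reset_work.
+ */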
+static pci_ers_result_t spraid_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "enter pci error detect, state:%d\n", state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ dev_warn(hdev->dev, "channel is normal, do nothing\n");
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ dev_warn(hdev->dev, "channel io frozen, need reset controller\n");
+
+ scsi_block_requests(hdev->shost);
+
+ spraid_change_host_state(hdev, SPRAID_RESETTING);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ dev_warn(hdev->dev, "channel io failure, request disconnect\n");
+
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t spraid_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "restart after slot reset\n");
+
+ pci_restore_state(pdev);
+
+ if (!queue_work(spraid_wq, &hdev->reset_work)) {
+ dev_err(hdev->dev, "[%s] err, the device is resetting state\n", __func__);
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ flush_work(&hdev->reset_work);
+
+ scsi_unblock_requests(hdev->shost);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void spraid_reset_done(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "enter spraid reset done\n");
+}
+
static ssize_t csts_pp_show(struct device *cdev, struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3347,7 +3198,7 @@ static ssize_t fw_version_show(struct device *cdev, struct device_attribute *att
struct Scsi_Host *shost = class_to_shost(cdev);
struct spraid_dev *hdev = shost_priv(shost);
- return snprintf(buf, sizeof(hdev->ctrl_info->fr), "%s\n", hdev->ctrl_info->fr);
+ return snprintf(buf, PAGE_SIZE, "%s\n", hdev->ctrl_info->fr);
}
static DEVICE_ATTR_RO(csts_pp);
@@ -3365,6 +3216,185 @@ static struct device_attribute *spraid_host_attrs[] = {
NULL,
};
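+/*
+ * Read virtual-disk info from the firmware via a vendor usr_cmd; the
+ * reply is copied out of a one-page coherent DMA buffer.
+ */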
+static int spraid_get_vd_info(struct spraid_dev *hdev, struct spraid_vd_info *vd_info, u16 vid)
+{
+ struct spraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_VDINFO);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.usr_cmd.info_1.param_len = cpu_to_le16(VDINFO_PARAM_LEN);
+ admin_cmd.usr_cmd.cdw10 = cpu_to_le32(vid);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(vd_info, data_ptr, sizeof(struct spraid_vd_info));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static int spraid_get_bgtask(struct spraid_dev *hdev, struct spraid_bgtask *bgtask)
+{
+ struct spraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_BGTASK);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(bgtask, data_ptr, sizeof(struct spraid_bgtask));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static ssize_t raid_level_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct spraid_dev *hdev;
+ struct spraid_vd_info *vd_info;
+ struct spraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ vd_info->rg_level = ARRAY_SIZE(raid_levels) - 1;
+
+ ret = (vd_info->rg_level < ARRAY_SIZE(raid_levels)) ?
+ vd_info->rg_level : (ARRAY_SIZE(raid_levels) - 1);
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "RAID-%s\n", raid_levels[ret]);
+}
+
+static ssize_t raid_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct spraid_dev *hdev;
+ struct spraid_vd_info *vd_info;
+ struct spraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret) {
+ vd_info->vd_status = 0;
+ vd_info->rg_id = 0xff;
+ }
+
+ ret = (vd_info->vd_status < ARRAY_SIZE(raid_states)) ? vd_info->vd_status : 0;
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", raid_states[ret]);
+}
+
+static ssize_t raid_resync_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct spraid_dev *hdev;
+ struct spraid_vd_info *vd_info;
+ struct spraid_bgtask *bgtask;
+ struct spraid_sdev_hostdata *hostdata;
+ u8 rg_id, i, progress = 0;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ goto out;
+
+ rg_id = vd_info->rg_id;
+
+ bgtask = (struct spraid_bgtask *)vd_info;
+ ret = spraid_get_bgtask(hdev, bgtask);
+ if (ret)
+ goto out;
+ for (i = 0; i < bgtask->task_num; i++) {
+ if ((bgtask->bgtask[i].type == BGTASK_TYPE_REBUILD) &&
+ (le16_to_cpu(bgtask->bgtask[i].vd_id) == rg_id))
+ progress = bgtask->bgtask[i].progress;
+ }
+
+out:
+ kfree(vd_info);
+ return snprintf(buf, PAGE_SIZE, "%d\n", progress);
+}
+
+static DEVICE_ATTR_RO(raid_level);
+static DEVICE_ATTR_RO(raid_state);
+static DEVICE_ATTR_RO(raid_resync);
+
+static struct device_attribute *spraid_dev_attrs[] = {
+ &dev_attr_raid_level,
+ &dev_attr_raid_state,
+ &dev_attr_raid_resync,
+ NULL,
+};
+
+static struct pci_error_handlers spraid_err_handler = {
+ .error_detected = spraid_pci_error_detected,
+ .slot_reset = spraid_pci_slot_reset,
+ .reset_done = spraid_reset_done,
+};
+
+static int spraid_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+ int ret;
+ struct spraid_dev *hdev = shost_priv(shost);
+
+ dev_info(hdev->dev, "[%s] start sysfs host reset cmd\n", __func__);
+ ret = spraid_reset_work_sync(hdev);
+ dev_info(hdev->dev, "[%s] stop sysfs host reset cmd[%d]\n", __func__, ret);
+
+ return ret;
+}
+
static struct scsi_host_template spraid_driver_template = {
.module = THIS_MODULE,
.name = "Ramaxel Logic spraid driver",
@@ -3379,9 +3409,11 @@ static struct scsi_host_template spraid_driver_template = {
.eh_bus_reset_handler = spraid_bus_reset_handler,
.eh_host_reset_handler = spraid_shost_reset_handler,
.change_queue_depth = scsi_change_queue_depth,
- .host_tagset = 1,
+ .host_tagset = 0,
.this_id = -1,
.shost_attrs = spraid_host_attrs,
+ .sdev_attrs = spraid_dev_attrs,
+ .host_reset = spraid_sysfs_host_reset,
};
static void spraid_shutdown(struct pci_dev *pdev)
@@ -3392,11 +3424,53 @@ static void spraid_shutdown(struct pci_dev *pdev)
spraid_disable_admin_queue(hdev, true);
}
+/* bsg dispatch user command */
+static int spraid_bsg_host_dispatch(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = dev_to_shost(job->dev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct spraid_bsg_request *bsg_req = job->request;
+ int ret = 0;
+
+ dev_log_dbg(hdev->dev, "[%s] msgcode[%d], msglen[%d], timeout[%d], req_nsge[%d], req_len[%d]\n",
+ __func__, bsg_req->msgcode, job->request_len, rq->timeout,
+ job->request_payload.sg_cnt, job->request_payload.payload_len);
+
+ job->reply_len = 0;
+
+ switch (bsg_req->msgcode) {
+ case SPRAID_BSG_ADM:
+ ret = spraid_user_admin_cmd(hdev, job);
+ break;
+ case SPRAID_BSG_IOQ:
+ ret = spraid_user_ioq_cmd(hdev, job);
+ break;
+ default:
+ dev_info(hdev->dev, "[%s] unsupported msgcode[%d]\n", __func__, bsg_req->msgcode);
+ break;
+ }
+
+ if (ret > 0)
+ ret = ret | (ret << 8);
+
+ bsg_job_done(job, ret, 0);
+ return 0;
+}
+
+static inline void spraid_remove_bsg(struct spraid_dev *hdev)
+{
+ if (hdev->bsg_queue) {
+ bsg_unregister_queue(hdev->bsg_queue);
+ blk_cleanup_queue(hdev->bsg_queue);
+ }
+}
static int spraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct spraid_dev *hdev;
struct Scsi_Host *shost;
int node, ret;
+ char bsg_name[15];
shost = scsi_host_alloc(&spraid_driver_template, sizeof(*hdev));
if (!shost) {
@@ -3421,10 +3495,10 @@ static int spraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto put_dev;
init_rwsem(&hdev->devices_rwsem);
- INIT_WORK(&hdev->aen_work, spraid_async_event_work);
INIT_WORK(&hdev->scan_work, spraid_scan_work);
INIT_WORK(&hdev->timesyn_work, spraid_timesyn_work);
INIT_WORK(&hdev->reset_work, spraid_reset_work);
+ INIT_WORK(&hdev->fw_act_work, spraid_fw_act_work);
spin_lock_init(&hdev->state_lock);
ret = spraid_alloc_resources(hdev);
@@ -3439,17 +3513,13 @@ static int spraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto pci_disable;
- ret = spraid_alloc_admin_tags(hdev);
- if (ret)
- goto disable_admin_q;
-
ret = spraid_init_ctrl_info(hdev);
if (ret)
- goto free_admin_tagset;
+ goto disable_admin_q;
ret = spraid_alloc_iod_ext_mem_pool(hdev);
if (ret)
- goto free_admin_tagset;
+ goto disable_admin_q;
ret = spraid_setup_io_queues(hdev);
if (ret)
@@ -3464,9 +3534,15 @@ static int spraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto remove_io_queues;
}
- ret = spraid_create_cdev(hdev);
- if (ret)
+ snprintf(bsg_name, sizeof(bsg_name), "spraid%d", shost->host_no);
+ hdev->bsg_queue = bsg_setup_queue(&shost->shost_gendev, bsg_name,
+ spraid_bsg_host_dispatch, NULL,
+ spraid_cmd_size(hdev, true, false));
+ if (IS_ERR(hdev->bsg_queue)) {
+ dev_err(hdev->dev, "err, setup bsg failed\n");
+ hdev->bsg_queue = NULL;
goto remove_io_queues;
+ }
if (hdev->online_queues == SPRAID_ADMIN_QUEUE_NUM) {
dev_warn(hdev->dev, "warn only admin queue can be used\n");
@@ -3475,11 +3551,11 @@ static int spraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hdev->state = SPRAID_LIVE;
- spraid_send_aen(hdev);
+ spraid_send_all_aen(hdev);
ret = spraid_dev_list_init(hdev);
if (ret)
- goto remove_cdev;
+ goto remove_bsg;
ret = spraid_configure_timestamp(hdev);
if (ret)
@@ -3487,20 +3563,18 @@ static int spraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = spraid_alloc_ioq_ptcmds(hdev);
if (ret)
- goto remove_cdev;
+ goto remove_bsg;
scsi_scan_host(hdev->shost);
return 0;
-remove_cdev:
- spraid_remove_cdev(hdev);
+remove_bsg:
+ spraid_remove_bsg(hdev);
remove_io_queues:
spraid_remove_io_queues(hdev);
free_iod_mempool:
spraid_free_iod_ext_mem_pool(hdev);
-free_admin_tagset:
- spraid_remove_admin_tagset(hdev);
disable_admin_q:
spraid_disable_admin_queue(hdev, false);
pci_disable:
@@ -3524,22 +3598,17 @@ static void spraid_remove(struct pci_dev *pdev)
dev_info(hdev->dev, "enter spraid remove\n");
spraid_change_host_state(hdev, SPRAID_DELETING);
+ flush_work(&hdev->reset_work);
- if (!pci_device_is_present(pdev)) {
- scsi_block_requests(shost);
+ if (!pci_device_is_present(pdev))
spraid_back_all_io(hdev);
- scsi_unblock_requests(shost);
- }
- flush_work(&hdev->reset_work);
+ spraid_remove_bsg(hdev);
scsi_remove_host(shost);
-
- kfree(hdev->ioq_ptcmds);
+ spraid_free_ioq_ptcmds(hdev);
kfree(hdev->devices);
- spraid_remove_cdev(hdev);
spraid_remove_io_queues(hdev);
spraid_free_iod_ext_mem_pool(hdev);
- spraid_remove_admin_tagset(hdev);
spraid_disable_admin_queue(hdev, false);
spraid_pci_disable(hdev);
spraid_free_resources(hdev);
@@ -3551,7 +3620,7 @@ static void spraid_remove(struct pci_dev *pdev)
}
static const struct pci_device_id spraid_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_RAMAXEL_LOGIC, SPRAID_SERVER_DEVICE_HAB_DID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RAMAXEL_LOGIC, SPRAID_SERVER_DEVICE_HBA_DID) },
{ PCI_DEVICE(PCI_VENDOR_ID_RAMAXEL_LOGIC, SPRAID_SERVER_DEVICE_RAID_DID) },
{ 0, }
};
@@ -3563,6 +3632,7 @@ static struct pci_driver spraid_driver = {
.probe = spraid_probe,
.remove = spraid_remove,
.shutdown = spraid_shutdown,
+ .err_handler = &spraid_err_handler,
};
static int __init spraid_init(void)
@@ -3573,14 +3643,10 @@ static int __init spraid_init(void)
if (!spraid_wq)
return -ENOMEM;
- ret = alloc_chrdev_region(&spraid_chr_devt, 0, SPRAID_MINORS, "spraid");
- if (ret < 0)
- goto destroy_wq;
-
spraid_class = class_create(THIS_MODULE, "spraid");
if (IS_ERR(spraid_class)) {
ret = PTR_ERR(spraid_class);
- goto unregister_chrdev;
+ goto destroy_wq;
}
ret = pci_register_driver(&spraid_driver);
@@ -3591,8 +3657,6 @@ static int __init spraid_init(void)
destroy_class:
class_destroy(spraid_class);
-unregister_chrdev:
- unregister_chrdev_region(spraid_chr_devt, SPRAID_MINORS);
destroy_wq:
destroy_workqueue(spraid_wq);
@@ -3603,12 +3667,11 @@ static void __exit spraid_exit(void)
{
pci_unregister_driver(&spraid_driver);
class_destroy(spraid_class);
- unregister_chrdev_region(spraid_chr_devt, SPRAID_MINORS);
destroy_workqueue(spraid_wq);
ida_destroy(&spraid_instance_ida);
}
-MODULE_AUTHOR("Ramaxel Memory Technology");
+MODULE_AUTHOR("songyl(a)ramaxel.com");
MODULE_DESCRIPTION("Ramaxel Memory Technology SPraid Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SPRAID_DRV_VERSION);
--
2.32.0
From: sdlzx <hdu_sdlzx(a)163.com>
Subject: [PATCH openEuler-5.10] Disable legacy DRM drivers
euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4OBDE
CVE: NA
-------------------------------------------------
This config enables dangerous old drivers. It is selected by
CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT, about which the upstream community
suggests that "modern distros should consider turning it off".
CONFIG_DRM_VM was only selected by CONFIG_DRM_LEGACY, so it can be
turned off now as well.
Signed-off-by: sdlzx <hdu_sdlzx(a)163.com>
---
arch/arm64/configs/openeuler_defconfig | 10 ++--------
arch/x86/configs/openeuler_defconfig | 10 ++--------
2 files changed, 4 insertions(+), 16 deletions(-)
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 17bc8750b..6949824e8 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -4673,7 +4673,6 @@ CONFIG_DRM_TTM_DMA_PAGE_POOL=y
CONFIG_DRM_VRAM_HELPER=y
CONFIG_DRM_TTM_HELPER=y
CONFIG_DRM_GEM_SHMEM_HELPER=y
-CONFIG_DRM_VM=y
CONFIG_DRM_SCHED=m
#
@@ -4719,7 +4718,7 @@ CONFIG_DRM_AMD_DC_DCN=y
# CONFIG_HSA_AMD is not set
CONFIG_DRM_NOUVEAU=m
-CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT=y
+# CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT is not set
CONFIG_NOUVEAU_DEBUG=5
CONFIG_NOUVEAU_DEBUG_DEFAULT=3
# CONFIG_NOUVEAU_DEBUG_MMU is not set
@@ -4817,12 +4816,7 @@ CONFIG_DRM_CIRRUS_QEMU=m
# CONFIG_DRM_LIMA is not set
# CONFIG_DRM_PANFROST is not set
# CONFIG_DRM_TIDSS is not set
-CONFIG_DRM_LEGACY=y
-# CONFIG_DRM_TDFX is not set
-# CONFIG_DRM_R128 is not set
-# CONFIG_DRM_MGA is not set
-# CONFIG_DRM_VIA is not set
-# CONFIG_DRM_SAVAGE is not set
+# CONFIG_DRM_LEGACY is not set
CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
#
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index 7b6083018..dbae04231 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -5040,7 +5040,6 @@ CONFIG_DRM_TTM_DMA_PAGE_POOL=y
CONFIG_DRM_VRAM_HELPER=m
CONFIG_DRM_TTM_HELPER=m
CONFIG_DRM_GEM_SHMEM_HELPER=y
-CONFIG_DRM_VM=y
CONFIG_DRM_SCHED=m
#
@@ -5085,7 +5084,7 @@ CONFIG_DRM_AMD_DC_DCN=y
# CONFIG_HSA_AMD is not set
CONFIG_DRM_NOUVEAU=m
-CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT=y
+# CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT is not set
CONFIG_NOUVEAU_DEBUG=5
CONFIG_NOUVEAU_DEBUG_DEFAULT=3
# CONFIG_NOUVEAU_DEBUG_MMU is not set
@@ -5226,12 +5225,7 @@ CONFIG_DRM_CIRRUS_QEMU=m
# CONFIG_TINYDRM_ST7735R is not set
# CONFIG_DRM_XEN is not set
# CONFIG_DRM_VBOXVIDEO is not set
-CONFIG_DRM_LEGACY=y
-# CONFIG_DRM_TDFX is not set
-# CONFIG_DRM_R128 is not set
-# CONFIG_DRM_MGA is not set
-# CONFIG_DRM_VIA is not set
-# CONFIG_DRM_SAVAGE is not set
+# CONFIG_DRM_LEGACY is not set
CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
#
--
2.27.0
From: Keefe LIU <liuqifa(a)huawei.com>
hulk inclusion
category: feature
bugzilla: 9511, https://gitee.com/openeuler/kernel/issues/I4IHL1
CVE: NA
-------------------------------------------------
In a typical IPvlan L2 setup, the master is in the default-ns and each
slave is in a different (slave) ns. In this setup, if the master and
the slaves are in different subnets, egress traffic originating from a
slave-ns cannot be forwarded to the master, to other machines whose IP
is in the same subnet as the master, or to other interfaces in the
default-ns.
This patch introduces a new mode, l2e, for ipvlan to achieve the above
goals; it does not affect the original l2, l3 and l3s modes.
As the ip tool doesn't support l2e mode, we use the module parameter
"ipvlan_default_mode" to set the default work mode: 0 for l2, 1 for
l3, 2 for l2e, 3 for l3s; other values are invalid for now.
Note that when we create ipvlan devices with the "ip" command and
explicitly assign a mode, ipvlan will work in the assigned mode rather
than in "ipvlan_default_mode".
Signed-off-by: Keefe LIU <liuqifa(a)huawei.com>
Reviewed-by: Wei Yongjun <weiyongjun1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Modified:
when insmodding the ipvlan module, the original (4.19) version uses 0
for l2, 1 for l3, 2 for l2e and 3 for l3s mode, while the 5.10 version
uses 0 for l2, 1 for l3, 2 for l3s and 3 for l2e mode.
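For illustration, a minimal sketch (a hypothetical helper, not part of
this patch) of how the 5.10 parameter values map onto the ipvlan_mode
enum extended below:
    static u16 ipvlan_mode_from_param(int val)
    {
            switch (val) {
            case 0: return IPVLAN_MODE_L2;
            case 1: return IPVLAN_MODE_L3;
            case 2: return IPVLAN_MODE_L3S;
            case 3: return IPVLAN_MODE_L2E;
            default: return IPVLAN_MODE_L3; /* invalid: keep the old default */
            }
    }
In the patch itself the parameter value is used directly as the mode,
since the enum values line up with the parameter numbers.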
Signed-off-by: Lu Wei <luwei32(a)huawei.com>
Reviewed-by: Yue Haibing <yuehaibing(a)huawei.com>
Reviewed-by: Wei Yongjun <weiyongjun1(a)huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai(a)huawei.com>
---
drivers/net/ipvlan/ipvlan_core.c | 190 +++++++++++++++++++++++++++++++
drivers/net/ipvlan/ipvlan_main.c | 6 +-
include/uapi/linux/if_link.h | 1 +
3 files changed, 196 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 8801d093135c..5b695ec5c650 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -532,6 +532,122 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
return ret;
}
+static int ipvlan_process_v4_forward(struct sk_buff *skb)
+{
+ const struct iphdr *ip4h = ip_hdr(skb);
+ struct net_device *dev = skb->dev;
+ struct net *net = dev_net(dev);
+ struct rtable *rt;
+ int err, ret = NET_XMIT_DROP;
+ struct flowi4 fl4 = {
+ .flowi4_tos = RT_TOS(ip4h->tos),
+ .flowi4_flags = FLOWI_FLAG_ANYSRC,
+ .flowi4_mark = skb->mark,
+ .daddr = ip4h->daddr,
+ .saddr = ip4h->saddr,
+ };
+
+ rt = ip_route_output_flow(net, &fl4, NULL);
+ if (IS_ERR(rt))
+ goto err;
+
+ if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
+ ip_rt_put(rt);
+ goto err;
+ }
+ skb_dst_set(skb, &rt->dst);
+ err = ip_local_out(net, skb->sk, skb);
+ if (unlikely(net_xmit_eval(err)))
+ dev->stats.tx_errors++;
+ else
+ ret = NET_XMIT_SUCCESS;
+ goto out;
+err:
+ dev->stats.tx_errors++;
+ kfree_skb(skb);
+out:
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int ipvlan_process_v6_forward(struct sk_buff *skb)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct net_device *dev = skb->dev;
+ struct net *net = dev_net(dev);
+ struct dst_entry *dst;
+ int err, ret = NET_XMIT_DROP;
+ struct flowi6 fl6 = {
+ .daddr = ip6h->daddr,
+ .saddr = ip6h->saddr,
+ .flowi6_flags = FLOWI_FLAG_ANYSRC,
+ .flowlabel = ip6_flowinfo(ip6h),
+ .flowi6_mark = skb->mark,
+ .flowi6_proto = ip6h->nexthdr,
+ };
+
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (dst->error) {
+ ret = dst->error;
+ dst_release(dst);
+ goto err;
+ }
+ skb_dst_set(skb, dst);
+ err = ip6_local_out(net, skb->sk, skb);
+ if (unlikely(net_xmit_eval(err)))
+ dev->stats.tx_errors++;
+ else
+ ret = NET_XMIT_SUCCESS;
+ goto out;
+err:
+ dev->stats.tx_errors++;
+ kfree_skb(skb);
+out:
+ return ret;
+}
+#else
+static int ipvlan_process_v6_forward(struct sk_buff *skb)
+{
+ return NET_XMIT_DROP;
+}
+#endif
+
+static int ipvlan_process_forward(struct sk_buff *skb)
+{
+ struct ethhdr *ethh = eth_hdr(skb);
+ int ret = NET_XMIT_DROP;
+
+ /* In this mode we don't care about multicast and broadcast traffic */
+ if (is_multicast_ether_addr(ethh->h_dest)) {
+ pr_debug_ratelimited("Dropped {multi|broad}cast of type=[%x]\n",
+ ntohs(skb->protocol));
+ kfree_skb(skb);
+ goto out;
+ }
+
+ /* The ipvlan is a pseudo-L2 device, so the packets that we receive
+ * will have an L2 header, which needs to be discarded so the packet
+ * can be processed further in the net-ns of the main-device.
+ */
+ if (skb_mac_header_was_set(skb)) {
+ skb_pull(skb, sizeof(*ethh));
+ skb->mac_header = (typeof(skb->mac_header))~0U;
+ skb_reset_network_header(skb);
+ }
+
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ ret = ipvlan_process_v6_forward(skb);
+ } else if (skb->protocol == htons(ETH_P_IP)) {
+ ret = ipvlan_process_v4_forward(skb);
+ } else {
+ pr_warn_ratelimited("Dropped outbound packet type=%x\n",
+ ntohs(skb->protocol));
+ kfree_skb(skb);
+ }
+out:
+ return ret;
+}
+
static void ipvlan_multicast_enqueue(struct ipvl_port *port,
struct sk_buff *skb, bool tx_pkt)
{
@@ -629,6 +745,46 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
return dev_queue_xmit(skb);
}
+static int ipvlan_xmit_mode_l2e(struct sk_buff *skb, struct net_device *dev)
+{
+ const struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct ethhdr *eth = eth_hdr(skb);
+ struct ipvl_addr *addr;
+ void *lyr3h;
+ int addr_type;
+
+ if (!ipvlan_is_vepa(ipvlan->port) &&
+ ether_addr_equal(eth->h_dest, eth->h_source)) {
+ lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
+ if (lyr3h) {
+ addr = ipvlan_addr_lookup(ipvlan->port, lyr3h,
+ addr_type, true);
+ if (addr) {
+ if (ipvlan_is_private(ipvlan->port)) {
+ consume_skb(skb);
+ return NET_XMIT_DROP;
+ }
+ return ipvlan_rcv_frame(addr, &skb, true);
+ }
+ }
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_XMIT_DROP;
+
+ /* the packet may need to be forwarded */
+ skb->dev = ipvlan->phy_dev;
+ ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
+ return ipvlan_process_forward(skb);
+ } else if (is_multicast_ether_addr(eth->h_dest)) {
+ ipvlan_skb_crossing_ns(skb, NULL);
+ ipvlan_multicast_enqueue(ipvlan->port, skb, true);
+ return NET_XMIT_SUCCESS;
+ }
+
+ skb->dev = ipvlan->phy_dev;
+ return dev_queue_xmit(skb);
+}
+
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
@@ -648,6 +804,8 @@ int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
case IPVLAN_MODE_L3S:
#endif
return ipvlan_xmit_mode_l3(skb, dev);
+ case IPVLAN_MODE_L2E:
+ return ipvlan_xmit_mode_l2e(skb, dev);
}
/* Should not reach here */
@@ -729,6 +887,36 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
return ret;
}
+static rx_handler_result_t ipvlan_handle_mode_l2e(struct sk_buff **pskb,
+ struct ipvl_port *port)
+{
+ struct sk_buff *skb = *pskb;
+ struct ethhdr *eth = eth_hdr(skb);
+ rx_handler_result_t ret = RX_HANDLER_PASS;
+
+ if (is_multicast_ether_addr(eth->h_dest)) {
+ if (ipvlan_external_frame(skb, port)) {
+ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+ /* External frames are queued for device local
+ * distribution, but a copy is given to master
+ * straight away to avoid sending duplicates later
+ * when work-queue processes this frame. This is
+ * achieved by returning RX_HANDLER_PASS.
+ */
+ if (nskb) {
+ ipvlan_skb_crossing_ns(nskb, NULL);
+ ipvlan_multicast_enqueue(port, nskb, false);
+ }
+ }
+ } else {
+ /* Perform like l3 mode for non-multicast packet */
+ ret = ipvlan_handle_mode_l3(pskb, port);
+ }
+
+ return ret;
+}
+
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
@@ -746,6 +934,8 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
case IPVLAN_MODE_L3S:
return RX_HANDLER_PASS;
#endif
+ case IPVLAN_MODE_L2E:
+ return ipvlan_handle_mode_l2e(pskb, port);
}
/* Should not reach here */
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 60b7d93bb834..efd1452cd929 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -4,6 +4,10 @@
#include "ipvlan.h"
+static int ipvlan_default_mode = IPVLAN_MODE_L3;
+module_param(ipvlan_default_mode, int, 0400);
+MODULE_PARM_DESC(ipvlan_default_mode, "set ipvlan default mode: 0 for l2, 1 for l3, 2 for l3s, 3 for l2e, others invalid now");
+
static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval,
struct netlink_ext_ack *extack)
{
@@ -535,7 +539,7 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
struct ipvl_port *port;
struct net_device *phy_dev;
int err;
- u16 mode = IPVLAN_MODE_L3;
+ u16 mode = ipvlan_default_mode;
if (!tb[IFLA_LINK])
return -EINVAL;
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index c4b23f06f69e..50d4705e1cbc 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -690,6 +690,7 @@ enum ipvlan_mode {
IPVLAN_MODE_L2 = 0,
IPVLAN_MODE_L3,
IPVLAN_MODE_L3S,
+ IPVLAN_MODE_L2E,
IPVLAN_MODE_MAX
};
--
2.20.1
[PATCH OLK-5.10 v2] arm64: Revert feature: Add memmap parameter and register pmem
by Zhuling 27 Dec '21
euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4O31I?from=project-issue
CVE: NA
The reserved memory of PMEM conflicts with
"add memmap interface to reserved memory for mremap syscall usage";
it needs to be rolled back and will be resubmitted after adaptation.
Related feature commits:
1. PMEM function commit: 94dc364f5eda10f49449ba573dc3322e1ea92280
2. PMEM feature config commit: 36d7a831e15ceb84e937122c87d01c14242dc377
Signed-off-by: Zhuling <zhuling8(a)huawei.com>
---
arch/arm64/Kconfig | 21 --------
arch/arm64/configs/openeuler_defconfig | 3 --
arch/arm64/kernel/Makefile | 1 -
arch/arm64/kernel/pmem.c | 35 -------------
arch/arm64/kernel/setup.c | 10 ----
arch/arm64/mm/init.c | 94 ----------------------------------
drivers/nvdimm/Kconfig | 5 --
drivers/nvdimm/Makefile | 1 -
8 files changed, 170 deletions(-)
delete mode 100644 arch/arm64/kernel/pmem.c
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index df90a6e..2df4b31 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1321,27 +1321,6 @@ config RODATA_FULL_DEFAULT_ENABLED
This requires the linear region to be mapped down to pages,
which may adversely affect performance in some cases.
-config ARM64_PMEM_RESERVE
- bool "Reserve memory for persistent storage"
- default n
- help
- Use memmap=nn[KMG]!ss[KMG](memmap=100K!0x1a0000000) reserve
- memory for persistent storage.
-
- Say y here to enable this feature.
-
-config ARM64_PMEM_LEGACY_DEVICE
- bool "Create persistent storage"
- depends on BLK_DEV
- depends on LIBNVDIMM
- select ARM64_PMEM_RESERVE
- help
- Use reserved memory for persistent storage when the kernel
- restart or update. the data in PMEM will not be lost and
- can be loaded faster.
-
- Say y if unsure.
-
config ARM64_SW_TTBR0_PAN
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
help
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 17bc875..b5fc851 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -416,8 +416,6 @@ CONFIG_ARM64_CPU_PARK=y
CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_UNMAP_KERNEL_AT_EL0=y
CONFIG_RODATA_FULL_DEFAULT_ENABLED=y
-CONFIG_ARM64_PMEM_RESERVE=y
-CONFIG_ARM64_PMEM_LEGACY_DEVICE=y
# CONFIG_ARM64_SW_TTBR0_PAN is not set
CONFIG_ARM64_TAGGED_ADDR_ABI=y
CONFIG_ARM64_ILP32=y
@@ -6026,7 +6024,6 @@ CONFIG_ND_BTT=m
CONFIG_BTT=y
CONFIG_OF_PMEM=m
CONFIG_NVDIMM_KEYS=y
-CONFIG_PMEM_LEGACY=m
CONFIG_DAX_DRIVER=y
CONFIG_DAX=y
CONFIG_DEV_DAX=m
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index f615325..169d90f 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -68,7 +68,6 @@ obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
obj-$(CONFIG_ARM64_MTE) += mte.o
obj-$(CONFIG_MPAM) += mpam/
-obj-$(CONFIG_ARM64_PMEM_LEGACY_DEVICE) += pmem.o
obj-y += vdso/ probes/
obj-$(CONFIG_COMPAT_VDSO) += vdso32/
diff --git a/arch/arm64/kernel/pmem.c b/arch/arm64/kernel/pmem.c
deleted file mode 100644
index 16eaf70..0000000
--- a/arch/arm64/kernel/pmem.c
+++ /dev/null
@@ -1,35 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright(c) 2021 Huawei Technologies Co., Ltd
- *
- * Derived from x86 and arm64 implement PMEM.
- */
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-
-static int found(struct resource *res, void *data)
-{
- return 1;
-}
-
-static int __init register_e820_pmem(void)
-{
- struct platform_device *pdev;
- int rc;
-
- rc = walk_iomem_res_desc(IORES_DESC_PERSISTENT_MEMORY_LEGACY,
- IORESOURCE_MEM, 0, -1, NULL, found);
- if (rc <= 0)
- return 0;
-
- /*
- * See drivers/nvdimm/e820.c for the implementation, this is
- * simply here to trigger the module to load on demand.
- */
- pdev = platform_device_alloc("e820_pmem", -1);
-
- return platform_device_add(pdev);
-}
-device_initcall(register_e820_pmem);
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 88ec495..7cd0425 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -70,10 +70,6 @@ static int __init arm64_enable_cpu0_hotplug(char *str)
__setup("arm64_cpu0_hotplug", arm64_enable_cpu0_hotplug);
#endif
-#ifdef CONFIG_ARM64_PMEM_RESERVE
-extern struct resource pmem_res;
-#endif
-
phys_addr_t __fdt_pointer __initdata;
/*
@@ -288,12 +284,6 @@ static void __init request_standard_resources(void)
request_resource(res, &pin_memory_resource);
#endif
}
-
-#ifdef CONFIG_ARM64_PMEM_RESERVE
- if (pmem_res.end && pmem_res.start)
- request_resource(&iomem_resource, &pmem_res);
-#endif
-
}
static int __init reserve_memblock_reserved_regions(void)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 3b9401e..e8d4461 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -55,7 +55,6 @@
*/
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);
-phys_addr_t start_at, mem_size;
#ifdef CONFIG_PIN_MEMORY
struct resource pin_memory_resource = {
@@ -112,18 +111,6 @@ static void __init reserve_pin_memory_res(void)
*/
phys_addr_t arm64_dma_phys_limit __ro_after_init;
-static unsigned long long pmem_size, pmem_start;
-
-#ifdef CONFIG_ARM64_PMEM_RESERVE
-struct resource pmem_res = {
- .name = "Persistent Memory (legacy)",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_MEM,
- .desc = IORES_DESC_PERSISTENT_MEMORY_LEGACY
-};
-#endif
-
#ifndef CONFIG_KEXEC_CORE
static void __init reserve_crashkernel(void)
{
@@ -417,83 +404,6 @@ static int __init reserve_park_mem(void)
}
#endif
-static bool __init is_mem_valid(unsigned long long mem_size, unsigned long long mem_start)
-{
- if (!memblock_is_region_memory(mem_start, mem_size)) {
- pr_warn("cannot reserve mem: region is not memory!\n");
- return false;
- }
-
- if (memblock_is_region_reserved(mem_start, mem_size)) {
- pr_warn("cannot reserve mem: region overlaps reserved memory!\n");
- return false;
- }
-
- if (!IS_ALIGNED(mem_start, SZ_2M)) {
- pr_warn("cannot reserve mem: base address is not 2MB aligned!\n");
- return false;
- }
-
- return true;
-}
-
-static int __init parse_memmap_one(char *p)
-{
- char *oldp;
-
- if (!p)
- return -EINVAL;
-
- oldp = p;
- mem_size = memparse(p, &p);
- if (p == oldp)
- return -EINVAL;
-
- if (!mem_size)
- return -EINVAL;
-
- mem_size = PAGE_ALIGN(mem_size);
-
- if (*p == '!') {
- start_at = memparse(p+1, &p);
-
- pmem_start = start_at;
- pmem_size = mem_size;
- } else
- pr_info("Unrecognized memmap option, please check the parameter.\n");
-
- return *p == '\0' ? 0 : -EINVAL;
-}
-
-static int __init parse_memmap_opt(char *str)
-{
- while (str) {
- char *k = strchr(str, ',');
-
- if (k)
- *k++ = 0;
- parse_memmap_one(str);
- str = k;
- }
-
- return 0;
-}
-early_param("memmap", parse_memmap_opt);
-
-#ifdef CONFIG_ARM64_PMEM_RESERVE
-static void __init reserve_pmem(void)
-{
- if (!is_mem_valid(mem_size, start_at))
- return;
-
- memblock_remove(pmem_start, pmem_size);
- pr_info("pmem reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
- pmem_start, pmem_start + pmem_size, pmem_size >> 20);
- pmem_res.start = pmem_start;
- pmem_res.end = pmem_start + pmem_size - 1;
-}
-#endif
-
void __init arm64_memblock_init(void)
{
const s64 linear_region_size = BIT(vabits_actual - 1);
@@ -668,10 +578,6 @@ void __init bootmem_init(void)
reserve_quick_kexec();
#endif
-#ifdef CONFIG_ARM64_PMEM_RESERVE
- reserve_pmem();
-#endif
-
reserve_pin_memory_res();
memblock_dump_all();
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index ce4de75..b7d1eb3 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -132,8 +132,3 @@ config NVDIMM_TEST_BUILD
infrastructure.
endif
-
-config PMEM_LEGACY
- tristate "Pmem_legacy"
- select X86_PMEM_LEGACY if X86
- select ARM64_PMEM_LEGACY_DEVICE if ARM64
diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile
index 6f8dc92..0407753 100644
--- a/drivers/nvdimm/Makefile
+++ b/drivers/nvdimm/Makefile
@@ -3,7 +3,6 @@ obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o
obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
obj-$(CONFIG_ND_BTT) += nd_btt.o
obj-$(CONFIG_ND_BLK) += nd_blk.o
-obj-$(CONFIG_PMEM_LEGACY) += nd_e820.o
obj-$(CONFIG_OF_PMEM) += of_pmem.o
obj-$(CONFIG_VIRTIO_PMEM) += virtio_pmem.o nd_virtio.o
--
2.9.5
[PATCH openEuler-1.0-LTS 01/36] net: Prevent infinite while loop in skb_tx_hash()
by Yang Yingliang 27 Dec '21
From: Michael Chan <michael.chan(a)broadcom.com>
stable inclusion
from linux-4.19.215
commit 02302cbd52264337630a32848ac03648648e9685
--------------------------------
commit 0c57eeecc559ca6bc18b8c4e2808bc78dbe769b0 upstream.
Drivers call netdev_set_num_tc() and then netdev_set_tc_queue()
to set the queue count and offset for each TC. So the queue count
and offset for the TCs may be zero for a short period after dev->num_tc
has been set. If a TX packet is being transmitted at this time in the
code path netdev_pick_tx() -> skb_tx_hash(), skb_tx_hash() may see
nonzero dev->num_tc but zero qcount for the TC. The while loop that
keeps looping while hash >= qcount will not end.
Fix it by checking the TC's qcount to be nonzero before using it.
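For reference, a simplified sketch of the affected logic in
skb_tx_hash() (paraphrased from net/core/dev.c, not the literal
source):
    if (skb_rx_queue_recorded(skb)) {
            hash = skb_get_rx_queue(skb);
            while (unlikely(hash >= qcount))
                    hash -= qcount; /* spins forever when qcount == 0 */
            return hash + qoffset;
    }
With dev->num_tc already set but tc_to_txq not yet populated, qcount is
0 and the loop above never terminates; the added check below falls back
to the device's full queue range in that window.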
Fixes: eadec877ce9c ("net: Add support for subordinate traffic classes to netdev_pick_tx")
Reviewed-by: Andy Gospodarek <gospo(a)broadcom.com>
Signed-off-by: Michael Chan <michael.chan(a)broadcom.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
net/core/dev.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/net/core/dev.c b/net/core/dev.c
index 5d9800804d4a4..6ed7810ed7bd4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2846,6 +2846,12 @@ static u16 skb_tx_hash(const struct net_device *dev,
qoffset = sb_dev->tc_to_txq[tc].offset;
qcount = sb_dev->tc_to_txq[tc].count;
+ if (unlikely(!qcount)) {
+ net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
+ sb_dev->name, qoffset, tc);
+ qoffset = 0;
+ qcount = dev->real_num_tx_queues;
+ }
}
if (skb_rx_queue_recorded(skb)) {
--
2.25.1
[PATCH OLK-5.10] Revert feature: arm64: Add memmap parameter and register pmem
by Zhuling 27 Dec '21
euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4O31I?from=project-issue
CVE: NA
The reserved memory of PMEM conflicts with
"add memmap interface to reserved memory for mremap syscall usage";
it needs to be rolled back and will be resubmitted after adaptation.
Related feature commits:
1. PMEM function commit: 94dc364f5eda10f49449ba573dc3322e1ea92280
2. PMEM feature config commit: 36d7a831e15ceb84e937122c87d01c14242dc377
Signed-off-by: Zhuling <zhuling8(a)huawei.com>
---
arch/arm64/Kconfig | 21 --------
arch/arm64/configs/openeuler_defconfig | 3 --
arch/arm64/kernel/Makefile | 1 -
arch/arm64/kernel/pmem.c | 35 -------------
arch/arm64/kernel/setup.c | 10 ----
arch/arm64/mm/init.c | 94 ----------------------------------
drivers/nvdimm/Kconfig | 5 --
drivers/nvdimm/Makefile | 1 -
8 files changed, 170 deletions(-)
delete mode 100644 arch/arm64/kernel/pmem.c
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index df90a6e..2df4b31 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1321,27 +1321,6 @@ config RODATA_FULL_DEFAULT_ENABLED
This requires the linear region to be mapped down to pages,
which may adversely affect performance in some cases.
-config ARM64_PMEM_RESERVE
- bool "Reserve memory for persistent storage"
- default n
- help
- Use memmap=nn[KMG]!ss[KMG](memmap=100K!0x1a0000000) reserve
- memory for persistent storage.
-
- Say y here to enable this feature.
-
-config ARM64_PMEM_LEGACY_DEVICE
- bool "Create persistent storage"
- depends on BLK_DEV
- depends on LIBNVDIMM
- select ARM64_PMEM_RESERVE
- help
- Use reserved memory for persistent storage when the kernel
- restart or update. the data in PMEM will not be lost and
- can be loaded faster.
-
- Say y if unsure.
-
config ARM64_SW_TTBR0_PAN
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
help
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 17bc875..b5fc851 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -416,8 +416,6 @@ CONFIG_ARM64_CPU_PARK=y
CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_UNMAP_KERNEL_AT_EL0=y
CONFIG_RODATA_FULL_DEFAULT_ENABLED=y
-CONFIG_ARM64_PMEM_RESERVE=y
-CONFIG_ARM64_PMEM_LEGACY_DEVICE=y
# CONFIG_ARM64_SW_TTBR0_PAN is not set
CONFIG_ARM64_TAGGED_ADDR_ABI=y
CONFIG_ARM64_ILP32=y
@@ -6026,7 +6024,6 @@ CONFIG_ND_BTT=m
CONFIG_BTT=y
CONFIG_OF_PMEM=m
CONFIG_NVDIMM_KEYS=y
-CONFIG_PMEM_LEGACY=m
CONFIG_DAX_DRIVER=y
CONFIG_DAX=y
CONFIG_DEV_DAX=m
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index f615325..169d90f 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -68,7 +68,6 @@ obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
obj-$(CONFIG_ARM64_MTE) += mte.o
obj-$(CONFIG_MPAM) += mpam/
-obj-$(CONFIG_ARM64_PMEM_LEGACY_DEVICE) += pmem.o
obj-y += vdso/ probes/
obj-$(CONFIG_COMPAT_VDSO) += vdso32/
diff --git a/arch/arm64/kernel/pmem.c b/arch/arm64/kernel/pmem.c
deleted file mode 100644
index 16eaf70..0000000
--- a/arch/arm64/kernel/pmem.c
+++ /dev/null
@@ -1,35 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright(c) 2021 Huawei Technologies Co., Ltd
- *
- * Derived from x86 and arm64 implement PMEM.
- */
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-
-static int found(struct resource *res, void *data)
-{
- return 1;
-}
-
-static int __init register_e820_pmem(void)
-{
- struct platform_device *pdev;
- int rc;
-
- rc = walk_iomem_res_desc(IORES_DESC_PERSISTENT_MEMORY_LEGACY,
- IORESOURCE_MEM, 0, -1, NULL, found);
- if (rc <= 0)
- return 0;
-
- /*
- * See drivers/nvdimm/e820.c for the implementation, this is
- * simply here to trigger the module to load on demand.
- */
- pdev = platform_device_alloc("e820_pmem", -1);
-
- return platform_device_add(pdev);
-}
-device_initcall(register_e820_pmem);
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 88ec495..7cd0425 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -70,10 +70,6 @@ static int __init arm64_enable_cpu0_hotplug(char *str)
__setup("arm64_cpu0_hotplug", arm64_enable_cpu0_hotplug);
#endif
-#ifdef CONFIG_ARM64_PMEM_RESERVE
-extern struct resource pmem_res;
-#endif
-
phys_addr_t __fdt_pointer __initdata;
/*
@@ -288,12 +284,6 @@ static void __init request_standard_resources(void)
request_resource(res, &pin_memory_resource);
#endif
}
-
-#ifdef CONFIG_ARM64_PMEM_RESERVE
- if (pmem_res.end && pmem_res.start)
- request_resource(&iomem_resource, &pmem_res);
-#endif
-
}
static int __init reserve_memblock_reserved_regions(void)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 3b9401e..e8d4461 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -55,7 +55,6 @@
*/
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);
-phys_addr_t start_at, mem_size;
#ifdef CONFIG_PIN_MEMORY
struct resource pin_memory_resource = {
@@ -112,18 +111,6 @@ static void __init reserve_pin_memory_res(void)
*/
phys_addr_t arm64_dma_phys_limit __ro_after_init;
-static unsigned long long pmem_size, pmem_start;
-
-#ifdef CONFIG_ARM64_PMEM_RESERVE
-struct resource pmem_res = {
- .name = "Persistent Memory (legacy)",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_MEM,
- .desc = IORES_DESC_PERSISTENT_MEMORY_LEGACY
-};
-#endif
-
#ifndef CONFIG_KEXEC_CORE
static void __init reserve_crashkernel(void)
{
@@ -417,83 +404,6 @@ static int __init reserve_park_mem(void)
}
#endif
-static bool __init is_mem_valid(unsigned long long mem_size, unsigned long long mem_start)
-{
- if (!memblock_is_region_memory(mem_start, mem_size)) {
- pr_warn("cannot reserve mem: region is not memory!\n");
- return false;
- }
-
- if (memblock_is_region_reserved(mem_start, mem_size)) {
- pr_warn("cannot reserve mem: region overlaps reserved memory!\n");
- return false;
- }
-
- if (!IS_ALIGNED(mem_start, SZ_2M)) {
- pr_warn("cannot reserve mem: base address is not 2MB aligned!\n");
- return false;
- }
-
- return true;
-}
-
-static int __init parse_memmap_one(char *p)
-{
- char *oldp;
-
- if (!p)
- return -EINVAL;
-
- oldp = p;
- mem_size = memparse(p, &p);
- if (p == oldp)
- return -EINVAL;
-
- if (!mem_size)
- return -EINVAL;
-
- mem_size = PAGE_ALIGN(mem_size);
-
- if (*p == '!') {
- start_at = memparse(p+1, &p);
-
- pmem_start = start_at;
- pmem_size = mem_size;
- } else
- pr_info("Unrecognized memmap option, please check the parameter.\n");
-
- return *p == '\0' ? 0 : -EINVAL;
-}
-
-static int __init parse_memmap_opt(char *str)
-{
- while (str) {
- char *k = strchr(str, ',');
-
- if (k)
- *k++ = 0;
- parse_memmap_one(str);
- str = k;
- }
-
- return 0;
-}
-early_param("memmap", parse_memmap_opt);
-
-#ifdef CONFIG_ARM64_PMEM_RESERVE
-static void __init reserve_pmem(void)
-{
- if (!is_mem_valid(mem_size, start_at))
- return;
-
- memblock_remove(pmem_start, pmem_size);
- pr_info("pmem reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
- pmem_start, pmem_start + pmem_size, pmem_size >> 20);
- pmem_res.start = pmem_start;
- pmem_res.end = pmem_start + pmem_size - 1;
-}
-#endif
-
void __init arm64_memblock_init(void)
{
const s64 linear_region_size = BIT(vabits_actual - 1);
@@ -668,10 +578,6 @@ void __init bootmem_init(void)
reserve_quick_kexec();
#endif
-#ifdef CONFIG_ARM64_PMEM_RESERVE
- reserve_pmem();
-#endif
-
reserve_pin_memory_res();
memblock_dump_all();
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index ce4de75..b7d1eb3 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -132,8 +132,3 @@ config NVDIMM_TEST_BUILD
infrastructure.
endif
-
-config PMEM_LEGACY
- tristate "Pmem_legacy"
- select X86_PMEM_LEGACY if X86
- select ARM64_PMEM_LEGACY_DEVICE if ARM64
diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile
index 6f8dc92..0407753 100644
--- a/drivers/nvdimm/Makefile
+++ b/drivers/nvdimm/Makefile
@@ -3,7 +3,6 @@ obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o
obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
obj-$(CONFIG_ND_BTT) += nd_btt.o
obj-$(CONFIG_ND_BLK) += nd_blk.o
-obj-$(CONFIG_PMEM_LEGACY) += nd_e820.o
obj-$(CONFIG_OF_PMEM) += of_pmem.o
obj-$(CONFIG_VIRTIO_PMEM) += virtio_pmem.o nd_virtio.o
--
2.9.5
[PATCH openEuler-1.0-LTS] watchdog: Fix check_preemption_disabled() error
by Yang Yingliang 27 Dec '21
From: Wei Li <liwei391(a)huawei.com>
hulk inclusion
category: bugfix
bugzilla: 173968, https://gitee.com/openeuler/kernel/issues/I3J87Y
CVE: NA
-------------------------------------------------
When CONFIG_DEBUG_PREEMPT and CONFIG_PREEMPT are enabled, a 'BUG' splat
is triggered during the PMU-based nmi_watchdog initialization:
[ 3.341853] BUG: using smp_processor_id() in preemptible [00000000] code: swapper/0/1
[ 3.344392] caller is debug_smp_processor_id+0x17/0x20
[ 3.344395] CPU: 1 PID: 1 Comm: swapper/0 Not tainted 5.10.0+ #398
[ 3.344397] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.10.2-0-g5f4c7b1-prebuilt.qemu-project.org 04/01/2014
[ 3.344399] Call Trace:
[ 3.344410] dump_stack+0x60/0x76
[ 3.344412] check_preemption_disabled+0xba/0xc0
[ 3.344415] debug_smp_processor_id+0x17/0x20
[ 3.344422] hardlockup_detector_event_create+0xf/0x60
[ 3.344427] hardlockup_detector_perf_init+0xf/0x41
[ 3.344430] watchdog_nmi_probe+0xe/0x10
[ 3.344432] lockup_detector_init+0x22/0x5b
[ 3.344437] kernel_init_freeable+0x20c/0x245
[ 3.344439] ? rest_init+0xd0/0xd0
[ 3.344441] kernel_init+0xe/0x110
[ 3.344446] ret_from_fork+0x22/0x30
This issue was introduced by commit 141482cb4b01, which moved
lockup_detector_init() down after do_basic_setup(), and therefore after
sched_init_smp() as well.
hardlockup_detector_event_create
 |- hardlockup_detector_perf_init (unsafe)
 |    |- watchdog_nmi_probe
 |         |- lockup_detector_init
 |- hardlockup_detector_perf_enable
      |- watchdog_nmi_enable
           |- watchdog_enable
                |- lockup_detector_online_cpu
                |- softlockup_start_fn
                     |- softlockup_start_all
                          |- lockup_detector_reconfigure
                               |- lockup_detector_setup
                                    |- lockup_detector_init
After analysing the calling context, smp_processor_id() is only unsafe
in hardlockup_detector_perf_init(), because the 'kernel_init' thread is
preemptible after sched_init_smp().
This call is merely a test of whether the PMU-based nmi_watchdog can be
enabled; the real enabling happens later in softlockup_start_fn(), which
ensures that watchdog_enable() is called on all cores. So it is safe to
disable preemption here to fix this 'BUG'.
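As a minimal sketch of the general rule being applied (an illustration,
not part of the patch):
    int cpu;

    preempt_disable();
    cpu = smp_processor_id(); /* safe: this task cannot migrate now */
    /* ... use per-cpu state belonging to 'cpu' ... */
    preempt_enable();
With CONFIG_DEBUG_PREEMPT, calling smp_processor_id() from a preemptible
context outside such a region produces exactly the 'BUG' splat above.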
Fixes: 141482cb4b01 ("lockup_detector: init lockup detector after all the init_calls")
Signed-off-by: Wei Li <liwei391(a)huawei.com>
Reviewed-by: Xiongfeng Wang <wangxiongfeng2(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
kernel/watchdog_hld.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 43832b1023693..a5aff8ffa48ce 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -508,14 +508,17 @@ void __init hardlockup_detector_perf_restart(void)
*/
int __init hardlockup_detector_perf_init(void)
{
- int ret = hardlockup_detector_event_create();
+ int ret;
+ preempt_disable();
+ ret = hardlockup_detector_event_create();
if (ret) {
pr_info("Perf NMI watchdog permanently disabled\n");
} else {
perf_event_release_kernel(this_cpu_read(watchdog_ev));
this_cpu_write(watchdog_ev, NULL);
}
+ preempt_enable();
return ret;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR_PERF */
--
2.25.1
[PATCH openEuler-1.0-LTS] btrfs: unlock newly allocated extent buffer after error
by Yang Yingliang 27 Dec '21
From: Qu Wenruo <wqu(a)suse.com>
mainline inclusion
from mainline-v5.15-rc6
commit 19ea40dddf1833db868533958ca066f368862211
category: bugfix
bugzilla: NA
CVE: CVE-2021-4149
--------------------------------
[BUG]
There is a bug report that injected ENOMEM error could leave a tree
block locked while we return to user-space:
BTRFS info (device loop0): enabling ssd optimizations
FAULT_INJECTION: forcing a failure.
name failslab, interval 1, probability 0, space 0, times 0
CPU: 0 PID: 7579 Comm: syz-executor Not tainted 5.15.0-rc1 #16
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014
Call Trace:
__dump_stack lib/dump_stack.c:88 [inline]
dump_stack_lvl+0x8d/0xcf lib/dump_stack.c:106
fail_dump lib/fault-inject.c:52 [inline]
should_fail+0x13c/0x160 lib/fault-inject.c:146
should_failslab+0x5/0x10 mm/slab_common.c:1328
slab_pre_alloc_hook.constprop.99+0x4e/0xc0 mm/slab.h:494
slab_alloc_node mm/slub.c:3120 [inline]
slab_alloc mm/slub.c:3214 [inline]
kmem_cache_alloc+0x44/0x280 mm/slub.c:3219
btrfs_alloc_delayed_extent_op fs/btrfs/delayed-ref.h:299 [inline]
btrfs_alloc_tree_block+0x38c/0x670 fs/btrfs/extent-tree.c:4833
__btrfs_cow_block+0x16f/0x7d0 fs/btrfs/ctree.c:415
btrfs_cow_block+0x12a/0x300 fs/btrfs/ctree.c:570
btrfs_search_slot+0x6b0/0xee0 fs/btrfs/ctree.c:1768
btrfs_insert_empty_items+0x80/0xf0 fs/btrfs/ctree.c:3905
btrfs_new_inode+0x311/0xa60 fs/btrfs/inode.c:6530
btrfs_create+0x12b/0x270 fs/btrfs/inode.c:6783
lookup_open+0x660/0x780 fs/namei.c:3282
open_last_lookups fs/namei.c:3352 [inline]
path_openat+0x465/0xe20 fs/namei.c:3557
do_filp_open+0xe3/0x170 fs/namei.c:3588
do_sys_openat2+0x357/0x4a0 fs/open.c:1200
do_sys_open+0x87/0xd0 fs/open.c:1216
do_syscall_x64 arch/x86/entry/common.c:50 [inline]
do_syscall_64+0x34/0xb0 arch/x86/entry/common.c:80
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x46ae99
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48
89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d
01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f46711b9c48 EFLAGS: 00000246 ORIG_RAX: 0000000000000055
RAX: ffffffffffffffda RBX: 000000000078c0a0 RCX: 000000000046ae99
RDX: 0000000000000000 RSI: 00000000000000a1 RDI: 0000000020005800
RBP: 00007f46711b9c80 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000017
R13: 0000000000000000 R14: 000000000078c0a0 R15: 00007ffc129da6e0
================================================
WARNING: lock held when returning to user space!
5.15.0-rc1 #16 Not tainted
------------------------------------------------
syz-executor/7579 is leaving the kernel with locks still held!
1 lock held by syz-executor/7579:
#0: ffff888104b73da8 (btrfs-tree-01/1){+.+.}-{3:3}, at:
__btrfs_tree_lock+0x2e/0x1a0 fs/btrfs/locking.c:112
[CAUSE]
In btrfs_alloc_tree_block(), after btrfs_init_new_buffer(), the new
extent buffer @buf is locked, but if later operations like adding
delayed tree ref fail, we just free @buf without unlocking it,
resulting above warning.
[FIX]
Unlock @buf in out_free_buf: label.
Reported-by: Hao Sun <sunhao.th(a)gmail.com>
Link: https://lore.kernel.org/linux-btrfs/CACkBjsZ9O6Zr0KK1yGn=1rQi6Crh1yeCRdTSBx…
CC: stable(a)vger.kernel.org # 5.4+
Signed-off-by: Qu Wenruo <wqu(a)suse.com>
Reviewed-by: David Sterba <dsterba(a)suse.com>
Signed-off-by: David Sterba <dsterba(a)suse.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng(a)huawei.com>
Reviewed-by: Jason Yan <yanaijie(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
fs/btrfs/extent-tree.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4bf93184362ec..dcbb76b62a0b0 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -8324,6 +8324,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
out_free_delayed:
btrfs_free_delayed_extent_op(extent_op);
out_free_buf:
+ btrfs_tree_unlock(buf);
free_extent_buffer(buf);
out_free_reserved:
btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
--
2.25.1
[PATCH openEuler-1.0-LTS] ext4: Fix null-ptr-deref in '__ext4_journal_ensure_credits'
by Yang Yingliang 25 Dec '21
From: Ye Bin <yebin10(a)huawei.com>
hulk inclusion
category: bugfix
bugzilla: 185945, https://gitee.com/openeuler/kernel/issues/I4O33F
CVE: NA
-----------------------------------------------
We got issue as follows when run syzkaller test:
[ 1901.130043] EXT4-fs error (device vda): ext4_remount:5624: comm syz-executor.5: Abort forced by user
[ 1901.130901] Aborting journal on device vda-8.
[ 1901.131437] EXT4-fs error (device vda): ext4_journal_check_start:61: comm syz-executor.16: Detected aborted journal
[ 1901.131566] EXT4-fs error (device vda): ext4_journal_check_start:61: comm syz-executor.11: Detected aborted journal
[ 1901.132586] EXT4-fs error (device vda): ext4_journal_check_start:61: comm syz-executor.18: Detected aborted journal
[ 1901.132751] EXT4-fs error (device vda): ext4_journal_check_start:61: comm syz-executor.9: Detected aborted journal
[ 1901.136149] EXT4-fs error (device vda) in ext4_reserve_inode_write:6035: Journal has aborted
[ 1901.136837] EXT4-fs error (device vda): ext4_journal_check_start:61: comm syz-fuzzer: Detected aborted journal
[ 1901.136915] ==================================================================
[ 1901.138175] BUG: KASAN: null-ptr-deref in __ext4_journal_ensure_credits+0x74/0x140 [ext4]
[ 1901.138343] EXT4-fs error (device vda): ext4_journal_check_start:61: comm syz-executor.13: Detected aborted journal
[ 1901.138398] EXT4-fs error (device vda): ext4_journal_check_start:61: comm syz-executor.1: Detected aborted journal
[ 1901.138808] Read of size 8 at addr 0000000000000000 by task syz-executor.17/968
[ 1901.138817]
[ 1901.138852] EXT4-fs error (device vda): ext4_journal_check_start:61: comm syz-executor.30: Detected aborted journal
[ 1901.144779] CPU: 1 PID: 968 Comm: syz-executor.17 Not tainted 4.19.90-vhulk2111.1.0.h893.eulerosv2r10.aarch64+ #1
[ 1901.146479] Hardware name: linux,dummy-virt (DT)
[ 1901.147317] Call trace:
[ 1901.147552] dump_backtrace+0x0/0x2d8
[ 1901.147898] show_stack+0x28/0x38
[ 1901.148215] dump_stack+0xec/0x15c
[ 1901.148746] kasan_report+0x108/0x338
[ 1901.149207] __asan_load8+0x58/0xb0
[ 1901.149753] __ext4_journal_ensure_credits+0x74/0x140 [ext4]
[ 1901.150579] ext4_xattr_delete_inode+0xe4/0x700 [ext4]
[ 1901.151316] ext4_evict_inode+0x524/0xba8 [ext4]
[ 1901.151985] evict+0x1a4/0x378
[ 1901.152353] iput+0x310/0x428
[ 1901.152733] do_unlinkat+0x260/0x428
[ 1901.153056] __arm64_sys_unlinkat+0x6c/0xc0
[ 1901.153455] el0_svc_common+0xc8/0x320
[ 1901.153799] el0_svc_handler+0xf8/0x160
[ 1901.154265] el0_svc+0x10/0x218
[ 1901.154682] ==================================================================
This issue may happen like this:
Process1                                      Process2
ext4_evict_inode
  ext4_journal_start
  ext4_truncate
    ext4_ind_truncate
      ext4_free_branches
        ext4_ind_truncate_ensure_credits
          ext4_journal_ensure_credits_fn
            ext4_journal_restart
              handle->h_transaction = NULL;
                                              mount -o remount,abort /mnt
                                              -> trigger JBD abort
              start_this_handle -> will return failed
  ext4_xattr_delete_inode
    ext4_journal_ensure_credits
      ext4_journal_ensure_credits_fn
        __ext4_journal_ensure_credits
          jbd2_handle_buffer_credits
            journal = handle->h_transaction->t_journal; -> null-ptr-deref
The indirect truncate path does not currently handle this error. Adding
a check for an aborted handle in '__ext4_journal_ensure_credits' is
enough to solve this issue, and is necessary in any case.
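For reference, a sketch of where the dereference happens, paraphrased
from jbd2_handle_buffer_credits() as found in kernels that carry this
helper (the exact body may differ):
    journal_t *journal;

    if (!handle->h_reserved)
            /* After the failed restart, handle->h_transaction is NULL,
             * so this load is the null-ptr-deref reported above:
             */
            journal = handle->h_transaction->t_journal;
    else
            journal = handle->h_journal;
The new is_handle_aborted() check below returns -EROFS before this
helper can ever touch the NULL transaction.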
Signed-off-by: Ye Bin <yebin10(a)huawei.com>
Reviewed-by: Zhang Yi <yi.zhang(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
fs/ext4/ext4_jbd2.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 022ab8afd9499..2c6dc99292fdc 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -140,6 +140,8 @@ int __ext4_journal_ensure_credits(handle_t *handle, int check_cred,
{
if (!ext4_handle_valid(handle))
return 0;
+ if (is_handle_aborted(handle))
+ return -EROFS;
if (jbd2_handle_buffer_credits(handle) >= check_cred &&
handle->h_revoke_credits >= revoke_cred)
return 0;
--
2.25.1
[PATCH OLK-5.10] Revert feature: arm64: Add memmap parameter and register pmem
by Zhuling 25 Dec '21
euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4O31I?from=project-issue
CVE: NA
The reserved memory of PMEM conflicts with
"add memmap interface to reserved memory for mremap syscall usage";
it needs to be rolled back and will be resubmitted after adaptation.
Related feature commits:
1. PMEM function commit: 94dc364f5eda10f49449ba573dc3322e1ea92280
2. PMEM feature config commit: 36d7a831e15ceb84e937122c87d01c14242dc377
Signed-off-by: Zhuling <zhuling8(a)huawei.com>
---
arch/arm64/Kconfig | 21 --------
arch/arm64/configs/openeuler_defconfig | 3 --
arch/arm64/kernel/Makefile | 1 -
arch/arm64/kernel/pmem.c | 35 -------------
arch/arm64/kernel/setup.c | 10 ----
arch/arm64/mm/init.c | 94 ----------------------------------
drivers/nvdimm/Kconfig | 5 --
drivers/nvdimm/Makefile | 1 -
8 files changed, 170 deletions(-)
delete mode 100644 arch/arm64/kernel/pmem.c
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index df90a6e..2df4b31 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1321,27 +1321,6 @@ config RODATA_FULL_DEFAULT_ENABLED
This requires the linear region to be mapped down to pages,
which may adversely affect performance in some cases.
-config ARM64_PMEM_RESERVE
- bool "Reserve memory for persistent storage"
- default n
- help
- Use memmap=nn[KMG]!ss[KMG](memmap=100K!0x1a0000000) reserve
- memory for persistent storage.
-
- Say y here to enable this feature.
-
-config ARM64_PMEM_LEGACY_DEVICE
- bool "Create persistent storage"
- depends on BLK_DEV
- depends on LIBNVDIMM
- select ARM64_PMEM_RESERVE
- help
- Use reserved memory for persistent storage when the kernel
- restart or update. the data in PMEM will not be lost and
- can be loaded faster.
-
- Say y if unsure.
-
config ARM64_SW_TTBR0_PAN
bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
help
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 17bc875..b5fc851 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -416,8 +416,6 @@ CONFIG_ARM64_CPU_PARK=y
CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_UNMAP_KERNEL_AT_EL0=y
CONFIG_RODATA_FULL_DEFAULT_ENABLED=y
-CONFIG_ARM64_PMEM_RESERVE=y
-CONFIG_ARM64_PMEM_LEGACY_DEVICE=y
# CONFIG_ARM64_SW_TTBR0_PAN is not set
CONFIG_ARM64_TAGGED_ADDR_ABI=y
CONFIG_ARM64_ILP32=y
@@ -6026,7 +6024,6 @@ CONFIG_ND_BTT=m
CONFIG_BTT=y
CONFIG_OF_PMEM=m
CONFIG_NVDIMM_KEYS=y
-CONFIG_PMEM_LEGACY=m
CONFIG_DAX_DRIVER=y
CONFIG_DAX=y
CONFIG_DEV_DAX=m
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index f615325..169d90f 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -68,7 +68,6 @@ obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
obj-$(CONFIG_ARM64_MTE) += mte.o
obj-$(CONFIG_MPAM) += mpam/
-obj-$(CONFIG_ARM64_PMEM_LEGACY_DEVICE) += pmem.o
obj-y += vdso/ probes/
obj-$(CONFIG_COMPAT_VDSO) += vdso32/
diff --git a/arch/arm64/kernel/pmem.c b/arch/arm64/kernel/pmem.c
deleted file mode 100644
index 16eaf70..0000000
--- a/arch/arm64/kernel/pmem.c
+++ /dev/null
@@ -1,35 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright(c) 2021 Huawei Technologies Co., Ltd
- *
- * Derived from x86 and arm64 implement PMEM.
- */
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-
-static int found(struct resource *res, void *data)
-{
- return 1;
-}
-
-static int __init register_e820_pmem(void)
-{
- struct platform_device *pdev;
- int rc;
-
- rc = walk_iomem_res_desc(IORES_DESC_PERSISTENT_MEMORY_LEGACY,
- IORESOURCE_MEM, 0, -1, NULL, found);
- if (rc <= 0)
- return 0;
-
- /*
- * See drivers/nvdimm/e820.c for the implementation, this is
- * simply here to trigger the module to load on demand.
- */
- pdev = platform_device_alloc("e820_pmem", -1);
-
- return platform_device_add(pdev);
-}
-device_initcall(register_e820_pmem);
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 88ec495..7cd0425 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -70,10 +70,6 @@ static int __init arm64_enable_cpu0_hotplug(char *str)
__setup("arm64_cpu0_hotplug", arm64_enable_cpu0_hotplug);
#endif
-#ifdef CONFIG_ARM64_PMEM_RESERVE
-extern struct resource pmem_res;
-#endif
-
phys_addr_t __fdt_pointer __initdata;
/*
@@ -288,12 +284,6 @@ static void __init request_standard_resources(void)
request_resource(res, &pin_memory_resource);
#endif
}
-
-#ifdef CONFIG_ARM64_PMEM_RESERVE
- if (pmem_res.end && pmem_res.start)
- request_resource(&iomem_resource, &pmem_res);
-#endif
-
}
static int __init reserve_memblock_reserved_regions(void)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 3b9401e..e8d4461 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -55,7 +55,6 @@
*/
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);
-phys_addr_t start_at, mem_size;
#ifdef CONFIG_PIN_MEMORY
struct resource pin_memory_resource = {
@@ -112,18 +111,6 @@ static void __init reserve_pin_memory_res(void)
*/
phys_addr_t arm64_dma_phys_limit __ro_after_init;
-static unsigned long long pmem_size, pmem_start;
-
-#ifdef CONFIG_ARM64_PMEM_RESERVE
-struct resource pmem_res = {
- .name = "Persistent Memory (legacy)",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_MEM,
- .desc = IORES_DESC_PERSISTENT_MEMORY_LEGACY
-};
-#endif
-
#ifndef CONFIG_KEXEC_CORE
static void __init reserve_crashkernel(void)
{
@@ -417,83 +404,6 @@ static int __init reserve_park_mem(void)
}
#endif
-static bool __init is_mem_valid(unsigned long long mem_size, unsigned long long mem_start)
-{
- if (!memblock_is_region_memory(mem_start, mem_size)) {
- pr_warn("cannot reserve mem: region is not memory!\n");
- return false;
- }
-
- if (memblock_is_region_reserved(mem_start, mem_size)) {
- pr_warn("cannot reserve mem: region overlaps reserved memory!\n");
- return false;
- }
-
- if (!IS_ALIGNED(mem_start, SZ_2M)) {
- pr_warn("cannot reserve mem: base address is not 2MB aligned!\n");
- return false;
- }
-
- return true;
-}
-
-static int __init parse_memmap_one(char *p)
-{
- char *oldp;
-
- if (!p)
- return -EINVAL;
-
- oldp = p;
- mem_size = memparse(p, &p);
- if (p == oldp)
- return -EINVAL;
-
- if (!mem_size)
- return -EINVAL;
-
- mem_size = PAGE_ALIGN(mem_size);
-
- if (*p == '!') {
- start_at = memparse(p+1, &p);
-
- pmem_start = start_at;
- pmem_size = mem_size;
- } else
- pr_info("Unrecognized memmap option, please check the parameter.\n");
-
- return *p == '\0' ? 0 : -EINVAL;
-}
-
-static int __init parse_memmap_opt(char *str)
-{
- while (str) {
- char *k = strchr(str, ',');
-
- if (k)
- *k++ = 0;
- parse_memmap_one(str);
- str = k;
- }
-
- return 0;
-}
-early_param("memmap", parse_memmap_opt);
-
-#ifdef CONFIG_ARM64_PMEM_RESERVE
-static void __init reserve_pmem(void)
-{
- if (!is_mem_valid(mem_size, start_at))
- return;
-
- memblock_remove(pmem_start, pmem_size);
- pr_info("pmem reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
- pmem_start, pmem_start + pmem_size, pmem_size >> 20);
- pmem_res.start = pmem_start;
- pmem_res.end = pmem_start + pmem_size - 1;
-}
-#endif
-
void __init arm64_memblock_init(void)
{
const s64 linear_region_size = BIT(vabits_actual - 1);
@@ -668,10 +578,6 @@ void __init bootmem_init(void)
reserve_quick_kexec();
#endif
-#ifdef CONFIG_ARM64_PMEM_RESERVE
- reserve_pmem();
-#endif
-
reserve_pin_memory_res();
memblock_dump_all();
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index ce4de75..b7d1eb3 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -132,8 +132,3 @@ config NVDIMM_TEST_BUILD
infrastructure.
endif
-
-config PMEM_LEGACY
- tristate "Pmem_legacy"
- select X86_PMEM_LEGACY if X86
- select ARM64_PMEM_LEGACY_DEVICE if ARM64
diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile
index 6f8dc92..0407753 100644
--- a/drivers/nvdimm/Makefile
+++ b/drivers/nvdimm/Makefile
@@ -3,7 +3,6 @@ obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o
obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
obj-$(CONFIG_ND_BTT) += nd_btt.o
obj-$(CONFIG_ND_BLK) += nd_blk.o
-obj-$(CONFIG_PMEM_LEGACY) += nd_e820.o
obj-$(CONFIG_OF_PMEM) += of_pmem.o
obj-$(CONFIG_VIRTIO_PMEM) += virtio_pmem.o nd_virtio.o
--
2.9.5
[PATCH openEuler-1.0-LTS] net/hinic: Fix call trace when the rx_buff module parameter is greater than 2
by Yang Yingliang 25 Dec '21
From: Chiqijun <chiqijun(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4O2ZZ
-----------------------------------------------------------------------
When rx_buff is greater than 2, the driver allocates more than one
page of memory for network rx, but the __GFP_COMP gfp flag is not set,
resulting in the following call trace:
CPU: 3 PID: 494041 Comm: ping Kdump: loaded Tainted: G W OE 4.19.90-2106.3.0.0095.oe1.x86_64 #1
Hardware name: Huawei Technologies Co., Ltd. RH2288H V3/BC11HGSA0, BIOS 5.15 05/21/2019
RIP: 0010:copy_page_to_iter+0x154/0x310
Code: 31 b8 00 10 00 00 f7 c6 00 80 00 00 74 07 0f b6 49 51 48 d3 e0 48 39 c2 0f 86 ed fe ff ff 48 c7 c7 30
RSP: 0018:ffffbd6907d03bd8 EFLAGS: 00010286
RAX: 0000000000000024 RBX: ffffe0ffee5b3000 RCX: 0000000000000000
RDX: 0000000000000000 RSI: ffff9edbbfcd6858 RDI: ffff9edbbfcd6858
RBP: 0000000000000001 R08: 000000000001574a R09: 0000000000000004
R10: 000000000000004e R11: 0000000000000001 R12: ffffbd6907d03ed0
R13: 0000000000002100 R14: 0000000000000030 R15: 0000000000000000
FS: 00007f9d37244dc0(0000) GS:ffff9edbbfcc0000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007ffe0e715f80 CR3: 000000203c018005 CR4: 00000000001606e0
Call Trace:
skb_copy_datagram_iter+0x16c/0x2a0
raw_recvmsg+0xd0/0x1f0
inet_recvmsg+0x5b/0xd0
____sys_recvmsg+0x95/0x160
? import_iovec+0x37/0xd0
? copy_msghdr_from_user+0x5c/0x90
___sys_recvmsg+0x8c/0xd0
? __audit_syscall_exit+0x228/0x290
? kretprobe_trampoline+0x25/0x50
? __sys_recvmsg+0x5b/0xa0
__sys_recvmsg+0x5b/0xa0
do_syscall_64+0x5f/0x240
entry_SYSCALL_64_after_hwframe+0x44/0xa9
Use 'dev_alloc_pages' instead of calling 'alloc_pages_node' directly.
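For reference, dev_alloc_pages() is a small wrapper in
include/linux/skbuff.h that supplies the missing flags; on 4.19-era
kernels it expands to roughly the following (a sketch, exact flags may
differ between kernel versions):

	static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, unsigned int order)
	{
		/* __GFP_COMP turns a multi-page allocation into a compound
		 * page, which skb helpers such as skb_copy_datagram_iter()
		 * require; __GFP_MEMALLOC lets rx dip into memory reserves. */
		gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;
		return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
	}

	static inline struct page *dev_alloc_pages(unsigned int order)
	{
		return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
	}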
Signed-off-by: Chiqijun <chiqijun(a)huawei.com>
Reviewed-by: Wangxiaoyun <cloud.wangxiaoyun(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/net/ethernet/huawei/hinic/hinic_rx.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index a9047432f3053..019cd439ce696 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -67,7 +67,7 @@ static bool rx_alloc_mapped_page(struct hinic_rxq *rxq,
return true;
/* alloc new page for storage */
- page = alloc_pages_node(NUMA_NO_NODE, GFP_ATOMIC, nic_dev->page_order);
+ page = dev_alloc_pages(nic_dev->page_order);
if (unlikely(!page)) {
RXQ_STATS_INC(rxq, alloc_rx_buf_err);
return false;
--
2.25.1
[PATCH openEuler-1.0-LTS 1/3] arm64/mpam: remove __init macro to support driver probe
by Yang Yingliang 24 Dec '21
From: Xingang Wang <wangxingang5(a)huawei.com>
ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I49RB2
CVE: NA
---------------------------------------------------
To support device tree boot for arm64 MPAM, these functions may be
called from the DTS driver's probe path after boot, when init memory
has already been freed. Remove the __init macro from the related
functions.
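For context, a short sketch of why __init breaks late probing
(illustrative comment only, based on generic kernel behaviour):

	/* __init places a function in .init.text, which free_initmem()
	 * discards once boot completes.  A device tree platform driver can
	 * probe after that point, so a probe-path call such as
	 *
	 *	mpam_discovery_start();		// previously __init
	 *
	 * would jump into freed memory.  Dropping __init keeps these entry
	 * points resident for late probing. */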
Signed-off-by: Xingang Wang <wangxingang5(a)huawei.com>
Reviewed-by: Wang ShaoBo <bobo.shaobowang(a)huawei.com>
Acked-by: Xie XiuQi <xiexiuqi(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
arch/arm64/include/asm/resctrl.h | 2 +-
arch/arm64/kernel/mpam/mpam_device.c | 14 +++++++-------
arch/arm64/kernel/mpam/mpam_internal.h | 2 +-
arch/arm64/kernel/mpam/mpam_resctrl.c | 2 +-
fs/resctrlfs.c | 2 +-
include/linux/arm_mpam.h | 14 +++++++-------
6 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/arch/arm64/include/asm/resctrl.h b/arch/arm64/include/asm/resctrl.h
index ff4ce56430d1f..3fceaa482e5a3 100644
--- a/arch/arm64/include/asm/resctrl.h
+++ b/arch/arm64/include/asm/resctrl.h
@@ -424,7 +424,7 @@ int resctrl_update_groups_config(struct rdtgroup *rdtgrp);
#define RESCTRL_MAX_CLOSID 32
-int __init resctrl_group_init(void);
+int resctrl_group_init(void);
void post_resctrl_mount(void);
diff --git a/arch/arm64/kernel/mpam/mpam_device.c b/arch/arm64/kernel/mpam/mpam_device.c
index c0615f6947a1f..4139b4476a836 100644
--- a/arch/arm64/kernel/mpam/mpam_device.c
+++ b/arch/arm64/kernel/mpam/mpam_device.c
@@ -534,7 +534,7 @@ static void mpam_disable_irqs(void)
* Scheduled by mpam_discovery_complete() once all devices have been created.
* Also scheduled when new devices are probed when new CPUs come online.
*/
-static void __init mpam_enable(struct work_struct *work)
+static void mpam_enable(struct work_struct *work)
{
int err;
unsigned long flags;
@@ -761,7 +761,7 @@ static struct mpam_class * __init mpam_class_get(u8 level_idx,
* class/component structures may be allocated.
* Returns the new device, or an ERR_PTR().
*/
-struct mpam_device * __init
+struct mpam_device *
__mpam_device_create(u8 level_idx, enum mpam_class_types type,
int component_id, const struct cpumask *fw_affinity,
phys_addr_t hwpage_address)
@@ -810,7 +810,7 @@ __mpam_device_create(u8 level_idx, enum mpam_class_types type,
return dev;
}
-void __init mpam_device_set_error_irq(struct mpam_device *dev, u32 irq,
+void mpam_device_set_error_irq(struct mpam_device *dev, u32 irq,
u32 flags)
{
unsigned long irq_save_flags;
@@ -821,7 +821,7 @@ void __init mpam_device_set_error_irq(struct mpam_device *dev, u32 irq,
spin_unlock_irqrestore(&dev->lock, irq_save_flags);
}
-void __init mpam_device_set_overflow_irq(struct mpam_device *dev, u32 irq,
+void mpam_device_set_overflow_irq(struct mpam_device *dev, u32 irq,
u32 flags)
{
unsigned long irq_save_flags;
@@ -864,7 +864,7 @@ static inline u16 mpam_cpu_max_pmg(void)
/*
* prepare for initializing devices.
*/
-int __init mpam_discovery_start(void)
+int mpam_discovery_start(void)
{
if (!mpam_cpus_have_feature())
return -EOPNOTSUPP;
@@ -1104,7 +1104,7 @@ static int mpam_cpu_offline(unsigned int cpu)
return 0;
}
-int __init mpam_discovery_complete(void)
+int mpam_discovery_complete(void)
{
int ret = 0;
@@ -1121,7 +1121,7 @@ int __init mpam_discovery_complete(void)
return ret;
}
-void __init mpam_discovery_failed(void)
+void mpam_discovery_failed(void)
{
struct mpam_class *class, *tmp;
diff --git a/arch/arm64/kernel/mpam/mpam_internal.h b/arch/arm64/kernel/mpam/mpam_internal.h
index cfaef82428aa5..7b84ea54975aa 100644
--- a/arch/arm64/kernel/mpam/mpam_internal.h
+++ b/arch/arm64/kernel/mpam/mpam_internal.h
@@ -329,7 +329,7 @@ int mpam_resctrl_setup(void);
struct raw_resctrl_resource *
mpam_get_raw_resctrl_resource(u32 level);
-int __init mpam_resctrl_init(void);
+int mpam_resctrl_init(void);
int mpam_resctrl_set_default_cpu(unsigned int cpu);
void mpam_resctrl_clear_default_cpu(unsigned int cpu);
diff --git a/arch/arm64/kernel/mpam/mpam_resctrl.c b/arch/arm64/kernel/mpam/mpam_resctrl.c
index 51cdefebaeba8..1ad4cd49762a3 100644
--- a/arch/arm64/kernel/mpam/mpam_resctrl.c
+++ b/arch/arm64/kernel/mpam/mpam_resctrl.c
@@ -2209,7 +2209,7 @@ static int __init mpam_setup(char *str)
}
__setup("mpam", mpam_setup);
-int __init mpam_resctrl_init(void)
+int mpam_resctrl_init(void)
{
mpam_init_padding();
diff --git a/fs/resctrlfs.c b/fs/resctrlfs.c
index 11741c87eb0af..ea9df7d77b95a 100644
--- a/fs/resctrlfs.c
+++ b/fs/resctrlfs.c
@@ -1029,7 +1029,7 @@ static int __init resctrl_group_setup_root(void)
*
* Return: 0 on success or -errno
*/
-int __init resctrl_group_init(void)
+int resctrl_group_init(void)
{
int ret = 0;
diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h
index 44d0690ae8c4b..a242d4fdff8b3 100644
--- a/include/linux/arm_mpam.h
+++ b/include/linux/arm_mpam.h
@@ -16,7 +16,7 @@ enum mpam_class_types {
MPAM_CLASS_UNKNOWN, /* Everything else, e.g. TLBs etc */
};
-struct mpam_device * __init
+struct mpam_device *
__mpam_device_create(u8 level_idx, enum mpam_class_types type,
int component_id, const struct cpumask *fw_affinity,
phys_addr_t hwpage_address);
@@ -54,9 +54,9 @@ mpam_device_create_memory(int nid, phys_addr_t hwpage_address)
return __mpam_device_create(~0, MPAM_CLASS_MEMORY, nid,
&dev_affinity, hwpage_address);
}
-int __init mpam_discovery_start(void);
-int __init mpam_discovery_complete(void);
-void __init mpam_discovery_failed(void);
+int mpam_discovery_start(void);
+int mpam_discovery_complete(void);
+void mpam_discovery_failed(void);
enum mpam_enable_type {
MPAM_ENABLE_DENIED = 0,
@@ -71,12 +71,12 @@ extern enum mpam_enable_type mpam_enabled;
#define mpam_irq_flags_to_acpi(x) ((x & MPAM_IRQ_MODE_LEVEL) ? \
ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE)
-void __init mpam_device_set_error_irq(struct mpam_device *dev, u32 irq,
+void mpam_device_set_error_irq(struct mpam_device *dev, u32 irq,
u32 flags);
-void __init mpam_device_set_overflow_irq(struct mpam_device *dev, u32 irq,
+void mpam_device_set_overflow_irq(struct mpam_device *dev, u32 irq,
u32 flags);
-static inline int __init mpam_register_device_irq(struct mpam_device *dev,
+static inline int mpam_register_device_irq(struct mpam_device *dev,
u32 overflow_interrupt, u32 overflow_flags,
u32 error_interrupt, u32 error_flags)
{
--
2.25.1
[openEuler-5.10 01/19] drm/hisilicon: Support i2c driver algorithms for bit-shift adapters
by Zheng Zengkai 23 Dec '21
From: Ma Hai <mahai1(a)huawei.com>
mainline inclusion
from mainline-v5.12-rc2
commit 4eb4d99dfe3018d86f4529112aa7082f43b6996a
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4M6CD?from=project-issue
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/d…
----------------------------------------------------------------------
Add a driver implementation to support i2c driver algorithms for
bit-shift adapters, so that hibmc will use the interface provided by
DRM to read the EDID.
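For illustration, once i2c_bit_add_bus() registers the adapter, a
connector's .get_modes() can read the EDID through the DRM helpers
along these lines (a sketch, not part of this patch):

	static int hibmc_connector_get_modes(struct drm_connector *connector)
	{
		struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
		struct edid *edid;
		int count = 0;

		/* drm_get_edid() clocks the bit-banged DDC bus set up below */
		edid = drm_get_edid(connector, &hibmc_connector->adapter);
		if (edid) {
			drm_connector_update_edid_property(connector, edid);
			count = drm_add_edid_modes(connector, edid);
			kfree(edid);
		}
		return count;
	}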
Signed-off-by: Ma Hai <mahai1(a)huawei.com>
Signed-off-by: Tian Tao <tiantao6(a)hisilicon.com>
Reviewed-by: Thomas Zimmermann <tzimmermann(a)suse.de>
Reviewed-by: Li Dongming <lidongming5(a)huawei.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1600778670-60370-2-git-send-e…
Acked-by: Xie XiuQi <xiexiuqi(a)huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai(a)huawei.com>
---
drivers/gpu/drm/hisilicon/hibmc/Makefile | 2 +-
.../gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h | 25 ++++-
.../gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c | 99 +++++++++++++++++++
.../gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c | 2 +-
4 files changed, 125 insertions(+), 3 deletions(-)
create mode 100644 drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
index f99132715597..684ef794eb7c 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Makefile
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o
+hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o hibmc_drm_i2c.o
obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 197485e2fe0b..87d2aad0bb5e 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -14,11 +14,23 @@
#ifndef HIBMC_DRM_DRV_H
#define HIBMC_DRM_DRV_H
+#include <linux/gpio/consumer.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/i2c.h>
+
+#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
struct drm_device;
+struct hibmc_connector {
+ struct drm_connector base;
+
+ struct i2c_adapter adapter;
+ struct i2c_algo_bit_data bit_data;
+};
+
struct hibmc_drm_private {
/* hw */
void __iomem *mmio;
@@ -31,10 +43,20 @@ struct hibmc_drm_private {
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
- struct drm_connector connector;
+ struct hibmc_connector connector;
bool mode_config_initialized;
};
+static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *connector)
+{
+ return container_of(connector, struct hibmc_connector, base);
+}
+
+static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
+{
+ return dev->dev_private;
+}
+
void hibmc_set_power_mode(struct hibmc_drm_private *priv,
unsigned int power_mode);
void hibmc_set_current_gate(struct hibmc_drm_private *priv,
@@ -47,6 +69,7 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc);
void hibmc_mm_fini(struct hibmc_drm_private *hibmc);
int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
+int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector);
extern const struct drm_mode_config_funcs hibmc_mode_funcs;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
new file mode 100644
index 000000000000..86d712090d87
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Tian Tao <tiantao6(a)hisilicon.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+
+#include "hibmc_drm_drv.h"
+
+#define GPIO_DATA 0x0802A0
+#define GPIO_DATA_DIRECTION 0x0802A4
+
+#define I2C_SCL_MASK BIT(0)
+#define I2C_SDA_MASK BIT(1)
+
+static void hibmc_set_i2c_signal(void *data, u32 mask, int value)
+{
+ struct hibmc_connector *hibmc_connector = data;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
+ u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
+
+ if (value) {
+ tmp_dir &= ~mask;
+ writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
+ } else {
+ u32 tmp_data = readl(priv->mmio + GPIO_DATA);
+
+ tmp_data &= ~mask;
+ writel(tmp_data, priv->mmio + GPIO_DATA);
+
+ tmp_dir |= mask;
+ writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
+ }
+}
+
+static int hibmc_get_i2c_signal(void *data, u32 mask)
+{
+ struct hibmc_connector *hibmc_connector = data;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
+ u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
+
+ if ((tmp_dir & mask) != mask) {
+ tmp_dir &= ~mask;
+ writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
+ }
+
+ return (readl(priv->mmio + GPIO_DATA) & mask) ? 1 : 0;
+}
+
+static void hibmc_ddc_setsda(void *data, int state)
+{
+ hibmc_set_i2c_signal(data, I2C_SDA_MASK, state);
+}
+
+static void hibmc_ddc_setscl(void *data, int state)
+{
+ hibmc_set_i2c_signal(data, I2C_SCL_MASK, state);
+}
+
+static int hibmc_ddc_getsda(void *data)
+{
+ return hibmc_get_i2c_signal(data, I2C_SDA_MASK);
+}
+
+static int hibmc_ddc_getscl(void *data)
+{
+ return hibmc_get_i2c_signal(data, I2C_SCL_MASK);
+}
+
+int hibmc_ddc_create(struct drm_device *drm_dev,
+ struct hibmc_connector *connector)
+{
+ connector->adapter.owner = THIS_MODULE;
+ connector->adapter.class = I2C_CLASS_DDC;
+ snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
+ connector->adapter.dev.parent = &drm_dev->pdev->dev;
+ i2c_set_adapdata(&connector->adapter, connector);
+ connector->adapter.algo_data = &connector->bit_data;
+
+ connector->bit_data.udelay = 20;
+ connector->bit_data.timeout = usecs_to_jiffies(2000);
+ connector->bit_data.data = connector;
+ connector->bit_data.setsda = hibmc_ddc_setsda;
+ connector->bit_data.setscl = hibmc_ddc_setscl;
+ connector->bit_data.getsda = hibmc_ddc_getsda;
+ connector->bit_data.getscl = hibmc_ddc_getscl;
+
+ return i2c_bit_add_bus(&connector->adapter);
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 376a05ddbc2f..c8b14afbcbed 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -78,7 +78,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = priv->dev;
struct drm_encoder *encoder = &priv->encoder;
- struct drm_connector *connector = &priv->connector;
+ struct drm_connector *connector = &priv->connector.base;
int ret;
encoder->possible_crtcs = 0x1;
--
2.20.1
[PATCH openEuler-1.0-LTS 1/3] arm64/mpam: Fix mpam corrupt when cpu online
by Yang Yingliang 23 Dec '21
From: Wang ShaoBo <bobo.shaobowang(a)huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I3YAI3
CVE: NA
-------------------------------------------------
The following error occurred occasionally on a machine that supports MPAM:
[ 13.321386][ T658] Unable to handle kernel paging request at virtual address ffff80001115816c
[ 13.326013][ T684] hid-generic 0003:12D1:0003.0002: input,hidraw1: USB HID v1.10 Mouse [Keyboard/Mouse KVM 1.1.0] on usb-0000:7a:01.0-1.1/input1
[ 13.340558][ T658] Mem abort info:
[ 13.340563][ T658] ESR = 0x86000007
[ 13.352567][ T5] hub 6-1:1.0: USB hub found
[ 13.364750][ T658] EC = 0x21: IABT (current EL), IL = 32 bits
[ 13.369891][ T5] hub 6-1:1.0: 4 ports detected
[ 13.373871][ T658] SET = 0, FnV = 0
[ 13.396107][ T658] EA = 0, S1PTW = 0
[ 13.400599][ T658] swapper pgtable: 64k pages, 48-bit VAs, pgdp=0000000029540000
[ 13.408726][ T658] [ffff80001115816c] pgd=0000205fffff0003, p4d=0000205fffff0003, pud=0000205fffff0003, pmd=0000205ffffe0003, pte=0000000000000000
[ 13.423346][ T658] Internal error: Oops: 86000007 [#1] SMP
[ 13.429720][ T658] Modules linked in:
[ 13.434243][ T658] CPU: 72 PID: 658 Comm: kworker/72:1 Not tainted 5.10.0-4.17.0.28.oe1.aarch64 #1
[ 13.443966][ T658] Hardware name: Huawei TaiShan 200 (Model 2280)/BC82AMDDA, BIOS 1.70 01/07/2021
[ 13.453683][ T658] Workqueue: events mpam_enable
[ 13.459206][ T658] pstate: 20c00009 (nzCv daif +PAN +UAO -TCO BTYPE=--)
[ 13.466625][ T658] pc : mpam_enable+0x194/0x1d8
[ 13.472019][ T658] lr : mpam_enable+0x194/0x1d8
[ 13.477301][ T658] sp : ffff80004664fd70
[ 13.481937][ T658] x29: ffff80004664fd70 x28: 0000000000000000
[ 13.488578][ T658] x27: ffff00400484a648 x26: ffff800011b71080
[ 13.495306][ T658] x25: 0000000000000000 x24: ffff800011b6cda0
[ 13.502001][ T658] x23: ffff800011646f18 x22: ffff800011b6cd80
[ 13.508684][ T658] x21: ffff800011b6c000 x20: ffff800011646f08
[ 13.515425][ T658] x19: ffff800011646f70 x18: 0000000000000020
[ 13.522075][ T658] x17: 000000001790b332 x16: 0000000000000001
[ 13.528785][ T658] x15: ffffffffffffffff x14: ff00000000000000
[ 13.535464][ T658] x13: ffffffffffffffff x12: 0000000000000006
[ 13.542045][ T658] x11: 00000091cea718e2 x10: 0000000000000b90
[ 13.548735][ T658] x9 : ffff80001009ebac x8 : ffff2040061aabf0
[ 13.555383][ T658] x7 : ffffa05f8dca0000 x6 : 000000000000000f
[ 13.561924][ T658] x5 : 0000000000000000 x4 : ffff2040061aa000
[ 13.568613][ T658] x3 : ffff80001164dfa0 x2 : 00000000ffffffff
[ 13.575267][ T658] x1 : ffffa05f8dca0000 x0 : 00000000000000c1
[ 13.581813][ T658] Call trace:
[ 13.585600][ T658] mpam_enable+0x194/0x1d8
[ 13.590450][ T658] process_one_work+0x1cc/0x390
[ 13.595654][ T658] worker_thread+0x70/0x2f0
[ 13.600499][ T658] kthread+0x118/0x120
[ 13.604935][ T658] ret_from_fork+0x10/0x18
[ 13.609717][ T658] Code: bad PC value
[ 13.613944][ T658] ---[ end trace f1e305d2c339f67f ]---
[ 13.753818][ T658] Kernel panic - not syncing: Oops: Fatal exception
[ 13.760885][ T658] SMP: stopping secondary CPUs
[ 13.765933][ T658] Kernel Offset: disabled
[ 13.770516][ T658] CPU features: 0x8040002,22208a38
[ 13.775862][ T658] Memory Limit: none
[ 13.913929][ T658] ---[ end Kernel panic - not syncing:
The MPAM device initialization process is as follows:
mpam_discovery_start()
    ...                              // discover devices
mpam_discovery_complete()            // hang up the mpam_online/offline_cpu callbacks
    -=> mpam_cpu_online()            // probe all devices
        -=> mpam_enable()            // prepare for resctrl
(1)         -=> cpuhp_remove_state() // clean resctrl internal structure
(2)         -=> cpuhp_setup_state()  // rehang mpam_online/offline_cpu callbacks
            -=> mpam_cpu_online()    // it does not call mpam_enable again
                -=> mpam_resctrl_cpu_online() // pull up resctrl
The re-hang of the mpam_cpu_online/offline callbacks must not be
disturbed by IRQs, to ensure that the CPU context is reliable before
re-entering mpam_cpu_online(), which always happens between (1) and (2).
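A minimal sketch of the window being closed (comments illustrative,
the hunk below is the actual change):

	cpuhp_remove_state(mpam_cpuhp_state);	/* (1) resctrl view torn down */

	/* An interrupt taken here runs while resctrl is mid-rebuild, so the
	 * CPU context seen by a re-entered mpam_cpu_online() is unreliable. */

	local_irq_disable();
	mpam_cpuhp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mpam:online",
					     mpam_cpu_online, mpam_cpu_offline);
	local_irq_enable();			/* (2) callbacks safely re-hung */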
Fixes: 2ab89c893faf ("arm64/mpam: resctrl: Re-synchronise resctrl's view of online CPUs")
Signed-off-by: Wang ShaoBo <bobo.shaobowang(a)huawei.com>
Reviewed-by: Cheng Jian <cj.chengjian(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
arch/arm64/kernel/mpam/mpam_device.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/arch/arm64/kernel/mpam/mpam_device.c b/arch/arm64/kernel/mpam/mpam_device.c
index f8840274b902f..c0615f6947a1f 100644
--- a/arch/arm64/kernel/mpam/mpam_device.c
+++ b/arch/arm64/kernel/mpam/mpam_device.c
@@ -593,9 +593,11 @@ static void __init mpam_enable(struct work_struct *work)
pr_err("Failed to setup/init resctrl\n");
mutex_unlock(&mpam_devices_lock);
+ local_irq_disable();
mpam_cpuhp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"mpam:online", mpam_cpu_online,
mpam_cpu_offline);
+ local_irq_enable();
if (mpam_cpuhp_state <= 0)
pr_err("Failed to re-register 'dyn' cpuhp callbacks");
mutex_unlock(&mpam_cpuhp_lock);
--
2.25.1
[PATCH openEuler-1.0-LTS 1/2] kprobes: Set unoptimized flag after unoptimizing code
by Yang Yingliang 22 Dec '21
From: Masami Hiramatsu <mhiramat(a)kernel.org>
stable inclusion
from stable-v4.19.108
commit 39af044d1ccb207da43af5c060bb0fb0dd548b3e
category: bugfix
bugzilla: NA
CVE: NA
-------------------------------------------------
commit f66c0447cca1281116224d474cdb37d6a18e4b5b upstream.
Set the unoptimized flag after confirming the code is completely
unoptimized. Without this fix, when a kprobe hits the intermediate
modified instruction (the first byte is replaced by an INT3, but
later bytes can still be a jump address operand) while unoptimizing,
it can return to the middle byte of the modified code, which causes
an invalid instruction exception in the kernel.
Usually, this is a rare case, but if we put a probe on the function
call while text patching, it always causes a kernel panic as below:
# echo p text_poke+5 > kprobe_events
# echo 1 > events/kprobes/enable
# echo 0 > events/kprobes/enable
invalid opcode: 0000 [#1] PREEMPT SMP PTI
RIP: 0010:text_poke+0x9/0x50
Call Trace:
arch_unoptimize_kprobe+0x22/0x28
arch_unoptimize_kprobes+0x39/0x87
kprobe_optimizer+0x6e/0x290
process_one_work+0x2a0/0x610
worker_thread+0x28/0x3d0
? process_one_work+0x610/0x610
kthread+0x10d/0x130
? kthread_park+0x80/0x80
ret_from_fork+0x3a/0x50
text_poke() is used for patching the code in optprobes.
This can happen even if we blacklist text_poke() and other functions,
because there is a small time window during which we show the intermediate
code to other CPUs.
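Schematically, the fix moves the flag update after the text restore (a
sketch of the ordering only, not verbatim kernel code):

	/* before: flag cleared while the text may still be half-patched, so
	 * a hit is handled as a regular kprobe and can return into the
	 * stale jump-operand bytes */
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	arch_unoptimize_kprobe(op);

	/* after: flag cleared only once the original instruction is back */
	arch_unoptimize_kprobe(op);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;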
[ mingo: Edited the changelog. ]
Tested-by: Alexei Starovoitov <ast(a)kernel.org>
Signed-off-by: Masami Hiramatsu <mhiramat(a)kernel.org>
Cc: Andy Lutomirski <luto(a)kernel.org>
Cc: Borislav Petkov <bp(a)alien8.de>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Steven Rostedt <rostedt(a)goodmis.org>
Cc: Thomas Gleixner <tglx(a)linutronix.de>
Cc: bristot(a)redhat.com
Fixes: 6274de4984a6 ("kprobes: Support delayed unoptimizing")
Link: https://lkml.kernel.org/r/157483422375.25881.13508326028469515760.stgit@dev…
Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Yang Jihong <yangjihong1(a)huawei.com>
Reviewed-by: Kuohai Xu <xukuohai(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
kernel/kprobes.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index f6e6edaf964c1..666243fff573d 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -523,6 +523,8 @@ static void do_unoptimize_kprobes(void)
arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
/* Loop free_list for disarming */
list_for_each_entry_safe(op, tmp, &freeing_list, list) {
+ /* Switching from detour code to origin */
+ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
/* Disarm probes if marked disabled */
if (kprobe_disabled(&op->kp))
arch_disarm_kprobe(&op->kp);
@@ -662,6 +664,7 @@ static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
lockdep_assert_cpus_held();
arch_unoptimize_kprobe(op);
+ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
if (kprobe_disabled(&op->kp))
arch_disarm_kprobe(&op->kp);
}
@@ -689,7 +692,6 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
return;
}
- op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
if (!list_empty(&op->list)) {
/* Dequeue from the optimization queue */
list_del_init(&op->list);
--
2.25.1
[PATCH openEuler-1.0-LTS] cpufreq: schedutil: Destroy mutex before kobject_put() frees the memory
by Yang Yingliang 22 Dec '21
From: James Morse <james.morse(a)arm.com>
stable inclusion
from linux-4.19.209
commit 3cb9595e23d879824ebcefeabdf5156bc288000c
--------------------------------
[ Upstream commit cdef1196608892b9a46caa5f2b64095a7f0be60c ]
Since commit e5c6b312ce3c ("cpufreq: schedutil: Use kobject release()
method to free sugov_tunables") kobject_put() has kfree()d the
attr_set before gov_attr_set_put() returns.
kobject_put() isn't the last user of attr_set in gov_attr_set_put();
the subsequent mutex_destroy() triggers a use-after-free:
| BUG: KASAN: use-after-free in mutex_is_locked+0x20/0x60
| Read of size 8 at addr ffff000800ca4250 by task cpuhp/2/20
|
| CPU: 2 PID: 20 Comm: cpuhp/2 Not tainted 5.15.0-rc1 #12369
| Hardware name: ARM LTD ARM Juno Development Platform/ARM Juno Development
| Platform, BIOS EDK II Jul 30 2018
| Call trace:
| dump_backtrace+0x0/0x380
| show_stack+0x1c/0x30
| dump_stack_lvl+0x8c/0xb8
| print_address_description.constprop.0+0x74/0x2b8
| kasan_report+0x1f4/0x210
| kasan_check_range+0xfc/0x1a4
| __kasan_check_read+0x38/0x60
| mutex_is_locked+0x20/0x60
| mutex_destroy+0x80/0x100
| gov_attr_set_put+0xfc/0x150
| sugov_exit+0x78/0x190
| cpufreq_offline.isra.0+0x2c0/0x660
| cpuhp_cpufreq_offline+0x14/0x24
| cpuhp_invoke_callback+0x430/0x6d0
| cpuhp_thread_fun+0x1b0/0x624
| smpboot_thread_fn+0x5e0/0xa6c
| kthread+0x3a0/0x450
| ret_from_fork+0x10/0x20
Swap the order of the calls.
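The resulting pattern in gov_attr_set_put() (a sketch of the fixed
ordering):

	/* kobject_put() may drop the last reference, and the kobject's
	 * release() kfree()s the structure embedding both the kobject and
	 * the mutex -- so destroy the mutex before the final put. */
	mutex_destroy(&attr_set->update_lock);
	kobject_put(&attr_set->kobj);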
Fixes: e5c6b312ce3c ("cpufreq: schedutil: Use kobject release() method to free sugov_tunables")
Cc: 4.7+ <stable(a)vger.kernel.org> # 4.7+
Signed-off-by: James Morse <james.morse(a)arm.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki(a)intel.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/cpufreq/cpufreq_governor_attr_set.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c
index 52841f807a7eb..45fdf30cade39 100644
--- a/drivers/cpufreq/cpufreq_governor_attr_set.c
+++ b/drivers/cpufreq/cpufreq_governor_attr_set.c
@@ -77,8 +77,8 @@ unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *l
if (count)
return count;
- kobject_put(&attr_set->kobj);
mutex_destroy(&attr_set->update_lock);
+ kobject_put(&attr_set->kobj);
return 0;
}
EXPORT_SYMBOL_GPL(gov_attr_set_put);
--
2.25.1
[PATCH openEuler-1.0-LTS v3] scsi: spraid: support Ramaxel's spraid driver
by Yanling Song 22 Dec '21
Ramaxel inclusion
category: features
bugzilla: https://gitee.com/openeuler/kernel/issues/I4NJIB
CVE: NA
v2->v3:
1. Add defconfig entries for hulk_defconfig on arm64 and x86
v1->v2:
1. Add defconfig entries for arm64 and x86
Support Ramaxel's SPRxxx RAID controllers.
Signed-off-by: Yanling Song <songyl(a)ramaxel.com>
Reviewed-by: Jiang Yu<yujiang(a)ramaxel.com>
---
arch/arm64/configs/hulk_defconfig | 1 +
arch/arm64/configs/openeuler_defconfig | 1 +
arch/x86/configs/hulk_defconfig | 3 +-
arch/x86/configs/openeuler_defconfig | 1 +
drivers/scsi/Kconfig | 1 +
drivers/scsi/Makefile | 1 +
drivers/scsi/spraid/Kconfig | 13 +
drivers/scsi/spraid/Makefile | 7 +
drivers/scsi/spraid/spraid.h | 746 +++++
drivers/scsi/spraid/spraid_main.c | 3875 ++++++++++++++++++++++++
10 files changed, 4648 insertions(+), 1 deletion(-)
create mode 100644 drivers/scsi/spraid/Kconfig
create mode 100644 drivers/scsi/spraid/Makefile
create mode 100644 drivers/scsi/spraid/spraid.h
create mode 100644 drivers/scsi/spraid/spraid_main.c
diff --git a/arch/arm64/configs/hulk_defconfig b/arch/arm64/configs/hulk_defconfig
index bc350c41b333..71517c091c43 100644
--- a/arch/arm64/configs/hulk_defconfig
+++ b/arch/arm64/configs/hulk_defconfig
@@ -2149,6 +2149,7 @@ CONFIG_SCSI_MPT2SAS_MAX_SGE=128
CONFIG_SCSI_MPT3SAS_MAX_SGE=128
CONFIG_SCSI_MPT2SAS=m
CONFIG_SCSI_SMARTPQI=m
+CONFIG_RAMAXEL_SPRAID=m
# CONFIG_SCSI_UFSHCD is not set
# CONFIG_SCSI_HPTIOP is not set
CONFIG_LIBFC=m
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index d2167c80757f..47b0931b91bf 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -2143,6 +2143,7 @@ CONFIG_SCSI_MPT2SAS_MAX_SGE=128
CONFIG_SCSI_MPT3SAS_MAX_SGE=128
CONFIG_SCSI_MPT2SAS=m
CONFIG_SCSI_SMARTPQI=m
+CONFIG_RAMAXEL_SPRAID=m
# CONFIG_SCSI_UFSHCD is not set
# CONFIG_SCSI_HPTIOP is not set
CONFIG_LIBFC=m
diff --git a/arch/x86/configs/hulk_defconfig b/arch/x86/configs/hulk_defconfig
index 983d52ccad82..536ffb38c00c 100644
--- a/arch/x86/configs/hulk_defconfig
+++ b/arch/x86/configs/hulk_defconfig
@@ -1,4 +1,4 @@
-#
+
# Automatically generated file; DO NOT EDIT.
# Linux/x86 4.19.21 Kernel Configuration
#
@@ -2177,6 +2177,7 @@ CONFIG_SCSI_MPT2SAS_MAX_SGE=128
CONFIG_SCSI_MPT3SAS_MAX_SGE=128
CONFIG_SCSI_MPT2SAS=m
CONFIG_SCSI_SMARTPQI=m
+CONFIG_RAMAXEL_SPRAID=m
# CONFIG_SCSI_UFSHCD is not set
# CONFIG_SCSI_HPTIOP is not set
# CONFIG_SCSI_BUSLOGIC is not set
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index 7043426d9780..786186288fea 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -2182,6 +2182,7 @@ CONFIG_SCSI_MPT2SAS_MAX_SGE=128
CONFIG_SCSI_MPT3SAS_MAX_SGE=128
CONFIG_SCSI_MPT2SAS=m
CONFIG_SCSI_SMARTPQI=m
+CONFIG_RAMAXEL_SPRAID=m
# CONFIG_SCSI_UFSHCD is not set
# CONFIG_SCSI_HPTIOP is not set
# CONFIG_SCSI_BUSLOGIC is not set
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8450484184e3..63d2aaa22834 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -522,6 +522,7 @@ source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt3sas/Kconfig"
source "drivers/scsi/smartpqi/Kconfig"
source "drivers/scsi/ufs/Kconfig"
+source "drivers/scsi/spraid/Kconfig"
config SCSI_HPTIOP
tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 2973693f6dcc..4056cf26e09e 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -94,6 +94,7 @@ obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
obj-$(CONFIG_SCSI_DC395x) += dc395x.o
obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o
obj-$(CONFIG_CXLFLASH) += cxlflash/
+obj-$(CONFIG_RAMAXEL_SPRAID) += spraid/
obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
diff --git a/drivers/scsi/spraid/Kconfig b/drivers/scsi/spraid/Kconfig
new file mode 100644
index 000000000000..bfbba3db8db0
--- /dev/null
+++ b/drivers/scsi/spraid/Kconfig
@@ -0,0 +1,13 @@
+#
+# Ramaxel driver configuration
+#
+
+config RAMAXEL_SPRAID
+ tristate "Ramaxel spraid Adapter"
+ depends on PCI && SCSI
+ select BLK_DEV_BSGLIB
+ depends on ARM64 || X86_64
+ help
+ This driver supports the Ramaxel SPRxxx series of
+ RAID controllers, which attach to the host over a
+ PCIe Gen4 interface and support SAS/SATA HDDs/SSDs.
diff --git a/drivers/scsi/spraid/Makefile b/drivers/scsi/spraid/Makefile
new file mode 100644
index 000000000000..aadc2ffd37eb
--- /dev/null
+++ b/drivers/scsi/spraid/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Ramaxel device drivers.
+#
+
+obj-$(CONFIG_RAMAXEL_SPRAID) += spraid.o
+
+spraid-objs := spraid_main.o
\ No newline at end of file
diff --git a/drivers/scsi/spraid/spraid.h b/drivers/scsi/spraid/spraid.h
new file mode 100644
index 000000000000..c1e4980e18e5
--- /dev/null
+++ b/drivers/scsi/spraid/spraid.h
@@ -0,0 +1,746 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+#ifndef __SPRAID_H_
+#define __SPRAID_H_
+
+#define SPRAID_CAP_MQES(cap) ((cap) & 0xffff)
+#define SPRAID_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
+#define SPRAID_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
+#define SPRAID_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+#define SPRAID_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
+#define SPRAID_CAP_DMAMASK(cap) (((cap) >> 37) & 0xff)
+
+#define SPRAID_DEFAULT_MAX_CHANNEL 4
+#define SPRAID_DEFAULT_MAX_ID 240
+#define SPRAID_DEFAULT_MAX_LUN_PER_HOST 8
+#define MAX_SECTORS 2048
+
+#define IO_SQE_SIZE sizeof(struct spraid_ioq_command)
+#define ADMIN_SQE_SIZE sizeof(struct spraid_admin_command)
+#define SQE_SIZE(qid) (((qid) > 0) ? IO_SQE_SIZE : ADMIN_SQE_SIZE)
+#define CQ_SIZE(depth) ((depth) * sizeof(struct spraid_completion))
+#define SQ_SIZE(qid, depth) ((depth) * SQE_SIZE(qid))
+
+#define SENSE_SIZE(depth) ((depth) * SCSI_SENSE_BUFFERSIZE)
+
+#define SPRAID_AQ_DEPTH 128
+#define SPRAID_NR_AEN_COMMANDS 16
+#define SPRAID_AQ_BLK_MQ_DEPTH (SPRAID_AQ_DEPTH - SPRAID_NR_AEN_COMMANDS)
+#define SPRAID_AQ_MQ_TAG_DEPTH (SPRAID_AQ_BLK_MQ_DEPTH - 1)
+
+#define SPRAID_ADMIN_QUEUE_NUM 1
+#define SPRAID_PTCMDS_PERQ 1
+#define SPRAID_IO_BLK_MQ_DEPTH (hdev->shost->can_queue)
+#define SPRAID_NR_IOQ_PTCMDS (SPRAID_PTCMDS_PERQ * hdev->shost->nr_hw_queues)
+
+#define FUA_MASK 0x08
+#define SPRAID_MINORS BIT(MINORBITS)
+
+#define COMMAND_IS_WRITE(cmd) ((cmd)->common.opcode & 1)
+
+#define SPRAID_IO_IOSQES 7
+#define SPRAID_IO_IOCQES 4
+#define PRP_ENTRY_SIZE 8
+
+#define SMALL_POOL_SIZE 256
+#define MAX_SMALL_POOL_NUM 16
+#define MAX_CMD_PER_DEV 64
+#define MAX_CDB_LEN 32
+
+#define SPRAID_UP_TO_MULTY4(x) (((x) + 4) & (~0x03))
+
+#define CQE_STATUS_SUCCESS (0x0)
+
+#define PCI_VENDOR_ID_RAMAXEL_LOGIC 0x1E81
+
+#define SPRAID_SERVER_DEVICE_HBA_DID 0x2100
+#define SPRAID_SERVER_DEVICE_RAID_DID 0x2200
+
+#define IO_6_DEFAULT_TX_LEN 256
+
+#define SPRAID_INT_PAGES 2
+#define SPRAID_INT_BYTES(hdev) (SPRAID_INT_PAGES * (hdev)->page_size)
+
+enum {
+ SPRAID_REQ_CANCELLED = (1 << 0),
+ SPRAID_REQ_USERCMD = (1 << 1),
+};
+
+enum {
+ SPRAID_SC_SUCCESS = 0x0,
+ SPRAID_SC_INVALID_OPCODE = 0x1,
+ SPRAID_SC_INVALID_FIELD = 0x2,
+
+ SPRAID_SC_ABORT_LIMIT = 0x103,
+ SPRAID_SC_ABORT_MISSING = 0x104,
+ SPRAID_SC_ASYNC_LIMIT = 0x105,
+
+ SPRAID_SC_DNR = 0x4000,
+};
+
+enum {
+ SPRAID_REG_CAP = 0x0000,
+ SPRAID_REG_CC = 0x0014,
+ SPRAID_REG_CSTS = 0x001c,
+ SPRAID_REG_AQA = 0x0024,
+ SPRAID_REG_ASQ = 0x0028,
+ SPRAID_REG_ACQ = 0x0030,
+ SPRAID_REG_DBS = 0x1000,
+};
+
+enum {
+ SPRAID_CC_ENABLE = 1 << 0,
+ SPRAID_CC_CSS_NVM = 0 << 4,
+ SPRAID_CC_MPS_SHIFT = 7,
+ SPRAID_CC_AMS_SHIFT = 11,
+ SPRAID_CC_SHN_SHIFT = 14,
+ SPRAID_CC_IOSQES_SHIFT = 16,
+ SPRAID_CC_IOCQES_SHIFT = 20,
+ SPRAID_CC_AMS_RR = 0 << SPRAID_CC_AMS_SHIFT,
+ SPRAID_CC_SHN_NONE = 0 << SPRAID_CC_SHN_SHIFT,
+ SPRAID_CC_IOSQES = SPRAID_IO_IOSQES << SPRAID_CC_IOSQES_SHIFT,
+ SPRAID_CC_IOCQES = SPRAID_IO_IOCQES << SPRAID_CC_IOCQES_SHIFT,
+ SPRAID_CC_SHN_NORMAL = 1 << SPRAID_CC_SHN_SHIFT,
+ SPRAID_CC_SHN_MASK = 3 << SPRAID_CC_SHN_SHIFT,
+ SPRAID_CSTS_CFS_SHIFT = 1,
+ SPRAID_CSTS_SHST_SHIFT = 2,
+ SPRAID_CSTS_PP_SHIFT = 5,
+ SPRAID_CSTS_RDY = 1 << 0,
+ SPRAID_CSTS_SHST_CMPLT = 2 << 2,
+ SPRAID_CSTS_SHST_MASK = 3 << 2,
+ SPRAID_CSTS_CFS_MASK = 1 << SPRAID_CSTS_CFS_SHIFT,
+ SPRAID_CSTS_PP_MASK = 1 << SPRAID_CSTS_PP_SHIFT,
+};
+
+enum {
+ SPRAID_ADMIN_DELETE_SQ = 0x00,
+ SPRAID_ADMIN_CREATE_SQ = 0x01,
+ SPRAID_ADMIN_DELETE_CQ = 0x04,
+ SPRAID_ADMIN_CREATE_CQ = 0x05,
+ SPRAID_ADMIN_ABORT_CMD = 0x08,
+ SPRAID_ADMIN_SET_FEATURES = 0x09,
+ SPRAID_ADMIN_ASYNC_EVENT = 0x0c,
+ SPRAID_ADMIN_GET_INFO = 0xc6,
+ SPRAID_ADMIN_RESET = 0xc8,
+};
+
+enum {
+ SPRAID_GET_INFO_CTRL = 0,
+ SPRAID_GET_INFO_DEV_LIST = 1,
+};
+
+enum {
+ SPRAID_RESET_TARGET = 0,
+ SPRAID_RESET_BUS = 1,
+};
+
+enum {
+ SPRAID_AEN_ERROR = 0,
+ SPRAID_AEN_NOTICE = 2,
+ SPRAID_AEN_VS = 7,
+};
+
+enum {
+ SPRAID_AEN_DEV_CHANGED = 0x00,
+ SPRAID_AEN_FW_ACT_START = 0x01,
+ SPRAID_AEN_HOST_PROBING = 0x10,
+};
+
+enum {
+ SPRAID_AEN_TIMESYN = 0x00,
+ SPRAID_AEN_FW_ACT_FINISH = 0x02,
+ SPRAID_AEN_EVENT_MIN = 0x80,
+ SPRAID_AEN_EVENT_MAX = 0xff,
+};
+
+enum {
+ SPRAID_CMD_WRITE = 0x01,
+ SPRAID_CMD_READ = 0x02,
+
+ SPRAID_CMD_NONIO_NONE = 0x80,
+ SPRAID_CMD_NONIO_TODEV = 0x81,
+ SPRAID_CMD_NONIO_FROMDEV = 0x82,
+};
+
+enum {
+ SPRAID_QUEUE_PHYS_CONTIG = (1 << 0),
+ SPRAID_CQ_IRQ_ENABLED = (1 << 1),
+
+ SPRAID_FEAT_NUM_QUEUES = 0x07,
+ SPRAID_FEAT_ASYNC_EVENT = 0x0b,
+ SPRAID_FEAT_TIMESTAMP = 0x0e,
+};
+
+enum spraid_state {
+ SPRAID_NEW,
+ SPRAID_LIVE,
+ SPRAID_RESETTING,
+ SPRAID_DELETING,
+ SPRAID_DEAD,
+};
+
+enum {
+ SPRAID_CARD_HBA,
+ SPRAID_CARD_RAID,
+};
+
+enum spraid_cmd_type {
+ SPRAID_CMD_ADM,
+ SPRAID_CMD_IOPT,
+};
+
+struct spraid_completion {
+ __le32 result;
+ union {
+ struct {
+ __u8 sense_len;
+ __u8 resv[3];
+ };
+ __le32 result1;
+ };
+ __le16 sq_head;
+ __le16 sq_id;
+ __u16 cmd_id;
+ __le16 status;
+};
+
+struct spraid_ctrl_info {
+ __le32 nd;
+ __le16 max_cmds;
+ __le16 max_channel;
+ __le32 max_tgt_id;
+ __le16 max_lun;
+ __le16 max_num_sge;
+ __le16 lun_num_in_boot;
+ __u8 mdts;
+ __u8 acl;
+ __u8 aerl;
+ __u8 card_type;
+ __u16 rsvd;
+ __u32 rtd3e;
+ __u8 sn[32];
+ __u8 fr[16];
+ __u8 rsvd1[4020];
+};
+
+struct spraid_dev {
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct Scsi_Host *shost;
+ struct spraid_queue *queues;
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_small_pool[MAX_SMALL_POOL_NUM];
+ mempool_t *iod_mempool;
+ void __iomem *bar;
+ u32 max_qid;
+ u32 num_vecs;
+ u32 queue_count;
+ u32 ioq_depth;
+ int db_stride;
+ u32 __iomem *dbs;
+ struct rw_semaphore devices_rwsem;
+ int numa_node;
+ u32 page_size;
+ u32 ctrl_config;
+ u32 online_queues;
+ u64 cap;
+ int instance;
+ struct spraid_ctrl_info *ctrl_info;
+ struct spraid_dev_info *devices;
+
+ struct spraid_cmd *adm_cmds;
+ struct list_head adm_cmd_list;
+ spinlock_t adm_cmd_lock;
+
+ struct spraid_cmd *ioq_ptcmds;
+ struct list_head ioq_pt_list;
+ spinlock_t ioq_pt_lock;
+
+ struct work_struct scan_work;
+ struct work_struct timesyn_work;
+ struct work_struct reset_work;
+ struct work_struct fw_act_work;
+
+ enum spraid_state state;
+ spinlock_t state_lock;
+
+ struct request_queue *bsg_queue;
+};
+
+struct spraid_sgl_desc {
+ __le64 addr;
+ __le32 length;
+ __u8 rsvd[3];
+ __u8 type;
+};
+
+union spraid_data_ptr {
+ struct {
+ __le64 prp1;
+ __le64 prp2;
+ };
+ struct spraid_sgl_desc sgl;
+};
+
+struct spraid_admin_common_command {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __le32 cdw2[4];
+ union spraid_data_ptr dptr;
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
+};
+
+struct spraid_features {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __u64 rsvd2[2];
+ union spraid_data_ptr dptr;
+ __le32 fid;
+ __le32 dword11;
+ __le32 dword12;
+ __le32 dword13;
+ __le32 dword14;
+ __le32 dword15;
+};
+
+struct spraid_create_cq {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __u64 rsvd8;
+ __le16 cqid;
+ __le16 qsize;
+ __le16 cq_flags;
+ __le16 irq_vector;
+ __u32 rsvd12[4];
+};
+
+struct spraid_create_sq {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __u64 rsvd8;
+ __le16 sqid;
+ __le16 qsize;
+ __le16 sq_flags;
+ __le16 cqid;
+ __u32 rsvd12[4];
+};
+
+struct spraid_delete_queue {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[9];
+ __le16 qid;
+ __u16 rsvd10;
+ __u32 rsvd11[5];
+};
+
+struct spraid_get_info {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __u32 rsvd2[4];
+ union spraid_data_ptr dptr;
+ __u8 type;
+ __u8 rsvd10[3];
+ __le32 cdw11;
+ __u32 rsvd12[4];
+};
+
+struct spraid_usr_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ union {
+ struct {
+ __le16 subopcode;
+ __le16 rsvd1;
+ } info_0;
+ __le32 cdw2;
+ };
+ union {
+ struct {
+ __le16 data_len;
+ __le16 param_len;
+ } info_1;
+ __le32 cdw3;
+ };
+ __u64 metadata;
+ union spraid_data_ptr dptr;
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
+};
+
+enum {
+ SPRAID_CMD_FLAG_SGL_METABUF = (1 << 6),
+ SPRAID_CMD_FLAG_SGL_METASEG = (1 << 7),
+ SPRAID_CMD_FLAG_SGL_ALL = SPRAID_CMD_FLAG_SGL_METABUF |
+ SPRAID_CMD_FLAG_SGL_METASEG,
+};
+
+enum spraid_cmd_state {
+ SPRAID_CMD_IDLE = 0,
+ SPRAID_CMD_IN_FLIGHT = 1,
+ SPRAID_CMD_COMPLETE = 2,
+ SPRAID_CMD_TIMEOUT = 3,
+ SPRAID_CMD_TMO_COMPLETE = 4,
+};
+
+struct spraid_abort_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __u64 rsvd2[4];
+ __le16 sqid;
+ __le16 cid;
+ __u32 rsvd11[5];
+};
+
+struct spraid_reset_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __u64 rsvd2[4];
+ __u8 type;
+ __u8 rsvd10[3];
+ __u32 rsvd11[5];
+};
+
+struct spraid_admin_command {
+ union {
+ struct spraid_admin_common_command common;
+ struct spraid_features features;
+ struct spraid_create_cq create_cq;
+ struct spraid_create_sq create_sq;
+ struct spraid_delete_queue delete_queue;
+ struct spraid_get_info get_info;
+ struct spraid_abort_cmd abort;
+ struct spraid_reset_cmd reset;
+ struct spraid_usr_cmd usr_cmd;
+ };
+};
+
+struct spraid_ioq_common_command {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_len;
+ __u8 rsvd2;
+ __le32 cdw3[3];
+ union spraid_data_ptr dptr;
+ __le32 cdw10[6];
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __le32 cdw26[6];
+};
+
+struct spraid_rw_command {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_len;
+ __u8 rsvd2;
+ __u32 rsvd3[3];
+ union spraid_data_ptr dptr;
+ __le64 slba;
+ __le16 nlb;
+ __le16 control;
+ __u32 rsvd13[3];
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __u32 rsvd26[6];
+};
+
+struct spraid_scsi_nonio {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_length;
+ __u8 rsvd2;
+ __u32 rsvd3[3];
+ union spraid_data_ptr dptr;
+ __u32 rsvd10[5];
+ __le32 buffer_len;
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __u32 rsvd26[6];
+};
+
+struct spraid_ioq_command {
+ union {
+ struct spraid_ioq_common_command common;
+ struct spraid_rw_command rw;
+ struct spraid_scsi_nonio scsi_nonio;
+ };
+};
+
+struct spraid_passthru_common_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd0;
+ __u32 nsid;
+ union {
+ struct {
+ __u16 subopcode;
+ __u16 rsvd1;
+ } info_0;
+ __u32 cdw2;
+ };
+ union {
+ struct {
+ __u16 data_len;
+ __u16 param_len;
+ } info_1;
+ __u32 cdw3;
+ };
+ __u64 metadata;
+
+ __u64 addr;
+ __u64 prp2;
+
+ __u32 cdw10;
+ __u32 cdw11;
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 cdw15;
+ __u32 timeout_ms;
+ __u32 result0;
+ __u32 result1;
+};
+
+struct spraid_ioq_passthru_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd0;
+ __u32 nsid;
+ union {
+ struct {
+ __u16 res_sense_len;
+ __u8 cdb_len;
+ __u8 rsvd0;
+ } info_0;
+ __u32 cdw2;
+ };
+ union {
+ struct {
+ __u16 subopcode;
+ __u16 rsvd1;
+ } info_1;
+ __u32 cdw3;
+ };
+ union {
+ struct {
+ __u16 rsvd;
+ __u16 param_len;
+ } info_2;
+ __u32 cdw4;
+ };
+ __u32 cdw5;
+ __u64 addr;
+ __u64 prp2;
+ union {
+ struct {
+ __u16 eid;
+ __u16 sid;
+ } info_3;
+ __u32 cdw10;
+ };
+ union {
+ struct {
+ __u16 did;
+ __u8 did_flag;
+ __u8 rsvd2;
+ } info_4;
+ __u32 cdw11;
+ };
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 data_len;
+ __u32 cdw16;
+ __u32 cdw17;
+ __u32 cdw18;
+ __u32 cdw19;
+ __u32 cdw20;
+ __u32 cdw21;
+ __u32 cdw22;
+ __u32 cdw23;
+ __u64 sense_addr;
+ __u32 cdw26[4];
+ __u32 timeout_ms;
+ __u32 result0;
+ __u32 result1;
+};
+
+struct spraid_bsg_request {
+ u32 msgcode;
+ u32 control;
+ union {
+ struct spraid_passthru_common_cmd admcmd;
+ struct spraid_ioq_passthru_cmd ioqcmd;
+ };
+};
+
+enum {
+ SPRAID_BSG_ADM,
+ SPRAID_BSG_IOQ,
+};
+
+struct spraid_cmd {
+ int qid;
+ int cid;
+ u32 result0;
+ u32 result1;
+ u16 status;
+ void *priv;
+ enum spraid_cmd_state state;
+ struct completion cmd_done;
+ struct list_head list;
+};
+
+struct spraid_queue {
+ struct spraid_dev *hdev;
+ spinlock_t sq_lock;
+
+ spinlock_t cq_lock ____cacheline_aligned_in_smp;
+
+ void *sq_cmds;
+
+ struct spraid_completion *cqes;
+
+ dma_addr_t sq_dma_addr;
+ dma_addr_t cq_dma_addr;
+ u32 __iomem *q_db;
+ u8 cq_phase;
+ u8 sqes;
+ u16 qid;
+ u16 sq_tail;
+ u16 cq_head;
+ u16 last_cq_head;
+ u16 q_depth;
+ s16 cq_vector;
+ void *sense;
+ dma_addr_t sense_dma_addr;
+ struct dma_pool *prp_small_pool;
+};
+
+struct spraid_iod {
+ struct spraid_queue *spraidq;
+ enum spraid_cmd_state state;
+ int npages;
+ u32 nsge;
+ u32 length;
+ bool use_sgl;
+ bool sg_drv_mgmt;
+ dma_addr_t first_dma;
+ void *sense;
+ dma_addr_t sense_dma;
+ struct scatterlist *sg;
+ struct scatterlist inline_sg[0];
+};
+
+#define SPRAID_DEV_INFO_ATTR_BOOT(attr) ((attr) & 0x01)
+#define SPRAID_DEV_INFO_ATTR_VD(attr) (((attr) & 0x02) == 0x0)
+#define SPRAID_DEV_INFO_ATTR_PT(attr) (((attr) & 0x22) == 0x02)
+#define SPRAID_DEV_INFO_ATTR_RAWDISK(attr) ((attr) & 0x20)
+
+#define SPRAID_DEV_INFO_FLAG_VALID(flag) ((flag) & 0x01)
+#define SPRAID_DEV_INFO_FLAG_CHANGE(flag) ((flag) & 0x02)
+
+#define BGTASK_TYPE_REBUILD 4
+#define USR_CMD_READ 0xc2
+#define USR_CMD_RDLEN 0x1000
+#define USR_CMD_VDINFO 0x704
+#define USR_CMD_BGTASK 0x504
+#define VDINFO_PARAM_LEN 0x04
+
+struct spraid_vd_info {
+ __u8 name[32];
+ __le16 id;
+ __u8 rg_id;
+ __u8 rg_level;
+ __u8 sg_num;
+ __u8 sg_disk_num;
+ __u8 vd_status;
+ __u8 vd_type;
+ __u8 rsvd1[4056];
+};
+
+#define MAX_REALTIME_BGTASK_NUM 32
+
+struct bgtask_info {
+ __u8 type;
+ __u8 progress;
+ __u8 rate;
+ __u8 rsvd0;
+ __le16 vd_id;
+ __le16 time_left;
+ __u8 rsvd1[4];
+};
+
+struct spraid_bgtask {
+ __u8 sw;
+ __u8 task_num;
+ __u8 rsvd[6];
+ struct bgtask_info bgtask[MAX_REALTIME_BGTASK_NUM];
+};
+
+struct spraid_dev_info {
+ __le32 hdid;
+ __le16 target;
+ __u8 channel;
+ __u8 lun;
+ __u8 attr;
+ __u8 flag;
+ __le16 max_io_kb;
+};
+
+#define MAX_DEV_ENTRY_PER_PAGE_4K 340
+struct spraid_dev_list {
+ __le32 dev_num;
+ __u32 rsvd0[3];
+ struct spraid_dev_info devices[MAX_DEV_ENTRY_PER_PAGE_4K];
+};
+
+struct spraid_sdev_hostdata {
+ u32 hdid;
+ u16 max_io_kb;
+ u8 attr;
+ u8 flag;
+ u8 rg_id;
+ u8 rsvd[3];
+};
+
+#endif
+
diff --git a/drivers/scsi/spraid/spraid_main.c b/drivers/scsi/spraid/spraid_main.c
new file mode 100644
index 000000000000..519b39f44e91
--- /dev/null
+++ b/drivers/scsi/spraid/spraid_main.c
@@ -0,0 +1,3875 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+/* Ramaxel Raid SPXXX Series Linux Driver */
+
+#define pr_fmt(fmt) "spraid: " fmt
+
+#include <linux/sched/signal.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/sysfs.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/ratelimit.h>
+#include <linux/once.h>
+#include <linux/debugfs.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/blkdev.h>
+#include <linux/bsg-lib.h>
+#include <asm/unaligned.h>
+#include <linux/sort.h>
+#include <target/target_core_backend.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_dbg.h>
+
+
+#include "spraid.h"
+
+static u32 admin_tmout = 60;
+module_param(admin_tmout, uint, 0644);
+MODULE_PARM_DESC(admin_tmout, "admin commands timeout (seconds)");
+
+static u32 scmd_tmout_nonpt = 180;
+module_param(scmd_tmout_nonpt, uint, 0644);
+MODULE_PARM_DESC(scmd_tmout_nonpt,
+ "scsi commands timeout for rawdisk&raid(seconds)");
+
+static int ioq_depth_set(const char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops ioq_depth_ops = {
+ .set = ioq_depth_set,
+ .get = param_get_uint,
+};
+
+static u32 io_queue_depth = 1024;
+module_param_cb(io_queue_depth, &ioq_depth_ops, &io_queue_depth, 0644);
+MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
+
+static int log_debug_switch_set(const char *val, const struct kernel_param *kp)
+{
+ u8 n = 0;
+ int ret;
+
+ ret = kstrtou8(val, 10, &n);
+ if (ret != 0)
+ return -EINVAL;
+
+ return param_set_byte(val, kp);
+}
+
+static const struct kernel_param_ops log_debug_switch_ops = {
+ .set = log_debug_switch_set,
+ .get = param_get_byte,
+};
+
+static unsigned char log_debug_switch;
+module_param_cb(log_debug_switch, &log_debug_switch_ops,
+ &log_debug_switch, 0644);
+MODULE_PARM_DESC(log_debug_switch,
+ "set log state, default non-zero for switch on");
+
+static int small_pool_num_set(const char *val, const struct kernel_param *kp)
+{
+ u8 n = 0;
+ int ret;
+
+ ret = kstrtou8(val, 10, &n);
+ if (ret != 0)
+ return -EINVAL;
+ if (n > MAX_SMALL_POOL_NUM)
+ n = MAX_SMALL_POOL_NUM;
+ if (n < 1)
+ n = 1;
+ *((u8 *)kp->arg) = n;
+
+ return 0;
+}
+
+static const struct kernel_param_ops small_pool_num_ops = {
+ .set = small_pool_num_set,
+ .get = param_get_byte,
+};
+
+/* It was found that the spinlock of a single pool is heavily
+ * contended on multiple CPUs, so multiple pools are introduced
+ * to reduce the contention.
+ */
+static unsigned char small_pool_num = 4;
+module_param_cb(small_pool_num, &small_pool_num_ops, &small_pool_num, 0644);
+MODULE_PARM_DESC(small_pool_num, "set prp small pool num, default 4, MAX 16");
+
+static void spraid_free_queue(struct spraid_queue *spraidq);
+static void spraid_handle_aen_notice(struct spraid_dev *hdev, u32 result);
+static void spraid_handle_aen_vs(struct spraid_dev *hdev,
+ u32 result, u32 result1);
+
+static DEFINE_IDA(spraid_instance_ida);
+
+static struct class *spraid_class;
+
+#define SPRAID_CAP_TIMEOUT_UNIT_MS (HZ / 2)
+
+static struct workqueue_struct *spraid_wq;
+
+#define dev_log_dbg(dev, fmt, ...) do { \
+ if (unlikely(log_debug_switch)) \
+ dev_info(dev, "[%s] [%d] " fmt, \
+ __func__, __LINE__, ##__VA_ARGS__); \
+} while (0)
+
+#define SPRAID_DRV_VERSION "1.0.0.0"
+
+#define ADMIN_TIMEOUT (admin_tmout * HZ)
+#define ADMIN_ERR_TIMEOUT 32757
+
+#define SPRAID_WAIT_ABNL_CMD_TIMEOUT (3 * 2)
+
+#define SPRAID_DMA_MSK_BIT_MAX 64
+
+enum FW_STAT_CODE {
+ FW_STAT_OK = 0,
+ FW_STAT_NEED_CHECK,
+ FW_STAT_ERROR,
+ FW_STAT_EP_PCIE_ERROR,
+ FW_STAT_NAC_DMA_ERROR,
+ FW_STAT_ABORTED,
+ FW_STAT_NEED_RETRY
+};
+
+static const char * const raid_levels[] = {"0", "1", "5", "6", "10", "50", "60",
+ "NA"};
+
+static const char * const raid_states[] = {
+ "NA", "NORMAL", "FAULT", "DEGRADE", "NOT_FORMATTED",
+ "FORMATTING", "SANITIZING", "INITIALIZING", "INITIALIZE_FAIL",
+ "DELETING", "DELETE_FAIL", "WRITE_PROTECT"
+};
+
+static int ioq_depth_set(const char *val, const struct kernel_param *kp)
+{
+ int n = 0;
+ int ret;
+
+ ret = kstrtoint(val, 10, &n);
+ if (ret != 0 || n < 2)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static int spraid_remap_bar(struct spraid_dev *hdev, u32 size)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ if (size > pci_resource_len(pdev, 0)) {
+ dev_err(hdev->dev, "Input size[%u] exceed bar0 length[%llu]\n",
+ size, pci_resource_len(pdev, 0));
+ return -ENOMEM;
+ }
+
+ if (hdev->bar)
+ iounmap(hdev->bar);
+
+ hdev->bar = ioremap(pci_resource_start(pdev, 0), size);
+ if (!hdev->bar) {
+ dev_err(hdev->dev, "ioremap for bar0 failed\n");
+ return -ENOMEM;
+ }
+ hdev->dbs = hdev->bar + SPRAID_REG_DBS;
+
+ return 0;
+}
+
+static int spraid_dev_map(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
+ ret = pci_request_mem_regions(pdev, "spraid");
+ if (ret) {
+ dev_err(hdev->dev, "fail to request memory regions\n");
+ return ret;
+ }
+
+ ret = spraid_remap_bar(hdev, SPRAID_REG_DBS + 4096);
+ if (ret) {
+ pci_release_mem_regions(pdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void spraid_dev_unmap(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ if (hdev->bar) {
+ iounmap(hdev->bar);
+ hdev->bar = NULL;
+ }
+ pci_release_mem_regions(pdev);
+}
+
+static int spraid_pci_enable(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret = -ENOMEM;
+ u64 maskbit = SPRAID_DMA_MSK_BIT_MAX;
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(hdev->dev,
+ "Enable pci device memory resources failed\n");
+ return ret;
+ }
+ pci_set_master(pdev);
+
+ if (readl(hdev->bar + SPRAID_REG_CSTS) == U32_MAX) {
+ ret = -ENODEV;
+ dev_err(hdev->dev, "Read csts register failed\n");
+ goto disable;
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ dev_err(hdev->dev,
+ "Allocate one IRQ for setup admin channel failed\n");
+ goto disable;
+ }
+
+ hdev->cap = lo_hi_readq(hdev->bar + SPRAID_REG_CAP);
+ hdev->ioq_depth = min_t(u32, SPRAID_CAP_MQES(hdev->cap) + 1,
+ io_queue_depth);
+ hdev->db_stride = 1 << SPRAID_CAP_STRIDE(hdev->cap);
+
+ maskbit = SPRAID_CAP_DMAMASK(hdev->cap);
+ if (maskbit < 32 || maskbit > SPRAID_DMA_MSK_BIT_MAX) {
+ dev_err(hdev->dev,
+ "err, dma mask invalid[%llu], set to default\n",
+ maskbit);
+ maskbit = SPRAID_DMA_MSK_BIT_MAX;
+ }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(maskbit))) {
+ dev_err(hdev->dev, "set dma mask and coherent failed\n");
+ goto disable;
+ }
+
+ dev_info(hdev->dev, "set dma mask[%llu] success\n", maskbit);
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_save_state(pdev);
+
+ return 0;
+
+disable:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static int spraid_npages_prp(u32 size, struct spraid_dev *hdev)
+{
+ u32 nprps = DIV_ROUND_UP(size + hdev->page_size, hdev->page_size);
+
+ return DIV_ROUND_UP(PRP_ENTRY_SIZE * nprps, PAGE_SIZE - PRP_ENTRY_SIZE);
+}
+
+static int spraid_npages_sgl(u32 nseg)
+{
+ return DIV_ROUND_UP(nseg * sizeof(struct spraid_sgl_desc), PAGE_SIZE);
+}
+
+static void **spraid_iod_list(struct spraid_iod *iod)
+{
+ return (void **)(iod->inline_sg + (iod->sg_drv_mgmt ? iod->nsge : 0));
+}
+
+static u32 spraid_iod_ext_size(struct spraid_dev *hdev, u32 size, u32 nsge,
+ bool sg_drv_mgmt, bool use_sgl)
+{
+ size_t alloc_size, sg_size;
+
+ if (use_sgl)
+ alloc_size = sizeof(__le64 *) * spraid_npages_sgl(nsge);
+ else
+ alloc_size = sizeof(__le64 *) * spraid_npages_prp(size, hdev);
+
+ sg_size = sg_drv_mgmt ? (sizeof(struct scatterlist) * nsge) : 0;
+ return sg_size + alloc_size;
+}
+
+static u32 spraid_cmd_size(struct spraid_dev *hdev,
+ bool sg_drv_mgmt, bool use_sgl)
+{
+ u32 alloc_size = spraid_iod_ext_size(hdev, SPRAID_INT_BYTES(hdev),
+ SPRAID_INT_PAGES, sg_drv_mgmt, use_sgl);
+
+ dev_info(hdev->dev, "sg_drv_mgmt: %s, use_sgl: %s, iod size: %lu;"
+ " alloc_size: %u\n", sg_drv_mgmt ? "true" : "false",
+ use_sgl ? "true" : "false",
+ sizeof(struct spraid_iod), alloc_size);
+
+ return sizeof(struct spraid_iod) + alloc_size;
+}
+
+static int spraid_setup_prps(struct spraid_dev *hdev, struct spraid_iod *iod)
+{
+ struct scatterlist *sg = iod->sg;
+ u64 dma_addr = sg_dma_address(sg);
+ int dma_len = sg_dma_len(sg);
+ __le64 *prp_list, *old_prp_list;
+ u32 page_size = hdev->page_size;
+ int offset = dma_addr & (page_size - 1);
+ void **list = spraid_iod_list(iod);
+ int length = iod->length;
+ struct dma_pool *pool;
+ dma_addr_t prp_dma;
+ int nprps, i;
+
+ length -= (page_size - offset);
+ if (length <= 0) {
+ iod->first_dma = 0;
+ return 0;
+ }
+
+ dma_len -= (page_size - offset);
+ if (dma_len) {
+ dma_addr += (page_size - offset);
+ } else {
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ if (length <= page_size) {
+ iod->first_dma = dma_addr;
+ return 0;
+ }
+
+ nprps = DIV_ROUND_UP(length, page_size);
+ if (nprps <= (SMALL_POOL_SIZE / PRP_ENTRY_SIZE)) {
+ pool = iod->spraidq->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = hdev->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list) {
+ dev_err_ratelimited(hdev->dev,
+ "Allocate first prp_list memory failed\n");
+ iod->first_dma = dma_addr;
+ iod->npages = -1;
+ return -ENOMEM;
+ }
+ list[0] = prp_list;
+ iod->first_dma = prp_dma;
+ i = 0;
+ for (;;) {
+ if (i == page_size / PRP_ENTRY_SIZE) {
+ old_prp_list = prp_list;
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list) {
+ dev_err_ratelimited(hdev->dev, "Allocate %dth;"
+ " prp_list memory failed\n",
+ iod->npages + 1);
+ return -ENOMEM;
+ }
+ list[iod->npages++] = prp_list;
+ prp_list[0] = old_prp_list[i - 1];
+ old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+ i = 1;
+ }
+ prp_list[i++] = cpu_to_le64(dma_addr);
+ dma_len -= page_size;
+ dma_addr += page_size;
+ length -= page_size;
+ if (length <= 0)
+ break;
+ if (dma_len > 0)
+ continue;
+ if (unlikely(dma_len < 0))
+ goto bad_sgl;
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ return 0;
+
+bad_sgl:
+ dev_err(hdev->dev,
+ "Setup prps, invalid SGL for payload: %d nents: %d\n",
+ iod->length, iod->nsge);
+ return -EIO;
+}
+
+#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct spraid_sgl_desc))
+
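+/*
+ * Copy a command into the submission queue and ring the SQ tail
+ * doorbell under sq_lock; the tail index wraps at q_depth.
+ */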
+static void spraid_submit_cmd(struct spraid_queue *spraidq, const void *cmd)
+{
+ u32 sqes = SQE_SIZE(spraidq->qid);
+ unsigned long flags;
+ struct spraid_admin_common_command *acd =
+ (struct spraid_admin_common_command *)cmd;
+
+ spin_lock_irqsave(&spraidq->sq_lock, flags);
+ memcpy((spraidq->sq_cmds + sqes * spraidq->sq_tail), cmd, sqes);
+ if (++spraidq->sq_tail == spraidq->q_depth)
+ spraidq->sq_tail = 0;
+
+ writel(spraidq->sq_tail, spraidq->q_db);
+ spin_unlock_irqrestore(&spraidq->sq_lock, flags);
+
+ dev_log_dbg(spraidq->hdev->dev,
+ "cid[%d] qid[%d], opcode[0x%x], flags[0x%x], hdid[%u]\n",
+ acd->command_id, spraidq->qid, acd->opcode,
+ acd->flags, le32_to_cpu(acd->hdid));
+}
+
+static u32 spraid_mod64(u64 dividend, u32 divisor)
+{
+ u64 d;
+ u32 remainder;
+
+ if (!divisor) {
+ pr_err("spraid: mod64 called with zero divisor\n");
+ return 0;
+ }
+
+ d = dividend;
+ remainder = do_div(d, divisor);
+ return remainder;
+}
+
+static inline bool spraid_is_rw_scmd(struct scsi_cmnd *scmd)
+{
+ switch (scmd->cmnd[0]) {
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ case READ_32:
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ case WRITE_32:
+ return true;
+ default:
+ return false;
+ }
+}
+
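+/*
+ * A scatterlist can be described by PRPs only if every middle element
+ * is page aligned and a multiple of the page size, the first element
+ * ends on a page boundary and the last element starts on one;
+ * otherwise the command falls back to SGL descriptors.
+ */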
+static bool spraid_is_prp(struct spraid_dev *hdev,
+ struct scsi_cmnd *scmd, u32 nsge)
+{
+ struct scatterlist *sg = scsi_sglist(scmd);
+ u32 page_size = hdev->page_size;
+ bool is_prp = true;
+ int i = 0;
+
+ scsi_for_each_sg(scmd, sg, nsge, i) {
+ if (i != 0 && i != nsge - 1) {
+ if (spraid_mod64(sg_dma_len(sg), page_size) ||
+ spraid_mod64(sg_dma_address(sg), page_size)) {
+ is_prp = false;
+ break;
+ }
+ }
+
+ if (nsge > 1 && i == 0) {
+ if ((spraid_mod64((sg_dma_address(sg) + sg_dma_len(sg)),
+ page_size))) {
+ is_prp = false;
+ break;
+ }
+ }
+
+ if (nsge > 1 && i == (nsge - 1)) {
+ if (spraid_mod64(sg_dma_address(sg), page_size)) {
+ is_prp = false;
+ break;
+ }
+ }
+ }
+
+ return is_prp;
+}
+
+enum {
+ SPRAID_SGL_FMT_DATA_DESC = 0x00,
+ SPRAID_SGL_FMT_SEG_DESC = 0x02,
+ SPRAID_SGL_FMT_LAST_SEG_DESC = 0x03,
+ SPRAID_KEY_SGL_FMT_DATA_DESC = 0x04,
+ SPRAID_TRANSPORT_SGL_DATA_DESC = 0x05
+};
+
+static void spraid_sgl_set_data(struct spraid_sgl_desc *sge,
+ struct scatterlist *sg)
+{
+ sge->addr = cpu_to_le64(sg_dma_address(sg));
+ sge->length = cpu_to_le32(sg_dma_len(sg));
+ sge->type = SPRAID_SGL_FMT_DATA_DESC << 4;
+}
+
+static void spraid_sgl_set_seg(struct spraid_sgl_desc *sge,
+ dma_addr_t dma_addr, int entries)
+{
+ sge->addr = cpu_to_le64(dma_addr);
+ if (entries <= SGES_PER_PAGE) {
+ sge->length = cpu_to_le32(entries * sizeof(*sge));
+ sge->type = SPRAID_SGL_FMT_LAST_SEG_DESC << 4;
+ } else {
+ sge->length = cpu_to_le32(PAGE_SIZE);
+ sge->type = SPRAID_SGL_FMT_SEG_DESC << 4;
+ }
+}
+
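+/*
+ * Build a chained SGL for the command: a single-element list is
+ * inlined into the command dptr, longer lists are written into
+ * dma-pool pages where the last descriptor of a full page is turned
+ * into a segment descriptor that links to the next page.
+ */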
+static int spraid_setup_ioq_cmd_sgl(struct spraid_dev *hdev,
+ struct scsi_cmnd *scmd,
+ struct spraid_ioq_command *ioq_cmd,
+ struct spraid_iod *iod)
+{
+ struct spraid_sgl_desc *sg_list, *link, *old_sg_list;
+ struct scatterlist *sg = scsi_sglist(scmd);
+ void **list = spraid_iod_list(iod);
+ struct dma_pool *pool;
+ int nsge = iod->nsge;
+ dma_addr_t sgl_dma;
+ int i = 0;
+
+ ioq_cmd->common.flags |= SPRAID_CMD_FLAG_SGL_METABUF;
+
+ if (nsge == 1) {
+ spraid_sgl_set_data(&ioq_cmd->common.dptr.sgl, sg);
+ return 0;
+ }
+
+ if (nsge <= (SMALL_POOL_SIZE / sizeof(struct spraid_sgl_desc))) {
+ pool = iod->spraidq->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = hdev->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+ if (!sg_list) {
+ dev_err_ratelimited(hdev->dev,
+ "Allocate first sgl_list failed\n");
+ iod->npages = -1;
+ return -ENOMEM;
+ }
+
+ list[0] = sg_list;
+ iod->first_dma = sgl_dma;
+ spraid_sgl_set_seg(&ioq_cmd->common.dptr.sgl, sgl_dma, nsge);
+ do {
+ if (i == SGES_PER_PAGE) {
+ old_sg_list = sg_list;
+ link = &old_sg_list[SGES_PER_PAGE - 1];
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+ if (!sg_list) {
+ dev_err_ratelimited(hdev->dev,
+ "Allocate %dth sgl_list"
+ " failed\n",
+ iod->npages + 1);
+ return -ENOMEM;
+ }
+ list[iod->npages++] = sg_list;
+
+ i = 0;
+ memcpy(&sg_list[i++], link, sizeof(*link));
+ spraid_sgl_set_seg(link, sgl_dma, nsge);
+ }
+
+ spraid_sgl_set_data(&sg_list[i++], sg);
+ sg = sg_next(sg);
+ } while (--nsge > 0);
+
+ return 0;
+}
+
+#define SPRAID_RW_FUA BIT(14)
+
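+/*
+ * Decode the starting LBA, transfer length and FUA bit from the SCSI
+ * READ/WRITE cdb (6/10/12/16/32 byte variants) into the firmware rw
+ * command; nlb is zero-based, hence the "datalength - 1" below.
+ */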
+static void spraid_setup_rw_cmd(struct spraid_dev *hdev,
+ struct spraid_rw_command *rw,
+ struct scsi_cmnd *scmd)
+{
+ u32 start_lba_lo, start_lba_hi;
+ u32 datalength = 0;
+ u16 control = 0;
+
+ start_lba_lo = 0;
+ start_lba_hi = 0;
+
+ if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+ rw->opcode = SPRAID_CMD_WRITE;
+ } else if (scmd->sc_data_direction == DMA_FROM_DEVICE) {
+ rw->opcode = SPRAID_CMD_READ;
+ } else {
+ dev_err(hdev->dev,
+ "Invalid IO for unsupported data direction: %d\n",
+ scmd->sc_data_direction);
+ WARN_ON(1);
+ }
+
+ /* 6-byte READ(0x08) or WRITE(0x0A) cdb */
+ if (scmd->cmd_len == 6) {
+ datalength = (u32)(scmd->cmnd[4] == 0 ?
+ IO_6_DEFAULT_TX_LEN : scmd->cmnd[4]);
+ start_lba_lo = (u32)get_unaligned_be24(&scmd->cmnd[1]);
+
+ start_lba_lo &= 0x1FFFFF;
+ }
+
+ /* 10-byte READ(0x28) or WRITE(0x2A) cdb */
+ else if (scmd->cmd_len == 10) {
+ datalength = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= SPRAID_RW_FUA;
+ }
+
+ /* 12-byte READ(0xA8) or WRITE(0xAA) cdb */
+ else if (scmd->cmd_len == 12) {
+ datalength = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= SPRAID_RW_FUA;
+ }
+ /* 16-byte READ(0x88) or WRITE(0x8A) cdb */
+ else if (scmd->cmd_len == 16) {
+ datalength = get_unaligned_be32(&scmd->cmnd[10]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_hi = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= SPRAID_RW_FUA;
+ }
+ /* 32-byte READ(32) or WRITE(32) variable-length cdb (opcode 0x7F) */
+ else if (scmd->cmd_len == 32) {
+ datalength = get_unaligned_be32(&scmd->cmnd[28]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[16]);
+ start_lba_hi = get_unaligned_be32(&scmd->cmnd[12]);
+
+ if (scmd->cmnd[10] & FUA_MASK)
+ control |= SPRAID_RW_FUA;
+ }
+
+ if (unlikely(datalength > U16_MAX || datalength == 0)) {
+ dev_err(hdev->dev,
+ "Invalid IO for illegal transfer data length: %u\n",
+ datalength);
+ WARN_ON(1);
+ }
+
+ rw->slba = cpu_to_le64(((u64)start_lba_hi << 32) | start_lba_lo);
+ /* 0base for nlb */
+ rw->nlb = cpu_to_le16((u16)(datalength - 1));
+ rw->control = cpu_to_le16(control);
+}
+
+static void spraid_setup_nonio_cmd(struct spraid_dev *hdev,
+ struct spraid_scsi_nonio *scsi_nonio,
+ struct scsi_cmnd *scmd)
+{
+ scsi_nonio->buffer_len = cpu_to_le32(scsi_bufflen(scmd));
+
+ switch (scmd->sc_data_direction) {
+ case DMA_NONE:
+ scsi_nonio->opcode = SPRAID_CMD_NONIO_NONE;
+ break;
+ case DMA_TO_DEVICE:
+ scsi_nonio->opcode = SPRAID_CMD_NONIO_TODEV;
+ break;
+ case DMA_FROM_DEVICE:
+ scsi_nonio->opcode = SPRAID_CMD_NONIO_FROMDEV;
+ break;
+ default:
+ dev_err(hdev->dev,
+ "Invalid IO for unsupported data direction: %d\n",
+ scmd->sc_data_direction);
+ WARN_ON(1);
+ }
+}
+
+static void spraid_setup_ioq_cmd(struct spraid_dev *hdev,
+ struct spraid_ioq_command *ioq_cmd,
+ struct scsi_cmnd *scmd)
+{
+ memcpy(ioq_cmd->common.cdb, scmd->cmnd, scmd->cmd_len);
+ ioq_cmd->common.cdb_len = scmd->cmd_len;
+
+ if (spraid_is_rw_scmd(scmd))
+ spraid_setup_rw_cmd(hdev, &ioq_cmd->rw, scmd);
+ else
+ spraid_setup_nonio_cmd(hdev, &ioq_cmd->scsi_nonio, scmd);
+}
+
+static int spraid_init_iod(struct spraid_dev *hdev, struct spraid_iod *iod,
+ struct spraid_ioq_command *ioq_cmd,
+ struct scsi_cmnd *scmd)
+{
+ if (unlikely(!iod->sense)) {
+ dev_err(hdev->dev, "Allocate sense data buffer failed\n");
+ return -ENOMEM;
+ }
+ ioq_cmd->common.sense_addr = cpu_to_le64(iod->sense_dma);
+ ioq_cmd->common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+
+ iod->nsge = 0;
+ iod->npages = -1;
+ iod->use_sgl = 0;
+ iod->sg_drv_mgmt = false;
+ WRITE_ONCE(iod->state, SPRAID_CMD_IDLE);
+
+ return 0;
+}
+
+static void spraid_free_iod_res(struct spraid_dev *hdev, struct spraid_iod *iod)
+{
+ const int last_prp = hdev->page_size / sizeof(__le64) - 1;
+ dma_addr_t dma_addr, next_dma_addr;
+ struct spraid_sgl_desc *sg_list;
+ __le64 *prp_list;
+ void *addr;
+ int i;
+
+ dma_addr = iod->first_dma;
+ if (iod->npages == 0)
+ dma_pool_free(iod->spraidq->prp_small_pool,
+ spraid_iod_list(iod)[0], dma_addr);
+
+ for (i = 0; i < iod->npages; i++) {
+ addr = spraid_iod_list(iod)[i];
+
+ if (iod->use_sgl) {
+ sg_list = addr;
+ next_dma_addr =
+ le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
+ } else {
+ prp_list = addr;
+ next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+ }
+
+ dma_pool_free(hdev->prp_page_pool, addr, dma_addr);
+ dma_addr = next_dma_addr;
+ }
+
+ if (iod->sg_drv_mgmt && iod->sg != iod->inline_sg) {
+ iod->sg_drv_mgmt = false;
+ mempool_free(iod->sg, hdev->iod_mempool);
+ }
+
+ iod->sense = NULL;
+ iod->npages = -1;
+}
+
+static int spraid_io_map_data(struct spraid_dev *hdev, struct spraid_iod *iod,
+ struct scsi_cmnd *scmd,
+ struct spraid_ioq_command *ioq_cmd)
+{
+ int ret;
+
+ iod->nsge = scsi_dma_map(scmd);
+
+ /* No data to DMA; it may be a non-rw scsi command */
+ if (unlikely(iod->nsge == 0))
+ return 0;
+
+ iod->length = scsi_bufflen(scmd);
+ iod->sg = scsi_sglist(scmd);
+ iod->use_sgl = !spraid_is_prp(hdev, scmd, iod->nsge);
+
+ if (iod->use_sgl) {
+ ret = spraid_setup_ioq_cmd_sgl(hdev, scmd, ioq_cmd, iod);
+ } else {
+ ret = spraid_setup_prps(hdev, iod);
+ ioq_cmd->common.dptr.prp1 =
+ cpu_to_le64(sg_dma_address(iod->sg));
+ ioq_cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma);
+ }
+
+ if (ret)
+ scsi_dma_unmap(scmd);
+
+ return ret;
+}
+
+static void spraid_map_status(struct spraid_iod *iod, struct scsi_cmnd *scmd,
+ struct spraid_completion *cqe)
+{
+ scsi_set_resid(scmd, 0);
+
+ switch ((le16_to_cpu(cqe->status) >> 1) & 0x7f) {
+ case FW_STAT_OK:
+ set_host_byte(scmd, DID_OK);
+ break;
+ case FW_STAT_NEED_CHECK:
+ set_host_byte(scmd, DID_OK);
+ scmd->result |= le16_to_cpu(cqe->status) >> 8;
+ if (scmd->result & SAM_STAT_CHECK_CONDITION) {
+ memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ memcpy(scmd->sense_buffer, iod->sense,
+ SCSI_SENSE_BUFFERSIZE);
+ scmd->result =
+ (scmd->result & 0x00ffffff) | (DRIVER_SENSE << 24);
+ }
+ break;
+ case FW_STAT_ABORTED:
+ set_host_byte(scmd, DID_ABORT);
+ break;
+ case FW_STAT_NEED_RETRY:
+ set_host_byte(scmd, DID_REQUEUE);
+ break;
+ default:
+ set_host_byte(scmd, DID_BAD_TARGET);
+ break;
+ }
+}
+
+static inline void spraid_get_tag_from_scmd(struct scsi_cmnd *scmd,
+ u16 *qid, u16 *cid)
+{
+ u32 tag = blk_mq_unique_tag(scmd->request);
+
+ *qid = blk_mq_unique_tag_to_hwq(tag) + 1;
+ *cid = blk_mq_unique_tag_to_tag(tag);
+}
+
+static int spraid_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_dev *hdev = shost_priv(shost);
+ struct scsi_device *sdev = scmd->device;
+ struct spraid_sdev_hostdata *hostdata;
+ struct spraid_ioq_command ioq_cmd;
+ struct spraid_queue *ioq;
+ unsigned long elapsed;
+ u16 hwq, cid;
+ int ret;
+
+ if (unlikely(!scmd)) {
+ dev_err(hdev->dev, "err, scmd is null\n");
+ return 0;
+ }
+
+ if (unlikely(hdev->state != SPRAID_LIVE)) {
+ set_host_byte(scmd, DID_NO_CONNECT);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ if (log_debug_switch)
+ scsi_print_command(scmd);
+
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+ hostdata = sdev->hostdata;
+ ioq = &hdev->queues[hwq];
+ memset(&ioq_cmd, 0, sizeof(ioq_cmd));
+ ioq_cmd.rw.hdid = cpu_to_le32(hostdata->hdid);
+ ioq_cmd.rw.command_id = cid;
+
+ spraid_setup_ioq_cmd(hdev, &ioq_cmd, scmd);
+
+ ret = cid * SCSI_SENSE_BUFFERSIZE;
+ iod->sense = ioq->sense + ret;
+ iod->sense_dma = ioq->sense_dma_addr + ret;
+
+ ret = spraid_init_iod(hdev, iod, &ioq_cmd, scmd);
+ if (unlikely(ret))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ iod->spraidq = ioq;
+ ret = spraid_io_map_data(hdev, iod, scmd, &ioq_cmd);
+ if (unlikely(ret)) {
+ dev_err(hdev->dev, "spraid_io_map_data Err.\n");
+ set_host_byte(scmd, DID_ERROR);
+ scmd->scsi_done(scmd);
+ ret = 0;
+ goto deinit_iod;
+ }
+
+ WRITE_ONCE(iod->state, SPRAID_CMD_IN_FLIGHT);
+ spraid_submit_cmd(ioq, &ioq_cmd);
+ elapsed = jiffies - scmd->jiffies_at_alloc;
+ dev_log_dbg(hdev->dev,
+ "cid[%d] qid[%d] submit IO cost %3ld.%3ld seconds\n",
+ cid, hwq, elapsed / HZ, elapsed % HZ);
+ return 0;
+
+deinit_iod:
+ spraid_free_iod_res(hdev, iod);
+ return ret;
+}
+
+static int spraid_match_dev(struct spraid_dev *hdev, u16 idx,
+ struct scsi_device *sdev)
+{
+ if (SPRAID_DEV_INFO_FLAG_VALID(hdev->devices[idx].flag)) {
+ if (sdev->channel == hdev->devices[idx].channel &&
+ sdev->id == le16_to_cpu(hdev->devices[idx].target) &&
+ sdev->lun < hdev->devices[idx].lun) {
+ dev_info(hdev->dev,
+ "Match device success, channel:"
+ "target:lun[%d:%d:%d]\n",
+ hdev->devices[idx].channel,
+ le16_to_cpu(hdev->devices[idx].target),
+ hdev->devices[idx].lun);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int spraid_slave_alloc(struct scsi_device *sdev)
+{
+ struct spraid_sdev_hostdata *hostdata;
+ struct spraid_dev *hdev;
+ u16 idx;
+
+ hdev = shost_priv(sdev->host);
+ hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
+ if (!hostdata) {
+ dev_err(hdev->dev, "Alloc scsi host data memory failed\n");
+ return -ENOMEM;
+ }
+
+ down_read(&hdev->devices_rwsem);
+ for (idx = 0; idx < le32_to_cpu(hdev->ctrl_info->nd); idx++) {
+ if (spraid_match_dev(hdev, idx, sdev))
+ goto scan_host;
+ }
+ up_read(&hdev->devices_rwsem);
+
+ kfree(hostdata);
+ return -ENXIO;
+
+scan_host:
+ hostdata->hdid = le32_to_cpu(hdev->devices[idx].hdid);
+ hostdata->max_io_kb = le16_to_cpu(hdev->devices[idx].max_io_kb);
+ hostdata->attr = hdev->devices[idx].attr;
+ hostdata->flag = hdev->devices[idx].flag;
+ hostdata->rg_id = 0xff;
+ sdev->hostdata = hostdata;
+ up_read(&hdev->devices_rwsem);
+ return 0;
+}
+
+static void spraid_slave_destroy(struct scsi_device *sdev)
+{
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+static int spraid_slave_configure(struct scsi_device *sdev)
+{
+ u16 idx;
+ unsigned int timeout = scmd_tmout_nonpt * HZ;
+ struct spraid_dev *hdev = shost_priv(sdev->host);
+ struct spraid_sdev_hostdata *hostdata = sdev->hostdata;
+ u32 max_sec = sdev->host->max_sectors;
+
+ if (hostdata) {
+ idx = hostdata->hdid - 1;
+ if (sdev->channel == hdev->devices[idx].channel &&
+ sdev->id == le16_to_cpu(hdev->devices[idx].target) &&
+ sdev->lun < hdev->devices[idx].lun) {
+ if (SPRAID_DEV_INFO_ATTR_PT(hdev->devices[idx].attr))
+ timeout = 30 * HZ;
+ else
+ timeout = scmd_tmout_nonpt * HZ;
+ max_sec = le16_to_cpu(hdev->devices[idx].max_io_kb)
+ << 1;
+ } else {
+ dev_err(hdev->dev,
+ "[%s] err, sdev->channel:id:lun[%d:%d:%lld],"
+ " devices[%d], channel:target:lun[%d:%d:%d]\n",
+ __func__, sdev->channel, sdev->id, sdev->lun,
+ idx, hdev->devices[idx].channel,
+ le16_to_cpu(hdev->devices[idx].target),
+ hdev->devices[idx].lun);
+ }
+ } else {
+ dev_err(hdev->dev, "[%s] err, sdev->hostdata is null\n",
+ __func__);
+ }
+
+ blk_queue_rq_timeout(sdev->request_queue, timeout);
+ sdev->eh_timeout = timeout;
+
+ if ((max_sec == 0) || (max_sec > sdev->host->max_sectors))
+ max_sec = sdev->host->max_sectors;
+
+ dev_info(hdev->dev,
+ "[%s] sdev->channel:id:lun[%d:%d:%lld];"
+ " scmd_timeout[%d]s, maxsec[%d]\n",
+ __func__, sdev->channel, sdev->id,
+ sdev->lun, timeout / HZ, max_sec);
+
+ return 0;
+}
+
+static void spraid_shost_init(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ u8 domain, bus;
+ u32 dev_func;
+
+ domain = pci_domain_nr(pdev->bus);
+ bus = pdev->bus->number;
+ dev_func = pdev->devfn;
+
+ hdev->shost->nr_hw_queues = hdev->online_queues - 1;
+ hdev->shost->can_queue = (hdev->ioq_depth - SPRAID_PTCMDS_PERQ);
+
+ hdev->shost->sg_tablesize = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+ /* 512B per sector */
+ hdev->shost->max_sectors =
+ (1U << ((hdev->ctrl_info->mdts) * 1U) << 12) / 512;
+ hdev->shost->cmd_per_lun = MAX_CMD_PER_DEV;
+ hdev->shost->max_channel =
+ le16_to_cpu(hdev->ctrl_info->max_channel) - 1;
+ hdev->shost->max_id = le32_to_cpu(hdev->ctrl_info->max_tgt_id);
+ hdev->shost->max_lun = le16_to_cpu(hdev->ctrl_info->max_lun);
+
+ hdev->shost->this_id = -1;
+ hdev->shost->unique_id = (domain << 16) | (bus << 8) | dev_func;
+ hdev->shost->max_cmd_len = MAX_CDB_LEN;
+ hdev->shost->hostt->cmd_size = max(spraid_cmd_size(hdev, false, true),
+ spraid_cmd_size(hdev, false, false));
+}
+
+static inline void spraid_host_deinit(struct spraid_dev *hdev)
+{
+ ida_free(&spraid_instance_ida, hdev->instance);
+}
+
+static int spraid_alloc_queue(struct spraid_dev *hdev, u16 qid, u16 depth)
+{
+ struct spraid_queue *spraidq = &hdev->queues[qid];
+ int ret = 0;
+
+ if (hdev->queue_count > qid) {
+ dev_info(hdev->dev, "[%s] warn: queue[%d] is exist\n",
+ __func__, qid);
+ return 0;
+ }
+
+ spraidq->cqes = dma_alloc_coherent(hdev->dev, CQ_SIZE(depth),
+ &spraidq->cq_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!spraidq->cqes)
+ return -ENOMEM;
+
+ spraidq->sq_cmds = dma_alloc_coherent(hdev->dev, SQ_SIZE(qid, depth),
+ &spraidq->sq_dma_addr,
+ GFP_KERNEL);
+ if (!spraidq->sq_cmds) {
+ ret = -ENOMEM;
+ goto free_cqes;
+ }
+
+ spin_lock_init(&spraidq->sq_lock);
+ spin_lock_init(&spraidq->cq_lock);
+ spraidq->hdev = hdev;
+ spraidq->q_depth = depth;
+ spraidq->qid = qid;
+ spraidq->cq_vector = -1;
+ hdev->queue_count++;
+
+ /* alloc sense buffer */
+ spraidq->sense = dma_alloc_coherent(hdev->dev, SENSE_SIZE(depth),
+ &spraidq->sense_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!spraidq->sense) {
+ ret = -ENOMEM;
+ goto free_sq_cmds;
+ }
+
+ return 0;
+
+free_sq_cmds:
+ dma_free_coherent(hdev->dev, SQ_SIZE(qid, depth),
+ (void *)spraidq->sq_cmds, spraidq->sq_dma_addr);
+free_cqes:
+ dma_free_coherent(hdev->dev, CQ_SIZE(depth), (void *)spraidq->cqes,
+ spraidq->cq_dma_addr);
+ return ret;
+}
+
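+/*
+ * Poll CSTS.RDY until it matches the requested state, bounded by the
+ * timeout advertised in the controller capability register.
+ */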
+static int spraid_wait_ready(struct spraid_dev *hdev, u64 cap, bool enabled)
+{
+ unsigned long timeout =
+ ((SPRAID_CAP_TIMEOUT(cap) + 1) * SPRAID_CAP_TIMEOUT_UNIT_MS) + jiffies;
+ u32 bit = enabled ? SPRAID_CSTS_RDY : 0;
+
+ while ((readl(hdev->bar + SPRAID_REG_CSTS) & SPRAID_CSTS_RDY) != bit) {
+ usleep_range(1000, 2000);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(hdev->dev, "Device not ready; aborting %s\n",
+ enabled ? "initialisation" : "reset");
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static int spraid_shutdown_ctrl(struct spraid_dev *hdev)
+{
+ unsigned long timeout = hdev->ctrl_info->rtd3e + jiffies;
+
+ hdev->ctrl_config &= ~SPRAID_CC_SHN_MASK;
+ hdev->ctrl_config |= SPRAID_CC_SHN_NORMAL;
+ writel(hdev->ctrl_config, hdev->bar + SPRAID_REG_CC);
+
+ while ((readl(hdev->bar + SPRAID_REG_CSTS) & SPRAID_CSTS_SHST_MASK) !=
+ SPRAID_CSTS_SHST_CMPLT) {
+ msleep(100);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ if (time_after(jiffies, timeout)) {
+ dev_err(hdev->dev,
+ "Device shutdown incomplete; abort shutdown\n");
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static int spraid_disable_ctrl(struct spraid_dev *hdev)
+{
+ hdev->ctrl_config &= ~SPRAID_CC_SHN_MASK;
+ hdev->ctrl_config &= ~SPRAID_CC_ENABLE;
+ writel(hdev->ctrl_config, hdev->bar + SPRAID_REG_CC);
+
+ return spraid_wait_ready(hdev, hdev->cap, false);
+}
+
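+/*
+ * Negotiate the controller page size within the device MPSMIN/MPSMAX
+ * window, program CC (page size, arbitration, queue entry sizes) and
+ * set CC.EN, then wait for CSTS.RDY.
+ */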
+static int spraid_enable_ctrl(struct spraid_dev *hdev)
+{
+ u64 cap = hdev->cap;
+ u32 dev_page_min = SPRAID_CAP_MPSMIN(cap) + 12;
+ u32 page_shift = PAGE_SHIFT;
+
+ if (page_shift < dev_page_min) {
+ dev_err(hdev->dev,
+ "Minimum device page size[%u], too large for host[%u]\n",
+ 1U << dev_page_min, 1U << page_shift);
+ return -ENODEV;
+ }
+
+ page_shift = min_t(unsigned int, SPRAID_CAP_MPSMAX(cap) + 12,
+ PAGE_SHIFT);
+ hdev->page_size = 1U << page_shift;
+
+ hdev->ctrl_config = SPRAID_CC_CSS_NVM;
+ hdev->ctrl_config |= (page_shift - 12) << SPRAID_CC_MPS_SHIFT;
+ hdev->ctrl_config |= SPRAID_CC_AMS_RR | SPRAID_CC_SHN_NONE;
+ hdev->ctrl_config |= SPRAID_CC_IOSQES | SPRAID_CC_IOCQES;
+ hdev->ctrl_config |= SPRAID_CC_ENABLE;
+ writel(hdev->ctrl_config, hdev->bar + SPRAID_REG_CC);
+
+ return spraid_wait_ready(hdev, cap, true);
+}
+
+static void spraid_init_queue(struct spraid_queue *spraidq, u16 qid)
+{
+ struct spraid_dev *hdev = spraidq->hdev;
+
+ memset((void *)spraidq->cqes, 0, CQ_SIZE(spraidq->q_depth));
+
+ spraidq->sq_tail = 0;
+ spraidq->cq_head = 0;
+ spraidq->cq_phase = 1;
+ spraidq->q_db = &hdev->dbs[qid * 2 * hdev->db_stride];
+ spraidq->prp_small_pool = hdev->prp_small_pool[qid % small_pool_num];
+ hdev->online_queues++;
+}
+
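+/*
+ * A completion entry is new when its phase bit matches the queue's
+ * current cq_phase; the phase flips each time cq_head wraps.
+ */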
+static inline bool spraid_cqe_pending(struct spraid_queue *spraidq)
+{
+ return (le16_to_cpu(spraidq->cqes[spraidq->cq_head].status) & 1) ==
+ spraidq->cq_phase;
+}
+
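+/*
+ * For HBA cards the REPORT ZONES payload comes back in ATA
+ * little-endian layout; convert the header and each 64-byte zone
+ * descriptor to the big-endian SCSI format in place.
+ */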
+static void spraid_sata_report_zone_handle(struct scsi_cmnd *scmd,
+ struct spraid_iod *iod)
+{
+ int i = 0;
+ unsigned int bytes = 0;
+ struct scatterlist *sg = scsi_sglist(scmd);
+
+ scsi_for_each_sg(scmd, sg, iod->nsge, i) {
+ unsigned int offset = 0;
+
+ if (bytes == 0) {
+ char *hdr;
+ u32 list_length;
+ u64 max_lba, opt_lba;
+ u16 same;
+
+ hdr = sg_virt(sg);
+
+ list_length = get_unaligned_le32(&hdr[0]);
+ same = get_unaligned_le16(&hdr[4]);
+ max_lba = get_unaligned_le64(&hdr[8]);
+ opt_lba = get_unaligned_le64(&hdr[16]);
+ put_unaligned_be32(list_length, &hdr[0]);
+ hdr[4] = same & 0xf;
+ put_unaligned_be64(max_lba, &hdr[8]);
+ put_unaligned_be64(opt_lba, &hdr[16]);
+ offset += 64;
+ bytes += 64;
+ }
+ while (offset < sg_dma_len(sg)) {
+ char *rec;
+ u8 cond, type, non_seq, reset;
+ u64 size, start, wp;
+
+ rec = sg_virt(sg) + offset;
+ type = rec[0] & 0xf;
+ cond = (rec[1] >> 4) & 0xf;
+ non_seq = (rec[1] & 2);
+ reset = (rec[1] & 1);
+ size = get_unaligned_le64(&rec[8]);
+ start = get_unaligned_le64(&rec[16]);
+ wp = get_unaligned_le64(&rec[24]);
+ rec[0] = type;
+ rec[1] = (cond << 4) | non_seq | reset;
+ put_unaligned_be64(size, &rec[8]);
+ put_unaligned_be64(start, &rec[16]);
+ put_unaligned_be64(wp, &rec[24]);
+ WARN_ON(offset + 64 > sg_dma_len(sg));
+ offset += 64;
+ bytes += 64;
+ }
+ }
+}
+
+static inline void spraid_handle_ata_cmd(struct spraid_dev *hdev,
+ struct scsi_cmnd *scmd,
+ struct spraid_iod *iod)
+{
+ if (hdev->ctrl_info->card_type != SPRAID_CARD_HBA)
+ return;
+
+ switch (scmd->cmnd[0]) {
+ case ZBC_IN:
+ dev_info(hdev->dev, "[%s] process report zone\n", __func__);
+ spraid_sata_report_zone_handle(scmd, iod);
+ break;
+ default:
+ break;
+ }
+}
+
+static void spraid_complete_ioq_cmnd(struct spraid_queue *ioq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = ioq->hdev;
+ struct blk_mq_tags *tags;
+ struct scsi_cmnd *scmd;
+ struct spraid_iod *iod;
+ struct request *req;
+ unsigned long elapsed;
+
+ tags = hdev->shost->tag_set.tags[ioq->qid - 1];
+ req = blk_mq_tag_to_rq(tags, cqe->cmd_id);
+ if (unlikely(!req || !blk_mq_request_started(req))) {
+ dev_warn(hdev->dev, "Invalid id %d completed on queue %d\n",
+ cqe->cmd_id, ioq->qid);
+ return;
+ }
+
+ scmd = blk_mq_rq_to_pdu(req);
+ iod = scsi_cmd_priv(scmd);
+
+ elapsed = jiffies - scmd->jiffies_at_alloc;
+ dev_log_dbg(hdev->dev,
+ "cid[%d] qid[%d] finish IO cost %3ld.%3ld seconds\n",
+ cqe->cmd_id, ioq->qid, elapsed / HZ, elapsed % HZ);
+
+ if (cmpxchg(&iod->state, SPRAID_CMD_IN_FLIGHT, SPRAID_CMD_COMPLETE) !=
+ SPRAID_CMD_IN_FLIGHT) {
+ dev_warn(hdev->dev,
+ "cid[%d] qid[%d] enters abnormal handler;"
+ " cost %3ld.%3ld seconds\n",
+ cqe->cmd_id, ioq->qid, elapsed / HZ, elapsed % HZ);
+ WRITE_ONCE(iod->state, SPRAID_CMD_TMO_COMPLETE);
+
+ if (iod->nsge) {
+ iod->nsge = 0;
+ scsi_dma_unmap(scmd);
+ }
+ spraid_free_iod_res(hdev, iod);
+
+ return;
+ }
+
+ spraid_handle_ata_cmd(hdev, scmd, iod);
+
+ spraid_map_status(iod, scmd, cqe);
+ if (iod->nsge) {
+ iod->nsge = 0;
+ scsi_dma_unmap(scmd);
+ }
+ spraid_free_iod_res(hdev, iod);
+ scmd->scsi_done(scmd);
+}
+
+static void spraid_complete_adminq_cmnd(struct spraid_queue *adminq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = adminq->hdev;
+ struct spraid_cmd *adm_cmd;
+
+ adm_cmd = hdev->adm_cmds + cqe->cmd_id;
+ if (unlikely(adm_cmd->state == SPRAID_CMD_IDLE)) {
+ dev_warn(adminq->hdev->dev,
+ "Invalid id %d completed on queue %d\n",
+ cqe->cmd_id, le16_to_cpu(cqe->sq_id));
+ return;
+ }
+
+ adm_cmd->status = le16_to_cpu(cqe->status) >> 1;
+ adm_cmd->result0 = le32_to_cpu(cqe->result);
+ adm_cmd->result1 = le32_to_cpu(cqe->result1);
+
+ complete(&adm_cmd->cmd_done);
+}
+
+static void spraid_send_aen(struct spraid_dev *hdev, u16 cid);
+
+static void spraid_complete_aen(struct spraid_queue *spraidq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = spraidq->hdev;
+ u32 result = le32_to_cpu(cqe->result);
+
+ dev_info(hdev->dev, "rcv aen, cid[%d], status[0x%x], result[0x%x]\n",
+ cqe->cmd_id, le16_to_cpu(cqe->status) >> 1, result);
+
+ spraid_send_aen(hdev, cqe->cmd_id);
+
+ if ((le16_to_cpu(cqe->status) >> 1) != SPRAID_SC_SUCCESS)
+ return;
+ switch (result & 0x7) {
+ case SPRAID_AEN_NOTICE:
+ spraid_handle_aen_notice(hdev, result);
+ break;
+ case SPRAID_AEN_VS:
+ spraid_handle_aen_vs(hdev, result, le32_to_cpu(cqe->result1));
+ break;
+ default:
+ dev_warn(hdev->dev, "Unsupported async event type: %u\n",
+ result & 0x7);
+ break;
+ }
+}
+
+static void spraid_complete_ioq_sync_cmnd(struct spraid_queue *ioq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = ioq->hdev;
+ struct spraid_cmd *ptcmd;
+
+ ptcmd = hdev->ioq_ptcmds + (ioq->qid - 1) * SPRAID_PTCMDS_PERQ +
+ cqe->cmd_id - SPRAID_IO_BLK_MQ_DEPTH;
+
+ ptcmd->status = le16_to_cpu(cqe->status) >> 1;
+ ptcmd->result0 = le32_to_cpu(cqe->result);
+ ptcmd->result1 = le32_to_cpu(cqe->result1);
+
+ complete(&ptcmd->cmd_done);
+}
+
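+/*
+ * Demultiplex a completion by its command id: on the admin queue, ids
+ * at or above SPRAID_AQ_BLK_MQ_DEPTH are async events; on io queues,
+ * ids at or above SPRAID_IO_BLK_MQ_DEPTH are internal passthrough
+ * commands; everything else completes a normal admin or scsi command.
+ */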
+static inline void spraid_handle_cqe(struct spraid_queue *spraidq, u16 idx)
+{
+ struct spraid_completion *cqe = &spraidq->cqes[idx];
+ struct spraid_dev *hdev = spraidq->hdev;
+
+ if (unlikely(cqe->cmd_id >= spraidq->q_depth)) {
+ dev_err(hdev->dev,
+ "Invalid command id[%d] completed on queue %d\n",
+ cqe->cmd_id, cqe->sq_id);
+ return;
+ }
+
+ dev_log_dbg(hdev->dev, "cid[%d] qid[%d];"
+ " result[0x%x], sq_id[%d], status[0x%x]\n",
+ cqe->cmd_id, spraidq->qid, le32_to_cpu(cqe->result),
+ le16_to_cpu(cqe->sq_id), le16_to_cpu(cqe->status));
+
+ if (unlikely(spraidq->qid == 0
+ && cqe->cmd_id >= SPRAID_AQ_BLK_MQ_DEPTH)) {
+ spraid_complete_aen(spraidq, cqe);
+ return;
+ }
+
+ if (unlikely(spraidq->qid && cqe->cmd_id >= SPRAID_IO_BLK_MQ_DEPTH)) {
+ spraid_complete_ioq_sync_cmnd(spraidq, cqe);
+ return;
+ }
+
+ if (spraidq->qid)
+ spraid_complete_ioq_cmnd(spraidq, cqe);
+ else
+ spraid_complete_adminq_cmnd(spraidq, cqe);
+}
+
+static void spraid_complete_cqes(struct spraid_queue *spraidq,
+ u16 start, u16 end)
+{
+ while (start != end) {
+ spraid_handle_cqe(spraidq, start);
+ if (++start == spraidq->q_depth)
+ start = 0;
+ }
+}
+
+static inline void spraid_update_cq_head(struct spraid_queue *spraidq)
+{
+ if (++spraidq->cq_head == spraidq->q_depth) {
+ spraidq->cq_head = 0;
+ spraidq->cq_phase = !spraidq->cq_phase;
+ }
+}
+
+static inline bool spraid_process_cq(struct spraid_queue *spraidq,
+ u16 *start, u16 *end, int tag)
+{
+ bool found = false;
+
+ *start = spraidq->cq_head;
+ while (!found && spraid_cqe_pending(spraidq)) {
+ if (spraidq->cqes[spraidq->cq_head].cmd_id == tag)
+ found = true;
+ spraid_update_cq_head(spraidq);
+ }
+ *end = spraidq->cq_head;
+
+ if (*start != *end)
+ writel(spraidq->cq_head,
+ spraidq->q_db + spraidq->hdev->db_stride);
+
+ return found;
+}
+
+static bool spraid_poll_cq(struct spraid_queue *spraidq, int cid)
+{
+ u16 start, end;
+ bool found;
+
+ if (!spraid_cqe_pending(spraidq))
+ return 0;
+
+ spin_lock_irq(&spraidq->cq_lock);
+ found = spraid_process_cq(spraidq, &start, &end, cid);
+ spin_unlock_irq(&spraidq->cq_lock);
+
+ spraid_complete_cqes(spraidq, start, end);
+ return found;
+}
+
+static irqreturn_t spraid_irq(int irq, void *data)
+{
+ struct spraid_queue *spraidq = data;
+ irqreturn_t ret = IRQ_NONE;
+ u16 start, end;
+
+ spin_lock(&spraidq->cq_lock);
+ if (spraidq->cq_head != spraidq->last_cq_head)
+ ret = IRQ_HANDLED;
+
+ spraid_process_cq(spraidq, &start, &end, -1);
+ spraidq->last_cq_head = spraidq->cq_head;
+ spin_unlock(&spraidq->cq_lock);
+
+ if (start != end) {
+ spraid_complete_cqes(spraidq, start, end);
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+static int spraid_setup_admin_queue(struct spraid_dev *hdev)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ u32 aqa;
+ int ret;
+
+ dev_info(hdev->dev, "[%s] start disable ctrl\n", __func__);
+
+ ret = spraid_disable_ctrl(hdev);
+ if (ret)
+ return ret;
+
+ ret = spraid_alloc_queue(hdev, 0, SPRAID_AQ_DEPTH);
+ if (ret)
+ return ret;
+
+ aqa = adminq->q_depth - 1;
+ aqa |= aqa << 16;
+ writel(aqa, hdev->bar + SPRAID_REG_AQA);
+ lo_hi_writeq(adminq->sq_dma_addr, hdev->bar + SPRAID_REG_ASQ);
+ lo_hi_writeq(adminq->cq_dma_addr, hdev->bar + SPRAID_REG_ACQ);
+
+ dev_info(hdev->dev, "[%s] start enable ctrl\n", __func__);
+
+ ret = spraid_enable_ctrl(hdev);
+ if (ret) {
+ ret = -ENODEV;
+ goto free_queue;
+ }
+
+ adminq->cq_vector = 0;
+ spraid_init_queue(adminq, 0);
+ ret = pci_request_irq(hdev->pdev, adminq->cq_vector, spraid_irq, NULL,
+ adminq, "spraid%d_q%d",
+ hdev->instance, adminq->qid);
+
+ if (ret) {
+ adminq->cq_vector = -1;
+ hdev->online_queues--;
+ goto free_queue;
+ }
+
+ dev_info(hdev->dev, "[%s] success, queuecount:[%d], onlinequeue:[%d]\n",
+ __func__, hdev->queue_count, hdev->online_queues);
+
+ return 0;
+
+free_queue:
+ spraid_free_queue(adminq);
+ return ret;
+}
+
+static u32 spraid_bar_size(struct spraid_dev *hdev, u32 nr_ioqs)
+{
+ return (SPRAID_REG_DBS + ((nr_ioqs + 1) * 8 * hdev->db_stride));
+}
+
+static int spraid_alloc_admin_cmds(struct spraid_dev *hdev)
+{
+ int i;
+
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+ spin_lock_init(&hdev->adm_cmd_lock);
+
+ hdev->adm_cmds = kcalloc_node(SPRAID_AQ_BLK_MQ_DEPTH,
+ sizeof(struct spraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
+
+ if (!hdev->adm_cmds) {
+ dev_err(hdev->dev, "Alloc admin cmds failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < SPRAID_AQ_BLK_MQ_DEPTH; i++) {
+ hdev->adm_cmds[i].qid = 0;
+ hdev->adm_cmds[i].cid = i;
+ list_add_tail(&(hdev->adm_cmds[i].list), &hdev->adm_cmd_list);
+ }
+
+ dev_info(hdev->dev, "Alloc admin cmds success, num[%d]\n",
+ SPRAID_AQ_BLK_MQ_DEPTH);
+
+ return 0;
+}
+
+static void spraid_free_admin_cmds(struct spraid_dev *hdev)
+{
+ kfree(hdev->adm_cmds);
+ hdev->adm_cmds = NULL;
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+}
+
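+/*
+ * Internal admin and io passthrough commands are preallocated and
+ * recycled through free lists; get/put move a command between the
+ * list and the in-flight state under the matching lock.
+ */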
+static struct spraid_cmd *spraid_get_cmd(struct spraid_dev *hdev,
+ enum spraid_cmd_type type)
+{
+ struct spraid_cmd *cmd = NULL;
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
+
+ if (type == SPRAID_CMD_IOPT) {
+ head = &hdev->ioq_pt_list;
+ slock = &hdev->ioq_pt_lock;
+ }
+
+ spin_lock_irqsave(slock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(slock, flags);
+ dev_err(hdev->dev, "err, cmd[%d] list empty\n", type);
+ return NULL;
+ }
+ cmd = list_entry(head->next, struct spraid_cmd, list);
+ list_del_init(&cmd->list);
+ spin_unlock_irqrestore(slock, flags);
+
+ WRITE_ONCE(cmd->state, SPRAID_CMD_IN_FLIGHT);
+
+ return cmd;
+}
+
+static void spraid_put_cmd(struct spraid_dev *hdev, struct spraid_cmd *cmd,
+ enum spraid_cmd_type type)
+{
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
+
+ if (type == SPRAID_CMD_IOPT) {
+ head = &hdev->ioq_pt_list;
+ slock = &hdev->ioq_pt_lock;
+ }
+
+ spin_lock_irqsave(slock, flags);
+ WRITE_ONCE(cmd->state, SPRAID_CMD_IDLE);
+ list_add_tail(&cmd->list, head);
+ spin_unlock_irqrestore(slock, flags);
+}
+
+
+static int spraid_submit_admin_sync_cmd(struct spraid_dev *hdev,
+ struct spraid_admin_command *cmd,
+ u32 *result0, u32 *result1, u32 timeout)
+{
+ struct spraid_cmd *adm_cmd = spraid_get_cmd(hdev, SPRAID_CMD_ADM);
+
+ if (!adm_cmd) {
+ dev_err(hdev->dev, "err, get admin cmd failed\n");
+ return -EFAULT;
+ }
+
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+ init_completion(&adm_cmd->cmd_done);
+
+ cmd->common.command_id = adm_cmd->cid;
+ spraid_submit_cmd(&hdev->queues[0], cmd);
+
+ if (!wait_for_completion_timeout(&adm_cmd->cmd_done, timeout)) {
+ dev_err(hdev->dev, "[%s] cid[%d] qid[%d] timeout;"
+ " opcode[0x%x] subopcode[0x%x]\n",
+ __func__, adm_cmd->cid, adm_cmd->qid,
+ cmd->usr_cmd.opcode, cmd->usr_cmd.info_0.subopcode);
+ WRITE_ONCE(adm_cmd->state, SPRAID_CMD_TIMEOUT);
+ spraid_put_cmd(hdev, adm_cmd, SPRAID_CMD_ADM);
+ return -EINVAL;
+ }
+
+ if (result0)
+ *result0 = adm_cmd->result0;
+ if (result1)
+ *result1 = adm_cmd->result1;
+
+ spraid_put_cmd(hdev, adm_cmd, SPRAID_CMD_ADM);
+
+ return adm_cmd->status;
+}
+
+static int spraid_create_cq(struct spraid_dev *hdev, u16 qid,
+ struct spraid_queue *spraidq, u16 cq_vector)
+{
+ struct spraid_admin_command admin_cmd;
+ int flags = SPRAID_QUEUE_PHYS_CONTIG | SPRAID_CQ_IRQ_ENABLED;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.create_cq.opcode = SPRAID_ADMIN_CREATE_CQ;
+ admin_cmd.create_cq.prp1 = cpu_to_le64(spraidq->cq_dma_addr);
+ admin_cmd.create_cq.cqid = cpu_to_le16(qid);
+ admin_cmd.create_cq.qsize = cpu_to_le16(spraidq->q_depth - 1);
+ admin_cmd.create_cq.cq_flags = cpu_to_le16(flags);
+ admin_cmd.create_cq.irq_vector = cpu_to_le16(cq_vector);
+
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static int spraid_create_sq(struct spraid_dev *hdev, u16 qid,
+ struct spraid_queue *spraidq)
+{
+ struct spraid_admin_command admin_cmd;
+ int flags = SPRAID_QUEUE_PHYS_CONTIG;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.create_sq.opcode = SPRAID_ADMIN_CREATE_SQ;
+ admin_cmd.create_sq.prp1 = cpu_to_le64(spraidq->sq_dma_addr);
+ admin_cmd.create_sq.sqid = cpu_to_le16(qid);
+ admin_cmd.create_sq.qsize = cpu_to_le16(spraidq->q_depth - 1);
+ admin_cmd.create_sq.sq_flags = cpu_to_le16(flags);
+ admin_cmd.create_sq.cqid = cpu_to_le16(qid);
+
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static void spraid_free_queue(struct spraid_queue *spraidq)
+{
+ struct spraid_dev *hdev = spraidq->hdev;
+
+ hdev->queue_count--;
+ dma_free_coherent(hdev->dev, CQ_SIZE(spraidq->q_depth),
+ (void *)spraidq->cqes, spraidq->cq_dma_addr);
+ dma_free_coherent(hdev->dev, SQ_SIZE(spraidq->qid, spraidq->q_depth),
+ spraidq->sq_cmds, spraidq->sq_dma_addr);
+ dma_free_coherent(hdev->dev, SENSE_SIZE(spraidq->q_depth),
+ spraidq->sense, spraidq->sense_dma_addr);
+}
+
+static void spraid_free_admin_queue(struct spraid_dev *hdev)
+{
+ spraid_free_queue(&hdev->queues[0]);
+}
+
+static void spraid_free_io_queues(struct spraid_dev *hdev)
+{
+ int i;
+
+ for (i = hdev->queue_count - 1; i >= 1; i--)
+ spraid_free_queue(&hdev->queues[i]);
+}
+
+static int spraid_delete_queue(struct spraid_dev *hdev, u8 op, u16 id)
+{
+ struct spraid_admin_command admin_cmd;
+ int ret;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.delete_queue.opcode = op;
+ admin_cmd.delete_queue.qid = cpu_to_le16(id);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+
+ if (ret)
+ dev_err(hdev->dev, "Delete %s:[%d] failed\n",
+ (op == SPRAID_ADMIN_DELETE_CQ) ? "cq" : "sq", id);
+
+ return ret;
+}
+
+static int spraid_delete_cq(struct spraid_dev *hdev, u16 cqid)
+{
+ return spraid_delete_queue(hdev, SPRAID_ADMIN_DELETE_CQ, cqid);
+}
+
+static int spraid_delete_sq(struct spraid_dev *hdev, u16 sqid)
+{
+ return spraid_delete_queue(hdev, SPRAID_ADMIN_DELETE_SQ, sqid);
+}
+
+static int spraid_create_queue(struct spraid_queue *spraidq, u16 qid)
+{
+ struct spraid_dev *hdev = spraidq->hdev;
+ u16 cq_vector;
+ int ret;
+
+ cq_vector = (hdev->num_vecs == 1) ? 0 : qid;
+ ret = spraid_create_cq(hdev, qid, spraidq, cq_vector);
+ if (ret)
+ return ret;
+
+ ret = spraid_create_sq(hdev, qid, spraidq);
+ if (ret)
+ goto delete_cq;
+
+ spraid_init_queue(spraidq, qid);
+ spraidq->cq_vector = cq_vector;
+
+ ret = pci_request_irq(hdev->pdev, cq_vector, spraid_irq, NULL,
+ spraidq, "spraid%d_q%d", hdev->instance, qid);
+
+ if (ret) {
+ dev_err(hdev->dev, "Request queue[%d] irq failed\n", qid);
+ goto delete_sq;
+ }
+
+ return 0;
+
+delete_sq:
+ spraidq->cq_vector = -1;
+ hdev->online_queues--;
+ spraid_delete_sq(hdev, qid);
+delete_cq:
+ spraid_delete_cq(hdev, qid);
+
+ return ret;
+}
+
+static int spraid_create_io_queues(struct spraid_dev *hdev)
+{
+ u32 i, max;
+ int ret = 0;
+
+ max = min(hdev->max_qid, hdev->queue_count - 1);
+ for (i = hdev->online_queues; i <= max; i++) {
+ ret = spraid_create_queue(&hdev->queues[i], i);
+ if (ret) {
+ dev_err(hdev->dev, "Create queue[%d] failed\n", i);
+ break;
+ }
+ }
+
+ dev_info(hdev->dev, "[%s] queue_count[%d], online_queue[%d]",
+ __func__, hdev->queue_count, hdev->online_queues);
+
+ return ret >= 0 ? 0 : ret;
+}
+
+static int spraid_set_features(struct spraid_dev *hdev, u32 fid,
+ u32 dword11, void *buffer,
+ size_t buflen, u32 *result)
+{
+ struct spraid_admin_command admin_cmd;
+ int ret;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+
+ if (buffer && buflen) {
+ data_ptr = dma_alloc_coherent(hdev->dev, buflen,
+ &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memcpy(data_ptr, buffer, buflen);
+ }
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.features.opcode = SPRAID_ADMIN_SET_FEATURES;
+ admin_cmd.features.fid = cpu_to_le32(fid);
+ admin_cmd.features.dword11 = cpu_to_le32(dword11);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, result, NULL, 0);
+
+ if (data_ptr)
+ dma_free_coherent(hdev->dev, buflen, data_ptr, data_dma);
+
+ return ret;
+}
+
+static int spraid_configure_timestamp(struct spraid_dev *hdev)
+{
+ __le64 ts;
+ int ret;
+
+ ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
+ ret = spraid_set_features(hdev, SPRAID_FEAT_TIMESTAMP,
+ 0, &ts, sizeof(ts), NULL);
+
+ if (ret)
+ dev_err(hdev->dev, "set timestamp failed: %d\n", ret);
+ return ret;
+}
+
+static int spraid_set_queue_cnt(struct spraid_dev *hdev, u32 *cnt)
+{
+ u32 q_cnt = (*cnt - 1) | ((*cnt - 1) << 16);
+ u32 nr_ioqs, result;
+ int status;
+
+ status = spraid_set_features(hdev, SPRAID_FEAT_NUM_QUEUES,
+ q_cnt, NULL, 0, &result);
+ if (status) {
+ dev_err(hdev->dev, "Set queue count failed, status: %d\n",
+ status);
+ return -EIO;
+ }
+
+ nr_ioqs = min(result & 0xffff, result >> 16) + 1;
+ *cnt = min(*cnt, nr_ioqs);
+ if (*cnt == 0) {
+ dev_err(hdev->dev, "Illegal queue count: zero\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int spraid_setup_io_queues(struct spraid_dev *hdev)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ struct pci_dev *pdev = hdev->pdev;
+ u32 nr_ioqs = num_online_cpus();
+ u32 i, size;
+ int ret;
+
+ struct irq_affinity affd = {
+ .pre_vectors = 1
+ };
+
+ ret = spraid_set_queue_cnt(hdev, &nr_ioqs);
+ if (ret < 0)
+ return ret;
+
+ size = spraid_bar_size(hdev, nr_ioqs);
+ ret = spraid_remap_bar(hdev, size);
+ if (ret)
+ return -ENOMEM;
+
+ adminq->q_db = hdev->dbs;
+
+ pci_free_irq(pdev, 0, adminq);
+ pci_free_irq_vectors(pdev);
+
+ ret = pci_alloc_irq_vectors_affinity(pdev, 1, (nr_ioqs + 1),
+ PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+ if (ret <= 0)
+ return -EIO;
+
+ hdev->num_vecs = ret;
+
+ hdev->max_qid = max(ret - 1, 1);
+
+ ret = pci_request_irq(pdev, adminq->cq_vector, spraid_irq, NULL,
+ adminq, "spraid%d_q%d",
+ hdev->instance, adminq->qid);
+ if (ret) {
+ dev_err(hdev->dev, "Request admin irq failed\n");
+ adminq->cq_vector = -1;
+ return ret;
+ }
+
+ for (i = hdev->queue_count; i <= hdev->max_qid; i++) {
+ ret = spraid_alloc_queue(hdev, i, hdev->ioq_depth);
+ if (ret)
+ break;
+ }
+ dev_info(hdev->dev, "[%s] max_qid: %d, queue_count: %d;"
+ " online_queue: %d, ioq_depth: %d\n",
+ __func__, hdev->max_qid, hdev->queue_count,
+ hdev->online_queues, hdev->ioq_depth);
+
+ return spraid_create_io_queues(hdev);
+}
+
+static void spraid_delete_io_queues(struct spraid_dev *hdev)
+{
+ u16 queues = hdev->online_queues - 1;
+ u8 opcode = SPRAID_ADMIN_DELETE_SQ;
+ u16 i, pass;
+
+ if (!pci_device_is_present(hdev->pdev)) {
+ dev_err(hdev->dev,
+ "pci device is not present, skip disabling io queues\n");
+ return;
+ }
+
+ if (hdev->online_queues < 2) {
+ dev_err(hdev->dev, "[%s] err, io queue has been delete\n",
+ __func__);
+ return;
+ }
+
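+ /* two passes: delete all SQs first, then their CQs */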
+ for (pass = 0; pass < 2; pass++) {
+ for (i = queues; i > 0; i--)
+ if (spraid_delete_queue(hdev, opcode, i))
+ break;
+
+ opcode = SPRAID_ADMIN_DELETE_CQ;
+ }
+}
+
+static void spraid_remove_io_queues(struct spraid_dev *hdev)
+{
+ spraid_delete_io_queues(hdev);
+ spraid_free_io_queues(hdev);
+}
+
+static void spraid_pci_disable(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ u32 i;
+
+ for (i = 0; i < hdev->online_queues; i++)
+ pci_free_irq(pdev, hdev->queues[i].cq_vector, &hdev->queues[i]);
+ pci_free_irq_vectors(pdev);
+ if (pci_is_enabled(pdev)) {
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ }
+ hdev->online_queues = 0;
+}
+
+static void spraid_disable_admin_queue(struct spraid_dev *hdev, bool shutdown)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ u16 start, end;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ if (shutdown)
+ spraid_shutdown_ctrl(hdev);
+ else
+ spraid_disable_ctrl(hdev);
+ }
+
+ if (hdev->queue_count == 0) {
+ dev_err(hdev->dev, "[%s] err, admin queue has been delete\n",
+ __func__);
+ return;
+ }
+
+ spin_lock_irq(&adminq->cq_lock);
+ spraid_process_cq(adminq, &start, &end, -1);
+ spin_unlock_irq(&adminq->cq_lock);
+
+ spraid_complete_cqes(adminq, start, end);
+ spraid_free_admin_queue(hdev);
+}
+
+static int spraid_create_dma_pools(struct spraid_dev *hdev)
+{
+ int i;
+ char poolname[20] = { 0 };
+
+ hdev->prp_page_pool = dma_pool_create("prp list page", hdev->dev,
+ PAGE_SIZE, PAGE_SIZE, 0);
+
+ if (!hdev->prp_page_pool) {
+ dev_err(hdev->dev, "create prp_page_pool failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < small_pool_num; i++) {
+ sprintf(poolname, "prp_list_256_%d", i);
+ hdev->prp_small_pool[i] =
+ dma_pool_create(poolname, hdev->dev, SMALL_POOL_SIZE,
+ SMALL_POOL_SIZE, 0);
+
+ if (!hdev->prp_small_pool[i]) {
+ dev_err(hdev->dev, "create prp_small_pool %d failed\n",
+ i);
+ goto destroy_prp_small_pool;
+ }
+ }
+
+ return 0;
+
+destroy_prp_small_pool:
+ while (i > 0)
+ dma_pool_destroy(hdev->prp_small_pool[--i]);
+ dma_pool_destroy(hdev->prp_page_pool);
+
+ return -ENOMEM;
+}
+
+static void spraid_destroy_dma_pools(struct spraid_dev *hdev)
+{
+ int i;
+
+ for (i = 0; i < small_pool_num; i++)
+ dma_pool_destroy(hdev->prp_small_pool[i]);
+ dma_pool_destroy(hdev->prp_page_pool);
+}
+
+static int spraid_get_dev_list(struct spraid_dev *hdev,
+ struct spraid_dev_info *devices)
+{
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ struct spraid_admin_command admin_cmd;
+ struct spraid_dev_list *list_buf;
+ dma_addr_t data_dma = 0;
+ u32 i, idx, hdid, ndev;
+ int ret = 0;
+
+ list_buf = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+ &data_dma, GFP_KERNEL);
+ if (!list_buf)
+ return -ENOMEM;
+
+ for (idx = 0; idx < nd;) {
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.get_info.opcode = SPRAID_ADMIN_GET_INFO;
+ admin_cmd.get_info.type = SPRAID_GET_INFO_DEV_LIST;
+ admin_cmd.get_info.cdw11 = cpu_to_le32(idx);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd,
+ NULL, NULL, 0);
+
+ if (ret) {
+ dev_err(hdev->dev, "Get device list failed, nd: %u;"
+ "idx: %u, ret: %d\n",
+ nd, idx, ret);
+ goto out;
+ }
+ ndev = le32_to_cpu(list_buf->dev_num);
+
+ dev_info(hdev->dev, "ndev numbers: %u\n", ndev);
+
+ for (i = 0; i < ndev; i++) {
+ hdid = le32_to_cpu(list_buf->devices[i].hdid);
+ dev_info(hdev->dev, "list_buf->devices[%d], hdid: %u;"
+ "target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ i, hdid,
+ le16_to_cpu(list_buf->devices[i].target),
+ list_buf->devices[i].channel,
+ list_buf->devices[i].lun,
+ list_buf->devices[i].attr);
+ if (hdid > nd || hdid == 0) {
+ dev_err(hdev->dev, "err, hdid[%d] invalid\n",
+ hdid);
+ continue;
+ }
+ memcpy(&devices[hdid - 1], &list_buf->devices[i],
+ sizeof(struct spraid_dev_info));
+ }
+ idx += ndev;
+
+ if (idx < MAX_DEV_ENTRY_PER_PAGE_4K)
+ break;
+ }
+
+out:
+ dma_free_coherent(hdev->dev, PAGE_SIZE, list_buf, data_dma);
+ return ret;
+}
+
+static void spraid_send_aen(struct spraid_dev *hdev, u16 cid)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ struct spraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.common.opcode = SPRAID_ADMIN_ASYNC_EVENT;
+ admin_cmd.common.command_id = cid;
+
+ spraid_submit_cmd(adminq, &admin_cmd);
+ dev_info(hdev->dev, "send aen, cid[%d]\n", cid);
+}
+
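+/*
+ * Post one async event request per slot advertised by the controller
+ * (aerl); AEN slots use command ids starting at SPRAID_AQ_BLK_MQ_DEPTH
+ * so they never collide with regular admin commands.
+ */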
+static inline void spraid_send_all_aen(struct spraid_dev *hdev)
+{
+ u16 i;
+
+ for (i = 0; i < hdev->ctrl_info->aerl; i++)
+ spraid_send_aen(hdev, i + SPRAID_AQ_BLK_MQ_DEPTH);
+}
+
+static int spraid_add_device(struct spraid_dev *hdev,
+ struct spraid_dev_info *device)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ dev_info(hdev->dev, "add device, hdid: %u target: %d, channel: %d;"
+ " lun: %d, attr[0x%x]\n",
+ le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+ device->channel, device->lun, device->attr);
+
+ sdev = scsi_device_lookup(shost, device->channel,
+ le16_to_cpu(device->target), 0);
+ if (sdev) {
+ dev_warn(hdev->dev, "Device is already exist, channel: %d;"
+ " target_id: %d, lun: %d\n",
+ device->channel, le16_to_cpu(device->target), 0);
+ scsi_device_put(sdev);
+ return -EEXIST;
+ }
+ scsi_add_device(shost, device->channel, le16_to_cpu(device->target), 0);
+ return 0;
+}
+
+static int spraid_rescan_device(struct spraid_dev *hdev,
+ struct spraid_dev_info *device)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ dev_info(hdev->dev, "rescan device, hdid: %u target: %d, channel: %d;"
+ " lun: %d, attr[0x%x]\n",
+ le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+ device->channel, device->lun, device->attr);
+
+ sdev = scsi_device_lookup(shost, device->channel,
+ le16_to_cpu(device->target), 0);
+ if (!sdev) {
+ dev_warn(hdev->dev, "device is not exit rescan it, channel: %d;"
+ " target_id: %d, lun: %d\n",
+ device->channel, le16_to_cpu(device->target), 0);
+ return -ENODEV;
+ }
+
+ scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_device_put(sdev);
+ return 0;
+}
+
+static int spraid_remove_device(struct spraid_dev *hdev,
+ struct spraid_dev_info *org_device)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ dev_info(hdev->dev, "remove device, hdid: %u target: %d, channel: %d;"
+ " lun: %d, attr[0x%x]\n",
+ le32_to_cpu(org_device->hdid), le16_to_cpu(org_device->target),
+ org_device->channel, org_device->lun, org_device->attr);
+
+ sdev = scsi_device_lookup(shost, org_device->channel,
+ le16_to_cpu(org_device->target), 0);
+ if (!sdev) {
+ dev_warn(hdev->dev, "device is not exit remove it, channel: %d;"
+ " target_id: %d, lun: %d\n",
+ org_device->channel,
+ le16_to_cpu(org_device->target), 0);
+ return -ENODEV;
+ }
+
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ return 0;
+}
+
+static int spraid_dev_list_init(struct spraid_dev *hdev)
+{
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ int i, ret;
+
+ hdev->devices = kzalloc_node(nd * sizeof(struct spraid_dev_info),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->devices)
+ return -ENOMEM;
+
+ ret = spraid_get_dev_list(hdev, hdev->devices);
+ if (ret) {
+ dev_err(hdev->dev,
+ "Ignore failure of getting device list;"
+ " within initialization\n");
+ return 0;
+ }
+
+ for (i = 0; i < nd; i++) {
+ if (SPRAID_DEV_INFO_FLAG_VALID(hdev->devices[i].flag) &&
+ SPRAID_DEV_INFO_ATTR_BOOT(hdev->devices[i].attr)) {
+ spraid_add_device(hdev, &hdev->devices[i]);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int luntarget_cmp_func(const void *l, const void *r)
+{
+ const struct spraid_dev_info *ln = l;
+ const struct spraid_dev_info *rn = r;
+
+ if (ln->channel == rn->channel)
+ return le16_to_cpu(ln->target) - le16_to_cpu(rn->target);
+
+ return ln->channel - rn->channel;
+}
+
+static void spraid_scan_work(struct work_struct *work)
+{
+ struct spraid_dev *hdev =
+ container_of(work, struct spraid_dev, scan_work);
+ struct spraid_dev_info *devices, *org_devices;
+ struct spraid_dev_info *sortdevice;
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ u8 flag, org_flag;
+ int i, ret;
+ int count = 0;
+
+ devices = kcalloc(nd, sizeof(struct spraid_dev_info), GFP_KERNEL);
+ if (!devices)
+ return;
+
+ sortdevice = kcalloc(nd, sizeof(struct spraid_dev_info), GFP_KERNEL);
+ if (!sortdevice)
+ goto free_list;
+
+ ret = spraid_get_dev_list(hdev, devices);
+ if (ret)
+ goto free_all;
+ org_devices = hdev->devices;
+ for (i = 0; i < nd; i++) {
+ org_flag = org_devices[i].flag;
+ flag = devices[i].flag;
+
+ dev_log_dbg(hdev->dev, "i: %d, org_flag: 0x%x, flag: 0x%x\n",
+ i, org_flag, flag);
+
+ if (SPRAID_DEV_INFO_FLAG_VALID(flag)) {
+ if (!SPRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+ down_write(&hdev->devices_rwsem);
+ memcpy(&org_devices[i], &devices[i],
+ sizeof(struct spraid_dev_info));
+ memcpy(&sortdevice[count++], &devices[i],
+ sizeof(struct spraid_dev_info));
+ up_write(&hdev->devices_rwsem);
+ } else if (SPRAID_DEV_INFO_FLAG_CHANGE(flag)) {
+ spraid_rescan_device(hdev, &devices[i]);
+ }
+ } else {
+ if (SPRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+ down_write(&hdev->devices_rwsem);
+ org_devices[i].flag &= 0xfe;
+ up_write(&hdev->devices_rwsem);
+ spraid_remove_device(hdev, &org_devices[i]);
+ }
+ }
+ }
+
+ dev_info(hdev->dev, "scan work add device count = %d\n", count);
+
+ sort(sortdevice, count, sizeof(sortdevice[0]),
+ luntarget_cmp_func, NULL);
+
+ for (i = 0; i < count; i++)
+ spraid_add_device(hdev, &sortdevice[i]);
+
+free_all:
+ kfree(sortdevice);
+free_list:
+ kfree(devices);
+}
+
+static void spraid_timesyn_work(struct work_struct *work)
+{
+ struct spraid_dev *hdev =
+ container_of(work, struct spraid_dev, timesyn_work);
+
+ spraid_configure_timestamp(hdev);
+}
+
+static int spraid_init_ctrl_info(struct spraid_dev *hdev);
+static void spraid_fw_act_work(struct work_struct *work)
+{
+ struct spraid_dev *hdev =
+ container_of(work, struct spraid_dev, fw_act_work);
+
+ if (spraid_init_ctrl_info(hdev))
+ dev_err(hdev->dev, "get ctrl info failed after fw act\n");
+}
+
+static void spraid_queue_scan(struct spraid_dev *hdev)
+{
+ queue_work(spraid_wq, &hdev->scan_work);
+}
+
+static void spraid_handle_aen_notice(struct spraid_dev *hdev, u32 result)
+{
+ switch ((result & 0xff00) >> 8) {
+ case SPRAID_AEN_DEV_CHANGED:
+ spraid_queue_scan(hdev);
+ break;
+ case SPRAID_AEN_FW_ACT_START:
+ dev_info(hdev->dev, "fw activation starting\n");
+ break;
+ case SPRAID_AEN_HOST_PROBING:
+ break;
+ default:
+ dev_warn(hdev->dev, "async event result %08x\n", result);
+ }
+}
+
+static void spraid_handle_aen_vs(struct spraid_dev *hdev,
+ u32 result, u32 result1)
+{
+ switch ((result & 0xff00) >> 8) {
+ case SPRAID_AEN_TIMESYN:
+ queue_work(spraid_wq, &hdev->timesyn_work);
+ break;
+ case SPRAID_AEN_FW_ACT_FINISH:
+ dev_info(hdev->dev, "fw activation finish\n");
+ queue_work(spraid_wq, &hdev->fw_act_work);
+ break;
+ case SPRAID_AEN_EVENT_MIN ... SPRAID_AEN_EVENT_MAX:
+ dev_info(hdev->dev, "rcv card event[%d];"
+ " param1[0x%x] param2[0x%x]\n",
+ (result & 0xff00) >> 8, result, result1);
+ break;
+ default:
+ dev_warn(hdev->dev, "async event result: 0x%x\n", result);
+ }
+}
+
+static int spraid_alloc_resources(struct spraid_dev *hdev)
+{
+ int ret, nqueue;
+
+ ret = ida_alloc(&spraid_instance_ida, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(hdev->dev, "Get instance id failed\n");
+ return ret;
+ }
+ hdev->instance = ret;
+
+ hdev->ctrl_info = kzalloc_node(sizeof(*hdev->ctrl_info),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->ctrl_info) {
+ ret = -ENOMEM;
+ goto release_instance;
+ }
+
+ ret = spraid_create_dma_pools(hdev);
+ if (ret)
+ goto free_ctrl_info;
+ nqueue = num_possible_cpus() + 1;
+ hdev->queues = kcalloc_node(nqueue, sizeof(struct spraid_queue),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->queues) {
+ ret = -ENOMEM;
+ goto destroy_dma_pools;
+ }
+
+ ret = spraid_alloc_admin_cmds(hdev);
+ if (ret)
+ goto free_queues;
+
+ dev_info(hdev->dev, "[%s] queues num: %d\n", __func__, nqueue);
+
+ return 0;
+
+free_queues:
+ kfree(hdev->queues);
+destroy_dma_pools:
+ spraid_destroy_dma_pools(hdev);
+free_ctrl_info:
+ kfree(hdev->ctrl_info);
+release_instance:
+ ida_free(&spraid_instance_ida, hdev->instance);
+ return ret;
+}
+
+static void spraid_free_resources(struct spraid_dev *hdev)
+{
+ spraid_free_admin_cmds(hdev);
+ kfree(hdev->queues);
+ spraid_destroy_dma_pools(hdev);
+ kfree(hdev->ctrl_info);
+ ida_free(&spraid_instance_ida, hdev->instance);
+}
+
+static void spraid_bsg_unmap_data(struct spraid_dev *hdev, struct bsg_job *job)
+{
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct spraid_iod *iod = job->dd_data;
+ enum dma_data_direction dma_dir =
+ rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ if (iod->nsge)
+ dma_unmap_sg(hdev->dev, iod->sg, iod->nsge, dma_dir);
+
+ spraid_free_iod_res(hdev, iod);
+}
+
+static int spraid_bsg_map_data(struct spraid_dev *hdev, struct bsg_job *job,
+ struct spraid_admin_command *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct spraid_iod *iod = job->dd_data;
+ enum dma_data_direction dma_dir =
+ rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ int ret = 0;
+
+ iod->sg = job->request_payload.sg_list;
+ iod->nsge = job->request_payload.sg_cnt;
+ iod->length = job->request_payload.payload_len;
+ iod->use_sgl = false;
+ iod->npages = -1;
+ iod->sg_drv_mgmt = false;
+
+ if (!iod->nsge)
+ goto out;
+
+ ret = dma_map_sg_attrs(hdev->dev, iod->sg, iod->nsge,
+ dma_dir, DMA_ATTR_NO_WARN);
+ if (!ret)
+ goto out;
+
+ ret = spraid_setup_prps(hdev, iod);
+ if (ret)
+ goto unmap;
+
+ cmd->common.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma);
+
+ return 0;
+
+unmap:
+ dma_unmap_sg(hdev->dev, iod->sg, iod->nsge, dma_dir);
+out:
+ return ret;
+}
+
+static int spraid_get_ctrl_info(struct spraid_dev *hdev,
+ struct spraid_ctrl_info *ctrl_info)
+{
+ struct spraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+ &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.get_info.opcode = SPRAID_ADMIN_GET_INFO;
+ admin_cmd.get_info.type = SPRAID_GET_INFO_CTRL;
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(ctrl_info, data_ptr, sizeof(struct spraid_ctrl_info));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static int spraid_init_ctrl_info(struct spraid_dev *hdev)
+{
+ int ret;
+
+ hdev->ctrl_info->nd = cpu_to_le32(240);
+ hdev->ctrl_info->mdts = 8;
+ hdev->ctrl_info->max_cmds = cpu_to_le16(4096);
+ hdev->ctrl_info->max_num_sge = cpu_to_le16(128);
+ hdev->ctrl_info->max_channel = cpu_to_le16(4);
+ hdev->ctrl_info->max_tgt_id = cpu_to_le32(3239);
+ hdev->ctrl_info->max_lun = cpu_to_le16(2);
+
+ ret = spraid_get_ctrl_info(hdev, hdev->ctrl_info);
+ if (ret)
+ dev_err(hdev->dev, "get controller info failed: %d\n", ret);
+
+	dev_info(hdev->dev, "[%s]nd = %u\n",
+		 __func__, le32_to_cpu(hdev->ctrl_info->nd));
+	dev_info(hdev->dev, "[%s]max_cmd = %u\n",
+		 __func__, le16_to_cpu(hdev->ctrl_info->max_cmds));
+	dev_info(hdev->dev, "[%s]max_channel = %u\n",
+		 __func__, le16_to_cpu(hdev->ctrl_info->max_channel));
+	dev_info(hdev->dev, "[%s]max_tgt_id = %u\n",
+		 __func__, le32_to_cpu(hdev->ctrl_info->max_tgt_id));
+	dev_info(hdev->dev, "[%s]max_lun = %u\n",
+		 __func__, le16_to_cpu(hdev->ctrl_info->max_lun));
+	dev_info(hdev->dev, "[%s]max_num_sge = %u\n",
+		 __func__, le16_to_cpu(hdev->ctrl_info->max_num_sge));
+	dev_info(hdev->dev, "[%s]lun_num_boot = %d\n",
+		 __func__, hdev->ctrl_info->lun_num_in_boot);
+	dev_info(hdev->dev, "[%s]mdts = %d\n", __func__, hdev->ctrl_info->mdts);
+	dev_info(hdev->dev, "[%s]acl = %d\n", __func__, hdev->ctrl_info->acl);
+	dev_info(hdev->dev, "[%s]aerl = %d\n", __func__, hdev->ctrl_info->aerl);
+	dev_info(hdev->dev, "[%s]card_type = %d\n",
+		 __func__, hdev->ctrl_info->card_type);
+	dev_info(hdev->dev, "[%s]rtd3e = %d\n",
+		 __func__, hdev->ctrl_info->rtd3e);
+	dev_info(hdev->dev, "[%s]sn = %s\n", __func__, hdev->ctrl_info->sn);
+	dev_info(hdev->dev, "[%s]fr = %s\n", __func__, hdev->ctrl_info->fr);
+
+ if (!hdev->ctrl_info->aerl)
+ hdev->ctrl_info->aerl = 1;
+ if (hdev->ctrl_info->aerl > SPRAID_NR_AEN_COMMANDS)
+ hdev->ctrl_info->aerl = SPRAID_NR_AEN_COMMANDS;
+
+ return 0;
+}
+
+#define SPRAID_MAX_ADMIN_PAYLOAD_SIZE BIT(16)
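+/*
+ * Back the per-command iod extensions (sg table and PRP list storage)
+ * with a mempool so passthrough commands keep making progress under
+ * memory pressure.
+ */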
+static int spraid_alloc_iod_ext_mem_pool(struct spraid_dev *hdev)
+{
+ u16 max_sge = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+ size_t alloc_size;
+
+ alloc_size = spraid_iod_ext_size(hdev, SPRAID_MAX_ADMIN_PAYLOAD_SIZE,
+ max_sge, true, false);
+ if (alloc_size > PAGE_SIZE)
+		dev_warn(hdev->dev,
+			 "sg allocation larger than one page is unreasonable\n");
+ hdev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
+ mempool_kfree,
+ (void *)alloc_size, GFP_KERNEL,
+ hdev->numa_node);
+ if (!hdev->iod_mempool) {
+ dev_err(hdev->dev, "Create iod extension memory pool failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void spraid_free_iod_ext_mem_pool(struct spraid_dev *hdev)
+{
+ mempool_destroy(hdev->iod_mempool);
+}
+
+static int spraid_user_admin_cmd(struct spraid_dev *hdev, struct bsg_job *job)
+{
+ struct spraid_bsg_request *bsg_req = job->request;
+ struct spraid_passthru_common_cmd *cmd = &(bsg_req->admcmd);
+ struct spraid_admin_command admin_cmd;
+ u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+ u32 result[2] = {0};
+ int status;
+
+ if (hdev->state >= SPRAID_RESETTING) {
+ dev_err(hdev->dev, "[%s] err, host state:[%d] is not right\n",
+ __func__, hdev->state);
+ return -EBUSY;
+ }
+
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x] init\n",
+ __func__, cmd->opcode, cmd->info_0.subopcode);
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.common.opcode = cmd->opcode;
+ admin_cmd.common.flags = cmd->flags;
+ admin_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+ admin_cmd.common.cdw2[0] = cpu_to_le32(cmd->cdw2);
+ admin_cmd.common.cdw2[1] = cpu_to_le32(cmd->cdw3);
+ admin_cmd.common.cdw10 = cpu_to_le32(cmd->cdw10);
+ admin_cmd.common.cdw11 = cpu_to_le32(cmd->cdw11);
+ admin_cmd.common.cdw12 = cpu_to_le32(cmd->cdw12);
+ admin_cmd.common.cdw13 = cpu_to_le32(cmd->cdw13);
+ admin_cmd.common.cdw14 = cpu_to_le32(cmd->cdw14);
+ admin_cmd.common.cdw15 = cpu_to_le32(cmd->cdw15);
+
+ status = spraid_bsg_map_data(hdev, job, &admin_cmd);
+ if (status) {
+ dev_err(hdev->dev, "[%s] err, map data failed\n", __func__);
+ return status;
+ }
+
+ status = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, &result[0],
+ &result[1], timeout);
+ if (status >= 0) {
+ job->reply_len = sizeof(result);
+ memcpy(job->reply, result, sizeof(result));
+ }
+
+	dev_info(hdev->dev,
+		 "[%s] opcode[0x%x] subopcode[0x%x] status[0x%x] result0[0x%x] result1[0x%x]\n",
+		 __func__, cmd->opcode, cmd->info_0.subopcode,
+		 status, result[0], result[1]);
+
+ spraid_bsg_unmap_data(hdev, job);
+
+ return status;
+}
+
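+/*
+ * Reserve the ioq passthrough command slots up front; their cids start
+ * above SPRAID_IO_BLK_MQ_DEPTH so they never collide with the tags used
+ * by regular SCSI commands on the same queue.
+ */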
+static int spraid_alloc_ioq_ptcmds(struct spraid_dev *hdev)
+{
+ int i;
+ int ptnum = SPRAID_NR_IOQ_PTCMDS;
+
+ INIT_LIST_HEAD(&hdev->ioq_pt_list);
+ spin_lock_init(&hdev->ioq_pt_lock);
+
+ hdev->ioq_ptcmds = kcalloc_node(ptnum, sizeof(struct spraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
+
+ if (!hdev->ioq_ptcmds) {
+ dev_err(hdev->dev, "Alloc ioq_ptcmds failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ptnum; i++) {
+ hdev->ioq_ptcmds[i].qid = i / SPRAID_PTCMDS_PERQ + 1;
+ hdev->ioq_ptcmds[i].cid = i % SPRAID_PTCMDS_PERQ
+ + SPRAID_IO_BLK_MQ_DEPTH;
+ list_add_tail(&(hdev->ioq_ptcmds[i].list), &hdev->ioq_pt_list);
+ }
+
+ dev_info(hdev->dev, "Alloc ioq_ptcmds success, ptnum[%d]\n", ptnum);
+
+ return 0;
+}
+
+static void spraid_free_ioq_ptcmds(struct spraid_dev *hdev)
+{
+ kfree(hdev->ioq_ptcmds);
+ hdev->ioq_ptcmds = NULL;
+
+ INIT_LIST_HEAD(&hdev->ioq_pt_list);
+}
+
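+/*
+ * Submit one passthrough command on its IO queue and wait for it
+ * synchronously; when the masked completion status reads 0x101, the
+ * per-command sense buffer is copied back to the caller.
+ */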
+static int spraid_submit_ioq_sync_cmd(struct spraid_dev *hdev,
+ struct spraid_ioq_command *cmd,
+ u32 *result, u32 *reslen, u32 timeout)
+{
+ int ret;
+ dma_addr_t sense_dma;
+ struct spraid_queue *ioq;
+ void *sense_addr = NULL;
+ struct spraid_cmd *pt_cmd = spraid_get_cmd(hdev, SPRAID_CMD_IOPT);
+
+ if (!pt_cmd) {
+ dev_err(hdev->dev, "err, get ioq cmd failed\n");
+ return -EFAULT;
+ }
+
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+ init_completion(&pt_cmd->cmd_done);
+
+ ioq = &hdev->queues[pt_cmd->qid];
+ ret = pt_cmd->cid * SCSI_SENSE_BUFFERSIZE;
+ sense_addr = ioq->sense + ret;
+ sense_dma = ioq->sense_dma_addr + ret;
+
+ cmd->common.sense_addr = cpu_to_le64(sense_dma);
+ cmd->common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+ cmd->common.command_id = pt_cmd->cid;
+
+ spraid_submit_cmd(ioq, cmd);
+
+ if (!wait_for_completion_timeout(&pt_cmd->cmd_done, timeout)) {
+		dev_err(hdev->dev,
+			"[%s] cid[%d] qid[%d] timeout, opcode[0x%x] subopcode[0x%x]\n",
+			__func__, pt_cmd->cid, pt_cmd->qid, cmd->common.opcode,
+			(le32_to_cpu(cmd->common.cdw3[0]) & 0xffff));
+ WRITE_ONCE(pt_cmd->state, SPRAID_CMD_TIMEOUT);
+ spraid_put_cmd(hdev, pt_cmd, SPRAID_CMD_IOPT);
+ return -EINVAL;
+ }
+
+ if (result && reslen) {
+ if ((pt_cmd->status & 0x17f) == 0x101) {
+ memcpy(result, sense_addr, SCSI_SENSE_BUFFERSIZE);
+ *reslen = SCSI_SENSE_BUFFERSIZE;
+ }
+ }
+
+	ret = pt_cmd->status;
+	spraid_put_cmd(hdev, pt_cmd, SPRAID_CMD_IOPT);
+
+	return ret;
+}
+
+static int spraid_user_ioq_cmd(struct spraid_dev *hdev, struct bsg_job *job)
+{
+ struct spraid_bsg_request *bsg_req =
+ (struct spraid_bsg_request *)(job->request);
+ struct spraid_ioq_passthru_cmd *cmd = &(bsg_req->ioqcmd);
+ struct spraid_ioq_command ioq_cmd;
+ int status = 0;
+ u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+
+ if (cmd->data_len > PAGE_SIZE) {
+		dev_err(hdev->dev, "[%s] data len exceeds PAGE_SIZE\n", __func__);
+ return -EFAULT;
+ }
+
+ if (hdev->state != SPRAID_LIVE) {
+ dev_err(hdev->dev, "[%s] err, host state:[%d] is not live\n",
+ __func__, hdev->state);
+ return -EBUSY;
+ }
+
+	dev_info(hdev->dev,
+		 "[%s] opcode[0x%x] subopcode[0x%x] init, datalen[%d]\n",
+		 __func__, cmd->opcode, cmd->info_1.subopcode, cmd->data_len);
+
+ memset(&ioq_cmd, 0, sizeof(ioq_cmd));
+ ioq_cmd.common.opcode = cmd->opcode;
+ ioq_cmd.common.flags = cmd->flags;
+ ioq_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+ ioq_cmd.common.sense_len = cpu_to_le16(cmd->info_0.res_sense_len);
+ ioq_cmd.common.cdb_len = cmd->info_0.cdb_len;
+ ioq_cmd.common.rsvd2 = cmd->info_0.rsvd0;
+ ioq_cmd.common.cdw3[0] = cpu_to_le32(cmd->cdw3);
+ ioq_cmd.common.cdw3[1] = cpu_to_le32(cmd->cdw4);
+ ioq_cmd.common.cdw3[2] = cpu_to_le32(cmd->cdw5);
+
+ ioq_cmd.common.cdw10[0] = cpu_to_le32(cmd->cdw10);
+ ioq_cmd.common.cdw10[1] = cpu_to_le32(cmd->cdw11);
+ ioq_cmd.common.cdw10[2] = cpu_to_le32(cmd->cdw12);
+ ioq_cmd.common.cdw10[3] = cpu_to_le32(cmd->cdw13);
+ ioq_cmd.common.cdw10[4] = cpu_to_le32(cmd->cdw14);
+ ioq_cmd.common.cdw10[5] = cpu_to_le32(cmd->data_len);
+
+ memcpy(ioq_cmd.common.cdb, &cmd->cdw16, cmd->info_0.cdb_len);
+
+ ioq_cmd.common.cdw26[0] = cpu_to_le32(cmd->cdw26[0]);
+ ioq_cmd.common.cdw26[1] = cpu_to_le32(cmd->cdw26[1]);
+ ioq_cmd.common.cdw26[2] = cpu_to_le32(cmd->cdw26[2]);
+ ioq_cmd.common.cdw26[3] = cpu_to_le32(cmd->cdw26[3]);
+
+ status = spraid_bsg_map_data(hdev, job,
+ (struct spraid_admin_command *)&ioq_cmd);
+ if (status) {
+ dev_err(hdev->dev, "[%s] err, map data failed\n", __func__);
+ return status;
+ }
+
+ status = spraid_submit_ioq_sync_cmd(hdev, &ioq_cmd, job->reply,
+ &job->reply_len, timeout);
+
+	dev_info(hdev->dev,
+		 "[%s] opcode[0x%x] subopcode[0x%x], status[0x%x] reply_len[%d]\n",
+		 __func__, cmd->opcode, cmd->info_1.subopcode,
+		 status, job->reply_len);
+
+ spraid_bsg_unmap_data(hdev, job);
+
+ return status;
+}
+
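+/*
+ * An scmd can race with its own completion: treat it as done when its
+ * iod has already completed or its CQE turns up while polling the queue.
+ */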
+static bool spraid_check_scmd_completed(struct scsi_cmnd *scmd)
+{
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_queue *spraidq;
+ u16 hwq, cid;
+
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+ spraidq = &hdev->queues[hwq];
+	if (READ_ONCE(iod->state) == SPRAID_CMD_COMPLETE ||
+	    spraid_poll_cq(spraidq, cid)) {
+ dev_warn(hdev->dev, "cid[%d] qid[%d] has been completed\n",
+ cid, spraidq->qid);
+ return true;
+ }
+ return false;
+}
+
+static enum blk_eh_timer_return spraid_scmd_timeout(struct scsi_cmnd *scmd)
+{
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ unsigned int timeout = scmd->device->request_queue->rq_timeout;
+
+ if (spraid_check_scmd_completed(scmd))
+ goto out;
+
+ if (time_after(jiffies, scmd->jiffies_at_alloc + timeout)) {
+ if (cmpxchg(&iod->state,
+ SPRAID_CMD_IN_FLIGHT,
+ SPRAID_CMD_TIMEOUT) == SPRAID_CMD_IN_FLIGHT) {
+ return BLK_EH_DONE;
+ }
+ }
+out:
+ return BLK_EH_RESET_TIMER;
+}
+
+/* send the abort command via the admin queue for now */
+static int spraid_send_abort_cmd(struct spraid_dev *hdev,
+ u32 hdid, u16 qid, u16 cid)
+{
+ struct spraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.abort.opcode = SPRAID_ADMIN_ABORT_CMD;
+ admin_cmd.abort.hdid = cpu_to_le32(hdid);
+ admin_cmd.abort.sqid = cpu_to_le16(qid);
+ admin_cmd.abort.cid = cpu_to_le16(cid);
+
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+/* send the reset command via the admin queue for now */
+static int spraid_send_reset_cmd(struct spraid_dev *hdev, int type, u32 hdid)
+{
+ struct spraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.reset.opcode = SPRAID_ADMIN_RESET;
+ admin_cmd.reset.hdid = cpu_to_le32(hdid);
+ admin_cmd.reset.type = type;
+
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
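+/*
+ * Central host state machine; only the transitions enumerated below are
+ * permitted (e.g. RESETTING may only be entered from LIVE).
+ */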
+static bool spraid_change_host_state(struct spraid_dev *hdev,
+ enum spraid_state newstate)
+{
+ unsigned long flags;
+ enum spraid_state oldstate;
+ bool change = false;
+
+ spin_lock_irqsave(&hdev->state_lock, flags);
+
+ oldstate = hdev->state;
+ switch (newstate) {
+ case SPRAID_LIVE:
+ switch (oldstate) {
+ case SPRAID_NEW:
+ case SPRAID_RESETTING:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case SPRAID_RESETTING:
+ switch (oldstate) {
+ case SPRAID_LIVE:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case SPRAID_DELETING:
+ if (oldstate != SPRAID_DELETING)
+ change = true;
+ break;
+ case SPRAID_DEAD:
+ switch (oldstate) {
+ case SPRAID_NEW:
+ case SPRAID_LIVE:
+ case SPRAID_RESETTING:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (change)
+ hdev->state = newstate;
+ spin_unlock_irqrestore(&hdev->state_lock, flags);
+
+ dev_info(hdev->dev, "[%s][%d]->[%d], change[%d]\n",
+ __func__, oldstate, newstate, change);
+
+ return change;
+}
+
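+/*
+ * Complete a (possibly) in-flight request back to the midlayer with
+ * DID_NO_CONNECT once the controller can no longer answer it.
+ */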
+static void spraid_back_fault_cqe(struct spraid_queue *ioq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = ioq->hdev;
+ struct blk_mq_tags *tags;
+ struct scsi_cmnd *scmd;
+ struct spraid_iod *iod;
+ struct request *req;
+
+ tags = hdev->shost->tag_set.tags[ioq->qid - 1];
+ req = blk_mq_tag_to_rq(tags, cqe->cmd_id);
+ if (unlikely(!req || !blk_mq_request_started(req)))
+ return;
+
+ scmd = blk_mq_rq_to_pdu(req);
+ iod = scsi_cmd_priv(scmd);
+
+ set_host_byte(scmd, DID_NO_CONNECT);
+ if (iod->nsge)
+ scsi_dma_unmap(scmd);
+ spraid_free_iod_res(hdev, iod);
+ scmd->scsi_done(scmd);
+ dev_warn(hdev->dev, "Back fault CQE, cid[%d] qid[%d]\n",
+ cqe->cmd_id, ioq->qid);
+}
+
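+/* Fail back every potentially outstanding command on all hw queues. */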
+static void spraid_back_all_io(struct spraid_dev *hdev)
+{
+ int i, j;
+ struct spraid_queue *ioq;
+ struct spraid_completion cqe = { 0 };
+
+ scsi_block_requests(hdev->shost);
+
+ for (i = 1; i <= hdev->shost->nr_hw_queues; i++) {
+ ioq = &hdev->queues[i];
+ for (j = 0; j < hdev->shost->can_queue; j++) {
+ cqe.cmd_id = j;
+ spraid_back_fault_cqe(ioq, &cqe);
+ }
+ }
+
+ scsi_unblock_requests(hdev->shost);
+}
+
+static void spraid_dev_disable(struct spraid_dev *hdev, bool shutdown)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ u16 start, end;
+ unsigned long timeout = jiffies + 600 * HZ;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ if (shutdown)
+ spraid_shutdown_ctrl(hdev);
+ else
+ spraid_disable_ctrl(hdev);
+ }
+
+ while (!time_after(jiffies, timeout)) {
+ if (!pci_device_is_present(hdev->pdev)) {
+			dev_info(hdev->dev,
+				 "[%s] pci_device not present, skip wait\n",
+				 __func__);
+ break;
+ }
+ if (!spraid_wait_ready(hdev, hdev->cap, false)) {
+ dev_info(hdev->dev,
+ "[%s] wait ready success after reset\n",
+ __func__);
+ break;
+ }
+ dev_info(hdev->dev, "[%s] waiting csts_rdy ready\n", __func__);
+ }
+
+ if (hdev->queue_count == 0) {
+		dev_err(hdev->dev, "[%s] warn, queues have been deleted\n",
+			__func__);
+ return;
+ }
+
+ spin_lock_irq(&adminq->cq_lock);
+ spraid_process_cq(adminq, &start, &end, -1);
+ spin_unlock_irq(&adminq->cq_lock);
+ spraid_complete_cqes(adminq, start, end);
+
+ spraid_pci_disable(hdev);
+
+ spraid_back_all_io(hdev);
+}
+
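+/*
+ * Controller reset: disable the (possibly wedged) device, then bring
+ * PCI, the admin queue and the IO queues back up; the host is marked
+ * DEAD if any step fails.
+ */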
+static void spraid_reset_work(struct work_struct *work)
+{
+ int ret;
+ struct spraid_dev *hdev =
+ container_of(work, struct spraid_dev, reset_work);
+
+ if (hdev->state != SPRAID_RESETTING) {
+ dev_err(hdev->dev, "[%s] err, host is not reset state\n",
+ __func__);
+ return;
+ }
+
+ dev_info(hdev->dev, "[%s] enter host reset\n", __func__);
+
+ if (hdev->ctrl_config & SPRAID_CC_ENABLE) {
+ dev_info(hdev->dev, "[%s] start dev_disable\n", __func__);
+ spraid_dev_disable(hdev, false);
+ }
+
+ ret = spraid_pci_enable(hdev);
+ if (ret)
+ goto out;
+
+ ret = spraid_setup_admin_queue(hdev);
+ if (ret)
+ goto pci_disable;
+
+ ret = spraid_setup_io_queues(hdev);
+ if (ret || hdev->online_queues <= hdev->shost->nr_hw_queues)
+ goto pci_disable;
+
+ spraid_change_host_state(hdev, SPRAID_LIVE);
+
+ spraid_send_all_aen(hdev);
+
+ return;
+
+pci_disable:
+ spraid_pci_disable(hdev);
+out:
+ spraid_change_host_state(hdev, SPRAID_DEAD);
+ dev_err(hdev->dev, "[%s] err, host reset failed\n", __func__);
+}
+
+static int spraid_reset_work_sync(struct spraid_dev *hdev)
+{
+ if (!spraid_change_host_state(hdev, SPRAID_RESETTING)) {
+ dev_info(hdev->dev, "[%s] can't change to reset state\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ if (!queue_work(spraid_wq, &hdev->reset_work)) {
+ dev_err(hdev->dev, "[%s] err, host is already in reset state\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ flush_work(&hdev->reset_work);
+ if (hdev->state != SPRAID_LIVE)
+ return -ENODEV;
+
+ return 0;
+}
+
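+/*
+ * Poll for up to SPRAID_WAIT_ABNL_CMD_TIMEOUT half-second ticks for an
+ * aborted or reset command to be completed by the controller.
+ */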
+static int spraid_wait_abnl_cmd_done(struct spraid_iod *iod)
+{
+ u16 times = 0;
+
+ do {
+ if (READ_ONCE(iod->state) == SPRAID_CMD_TMO_COMPLETE)
+ break;
+ msleep(500);
+ times++;
+ } while (times <= SPRAID_WAIT_ABNL_CMD_TIMEOUT);
+
+	/* the command never completed within the abort/reset wait window */
+	if (READ_ONCE(iod->state) != SPRAID_CMD_TMO_COMPLETE)
+		return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int spraid_abort_handler(struct scsi_cmnd *scmd)
+{
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_sdev_hostdata *hostdata;
+ u16 hwq, cid;
+ int ret;
+
+ scsi_print_command(scmd);
+
+ if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+ spraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, aborting\n", cid, hwq);
+ ret = spraid_send_abort_cmd(hdev, hostdata->hdid, hwq, cid);
+ if (ret != ADMIN_ERR_TIMEOUT) {
+ ret = spraid_wait_abnl_cmd_done(iod);
+ if (ret) {
+			dev_warn(hdev->dev,
+				 "cid[%d] qid[%d] abort failed, not found\n",
+				 cid, hwq);
+ return FAILED;
+ }
+ dev_warn(hdev->dev, "cid[%d] qid[%d] abort succ\n", cid, hwq);
+ return SUCCESS;
+ }
+ dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed, timeout\n",
+ cid, hwq);
+ return FAILED;
+}
+
+static int spraid_tgt_reset_handler(struct scsi_cmnd *scmd)
+{
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_sdev_hostdata *hostdata;
+ u16 hwq, cid;
+ int ret;
+
+ scsi_print_command(scmd);
+
+ if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+ spraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, target reset\n",
+ cid, hwq);
+ ret = spraid_send_reset_cmd(hdev, SPRAID_RESET_TARGET, hostdata->hdid);
+ if (ret == 0) {
+ ret = spraid_wait_abnl_cmd_done(iod);
+ if (ret) {
+			dev_warn(hdev->dev,
+				 "cid[%d] qid[%d] target reset failed, not found\n",
+				 cid, hwq);
+ return FAILED;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] target reset success\n",
+ cid, hwq);
+ return SUCCESS;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] ret[%d] target reset failed\n",
+ cid, hwq, ret);
+ return FAILED;
+}
+
+static int spraid_bus_reset_handler(struct scsi_cmnd *scmd)
+{
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_sdev_hostdata *hostdata;
+ u16 hwq, cid;
+ int ret;
+
+ scsi_print_command(scmd);
+
+ if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+ spraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, bus reset\n", cid, hwq);
+ ret = spraid_send_reset_cmd(hdev, SPRAID_RESET_BUS, hostdata->hdid);
+ if (ret == 0) {
+ ret = spraid_wait_abnl_cmd_done(iod);
+ if (ret) {
+			dev_warn(hdev->dev,
+				 "cid[%d] qid[%d] bus reset failed, not found\n",
+				 cid, hwq);
+ return FAILED;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] bus reset succ\n",
+ cid, hwq);
+ return SUCCESS;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] ret[%d] bus reset failed\n",
+ cid, hwq, ret);
+ return FAILED;
+}
+
+static int spraid_shost_reset_handler(struct scsi_cmnd *scmd)
+{
+ u16 hwq, cid;
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+
+ scsi_print_command(scmd);
+ if (hdev->state != SPRAID_LIVE || spraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+ dev_warn(hdev->dev, "cid[%d] qid[%d] host reset\n", cid, hwq);
+
+ if (spraid_reset_work_sync(hdev)) {
+ dev_warn(hdev->dev, "cid[%d] qid[%d] host reset failed\n",
+ cid, hwq);
+ return FAILED;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] host reset success\n", cid, hwq);
+
+ return SUCCESS;
+}
+
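+/*
+ * AER error_detected hook: on a frozen channel, quiesce IO and move the
+ * host to RESETTING so slot_reset can rebuild the controller.
+ */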
+static pci_ers_result_t spraid_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "enter pci error detect, state:%d\n", state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ dev_warn(hdev->dev, "channel is normal, do nothing\n");
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ dev_warn(hdev->dev,
+ "channel io frozen, need reset controller\n");
+
+ scsi_block_requests(hdev->shost);
+
+ spraid_change_host_state(hdev, SPRAID_RESETTING);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ dev_warn(hdev->dev, "channel io failure, request disconnect\n");
+
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t spraid_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "restart after slot reset\n");
+
+ pci_restore_state(pdev);
+
+ if (!queue_work(spraid_wq, &hdev->reset_work)) {
+ dev_err(hdev->dev, "[%s] err, the device is resetting state\n",
+ __func__);
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ flush_work(&hdev->reset_work);
+
+ scsi_unblock_requests(hdev->shost);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void spraid_reset_done(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "enter spraid reset done\n");
+}
+
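+/* sysfs attributes exposing the CSTS register fields for diagnostics. */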
+static ssize_t csts_pp_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + SPRAID_REG_CSTS)
+ & SPRAID_CSTS_PP_MASK);
+ ret >>= SPRAID_CSTS_PP_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_shst_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + SPRAID_REG_CSTS)
+ & SPRAID_CSTS_SHST_MASK);
+ ret >>= SPRAID_CSTS_SHST_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_cfs_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + SPRAID_REG_CSTS)
+ & SPRAID_CSTS_CFS_MASK);
+ ret >>= SPRAID_CSTS_CFS_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_rdy_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev))
+ ret = (readl(hdev->bar + SPRAID_REG_CSTS) & SPRAID_CSTS_RDY);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t fw_version_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", hdev->ctrl_info->fr);
+}
+
+static DEVICE_ATTR_RO(csts_pp);
+static DEVICE_ATTR_RO(csts_shst);
+static DEVICE_ATTR_RO(csts_cfs);
+static DEVICE_ATTR_RO(csts_rdy);
+static DEVICE_ATTR_RO(fw_version);
+
+static struct device_attribute *spraid_host_attrs[] = {
+ &dev_attr_csts_pp,
+ &dev_attr_csts_shst,
+ &dev_attr_csts_cfs,
+ &dev_attr_csts_rdy,
+ &dev_attr_fw_version,
+ NULL,
+};
+
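+/* Fetch the virtual-disk info page for @vid with a usr_cmd admin read. */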
+static int spraid_get_vd_info(struct spraid_dev *hdev,
+ struct spraid_vd_info *vd_info, u16 vid)
+{
+ struct spraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+ &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_VDINFO);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.usr_cmd.info_1.param_len = cpu_to_le16(VDINFO_PARAM_LEN);
+ admin_cmd.usr_cmd.cdw10 = cpu_to_le32(vid);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(vd_info, data_ptr, sizeof(struct spraid_vd_info));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static int spraid_get_bgtask(struct spraid_dev *hdev,
+ struct spraid_bgtask *bgtask)
+{
+ struct spraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+ &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_BGTASK);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(bgtask, data_ptr, sizeof(struct spraid_bgtask));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static ssize_t raid_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct spraid_dev *hdev;
+ struct spraid_vd_info *vd_info;
+ struct spraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+	vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+	if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr)) {
+		kfree(vd_info);
+		return snprintf(buf, PAGE_SIZE, "NA\n");
+	}
+
+ ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ vd_info->rg_level = ARRAY_SIZE(raid_levels) - 1;
+
+ ret = (vd_info->rg_level < ARRAY_SIZE(raid_levels)) ?
+ vd_info->rg_level : (ARRAY_SIZE(raid_levels) - 1);
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "RAID-%s\n", raid_levels[ret]);
+}
+
+static ssize_t raid_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct spraid_dev *hdev;
+ struct spraid_vd_info *vd_info;
+ struct spraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+	vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+	if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr)) {
+		kfree(vd_info);
+		return snprintf(buf, PAGE_SIZE, "NA\n");
+	}
+
+ ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret) {
+ vd_info->vd_status = 0;
+ vd_info->rg_id = 0xff;
+ }
+
+ ret = (vd_info->vd_status < ARRAY_SIZE(raid_states)) ?
+ vd_info->vd_status : 0;
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", raid_states[ret]);
+}
+
+static ssize_t raid_resync_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct spraid_dev *hdev;
+ struct spraid_vd_info *vd_info;
+ struct spraid_bgtask *bgtask;
+ struct spraid_sdev_hostdata *hostdata;
+ u8 rg_id, i, progress = 0;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+	vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+	if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr)) {
+		kfree(vd_info);
+		return snprintf(buf, PAGE_SIZE, "NA\n");
+	}
+
+ ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ goto out;
+
+ rg_id = vd_info->rg_id;
+
+ bgtask = (struct spraid_bgtask *)vd_info;
+ ret = spraid_get_bgtask(hdev, bgtask);
+ if (ret)
+ goto out;
+ for (i = 0; i < bgtask->task_num; i++) {
+ if ((bgtask->bgtask[i].type == BGTASK_TYPE_REBUILD) &&
+ (le16_to_cpu(bgtask->bgtask[i].vd_id) == rg_id))
+ progress = bgtask->bgtask[i].progress;
+ }
+
+out:
+ kfree(vd_info);
+ return snprintf(buf, PAGE_SIZE, "%d\n", progress);
+}
+
+static DEVICE_ATTR_RO(raid_level);
+static DEVICE_ATTR_RO(raid_state);
+static DEVICE_ATTR_RO(raid_resync);
+
+static struct device_attribute *spraid_dev_attrs[] = {
+ &dev_attr_raid_level,
+ &dev_attr_raid_state,
+ &dev_attr_raid_resync,
+ NULL,
+};
+
+static struct pci_error_handlers spraid_err_handler = {
+ .error_detected = spraid_pci_error_detected,
+ .slot_reset = spraid_pci_slot_reset,
+ .reset_done = spraid_reset_done,
+};
+
+static int spraid_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+ int ret;
+ struct spraid_dev *hdev = shost_priv(shost);
+
+ dev_info(hdev->dev, "[%s] start sysfs host reset cmd\n", __func__);
+ ret = spraid_reset_work_sync(hdev);
+ dev_info(hdev->dev, "[%s] stop sysfs host reset cmd[%d]\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static struct scsi_host_template spraid_driver_template = {
+ .module = THIS_MODULE,
+ .name = "Ramaxel Logic spraid driver",
+ .proc_name = "spraid",
+ .queuecommand = spraid_queue_command,
+ .slave_alloc = spraid_slave_alloc,
+ .slave_destroy = spraid_slave_destroy,
+ .slave_configure = spraid_slave_configure,
+ .eh_timed_out = spraid_scmd_timeout,
+ .eh_abort_handler = spraid_abort_handler,
+ .eh_target_reset_handler = spraid_tgt_reset_handler,
+ .eh_bus_reset_handler = spraid_bus_reset_handler,
+ .eh_host_reset_handler = spraid_shost_reset_handler,
+ .change_queue_depth = scsi_change_queue_depth,
+ .this_id = -1,
+ .shost_attrs = spraid_host_attrs,
+ .sdev_attrs = spraid_dev_attrs,
+ .host_reset = spraid_sysfs_host_reset,
+};
+
+static void spraid_shutdown(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ spraid_remove_io_queues(hdev);
+ spraid_disable_admin_queue(hdev, true);
+}
+
+/* bsg dispatch user command */
+static int spraid_bsg_host_dispatch(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = dev_to_shost(job->dev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct spraid_bsg_request *bsg_req = job->request;
+ int ret = 0;
+
+	dev_info(hdev->dev,
+		 "[%s] msgcode[%d], msglen[%d], timeout[%d], req_nsge[%d], req_len[%d]\n",
+		 __func__, bsg_req->msgcode, job->request_len,
+		 rq->timeout, job->request_payload.sg_cnt,
+		 job->request_payload.payload_len);
+
+ job->reply_len = 0;
+
+ switch (bsg_req->msgcode) {
+ case SPRAID_BSG_ADM:
+ ret = spraid_user_admin_cmd(hdev, job);
+ break;
+ case SPRAID_BSG_IOQ:
+ ret = spraid_user_ioq_cmd(hdev, job);
+ break;
+ default:
+		dev_info(hdev->dev, "[%s] unsupported msgcode[%d]\n",
+ __func__, bsg_req->msgcode);
+ break;
+ }
+
+ if (ret > 0)
+ ret = ret | (ret << 8);
+
+ bsg_job_done(job, ret, 0);
+ return 0;
+}
+
+static inline void spraid_remove_bsg(struct spraid_dev *hdev)
+{
+ if (hdev->bsg_queue) {
+ bsg_unregister_queue(hdev->bsg_queue);
+ blk_cleanup_queue(hdev->bsg_queue);
+ }
+}
+
+static int spraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct spraid_dev *hdev;
+ struct Scsi_Host *shost;
+ int node, ret;
+ char bsg_name[15];
+
+ shost = scsi_host_alloc(&spraid_driver_template, sizeof(*hdev));
+ if (!shost) {
+ dev_err(&pdev->dev, "Failed to allocate scsi host\n");
+ return -ENOMEM;
+ }
+ hdev = shost_priv(shost);
+ hdev->pdev = pdev;
+ hdev->dev = get_device(&pdev->dev);
+
+ node = dev_to_node(hdev->dev);
+ if (node == NUMA_NO_NODE) {
+ node = first_memory_node;
+ set_dev_node(hdev->dev, node);
+ }
+ hdev->numa_node = node;
+ hdev->shost = shost;
+ pci_set_drvdata(pdev, hdev);
+
+ ret = spraid_dev_map(hdev);
+ if (ret)
+ goto put_dev;
+
+ init_rwsem(&hdev->devices_rwsem);
+ INIT_WORK(&hdev->scan_work, spraid_scan_work);
+ INIT_WORK(&hdev->timesyn_work, spraid_timesyn_work);
+ INIT_WORK(&hdev->reset_work, spraid_reset_work);
+ INIT_WORK(&hdev->fw_act_work, spraid_fw_act_work);
+ spin_lock_init(&hdev->state_lock);
+
+ ret = spraid_alloc_resources(hdev);
+ if (ret)
+ goto dev_unmap;
+
+ ret = spraid_pci_enable(hdev);
+ if (ret)
+ goto resources_free;
+
+ ret = spraid_setup_admin_queue(hdev);
+ if (ret)
+ goto pci_disable;
+
+ ret = spraid_init_ctrl_info(hdev);
+ if (ret)
+ goto disable_admin_q;
+
+ ret = spraid_alloc_iod_ext_mem_pool(hdev);
+ if (ret)
+ goto disable_admin_q;
+
+ ret = spraid_setup_io_queues(hdev);
+ if (ret)
+ goto free_iod_mempool;
+
+ spraid_shost_init(hdev);
+
+ ret = scsi_add_host(hdev->shost, hdev->dev);
+ if (ret) {
+ dev_err(hdev->dev, "Add shost to system failed, ret: %d\n",
+ ret);
+ goto remove_io_queues;
+ }
+
+ snprintf(bsg_name, sizeof(bsg_name), "spraid%d", shost->host_no);
+ hdev->bsg_queue = bsg_setup_queue(&shost->shost_gendev, bsg_name,
+ spraid_bsg_host_dispatch,
+ spraid_cmd_size(hdev, true, false));
+ if (IS_ERR(hdev->bsg_queue)) {
+ dev_err(hdev->dev, "err, setup bsg failed\n");
+ hdev->bsg_queue = NULL;
+ goto remove_io_queues;
+ }
+
+ if (hdev->online_queues == SPRAID_ADMIN_QUEUE_NUM) {
+ dev_warn(hdev->dev, "warn only admin queue can be used\n");
+ return 0;
+ }
+
+ hdev->state = SPRAID_LIVE;
+
+ spraid_send_all_aen(hdev);
+
+ ret = spraid_dev_list_init(hdev);
+ if (ret)
+ goto remove_bsg;
+
+ ret = spraid_configure_timestamp(hdev);
+ if (ret)
+ dev_warn(hdev->dev, "init set timestamp failed\n");
+
+ ret = spraid_alloc_ioq_ptcmds(hdev);
+ if (ret)
+ goto remove_bsg;
+
+ scsi_scan_host(hdev->shost);
+
+ return 0;
+
+remove_bsg:
+ spraid_remove_bsg(hdev);
+remove_io_queues:
+ spraid_remove_io_queues(hdev);
+free_iod_mempool:
+ spraid_free_iod_ext_mem_pool(hdev);
+disable_admin_q:
+ spraid_disable_admin_queue(hdev, false);
+pci_disable:
+ spraid_pci_disable(hdev);
+resources_free:
+ spraid_free_resources(hdev);
+dev_unmap:
+ spraid_dev_unmap(hdev);
+put_dev:
+ put_device(hdev->dev);
+ scsi_host_put(shost);
+
+ return -ENODEV;
+}
+
+static void spraid_remove(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = hdev->shost;
+
+ dev_info(hdev->dev, "enter spraid remove\n");
+
+ spraid_change_host_state(hdev, SPRAID_DELETING);
+ flush_work(&hdev->reset_work);
+
+ if (!pci_device_is_present(pdev))
+ spraid_back_all_io(hdev);
+
+ spraid_remove_bsg(hdev);
+ scsi_remove_host(shost);
+ spraid_free_ioq_ptcmds(hdev);
+ kfree(hdev->devices);
+ spraid_remove_io_queues(hdev);
+ spraid_free_iod_ext_mem_pool(hdev);
+ spraid_disable_admin_queue(hdev, false);
+ spraid_pci_disable(hdev);
+ spraid_free_resources(hdev);
+ spraid_dev_unmap(hdev);
+ put_device(hdev->dev);
+ scsi_host_put(shost);
+
+ dev_info(hdev->dev, "exit spraid remove\n");
+}
+
+static const struct pci_device_id spraid_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_RAMAXEL_LOGIC,
+ SPRAID_SERVER_DEVICE_HBA_DID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RAMAXEL_LOGIC,
+ SPRAID_SERVER_DEVICE_RAID_DID) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, spraid_id_table);
+
+static struct pci_driver spraid_driver = {
+ .name = "spraid",
+ .id_table = spraid_id_table,
+ .probe = spraid_probe,
+ .remove = spraid_remove,
+ .shutdown = spraid_shutdown,
+ .err_handler = &spraid_err_handler,
+};
+
+static int __init spraid_init(void)
+{
+ int ret;
+
+ spraid_wq = alloc_workqueue("spraid-wq", WQ_UNBOUND | WQ_MEM_RECLAIM |
+ WQ_SYSFS, 0);
+ if (!spraid_wq)
+ return -ENOMEM;
+
+ spraid_class = class_create(THIS_MODULE, "spraid");
+ if (IS_ERR(spraid_class)) {
+ ret = PTR_ERR(spraid_class);
+ goto destroy_wq;
+ }
+
+ ret = pci_register_driver(&spraid_driver);
+ if (ret < 0)
+ goto destroy_class;
+
+ return 0;
+
+destroy_class:
+ class_destroy(spraid_class);
+destroy_wq:
+ destroy_workqueue(spraid_wq);
+
+ return ret;
+}
+
+static void __exit spraid_exit(void)
+{
+ pci_unregister_driver(&spraid_driver);
+ class_destroy(spraid_class);
+ destroy_workqueue(spraid_wq);
+ ida_destroy(&spraid_instance_ida);
+}
+
+MODULE_AUTHOR("songyl(a)ramaxel.com");
+MODULE_DESCRIPTION("Ramaxel Memory Technology SPraid Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SPRAID_DRV_VERSION);
+module_init(spraid_init);
+module_exit(spraid_exit);
--
2.27.0