Kernel
December 2021
 - 27 participants
 - 170 discussions
 
[PATCH openEuler-1.0-LTS] btrfs: unlock newly allocated extent buffer after error
by Yang Yingliang, 27 Dec '21

From: Qu Wenruo <wqu(a)suse.com>
mainline inclusion
from mainline-v5.15-rc6
commit 19ea40dddf1833db868533958ca066f368862211
category: bugfix
bugzilla: NA
CVE: CVE-2021-4149
--------------------------------
[BUG]
There is a bug report that injected ENOMEM error could leave a tree
block locked while we return to user-space:
  BTRFS info (device loop0): enabling ssd optimizations
  FAULT_INJECTION: forcing a failure.
  name failslab, interval 1, probability 0, space 0, times 0
  CPU: 0 PID: 7579 Comm: syz-executor Not tainted 5.15.0-rc1 #16
  Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
  rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014
  Call Trace:
   __dump_stack lib/dump_stack.c:88 [inline]
   dump_stack_lvl+0x8d/0xcf lib/dump_stack.c:106
   fail_dump lib/fault-inject.c:52 [inline]
   should_fail+0x13c/0x160 lib/fault-inject.c:146
   should_failslab+0x5/0x10 mm/slab_common.c:1328
   slab_pre_alloc_hook.constprop.99+0x4e/0xc0 mm/slab.h:494
   slab_alloc_node mm/slub.c:3120 [inline]
   slab_alloc mm/slub.c:3214 [inline]
   kmem_cache_alloc+0x44/0x280 mm/slub.c:3219
   btrfs_alloc_delayed_extent_op fs/btrfs/delayed-ref.h:299 [inline]
   btrfs_alloc_tree_block+0x38c/0x670 fs/btrfs/extent-tree.c:4833
   __btrfs_cow_block+0x16f/0x7d0 fs/btrfs/ctree.c:415
   btrfs_cow_block+0x12a/0x300 fs/btrfs/ctree.c:570
   btrfs_search_slot+0x6b0/0xee0 fs/btrfs/ctree.c:1768
   btrfs_insert_empty_items+0x80/0xf0 fs/btrfs/ctree.c:3905
   btrfs_new_inode+0x311/0xa60 fs/btrfs/inode.c:6530
   btrfs_create+0x12b/0x270 fs/btrfs/inode.c:6783
   lookup_open+0x660/0x780 fs/namei.c:3282
   open_last_lookups fs/namei.c:3352 [inline]
   path_openat+0x465/0xe20 fs/namei.c:3557
   do_filp_open+0xe3/0x170 fs/namei.c:3588
   do_sys_openat2+0x357/0x4a0 fs/open.c:1200
   do_sys_open+0x87/0xd0 fs/open.c:1216
   do_syscall_x64 arch/x86/entry/common.c:50 [inline]
   do_syscall_64+0x34/0xb0 arch/x86/entry/common.c:80
   entry_SYSCALL_64_after_hwframe+0x44/0xae
  RIP: 0033:0x46ae99
  Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48
  89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d
  01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
  RSP: 002b:00007f46711b9c48 EFLAGS: 00000246 ORIG_RAX: 0000000000000055
  RAX: ffffffffffffffda RBX: 000000000078c0a0 RCX: 000000000046ae99
  RDX: 0000000000000000 RSI: 00000000000000a1 RDI: 0000000020005800
  RBP: 00007f46711b9c80 R08: 0000000000000000 R09: 0000000000000000
  R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000017
  R13: 0000000000000000 R14: 000000000078c0a0 R15: 00007ffc129da6e0
  ================================================
  WARNING: lock held when returning to user space!
  5.15.0-rc1 #16 Not tainted
  ------------------------------------------------
  syz-executor/7579 is leaving the kernel with locks still held!
  1 lock held by syz-executor/7579:
   #0: ffff888104b73da8 (btrfs-tree-01/1){+.+.}-{3:3}, at:
  __btrfs_tree_lock+0x2e/0x1a0 fs/btrfs/locking.c:112
[CAUSE]
In btrfs_alloc_tree_block(), after btrfs_init_new_buffer(), the new
extent buffer @buf is locked, but if later operations like adding a
delayed tree ref fail, we just free @buf without unlocking it,
resulting in the above warning.
[FIX]
Unlock @buf at the out_free_buf: label.
Reported-by: Hao Sun <sunhao.th(a)gmail.com>
Link: https://lore.kernel.org/linux-btrfs/CACkBjsZ9O6Zr0KK1yGn=1rQi6Crh1yeCRdTSBx…
CC: stable(a)vger.kernel.org # 5.4+
Signed-off-by: Qu Wenruo <wqu(a)suse.com>
Reviewed-by: David Sterba <dsterba(a)suse.com>
Signed-off-by: David Sterba <dsterba(a)suse.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng(a)huawei.com>
Reviewed-by: Jason Yan <yanaijie(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
 fs/btrfs/extent-tree.c | 1 +
 1 file changed, 1 insertion(+)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4bf93184362ec..dcbb76b62a0b0 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -8324,6 +8324,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 out_free_delayed:
 	btrfs_free_delayed_extent_op(extent_op);
 out_free_buf:
+	btrfs_tree_unlock(buf);
 	free_extent_buffer(buf);
 out_free_reserved:
 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
-- 
2.25.1
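
For context, a minimal sketch of the unwind order inside
btrfs_alloc_tree_block() after this fix; the surrounding code is elided
and names follow the diff above, so this is not the verbatim source:

	buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_free_reserved;	/* @buf was never locked here */
	}
	/* @buf is returned locked; any later failure must unlock it */
	...
out_free_buf:
	btrfs_tree_unlock(buf);		/* the line this patch adds */
	free_extent_buffer(buf);
out_free_reserved:
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);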
[PATCH openEuler-1.0-LTS] ext4: Fix null-ptr-deref in '__ext4_journal_ensure_credits'
by Yang Yingliang, 25 Dec '21

From: Ye Bin <yebin10(a)huawei.com>
hulk inclusion
category: bugfix
bugzilla: 185945, https://gitee.com/openeuler/kernel/issues/I4O33F
CVE: NA
-----------------------------------------------
We got the following issue when running a syzkaller test:
[ 1901.130043] EXT4-fs error (device vda): ext4_remount:5624: comm syz-executor.5: Abort forced by user
[ 1901.130901] Aborting journal on device vda-8.
[ 1901.131437] EXT4-fs error (device vda): ext4_journal_check_start:61: comm syz-executor.16: Detected aborted journal
[ 1901.131566] EXT4-fs error (device vda): ext4_journal_check_start:61: comm syz-executor.11: Detected aborted journal
[ 1901.132586] EXT4-fs error (device vda): ext4_journal_check_start:61: comm syz-executor.18: Detected aborted journal
[ 1901.132751] EXT4-fs error (device vda): ext4_journal_check_start:61: comm syz-executor.9: Detected aborted journal
[ 1901.136149] EXT4-fs error (device vda) in ext4_reserve_inode_write:6035: Journal has aborted
[ 1901.136837] EXT4-fs error (device vda): ext4_journal_check_start:61: comm syz-fuzzer: Detected aborted journal
[ 1901.136915] ==================================================================
[ 1901.138175] BUG: KASAN: null-ptr-deref in __ext4_journal_ensure_credits+0x74/0x140 [ext4]
[ 1901.138343] EXT4-fs error (device vda): ext4_journal_check_start:61: comm syz-executor.13: Detected aborted journal
[ 1901.138398] EXT4-fs error (device vda): ext4_journal_check_start:61: comm syz-executor.1: Detected aborted journal
[ 1901.138808] Read of size 8 at addr 0000000000000000 by task syz-executor.17/968
[ 1901.138817]
[ 1901.138852] EXT4-fs error (device vda): ext4_journal_check_start:61: comm syz-executor.30: Detected aborted journal
[ 1901.144779] CPU: 1 PID: 968 Comm: syz-executor.17 Not tainted 4.19.90-vhulk2111.1.0.h893.eulerosv2r10.aarch64+ #1
[ 1901.146479] Hardware name: linux,dummy-virt (DT)
[ 1901.147317] Call trace:
[ 1901.147552]  dump_backtrace+0x0/0x2d8
[ 1901.147898]  show_stack+0x28/0x38
[ 1901.148215]  dump_stack+0xec/0x15c
[ 1901.148746]  kasan_report+0x108/0x338
[ 1901.149207]  __asan_load8+0x58/0xb0
[ 1901.149753]  __ext4_journal_ensure_credits+0x74/0x140 [ext4]
[ 1901.150579]  ext4_xattr_delete_inode+0xe4/0x700 [ext4]
[ 1901.151316]  ext4_evict_inode+0x524/0xba8 [ext4]
[ 1901.151985]  evict+0x1a4/0x378
[ 1901.152353]  iput+0x310/0x428
[ 1901.152733]  do_unlinkat+0x260/0x428
[ 1901.153056]  __arm64_sys_unlinkat+0x6c/0xc0
[ 1901.153455]  el0_svc_common+0xc8/0x320
[ 1901.153799]  el0_svc_handler+0xf8/0x160
[ 1901.154265]  el0_svc+0x10/0x218
[ 1901.154682] ==================================================================
This issue may happen like this:
	Process1                               Process2
ext4_evict_inode
  ext4_journal_start
   ext4_truncate
     ext4_ind_truncate
       ext4_free_branches
         ext4_ind_truncate_ensure_credits
	   ext4_journal_ensure_credits_fn
	     ext4_journal_restart
	       handle->h_transaction = NULL;
                                           mount -o remount,abort  /mnt
					   -> trigger JBD abort
               start_this_handle -> will return failed
  ext4_xattr_delete_inode
    ext4_journal_ensure_credits
      ext4_journal_ensure_credits_fn
        __ext4_journal_ensure_credits
	  jbd2_handle_buffer_credits
	    journal = handle->h_transaction->t_journal; ->null-ptr-deref
The indirect truncate process does not handle the error. To solve this
issue, it is enough to simply check whether the handle is aborted in
'__ext4_journal_ensure_credits', and this check is also necessary.
Signed-off-by: Ye Bin <yebin10(a)huawei.com>
Reviewed-by: Zhang Yi <yi.zhang(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
 fs/ext4/ext4_jbd2.c | 2 ++
 1 file changed, 2 insertions(+)
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 022ab8afd9499..2c6dc99292fdc 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -140,6 +140,8 @@ int __ext4_journal_ensure_credits(handle_t *handle, int check_cred,
 {
 	if (!ext4_handle_valid(handle))
 		return 0;
+	if (is_handle_aborted(handle))
+		return -EROFS;
 	if (jbd2_handle_buffer_credits(handle) >= check_cred &&
 	    handle->h_revoke_credits >= revoke_cred)
 		return 0;
-- 
2.25.1
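
For context, a hedged sketch of the dereference chain the added check
short-circuits, simplified from the trace above (not the verbatim jbd2
source): after a failed ext4_journal_restart(), the handle is aborted
and handle->h_transaction is NULL.

	static inline int jbd2_handle_buffer_credits(handle_t *handle)
	{
		journal_t *journal;

		journal = handle->h_transaction->t_journal;
				/* NULL->t_journal: the reported oops */
		...
	}

	/* With the fix, is_handle_aborted(handle) is caught first and
	 * __ext4_journal_ensure_credits() bails out with -EROFS. */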
[PATCH OLK-5.10] Revert feature [arm64: Add memmap parameter and register pmem]
by Zhuling, 25 Dec '21

euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4O31I?from=project-issue
CVE: NA
The reserved memory of PMEM conflicts with
"add memmap interface to reserved memory for mremap syscall usage",
so it needs to be rolled back and will be resubmitted after adaptation.
Related feature commits:
1. PMEM function commit: 94dc364f5eda10f49449ba573dc3322e1ea92280
2. PMEM feature config commit: 36d7a831e15ceb84e937122c87d01c14242dc377
Signed-off-by: Zhuling <zhuling8(a)huawei.com>
---
 arch/arm64/Kconfig                     | 21 --------
 arch/arm64/configs/openeuler_defconfig |  3 --
 arch/arm64/kernel/Makefile             |  1 -
 arch/arm64/kernel/pmem.c               | 35 -------------
 arch/arm64/kernel/setup.c              | 10 ----
 arch/arm64/mm/init.c                   | 94 ----------------------------------
 drivers/nvdimm/Kconfig                 |  5 --
 drivers/nvdimm/Makefile                |  1 -
 8 files changed, 170 deletions(-)
 delete mode 100644 arch/arm64/kernel/pmem.c
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index df90a6e..2df4b31 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1321,27 +1321,6 @@ config RODATA_FULL_DEFAULT_ENABLED
 	  This requires the linear region to be mapped down to pages,
 	  which may adversely affect performance in some cases.
 
-config ARM64_PMEM_RESERVE
-	bool "Reserve memory for persistent storage"
-	default n
-	help
-	  Use memmap=nn[KMG]!ss[KMG](memmap=100K!0x1a0000000) reserve
-	  memory for persistent storage.
-
-	  Say y here to enable this feature.
-
-config ARM64_PMEM_LEGACY_DEVICE
-	bool "Create persistent storage"
-	depends on BLK_DEV
-	depends on LIBNVDIMM
-	select ARM64_PMEM_RESERVE
-	help
-	  Use reserved memory for persistent storage when the kernel
-	  restart or update. the data in PMEM will not be lost and
-	  can be loaded faster.
-
-	  Say y if unsure.
-
 config ARM64_SW_TTBR0_PAN
 	bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
 	help
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 17bc875..b5fc851 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -416,8 +416,6 @@ CONFIG_ARM64_CPU_PARK=y
 CONFIG_FORCE_MAX_ZONEORDER=11
 CONFIG_UNMAP_KERNEL_AT_EL0=y
 CONFIG_RODATA_FULL_DEFAULT_ENABLED=y
-CONFIG_ARM64_PMEM_RESERVE=y
-CONFIG_ARM64_PMEM_LEGACY_DEVICE=y
 # CONFIG_ARM64_SW_TTBR0_PAN is not set
 CONFIG_ARM64_TAGGED_ADDR_ABI=y
 CONFIG_ARM64_ILP32=y
@@ -6026,7 +6024,6 @@ CONFIG_ND_BTT=m
 CONFIG_BTT=y
 CONFIG_OF_PMEM=m
 CONFIG_NVDIMM_KEYS=y
-CONFIG_PMEM_LEGACY=m
 CONFIG_DAX_DRIVER=y
 CONFIG_DAX=y
 CONFIG_DEV_DAX=m
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index f615325..169d90f 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -68,7 +68,6 @@ obj-$(CONFIG_ARM64_PTR_AUTH)		+= pointer_auth.o
 obj-$(CONFIG_SHADOW_CALL_STACK)		+= scs.o
 obj-$(CONFIG_ARM64_MTE)			+= mte.o
 obj-$(CONFIG_MPAM)			+= mpam/
-obj-$(CONFIG_ARM64_PMEM_LEGACY_DEVICE)   += pmem.o
 
 obj-y					+= vdso/ probes/
 obj-$(CONFIG_COMPAT_VDSO)		+= vdso32/
diff --git a/arch/arm64/kernel/pmem.c b/arch/arm64/kernel/pmem.c
deleted file mode 100644
index 16eaf70..0000000
--- a/arch/arm64/kernel/pmem.c
+++ /dev/null
@@ -1,35 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright(c) 2021 Huawei Technologies Co., Ltd
- *
- * Derived from x86 and arm64 implement PMEM.
- */
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-
-static int found(struct resource *res, void *data)
-{
-	return 1;
-}
-
-static int __init register_e820_pmem(void)
-{
-	struct platform_device *pdev;
-	int rc;
-
-	rc = walk_iomem_res_desc(IORES_DESC_PERSISTENT_MEMORY_LEGACY,
-				 IORESOURCE_MEM, 0, -1, NULL, found);
-	if (rc <= 0)
-		return 0;
-
-	/*
-	 * See drivers/nvdimm/e820.c for the implementation, this is
-	 * simply here to trigger the module to load on demand.
-	 */
-	pdev = platform_device_alloc("e820_pmem", -1);
-
-	return platform_device_add(pdev);
-}
-device_initcall(register_e820_pmem);
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 88ec495..7cd0425 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -70,10 +70,6 @@ static int __init arm64_enable_cpu0_hotplug(char *str)
 __setup("arm64_cpu0_hotplug", arm64_enable_cpu0_hotplug);
 #endif
 
-#ifdef CONFIG_ARM64_PMEM_RESERVE
-extern struct resource pmem_res;
-#endif
-
 phys_addr_t __fdt_pointer __initdata;
 
 /*
@@ -288,12 +284,6 @@ static void __init request_standard_resources(void)
 			request_resource(res, &pin_memory_resource);
 #endif
 	}
-
-#ifdef CONFIG_ARM64_PMEM_RESERVE
-	if (pmem_res.end && pmem_res.start)
-		request_resource(&iomem_resource, &pmem_res);
-#endif
-
 }
 
 static int __init reserve_memblock_reserved_regions(void)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 3b9401e..e8d4461 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -55,7 +55,6 @@
  */
 s64 memstart_addr __ro_after_init = -1;
 EXPORT_SYMBOL(memstart_addr);
-phys_addr_t start_at, mem_size;
 
 #ifdef CONFIG_PIN_MEMORY
 struct resource pin_memory_resource = {
@@ -112,18 +111,6 @@ static void __init reserve_pin_memory_res(void)
  */
 phys_addr_t arm64_dma_phys_limit __ro_after_init;
 
-static unsigned long long pmem_size, pmem_start;
-
-#ifdef CONFIG_ARM64_PMEM_RESERVE
-struct resource pmem_res = {
-	.name = "Persistent Memory (legacy)",
-	.start = 0,
-	.end = 0,
-	.flags = IORESOURCE_MEM,
-	.desc = IORES_DESC_PERSISTENT_MEMORY_LEGACY
-};
-#endif
-
 #ifndef CONFIG_KEXEC_CORE
 static void __init reserve_crashkernel(void)
 {
@@ -417,83 +404,6 @@ static int __init reserve_park_mem(void)
 }
 #endif
 
-static bool __init is_mem_valid(unsigned long long mem_size, unsigned long long mem_start)
-{
-	if (!memblock_is_region_memory(mem_start, mem_size)) {
-		pr_warn("cannot reserve mem: region is not memory!\n");
-		return false;
-	}
-
-	if (memblock_is_region_reserved(mem_start, mem_size)) {
-		pr_warn("cannot reserve mem: region overlaps reserved memory!\n");
-		return false;
-	}
-
-	if (!IS_ALIGNED(mem_start, SZ_2M)) {
-		pr_warn("cannot reserve mem: base address is not 2MB aligned!\n");
-		return false;
-	}
-
-	return true;
-}
-
-static int __init parse_memmap_one(char *p)
-{
-	char *oldp;
-
-	if (!p)
-		return -EINVAL;
-
-	oldp = p;
-	mem_size = memparse(p, &p);
-	if (p == oldp)
-		return -EINVAL;
-
-	if (!mem_size)
-		return -EINVAL;
-
-	mem_size = PAGE_ALIGN(mem_size);
-
-	if (*p == '!') {
-		start_at = memparse(p+1, &p);
-
-		pmem_start = start_at;
-		pmem_size = mem_size;
-	} else
-		pr_info("Unrecognized memmap option, please check the parameter.\n");
-
-	return *p == '\0' ? 0 : -EINVAL;
-}
-
-static int __init parse_memmap_opt(char *str)
-{
-	while (str) {
-		char *k = strchr(str, ',');
-
-		if (k)
-			*k++ = 0;
-		parse_memmap_one(str);
-		str = k;
-	}
-
-	return 0;
-}
-early_param("memmap", parse_memmap_opt);
-
-#ifdef CONFIG_ARM64_PMEM_RESERVE
-static void __init reserve_pmem(void)
-{
-	if (!is_mem_valid(mem_size, start_at))
-		return;
-
-	memblock_remove(pmem_start, pmem_size);
-	pr_info("pmem reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
-		pmem_start, pmem_start + pmem_size, pmem_size >> 20);
-	pmem_res.start = pmem_start;
-	pmem_res.end = pmem_start + pmem_size - 1;
-}
-#endif
-
 void __init arm64_memblock_init(void)
 {
 	const s64 linear_region_size = BIT(vabits_actual - 1);
@@ -668,10 +578,6 @@ void __init bootmem_init(void)
 	reserve_quick_kexec();
 #endif
 
-#ifdef CONFIG_ARM64_PMEM_RESERVE
-	reserve_pmem();
-#endif
-
 	reserve_pin_memory_res();
 
 	memblock_dump_all();
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index ce4de75..b7d1eb3 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -132,8 +132,3 @@ config NVDIMM_TEST_BUILD
 	  infrastructure.
 
 endif
-
-config PMEM_LEGACY
-	tristate "Pmem_legacy"
-	select X86_PMEM_LEGACY if X86
-	select ARM64_PMEM_LEGACY_DEVICE if ARM64
diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile
index 6f8dc92..0407753 100644
--- a/drivers/nvdimm/Makefile
+++ b/drivers/nvdimm/Makefile
@@ -3,7 +3,6 @@ obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o
 obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
 obj-$(CONFIG_ND_BTT) += nd_btt.o
 obj-$(CONFIG_ND_BLK) += nd_blk.o
-obj-$(CONFIG_PMEM_LEGACY) += nd_e820.o
 obj-$(CONFIG_OF_PMEM) += of_pmem.o
 obj-$(CONFIG_VIRTIO_PMEM) += virtio_pmem.o nd_virtio.o
 
-- 
2.9.5
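
For context, a hedged sketch of how the removed parse_memmap_one()
consumed memmap=nn[KMG]!ss[KMG]; for example, memmap=100K!0x1a0000000
reserves 100 KiB of pmem at physical address 0x1a0000000. memparse()
parses a number with an optional K/M/G suffix and advances the cursor:

	char *p = "100K!0x1a0000000";
	unsigned long long size, start;

	size = memparse(p, &p);		/* size = 0x19000, *p is now '!' */
	if (*p == '!')
		start = memparse(p + 1, &p);	/* start = 0x1a0000000 */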
[PATCH openEuler-1.0-LTS] net/hinic: Fix call trace when the rx_buff module parameter is greater than 2
by Yang Yingliang, 25 Dec '21

From: Chiqijun <chiqijun(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4O2ZZ
-----------------------------------------------------------------------
When rx_buff is greater than 2, the driver allocates more than one
page of memory for network rx, but the __GFP_COMP gfp flag is not set,
resulting in the following call trace:
CPU: 3 PID: 494041 Comm: ping Kdump: loaded Tainted: G        W  OE     4.19.90-2106.3.0.0095.oe1.x86_64 #1
Hardware name: Huawei Technologies Co., Ltd. RH2288H V3/BC11HGSA0, BIOS 5.15 05/21/2019
RIP: 0010:copy_page_to_iter+0x154/0x310
Code: 31 b8 00 10 00 00 f7 c6 00 80 00 00 74 07 0f b6 49 51 48 d3 e0 48 39 c2 0f 86 ed fe ff ff 48 c7 c7 30
RSP: 0018:ffffbd6907d03bd8 EFLAGS: 00010286
RAX: 0000000000000024 RBX: ffffe0ffee5b3000 RCX: 0000000000000000
RDX: 0000000000000000 RSI: ffff9edbbfcd6858 RDI: ffff9edbbfcd6858
RBP: 0000000000000001 R08: 000000000001574a R09: 0000000000000004
R10: 000000000000004e R11: 0000000000000001 R12: ffffbd6907d03ed0
R13: 0000000000002100 R14: 0000000000000030 R15: 0000000000000000
FS:  00007f9d37244dc0(0000) GS:ffff9edbbfcc0000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007ffe0e715f80 CR3: 000000203c018005 CR4: 00000000001606e0
Call Trace:
 skb_copy_datagram_iter+0x16c/0x2a0
 raw_recvmsg+0xd0/0x1f0
 inet_recvmsg+0x5b/0xd0
 ____sys_recvmsg+0x95/0x160
 ? import_iovec+0x37/0xd0
 ? copy_msghdr_from_user+0x5c/0x90
 ___sys_recvmsg+0x8c/0xd0
 ? __audit_syscall_exit+0x228/0x290
 ? kretprobe_trampoline+0x25/0x50
 ? __sys_recvmsg+0x5b/0xa0
 __sys_recvmsg+0x5b/0xa0
 do_syscall_64+0x5f/0x240
 entry_SYSCALL_64_after_hwframe+0x44/0xa9
Use 'dev_alloc_pages' instead of calling 'alloc_pages_node' directly.
Signed-off-by: Chiqijun <chiqijun(a)huawei.com>
Reviewed-by: Wangxiaoyun <cloud.wangxiaoyun(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
 drivers/net/ethernet/huawei/hinic/hinic_rx.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index a9047432f3053..019cd439ce696 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -67,7 +67,7 @@ static bool rx_alloc_mapped_page(struct hinic_rxq *rxq,
 		return true;
 
 	/* alloc new page for storage */
-	page = alloc_pages_node(NUMA_NO_NODE, GFP_ATOMIC, nic_dev->page_order);
+	page = dev_alloc_pages(nic_dev->page_order);
 	if (unlikely(!page)) {
 		RXQ_STATS_INC(rxq, alloc_rx_buf_err);
 		return false;
-- 
2.25.1
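
For context, dev_alloc_pages() (include/linux/skbuff.h, shown here in
its approximate 4.19-era form) supplies the flags an order > 0 network
buffer needs, notably __GFP_COMP so the pages form a compound page:

	static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
						     unsigned int order)
	{
		gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;
		return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
	}

	static inline struct page *dev_alloc_pages(unsigned int order)
	{
		return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
	}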
[PATCH openEuler-1.0-LTS 1/3] arm64/mpam: remove __init macro to support driver probe
by Yang Yingliang, 24 Dec '21

From: Xingang Wang <wangxingang5(a)huawei.com>
ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I49RB2
CVE: NA
---------------------------------------------------
To support device tree boot for arm64 mpam, functions currently marked
with the __init macro may be called by the dts driver after boot. Remove
the __init macro from the related functions so that they are not
discarded after kernel initialization.
Signed-off-by: Xingang Wang <wangxingang5(a)huawei.com>
Reviewed-by: Wang ShaoBo <bobo.shaobowang(a)huawei.com>
Acked-by: Xie XiuQi <xiexiuqi(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
 arch/arm64/include/asm/resctrl.h       |  2 +-
 arch/arm64/kernel/mpam/mpam_device.c   | 14 +++++++-------
 arch/arm64/kernel/mpam/mpam_internal.h |  2 +-
 arch/arm64/kernel/mpam/mpam_resctrl.c  |  2 +-
 fs/resctrlfs.c                         |  2 +-
 include/linux/arm_mpam.h               | 14 +++++++-------
 6 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/arch/arm64/include/asm/resctrl.h b/arch/arm64/include/asm/resctrl.h
index ff4ce56430d1f..3fceaa482e5a3 100644
--- a/arch/arm64/include/asm/resctrl.h
+++ b/arch/arm64/include/asm/resctrl.h
@@ -424,7 +424,7 @@ int resctrl_update_groups_config(struct rdtgroup *rdtgrp);
 
 #define RESCTRL_MAX_CLOSID 32
 
-int __init resctrl_group_init(void);
+int resctrl_group_init(void);
 
 void post_resctrl_mount(void);
 
diff --git a/arch/arm64/kernel/mpam/mpam_device.c b/arch/arm64/kernel/mpam/mpam_device.c
index c0615f6947a1f..4139b4476a836 100644
--- a/arch/arm64/kernel/mpam/mpam_device.c
+++ b/arch/arm64/kernel/mpam/mpam_device.c
@@ -534,7 +534,7 @@ static void mpam_disable_irqs(void)
  * Scheduled by mpam_discovery_complete() once all devices have been created.
  * Also scheduled when new devices are probed when new CPUs come online.
  */
-static void __init mpam_enable(struct work_struct *work)
+static void mpam_enable(struct work_struct *work)
 {
 	int err;
 	unsigned long flags;
@@ -761,7 +761,7 @@ static struct mpam_class * __init mpam_class_get(u8 level_idx,
  * class/component structures may be allocated.
  * Returns the new device, or an ERR_PTR().
  */
-struct mpam_device * __init
+struct mpam_device *
 __mpam_device_create(u8 level_idx, enum mpam_class_types type,
 			int component_id, const struct cpumask *fw_affinity,
 			phys_addr_t hwpage_address)
@@ -810,7 +810,7 @@ __mpam_device_create(u8 level_idx, enum mpam_class_types type,
 	return dev;
 }
 
-void __init mpam_device_set_error_irq(struct mpam_device *dev, u32 irq,
+void mpam_device_set_error_irq(struct mpam_device *dev, u32 irq,
 					u32 flags)
 {
 	unsigned long irq_save_flags;
@@ -821,7 +821,7 @@ void __init mpam_device_set_error_irq(struct mpam_device *dev, u32 irq,
 	spin_unlock_irqrestore(&dev->lock, irq_save_flags);
 }
 
-void __init mpam_device_set_overflow_irq(struct mpam_device *dev, u32 irq,
+void mpam_device_set_overflow_irq(struct mpam_device *dev, u32 irq,
 					u32 flags)
 {
 	unsigned long irq_save_flags;
@@ -864,7 +864,7 @@ static inline u16 mpam_cpu_max_pmg(void)
 /*
  * prepare for initializing devices.
  */
-int __init mpam_discovery_start(void)
+int mpam_discovery_start(void)
 {
 	if (!mpam_cpus_have_feature())
 		return -EOPNOTSUPP;
@@ -1104,7 +1104,7 @@ static int mpam_cpu_offline(unsigned int cpu)
 	return 0;
 }
 
-int __init mpam_discovery_complete(void)
+int mpam_discovery_complete(void)
 {
 	int ret = 0;
 
@@ -1121,7 +1121,7 @@ int __init mpam_discovery_complete(void)
 	return ret;
 }
 
-void __init mpam_discovery_failed(void)
+void mpam_discovery_failed(void)
 {
 	struct mpam_class *class, *tmp;
 
diff --git a/arch/arm64/kernel/mpam/mpam_internal.h b/arch/arm64/kernel/mpam/mpam_internal.h
index cfaef82428aa5..7b84ea54975aa 100644
--- a/arch/arm64/kernel/mpam/mpam_internal.h
+++ b/arch/arm64/kernel/mpam/mpam_internal.h
@@ -329,7 +329,7 @@ int mpam_resctrl_setup(void);
 struct raw_resctrl_resource *
 mpam_get_raw_resctrl_resource(u32 level);
 
-int __init mpam_resctrl_init(void);
+int mpam_resctrl_init(void);
 
 int mpam_resctrl_set_default_cpu(unsigned int cpu);
 void mpam_resctrl_clear_default_cpu(unsigned int cpu);
diff --git a/arch/arm64/kernel/mpam/mpam_resctrl.c b/arch/arm64/kernel/mpam/mpam_resctrl.c
index 51cdefebaeba8..1ad4cd49762a3 100644
--- a/arch/arm64/kernel/mpam/mpam_resctrl.c
+++ b/arch/arm64/kernel/mpam/mpam_resctrl.c
@@ -2209,7 +2209,7 @@ static int __init mpam_setup(char *str)
 }
 __setup("mpam", mpam_setup);
 
-int __init mpam_resctrl_init(void)
+int mpam_resctrl_init(void)
 {
 	mpam_init_padding();
 
diff --git a/fs/resctrlfs.c b/fs/resctrlfs.c
index 11741c87eb0af..ea9df7d77b95a 100644
--- a/fs/resctrlfs.c
+++ b/fs/resctrlfs.c
@@ -1029,7 +1029,7 @@ static int __init resctrl_group_setup_root(void)
  *
  * Return: 0 on success or -errno
  */
-int __init resctrl_group_init(void)
+int resctrl_group_init(void)
 {
 	int ret = 0;
 
diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h
index 44d0690ae8c4b..a242d4fdff8b3 100644
--- a/include/linux/arm_mpam.h
+++ b/include/linux/arm_mpam.h
@@ -16,7 +16,7 @@ enum mpam_class_types {
 	MPAM_CLASS_UNKNOWN, /* Everything else, e.g. TLBs etc */
 };
 
-struct mpam_device * __init
+struct mpam_device *
 __mpam_device_create(u8 level_idx, enum mpam_class_types type,
 			int component_id, const struct cpumask *fw_affinity,
 			phys_addr_t hwpage_address);
@@ -54,9 +54,9 @@ mpam_device_create_memory(int nid, phys_addr_t hwpage_address)
 	return __mpam_device_create(~0, MPAM_CLASS_MEMORY, nid,
 			&dev_affinity, hwpage_address);
 }
-int __init mpam_discovery_start(void);
-int __init mpam_discovery_complete(void);
-void __init mpam_discovery_failed(void);
+int mpam_discovery_start(void);
+int mpam_discovery_complete(void);
+void mpam_discovery_failed(void);
 
 enum mpam_enable_type {
 	MPAM_ENABLE_DENIED = 0,
@@ -71,12 +71,12 @@ extern enum mpam_enable_type mpam_enabled;
 #define mpam_irq_flags_to_acpi(x) ((x & MPAM_IRQ_MODE_LEVEL) ?  \
 			ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE)
 
-void __init mpam_device_set_error_irq(struct mpam_device *dev, u32 irq,
+void mpam_device_set_error_irq(struct mpam_device *dev, u32 irq,
 			u32 flags);
-void __init mpam_device_set_overflow_irq(struct mpam_device *dev, u32 irq,
+void mpam_device_set_overflow_irq(struct mpam_device *dev, u32 irq,
 			u32 flags);
 
-static inline int __init mpam_register_device_irq(struct mpam_device *dev,
+static inline int mpam_register_device_irq(struct mpam_device *dev,
 			u32 overflow_interrupt, u32 overflow_flags,
 			u32 error_interrupt, u32 error_flags)
 {
-- 
2.25.1
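
For context, a short note on why the __init annotation must go: __init
places a function in the .init.text section, which the kernel frees once
boot completes, so a driver probing later would call into freed memory.
Simplified from include/linux/init.h:

	#define __init	__section(.init.text) __cold
	/* free_initmem() releases .init.text after boot; any function a
	 * device-tree driver may call at probe time must not carry it. */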
[openEuler-5.10 01/19] drm/hisilicon: Support i2c driver algorithms for bit-shift adapters
by Zheng Zengkai, 23 Dec '21

From: Ma Hai <mahai1(a)huawei.com>
mainline inclusion
from mainline-v5.12-rc2
commit 4eb4d99dfe3018d86f4529112aa7082f43b6996a
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4M6CD?from=project-issue
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/d…
----------------------------------------------------------------------
Add a driver implementation to support i2c driver algorithms for
bit-shift adapters, so that hibmc can use the interface provided by
drm to read the EDID.
Signed-off-by: Ma Hai <mahai1(a)huawei.com>
Signed-off-by: Tian Tao <tiantao6(a)hisilicon.com>
Reviewed-by: Thomas Zimmermann <tzimmermann(a)suse.de>
Reviewed-by: Li Dongming <lidongming5(a)huawei.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1600778670-60370-2-git-send-e…
Acked-by: Xie XiuQi <xiexiuqi(a)huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai(a)huawei.com>
---
 drivers/gpu/drm/hisilicon/hibmc/Makefile      |  2 +-
 .../gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h   | 25 ++++-
 .../gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c   | 99 +++++++++++++++++++
 .../gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c  |  2 +-
 4 files changed, 125 insertions(+), 3 deletions(-)
 create mode 100644 drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
index f99132715597..684ef794eb7c 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Makefile
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -1,4 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o
+hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o hibmc_drm_i2c.o
 
 obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 197485e2fe0b..87d2aad0bb5e 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -14,11 +14,23 @@
 #ifndef HIBMC_DRM_DRV_H
 #define HIBMC_DRM_DRV_H
 
+#include <linux/gpio/consumer.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/i2c.h>
+
+#include <drm/drm_edid.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_framebuffer.h>
 
 struct drm_device;
 
+struct hibmc_connector {
+	struct drm_connector base;
+
+	struct i2c_adapter adapter;
+	struct i2c_algo_bit_data bit_data;
+};
+
 struct hibmc_drm_private {
 	/* hw */
 	void __iomem   *mmio;
@@ -31,10 +43,20 @@ struct hibmc_drm_private {
 	struct drm_plane primary_plane;
 	struct drm_crtc crtc;
 	struct drm_encoder encoder;
-	struct drm_connector connector;
+	struct hibmc_connector connector;
 	bool mode_config_initialized;
 };
 
+static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *connector)
+{
+	return container_of(connector, struct hibmc_connector, base);
+}
+
+static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
+{
+	return dev->dev_private;
+}
+
 void hibmc_set_power_mode(struct hibmc_drm_private *priv,
 			  unsigned int power_mode);
 void hibmc_set_current_gate(struct hibmc_drm_private *priv,
@@ -47,6 +69,7 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc);
 void hibmc_mm_fini(struct hibmc_drm_private *hibmc);
 int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
 		      struct drm_mode_create_dumb *args);
+int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector);
 
 extern const struct drm_mode_config_funcs hibmc_mode_funcs;
 
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
new file mode 100644
index 000000000000..86d712090d87
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ *      Tian Tao <tiantao6(a)hisilicon.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+
+#include "hibmc_drm_drv.h"
+
+#define GPIO_DATA		0x0802A0
+#define GPIO_DATA_DIRECTION	0x0802A4
+
+#define I2C_SCL_MASK		BIT(0)
+#define I2C_SDA_MASK		BIT(1)
+
+static void hibmc_set_i2c_signal(void *data, u32 mask, int value)
+{
+	struct hibmc_connector *hibmc_connector = data;
+	struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
+	u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
+
+	if (value) {
+		tmp_dir &= ~mask;
+		writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
+	} else {
+		u32 tmp_data = readl(priv->mmio + GPIO_DATA);
+
+		tmp_data &= ~mask;
+		writel(tmp_data, priv->mmio + GPIO_DATA);
+
+		tmp_dir |= mask;
+		writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
+	}
+}
+
+static int hibmc_get_i2c_signal(void *data, u32 mask)
+{
+	struct hibmc_connector *hibmc_connector = data;
+	struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
+	u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
+
+	if ((tmp_dir & mask) != mask) {
+		tmp_dir &= ~mask;
+		writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
+	}
+
+	return (readl(priv->mmio + GPIO_DATA) & mask) ? 1 : 0;
+}
+
+static void hibmc_ddc_setsda(void *data, int state)
+{
+	hibmc_set_i2c_signal(data, I2C_SDA_MASK, state);
+}
+
+static void hibmc_ddc_setscl(void *data, int state)
+{
+	hibmc_set_i2c_signal(data, I2C_SCL_MASK, state);
+}
+
+static int hibmc_ddc_getsda(void *data)
+{
+	return hibmc_get_i2c_signal(data, I2C_SDA_MASK);
+}
+
+static int hibmc_ddc_getscl(void *data)
+{
+	return hibmc_get_i2c_signal(data, I2C_SCL_MASK);
+}
+
+int hibmc_ddc_create(struct drm_device *drm_dev,
+		     struct hibmc_connector *connector)
+{
+	connector->adapter.owner = THIS_MODULE;
+	connector->adapter.class = I2C_CLASS_DDC;
+	snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
+	connector->adapter.dev.parent = &drm_dev->pdev->dev;
+	i2c_set_adapdata(&connector->adapter, connector);
+	connector->adapter.algo_data = &connector->bit_data;
+
+	connector->bit_data.udelay = 20;
+	connector->bit_data.timeout = usecs_to_jiffies(2000);
+	connector->bit_data.data = connector;
+	connector->bit_data.setsda = hibmc_ddc_setsda;
+	connector->bit_data.setscl = hibmc_ddc_setscl;
+	connector->bit_data.getsda = hibmc_ddc_getsda;
+	connector->bit_data.getscl = hibmc_ddc_getscl;
+
+	return i2c_bit_add_bus(&connector->adapter);
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 376a05ddbc2f..c8b14afbcbed 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -78,7 +78,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
 {
 	struct drm_device *dev = priv->dev;
 	struct drm_encoder *encoder = &priv->encoder;
-	struct drm_connector *connector = &priv->connector;
+	struct drm_connector *connector = &priv->connector.base;
 	int ret;
 
 	encoder->possible_crtcs = 0x1;
-- 
2.20.1
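
For context, a hedged usage sketch: once hibmc_ddc_create() has
registered the bit-banged adapter, a connector's .get_modes hook can
read the EDID through the generic drm helpers. The function below
approximates later patches in this series, not this patch itself:

	static int hibmc_connector_get_modes(struct drm_connector *connector)
	{
		struct hibmc_connector *hibmc_connector =
				to_hibmc_connector(connector);
		struct edid *edid;
		int count;

		edid = drm_get_edid(connector, &hibmc_connector->adapter);
		if (!edid)
			return drm_add_modes_noedid(connector, 800, 600);

		drm_connector_update_edid_property(connector, edid);
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
		return count;
	}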
[PATCH openEuler-1.0-LTS 1/3] arm64/mpam: Fix mpam corrupt when cpu online
by Yang Yingliang, 23 Dec '21

From: Wang ShaoBo <bobo.shaobowang(a)huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I3YAI3
CVE: NA
-------------------------------------------------
The following error occurred occasionally on a machine that supports MPAM:
[   13.321386][  T658] Unable to handle kernel paging request at virtual address ffff80001115816c
[   13.326013][  T684] hid-generic 0003:12D1:0003.0002: input,hidraw1: USB HID v1.10 Mouse [Keyboard/Mouse KVM 1.1.0] on usb-0000:7a:01.0-1.1/input1
[   13.340558][  T658] Mem abort info:
[   13.340563][  T658]   ESR = 0x86000007
[   13.352567][    T5] hub 6-1:1.0: USB hub found
[   13.364750][  T658]   EC = 0x21: IABT (current EL), IL = 32 bits
[   13.369891][    T5] hub 6-1:1.0: 4 ports detected
[   13.373871][  T658]   SET = 0, FnV = 0
[   13.396107][  T658]   EA = 0, S1PTW = 0
[   13.400599][  T658] swapper pgtable: 64k pages, 48-bit VAs, pgdp=0000000029540000
[   13.408726][  T658] [ffff80001115816c] pgd=0000205fffff0003, p4d=0000205fffff0003, pud=0000205fffff0003, pmd=0000205ffffe0003, pte=0000000000000000
[   13.423346][  T658] Internal error: Oops: 86000007 [#1] SMP
[   13.429720][  T658] Modules linked in:
[   13.434243][  T658] CPU: 72 PID: 658 Comm: kworker/72:1 Not tainted 5.10.0-4.17.0.28.oe1.aarch64 #1
[   13.443966][  T658] Hardware name: Huawei TaiShan 200 (Model 2280)/BC82AMDDA, BIOS 1.70 01/07/2021
[   13.453683][  T658] Workqueue: events mpam_enable
[   13.459206][  T658] pstate: 20c00009 (nzCv daif +PAN +UAO -TCO BTYPE=--)
[   13.466625][  T658] pc : mpam_enable+0x194/0x1d8
[   13.472019][  T658] lr : mpam_enable+0x194/0x1d8
[   13.477301][  T658] sp : ffff80004664fd70
[   13.481937][  T658] x29: ffff80004664fd70 x28: 0000000000000000
[   13.488578][  T658] x27: ffff00400484a648 x26: ffff800011b71080
[   13.495306][  T658] x25: 0000000000000000 x24: ffff800011b6cda0
[   13.502001][  T658] x23: ffff800011646f18 x22: ffff800011b6cd80
[   13.508684][  T658] x21: ffff800011b6c000 x20: ffff800011646f08
[   13.515425][  T658] x19: ffff800011646f70 x18: 0000000000000020
[   13.522075][  T658] x17: 000000001790b332 x16: 0000000000000001
[   13.528785][  T658] x15: ffffffffffffffff x14: ff00000000000000
[   13.535464][  T658] x13: ffffffffffffffff x12: 0000000000000006
[   13.542045][  T658] x11: 00000091cea718e2 x10: 0000000000000b90
[   13.548735][  T658] x9 : ffff80001009ebac x8 : ffff2040061aabf0
[   13.555383][  T658] x7 : ffffa05f8dca0000 x6 : 000000000000000f
[   13.561924][  T658] x5 : 0000000000000000 x4 : ffff2040061aa000
[   13.568613][  T658] x3 : ffff80001164dfa0 x2 : 00000000ffffffff
[   13.575267][  T658] x1 : ffffa05f8dca0000 x0 : 00000000000000c1
[   13.581813][  T658] Call trace:
[   13.585600][  T658]  mpam_enable+0x194/0x1d8
[   13.590450][  T658]  process_one_work+0x1cc/0x390
[   13.595654][  T658]  worker_thread+0x70/0x2f0
[   13.600499][  T658]  kthread+0x118/0x120
[   13.604935][  T658]  ret_from_fork+0x10/0x18
[   13.609717][  T658] Code: bad PC value
[   13.613944][  T658] ---[ end trace f1e305d2c339f67f ]---
[   13.753818][  T658] Kernel panic - not syncing: Oops: Fatal exception
[   13.760885][  T658] SMP: stopping secondary CPUs
[   13.765933][  T658] Kernel Offset: disabled
[   13.770516][  T658] CPU features: 0x8040002,22208a38
[   13.775862][  T658] Memory Limit: none
[   13.913929][  T658] ---[ end Kernel panic - not syncing:
The MPAM device initialization process is like this:
mpam_discovery_start()
       ...                           // discover devices
mpam_discovery_complete()            // hang up the mpam_online/offline_cpu callbacks
   -=> mpam_cpu_online()             // probe all devices
       -=> mpam_enable()             // prepare for resctrl
       (1) -=> cpuhp_remove_state()  // clean resctrl internal structure
       (2) -=> cpuhp_setup_state()   // rehang mpam_online/offline_cpu callbacks
               -=> mpam_cpu_online() // it does not call mpam_enable again
                   -=> mpam_resctrl_cpu_online() // pull up resctrl
The re-registration of the mpam_cpu_online/offline callbacks should not
be disturbed by irqs, to ensure that the CPU context is reliable before
re-entering mpam_cpu_online(), which always happens between (1) and (2).
Fixes: 2ab89c893faf ("arm64/mpam: resctrl: Re-synchronise resctrl's view of online CPUs")
Signed-off-by: Wang ShaoBo <bobo.shaobowang(a)huawei.com>
Reviewed-by: Cheng Jian <cj.chengjian(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
 arch/arm64/kernel/mpam/mpam_device.c | 2 ++
 1 file changed, 2 insertions(+)
diff --git a/arch/arm64/kernel/mpam/mpam_device.c b/arch/arm64/kernel/mpam/mpam_device.c
index f8840274b902f..c0615f6947a1f 100644
--- a/arch/arm64/kernel/mpam/mpam_device.c
+++ b/arch/arm64/kernel/mpam/mpam_device.c
@@ -593,9 +593,11 @@ static void __init mpam_enable(struct work_struct *work)
 		pr_err("Failed to setup/init resctrl\n");
 	mutex_unlock(&mpam_devices_lock);
 
+	local_irq_disable();
 	mpam_cpuhp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
 						"mpam:online", mpam_cpu_online,
 						mpam_cpu_offline);
+	local_irq_enable();
 	if (mpam_cpuhp_state <= 0)
 		pr_err("Failed to re-register 'dyn' cpuhp callbacks");
 	mutex_unlock(&mpam_cpuhp_lock);
-- 
2.25.1
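
For context, a simplified sketch of the window the fix closes, following
the call graph and diff above: the callbacks are removed at (1) and
re-registered at (2), and interrupts stay off across the re-registration
so no hotplug event can observe a half-registered state:

	cpuhp_remove_state(mpam_cpuhp_state);	/* (1) callbacks gone */
	local_irq_disable();			/* added by this fix */
	mpam_cpuhp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					     "mpam:online",
					     mpam_cpu_online,
					     mpam_cpu_offline);
	local_irq_enable();			/* (2) callbacks back */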
[PATCH openEuler-1.0-LTS 1/2] kprobes: Set unoptimized flag after unoptimizing code
by Yang Yingliang, 22 Dec '21

From: Masami Hiramatsu <mhiramat(a)kernel.org>
stable inclusion
from stable-v4.19.108
commit 39af044d1ccb207da43af5c060bb0fb0dd548b3e
category: bugfix
bugzilla: NA
CVE: NA
-------------------------------------------------
commit f66c0447cca1281116224d474cdb37d6a18e4b5b upstream.
Set the unoptimized flag after confirming the code is completely
unoptimized. Without this fix, when a kprobe hits the intermediate
modified instruction (the first byte is replaced by an INT3, but
later bytes can still be a jump address operand) while unoptimizing,
it can return to the middle byte of the modified code, which causes
an invalid instruction exception in the kernel.
Usually, this is a rare case, but if we put a probe on the function
call while text patching, it always causes a kernel panic as below:
 # echo p text_poke+5 > kprobe_events
 # echo 1 > events/kprobes/enable
 # echo 0 > events/kprobes/enable
invalid opcode: 0000 [#1] PREEMPT SMP PTI
 RIP: 0010:text_poke+0x9/0x50
 Call Trace:
  arch_unoptimize_kprobe+0x22/0x28
  arch_unoptimize_kprobes+0x39/0x87
  kprobe_optimizer+0x6e/0x290
  process_one_work+0x2a0/0x610
  worker_thread+0x28/0x3d0
  ? process_one_work+0x610/0x610
  kthread+0x10d/0x130
  ? kthread_park+0x80/0x80
  ret_from_fork+0x3a/0x50
text_poke() is used for patching the code in optprobes.
This can happen even if we blacklist text_poke() and other functions,
because there is a small time window during which we show the intermediate
code to other CPUs.
 [ mingo: Edited the changelog. ]
Tested-by: Alexei Starovoitov <ast(a)kernel.org>
Signed-off-by: Masami Hiramatsu <mhiramat(a)kernel.org>
Cc: Andy Lutomirski <luto(a)kernel.org>
Cc: Borislav Petkov <bp(a)alien8.de>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Steven Rostedt <rostedt(a)goodmis.org>
Cc: Thomas Gleixner <tglx(a)linutronix.de>
Cc: bristot(a)redhat.com
Fixes: 6274de4984a6 ("kprobes: Support delayed unoptimizing")
Link: https://lkml.kernel.org/r/157483422375.25881.13508326028469515760.stgit@dev…
Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Yang Jihong <yangjihong1(a)huawei.com>
Reviewed-by: Kuohai Xu <xukuohai(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
 kernel/kprobes.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index f6e6edaf964c1..666243fff573d 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -523,6 +523,8 @@ static void do_unoptimize_kprobes(void)
 	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
 	/* Loop free_list for disarming */
 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
+		/* Switching from detour code to origin */
+		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
 		/* Disarm probes if marked disabled */
 		if (kprobe_disabled(&op->kp))
 			arch_disarm_kprobe(&op->kp);
@@ -662,6 +664,7 @@ static void force_unoptimize_kprobe(struct optimized_kprobe *op)
 {
 	lockdep_assert_cpus_held();
 	arch_unoptimize_kprobe(op);
+	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
 	if (kprobe_disabled(&op->kp))
 		arch_disarm_kprobe(&op->kp);
 }
@@ -689,7 +692,6 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
 		return;
 	}
 
-	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
 	if (!list_empty(&op->list)) {
 		/* Dequeue from the optimization queue */
 		list_del_init(&op->list);
-- 
2.25.1
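
For context, an annotated sketch of the intermediate text state the flag
ordering guards against (x86 optprobe, simplified from the changelog
above):

	/* optimized:	 e9 xx xx xx xx   5-byte jmp to the detour   */
	/* unoptimizing: cc xx xx xx xx   INT3 + stale operand bytes */
	/* unoptimized:	 original 5 bytes restored                   */

	/* If KPROBE_FLAG_OPTIMIZED is cleared while the middle state is
	 * still visible to other CPUs, a hit can resume inside the stale
	 * operand bytes and raise an invalid-opcode exception, so the
	 * flag is cleared only after the text is fully restored. */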
[PATCH openEuler-1.0-LTS] cpufreq: schedutil: Destroy mutex before kobject_put() frees the memory
by Yang Yingliang, 22 Dec '21

From: James Morse <james.morse(a)arm.com>
stable inclusion
from linux-4.19.209
commit 3cb9595e23d879824ebcefeabdf5156bc288000c
--------------------------------
[ Upstream commit cdef1196608892b9a46caa5f2b64095a7f0be60c ]
Since commit e5c6b312ce3c ("cpufreq: schedutil: Use kobject release()
method to free sugov_tunables") kobject_put() has kfree()d the
attr_set before gov_attr_set_put() returns.
kobject_put() isn't the last user of attr_set in gov_attr_set_put(),
the subsequent mutex_destroy() triggers a use-after-free:
| BUG: KASAN: use-after-free in mutex_is_locked+0x20/0x60
| Read of size 8 at addr ffff000800ca4250 by task cpuhp/2/20
|
| CPU: 2 PID: 20 Comm: cpuhp/2 Not tainted 5.15.0-rc1 #12369
| Hardware name: ARM LTD ARM Juno Development Platform/ARM Juno Development
| Platform, BIOS EDK II Jul 30 2018
| Call trace:
|  dump_backtrace+0x0/0x380
|  show_stack+0x1c/0x30
|  dump_stack_lvl+0x8c/0xb8
|  print_address_description.constprop.0+0x74/0x2b8
|  kasan_report+0x1f4/0x210
|  kasan_check_range+0xfc/0x1a4
|  __kasan_check_read+0x38/0x60
|  mutex_is_locked+0x20/0x60
|  mutex_destroy+0x80/0x100
|  gov_attr_set_put+0xfc/0x150
|  sugov_exit+0x78/0x190
|  cpufreq_offline.isra.0+0x2c0/0x660
|  cpuhp_cpufreq_offline+0x14/0x24
|  cpuhp_invoke_callback+0x430/0x6d0
|  cpuhp_thread_fun+0x1b0/0x624
|  smpboot_thread_fn+0x5e0/0xa6c
|  kthread+0x3a0/0x450
|  ret_from_fork+0x10/0x20
Swap the order of the calls.
Fixes: e5c6b312ce3c ("cpufreq: schedutil: Use kobject release() method to free sugov_tunables")
Cc: 4.7+ <stable(a)vger.kernel.org> # 4.7+
Signed-off-by: James Morse <james.morse(a)arm.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki(a)intel.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
 drivers/cpufreq/cpufreq_governor_attr_set.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c
index 52841f807a7eb..45fdf30cade39 100644
--- a/drivers/cpufreq/cpufreq_governor_attr_set.c
+++ b/drivers/cpufreq/cpufreq_governor_attr_set.c
@@ -77,8 +77,8 @@ unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *l
 	if (count)
 		return count;
 
-	kobject_put(&attr_set->kobj);
 	mutex_destroy(&attr_set->update_lock);
+	kobject_put(&attr_set->kobj);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(gov_attr_set_put);
-- 
2.25.1
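
For context, the general rule the swap follows: tear down members
embedded in a refcounted object before dropping the reference that may
free it. Simplified from gov_attr_set_put() above:

	mutex_destroy(&attr_set->update_lock);	/* object still alive */
	kobject_put(&attr_set->kobj);		/* may kfree(attr_set) via
						 * the kobject release() */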
[PATCH openEuler-1.0-LTS v3] scsi: spraid: support Ramaxel's spraid driver
by Yanling Song, 22 Dec '21

Ramaxel inclusion
category: features
bugzilla: https://gitee.com/openeuler/kernel/issues/I4NJIB
CVE: NA
v2->v3:
1. Add defconfig for hulk in arm64 and x86
v1->v2:
1. Add defconfig for arm64 and x86
Support Ramaxel's SPRxxx RAID controller
Signed-off-by: Yanling Song <songyl(a)ramaxel.com>
Reviewed-by: Jiang Yu <yujiang(a)ramaxel.com>
---
 arch/arm64/configs/hulk_defconfig      |    1 +
 arch/arm64/configs/openeuler_defconfig |    1 +
 arch/x86/configs/hulk_defconfig        |    3 +-
 arch/x86/configs/openeuler_defconfig   |    1 +
 drivers/scsi/Kconfig                   |    1 +
 drivers/scsi/Makefile                  |    1 +
 drivers/scsi/spraid/Kconfig            |   13 +
 drivers/scsi/spraid/Makefile           |    7 +
 drivers/scsi/spraid/spraid.h           |  746 +++++
 drivers/scsi/spraid/spraid_main.c      | 3875 ++++++++++++++++++++++++
 10 files changed, 4648 insertions(+), 1 deletion(-)
 create mode 100644 drivers/scsi/spraid/Kconfig
 create mode 100644 drivers/scsi/spraid/Makefile
 create mode 100644 drivers/scsi/spraid/spraid.h
 create mode 100644 drivers/scsi/spraid/spraid_main.c
diff --git a/arch/arm64/configs/hulk_defconfig b/arch/arm64/configs/hulk_defconfig
index bc350c41b333..71517c091c43 100644
--- a/arch/arm64/configs/hulk_defconfig
+++ b/arch/arm64/configs/hulk_defconfig
@@ -2149,6 +2149,7 @@ CONFIG_SCSI_MPT2SAS_MAX_SGE=128
 CONFIG_SCSI_MPT3SAS_MAX_SGE=128
 CONFIG_SCSI_MPT2SAS=m
 CONFIG_SCSI_SMARTPQI=m
+CONFIG_RAMAXEL_SPRAID=m
 # CONFIG_SCSI_UFSHCD is not set
 # CONFIG_SCSI_HPTIOP is not set
 CONFIG_LIBFC=m
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index d2167c80757f..47b0931b91bf 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -2143,6 +2143,7 @@ CONFIG_SCSI_MPT2SAS_MAX_SGE=128
 CONFIG_SCSI_MPT3SAS_MAX_SGE=128
 CONFIG_SCSI_MPT2SAS=m
 CONFIG_SCSI_SMARTPQI=m
+CONFIG_RAMAXEL_SPRAID=m
 # CONFIG_SCSI_UFSHCD is not set
 # CONFIG_SCSI_HPTIOP is not set
 CONFIG_LIBFC=m
diff --git a/arch/x86/configs/hulk_defconfig b/arch/x86/configs/hulk_defconfig
index 983d52ccad82..536ffb38c00c 100644
--- a/arch/x86/configs/hulk_defconfig
+++ b/arch/x86/configs/hulk_defconfig
@@ -1,4 +1,4 @@
-#
+
 # Automatically generated file; DO NOT EDIT.
 # Linux/x86 4.19.21 Kernel Configuration
 #
@@ -2177,6 +2177,7 @@ CONFIG_SCSI_MPT2SAS_MAX_SGE=128
 CONFIG_SCSI_MPT3SAS_MAX_SGE=128
 CONFIG_SCSI_MPT2SAS=m
 CONFIG_SCSI_SMARTPQI=m
+CONFIG_RAMAXEL_SPRAID=m
 # CONFIG_SCSI_UFSHCD is not set
 # CONFIG_SCSI_HPTIOP is not set
 # CONFIG_SCSI_BUSLOGIC is not set
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index 7043426d9780..786186288fea 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -2182,6 +2182,7 @@ CONFIG_SCSI_MPT2SAS_MAX_SGE=128
 CONFIG_SCSI_MPT3SAS_MAX_SGE=128
 CONFIG_SCSI_MPT2SAS=m
 CONFIG_SCSI_SMARTPQI=m
+CONFIG_RAMAXEL_SPRAID=m
 # CONFIG_SCSI_UFSHCD is not set
 # CONFIG_SCSI_HPTIOP is not set
 # CONFIG_SCSI_BUSLOGIC is not set
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8450484184e3..63d2aaa22834 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -522,6 +522,7 @@ source "drivers/scsi/megaraid/Kconfig.megaraid"
 source "drivers/scsi/mpt3sas/Kconfig"
 source "drivers/scsi/smartpqi/Kconfig"
 source "drivers/scsi/ufs/Kconfig"
+source "drivers/scsi/spraid/Kconfig"
 
 config SCSI_HPTIOP
 	tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 2973693f6dcc..4056cf26e09e 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -94,6 +94,7 @@ obj-$(CONFIG_SCSI_ZALON)	+= zalon7xx.o
 obj-$(CONFIG_SCSI_DC395x)	+= dc395x.o
 obj-$(CONFIG_SCSI_AM53C974)	+= esp_scsi.o	am53c974.o
 obj-$(CONFIG_CXLFLASH)		+= cxlflash/
+obj-$(CONFIG_RAMAXEL_SPRAID)	+= spraid/
 obj-$(CONFIG_MEGARAID_LEGACY)	+= megaraid.o
 obj-$(CONFIG_MEGARAID_NEWGEN)	+= megaraid/
 obj-$(CONFIG_MEGARAID_SAS)	+= megaraid/
diff --git a/drivers/scsi/spraid/Kconfig b/drivers/scsi/spraid/Kconfig
new file mode 100644
index 000000000000..bfbba3db8db0
--- /dev/null
+++ b/drivers/scsi/spraid/Kconfig
@@ -0,0 +1,13 @@
+#
+# Ramaxel driver configuration
+#
+
+config RAMAXEL_SPRAID
+	tristate "Ramaxel spraid Adapter"
+	depends on PCI && SCSI
+	select BLK_DEV_BSGLIB
+	depends on ARM64 || X86_64
+	help
+	  This driver supports Ramaxel's SPRxxx series RAID
+	  controllers, which connect to the host over a PCIe Gen4
+	  interface and support SAS/SATA HDDs and SSDs.
diff --git a/drivers/scsi/spraid/Makefile b/drivers/scsi/spraid/Makefile
new file mode 100644
index 000000000000..aadc2ffd37eb
--- /dev/null
+++ b/drivers/scsi/spraid/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Ramaxel device drivers.
+#
+
+obj-$(CONFIG_RAMAXEL_SPRAID) += spraid.o
+
+spraid-objs := spraid_main.o
\ No newline at end of file
diff --git a/drivers/scsi/spraid/spraid.h b/drivers/scsi/spraid/spraid.h
new file mode 100644
index 000000000000..c1e4980e18e5
--- /dev/null
+++ b/drivers/scsi/spraid/spraid.h
@@ -0,0 +1,746 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+#ifndef __SPRAID_H_
+#define __SPRAID_H_
+
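+/*
+ * Controller capability field accessors; the register layout below
+ * (CAP/CC/CSTS/AQA/ASQ/ACQ and the doorbells at 0x1000) mirrors the
+ * NVMe controller register set.
+ */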
+#define SPRAID_CAP_MQES(cap) ((cap) & 0xffff)
+#define SPRAID_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
+#define SPRAID_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
+#define SPRAID_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+#define SPRAID_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
+#define SPRAID_CAP_DMAMASK(cap) (((cap) >> 37) & 0xff)
+
+#define SPRAID_DEFAULT_MAX_CHANNEL 4
+#define SPRAID_DEFAULT_MAX_ID 240
+#define SPRAID_DEFAULT_MAX_LUN_PER_HOST 8
+#define MAX_SECTORS 2048
+
+#define IO_SQE_SIZE sizeof(struct spraid_ioq_command)
+#define ADMIN_SQE_SIZE sizeof(struct spraid_admin_command)
+#define SQE_SIZE(qid) (((qid) > 0) ? IO_SQE_SIZE : ADMIN_SQE_SIZE)
+#define CQ_SIZE(depth) ((depth) * sizeof(struct spraid_completion))
+#define SQ_SIZE(qid, depth) ((depth) * SQE_SIZE(qid))
+
+#define SENSE_SIZE(depth)	((depth) * SCSI_SENSE_BUFFERSIZE)
+
+#define SPRAID_AQ_DEPTH 128
+#define SPRAID_NR_AEN_COMMANDS 16
+#define SPRAID_AQ_BLK_MQ_DEPTH (SPRAID_AQ_DEPTH - SPRAID_NR_AEN_COMMANDS)
+#define SPRAID_AQ_MQ_TAG_DEPTH (SPRAID_AQ_BLK_MQ_DEPTH - 1)
+
+#define SPRAID_ADMIN_QUEUE_NUM 1
+#define SPRAID_PTCMDS_PERQ 1
+#define SPRAID_IO_BLK_MQ_DEPTH (hdev->shost->can_queue)
+#define SPRAID_NR_IOQ_PTCMDS (SPRAID_PTCMDS_PERQ * hdev->shost->nr_hw_queues)
+
+#define FUA_MASK 0x08
+#define SPRAID_MINORS BIT(MINORBITS)
+
+#define COMMAND_IS_WRITE(cmd) ((cmd)->common.opcode & 1)
+
+#define SPRAID_IO_IOSQES 7
+#define SPRAID_IO_IOCQES 4
+#define PRP_ENTRY_SIZE 8
+
+#define SMALL_POOL_SIZE 256
+#define MAX_SMALL_POOL_NUM 16
+#define MAX_CMD_PER_DEV 64
+#define MAX_CDB_LEN 32
+
+#define SPRAID_UP_TO_MULTY4(x) (((x) + 4) & (~0x03))
+
+#define CQE_STATUS_SUCCESS (0x0)
+
+#define PCI_VENDOR_ID_RAMAXEL_LOGIC 0x1E81
+
+#define SPRAID_SERVER_DEVICE_HBA_DID		0x2100
+#define SPRAID_SERVER_DEVICE_RAID_DID		0x2200
+
+#define IO_6_DEFAULT_TX_LEN 256
+
+#define SPRAID_INT_PAGES 2
+#define SPRAID_INT_BYTES(hdev) (SPRAID_INT_PAGES * (hdev)->page_size)
+
+enum {
+	SPRAID_REQ_CANCELLED = (1 << 0),
+	SPRAID_REQ_USERCMD = (1 << 1),
+};
+
+enum {
+	SPRAID_SC_SUCCESS = 0x0,
+	SPRAID_SC_INVALID_OPCODE = 0x1,
+	SPRAID_SC_INVALID_FIELD  = 0x2,
+
+	SPRAID_SC_ABORT_LIMIT = 0x103,
+	SPRAID_SC_ABORT_MISSING = 0x104,
+	SPRAID_SC_ASYNC_LIMIT = 0x105,
+
+	SPRAID_SC_DNR = 0x4000,
+};
+
+enum {
+	SPRAID_REG_CAP  = 0x0000,
+	SPRAID_REG_CC   = 0x0014,
+	SPRAID_REG_CSTS = 0x001c,
+	SPRAID_REG_AQA  = 0x0024,
+	SPRAID_REG_ASQ  = 0x0028,
+	SPRAID_REG_ACQ  = 0x0030,
+	SPRAID_REG_DBS  = 0x1000,
+};
+
+enum {
+	SPRAID_CC_ENABLE     = 1 << 0,
+	SPRAID_CC_CSS_NVM    = 0 << 4,
+	SPRAID_CC_MPS_SHIFT  = 7,
+	SPRAID_CC_AMS_SHIFT  = 11,
+	SPRAID_CC_SHN_SHIFT  = 14,
+	SPRAID_CC_IOSQES_SHIFT = 16,
+	SPRAID_CC_IOCQES_SHIFT = 20,
+	SPRAID_CC_AMS_RR       = 0 << SPRAID_CC_AMS_SHIFT,
+	SPRAID_CC_SHN_NONE     = 0 << SPRAID_CC_SHN_SHIFT,
+	SPRAID_CC_IOSQES       = SPRAID_IO_IOSQES << SPRAID_CC_IOSQES_SHIFT,
+	SPRAID_CC_IOCQES       = SPRAID_IO_IOCQES << SPRAID_CC_IOCQES_SHIFT,
+	SPRAID_CC_SHN_NORMAL   = 1 << SPRAID_CC_SHN_SHIFT,
+	SPRAID_CC_SHN_MASK     = 3 << SPRAID_CC_SHN_SHIFT,
+	SPRAID_CSTS_CFS_SHIFT  = 1,
+	SPRAID_CSTS_SHST_SHIFT = 2,
+	SPRAID_CSTS_PP_SHIFT   = 5,
+	SPRAID_CSTS_RDY	       = 1 << 0,
+	SPRAID_CSTS_SHST_CMPLT = 2 << 2,
+	SPRAID_CSTS_SHST_MASK  = 3 << 2,
+	SPRAID_CSTS_CFS_MASK   = 1 << SPRAID_CSTS_CFS_SHIFT,
+	SPRAID_CSTS_PP_MASK    = 1 << SPRAID_CSTS_PP_SHIFT,
+};
+
+enum {
+	SPRAID_ADMIN_DELETE_SQ = 0x00,
+	SPRAID_ADMIN_CREATE_SQ = 0x01,
+	SPRAID_ADMIN_DELETE_CQ = 0x04,
+	SPRAID_ADMIN_CREATE_CQ = 0x05,
+	SPRAID_ADMIN_ABORT_CMD = 0x08,
+	SPRAID_ADMIN_SET_FEATURES = 0x09,
+	SPRAID_ADMIN_ASYNC_EVENT = 0x0c,
+	SPRAID_ADMIN_GET_INFO = 0xc6,
+	SPRAID_ADMIN_RESET = 0xc8,
+};
+
+enum {
+	SPRAID_GET_INFO_CTRL = 0,
+	SPRAID_GET_INFO_DEV_LIST = 1,
+};
+
+enum {
+	SPRAID_RESET_TARGET = 0,
+	SPRAID_RESET_BUS = 1,
+};
+
+enum {
+	SPRAID_AEN_ERROR = 0,
+	SPRAID_AEN_NOTICE = 2,
+	SPRAID_AEN_VS = 7,
+};
+
+enum {
+	SPRAID_AEN_DEV_CHANGED = 0x00,
+	SPRAID_AEN_FW_ACT_START = 0x01,
+	SPRAID_AEN_HOST_PROBING = 0x10,
+};
+
+enum {
+	SPRAID_AEN_TIMESYN = 0x00,
+	SPRAID_AEN_FW_ACT_FINISH = 0x02,
+	SPRAID_AEN_EVENT_MIN = 0x80,
+	SPRAID_AEN_EVENT_MAX = 0xff,
+};
+
+enum {
+	SPRAID_CMD_WRITE = 0x01,
+	SPRAID_CMD_READ = 0x02,
+
+	SPRAID_CMD_NONIO_NONE = 0x80,
+	SPRAID_CMD_NONIO_TODEV = 0x81,
+	SPRAID_CMD_NONIO_FROMDEV = 0x82,
+};
+
+enum {
+	SPRAID_QUEUE_PHYS_CONTIG = (1 << 0),
+	SPRAID_CQ_IRQ_ENABLED = (1 << 1),
+
+	SPRAID_FEAT_NUM_QUEUES = 0x07,
+	SPRAID_FEAT_ASYNC_EVENT = 0x0b,
+	SPRAID_FEAT_TIMESTAMP = 0x0e,
+};
+
+enum spraid_state {
+	SPRAID_NEW,
+	SPRAID_LIVE,
+	SPRAID_RESETTING,
+	SPRAID_DELETING,
+	SPRAID_DEAD,
+};
+
+enum {
+	SPRAID_CARD_HBA,
+	SPRAID_CARD_RAID,
+};
+
+enum spraid_cmd_type {
+	SPRAID_CMD_ADM,
+	SPRAID_CMD_IOPT,
+};
+
+struct spraid_completion {
+	__le32 result;
+	union {
+		struct {
+			__u8	sense_len;
+			__u8	resv[3];
+		};
+		__le32	result1;
+	};
+	__le16 sq_head;
+	__le16 sq_id;
+	__u16  cmd_id;
+	__le16 status;
+};
+
+struct spraid_ctrl_info {
+	__le32 nd;
+	__le16 max_cmds;
+	__le16 max_channel;
+	__le32 max_tgt_id;
+	__le16 max_lun;
+	__le16 max_num_sge;
+	__le16 lun_num_in_boot;
+	__u8   mdts;
+	__u8   acl;
+	__u8   aerl;
+	__u8   card_type;
+	__u16  rsvd;
+	__u32  rtd3e;
+	__u8   sn[32];
+	__u8   fr[16];
+	__u8   rsvd1[4020];
+};
+
+struct spraid_dev {
+	struct pci_dev *pdev;
+	struct device *dev;
+	struct Scsi_Host *shost;
+	struct spraid_queue *queues;
+	struct dma_pool *prp_page_pool;
+	struct dma_pool *prp_small_pool[MAX_SMALL_POOL_NUM];
+	mempool_t *iod_mempool;
+	void __iomem *bar;
+	u32 max_qid;
+	u32 num_vecs;
+	u32 queue_count;
+	u32 ioq_depth;
+	int db_stride;
+	u32 __iomem *dbs;
+	struct rw_semaphore devices_rwsem;
+	int numa_node;
+	u32 page_size;
+	u32 ctrl_config;
+	u32 online_queues;
+	u64 cap;
+	int instance;
+	struct spraid_ctrl_info *ctrl_info;
+	struct spraid_dev_info *devices;
+
+	struct spraid_cmd *adm_cmds;
+	struct list_head adm_cmd_list;
+	spinlock_t adm_cmd_lock;
+
+	struct spraid_cmd *ioq_ptcmds;
+	struct list_head ioq_pt_list;
+	spinlock_t ioq_pt_lock;
+
+	struct work_struct scan_work;
+	struct work_struct timesyn_work;
+	struct work_struct reset_work;
+	struct work_struct fw_act_work;
+
+	enum spraid_state state;
+	spinlock_t state_lock;
+
+	struct request_queue *bsg_queue;
+};
+
+struct spraid_sgl_desc {
+	__le64 addr;
+	__le32 length;
+	__u8   rsvd[3];
+	__u8   type;
+};
+
+union spraid_data_ptr {
+	struct {
+		__le64 prp1;
+		__le64 prp2;
+	};
+	struct spraid_sgl_desc sgl;
+};
+
+struct spraid_admin_common_command {
+	__u8	opcode;
+	__u8	flags;
+	__u16	command_id;
+	__le32	hdid;
+	__le32	cdw2[4];
+	union spraid_data_ptr	dptr;
+	__le32	cdw10;
+	__le32	cdw11;
+	__le32	cdw12;
+	__le32	cdw13;
+	__le32	cdw14;
+	__le32	cdw15;
+};
+
+struct spraid_features {
+	__u8	opcode;
+	__u8	flags;
+	__u16	command_id;
+	__le32	hdid;
+	__u64	rsvd2[2];
+	union spraid_data_ptr dptr;
+	__le32	fid;
+	__le32	dword11;
+	__le32	dword12;
+	__le32	dword13;
+	__le32	dword14;
+	__le32	dword15;
+};
+
+struct spraid_create_cq {
+	__u8	opcode;
+	__u8	flags;
+	__u16	command_id;
+	__u32	rsvd1[5];
+	__le64	prp1;
+	__u64	rsvd8;
+	__le16	cqid;
+	__le16	qsize;
+	__le16	cq_flags;
+	__le16	irq_vector;
+	__u32	rsvd12[4];
+};
+
+struct spraid_create_sq {
+	__u8	opcode;
+	__u8	flags;
+	__u16	command_id;
+	__u32	rsvd1[5];
+	__le64	prp1;
+	__u64	rsvd8;
+	__le16	sqid;
+	__le16	qsize;
+	__le16	sq_flags;
+	__le16	cqid;
+	__u32	rsvd12[4];
+};
+
+struct spraid_delete_queue {
+	__u8	opcode;
+	__u8	flags;
+	__u16	command_id;
+	__u32	rsvd1[9];
+	__le16	qid;
+	__u16	rsvd10;
+	__u32	rsvd11[5];
+};
+
+struct spraid_get_info {
+	__u8	opcode;
+	__u8	flags;
+	__u16	command_id;
+	__le32	hdid;
+	__u32	rsvd2[4];
+	union spraid_data_ptr	dptr;
+	__u8	type;
+	__u8	rsvd10[3];
+	__le32	cdw11;
+	__u32	rsvd12[4];
+};
+
+struct spraid_usr_cmd {
+	__u8	opcode;
+	__u8	flags;
+	__u16	command_id;
+	__le32	hdid;
+	union {
+		struct {
+			__le16 subopcode;
+			__le16 rsvd1;
+		} info_0;
+		__le32 cdw2;
+	};
+	union {
+		struct {
+			__le16 data_len;
+			__le16 param_len;
+		} info_1;
+		__le32 cdw3;
+	};
+	__u64 metadata;
+	union spraid_data_ptr	dptr;
+	__le32 cdw10;
+	__le32 cdw11;
+	__le32 cdw12;
+	__le32 cdw13;
+	__le32 cdw14;
+	__le32 cdw15;
+};
+
+enum {
+	SPRAID_CMD_FLAG_SGL_METABUF = (1 << 6),
+	SPRAID_CMD_FLAG_SGL_METASEG = (1 << 7),
+	SPRAID_CMD_FLAG_SGL_ALL     = SPRAID_CMD_FLAG_SGL_METABUF |
+				      SPRAID_CMD_FLAG_SGL_METASEG,
+};
+
+enum spraid_cmd_state {
+	SPRAID_CMD_IDLE = 0,
+	SPRAID_CMD_IN_FLIGHT = 1,
+	SPRAID_CMD_COMPLETE = 2,
+	SPRAID_CMD_TIMEOUT = 3,
+	SPRAID_CMD_TMO_COMPLETE = 4,
+};
+
+struct spraid_abort_cmd {
+	__u8	opcode;
+	__u8	flags;
+	__u16	command_id;
+	__le32	hdid;
+	__u64	rsvd2[4];
+	__le16	sqid;
+	__le16	cid;
+	__u32	rsvd11[5];
+};
+
+struct spraid_reset_cmd {
+	__u8	opcode;
+	__u8	flags;
+	__u16	command_id;
+	__le32	hdid;
+	__u64	rsvd2[4];
+	__u8	type;
+	__u8	rsvd10[3];
+	__u32	rsvd11[5];
+};
+
+struct spraid_admin_command {
+	union {
+		struct spraid_admin_common_command common;
+		struct spraid_features features;
+		struct spraid_create_cq create_cq;
+		struct spraid_create_sq create_sq;
+		struct spraid_delete_queue delete_queue;
+		struct spraid_get_info get_info;
+		struct spraid_abort_cmd abort;
+		struct spraid_reset_cmd reset;
+		struct spraid_usr_cmd usr_cmd;
+	};
+};
+
+struct spraid_ioq_common_command {
+	__u8	opcode;
+	__u8	flags;
+	__u16	command_id;
+	__le32	hdid;
+	__le16	sense_len;
+	__u8	cdb_len;
+	__u8	rsvd2;
+	__le32	cdw3[3];
+	union spraid_data_ptr	dptr;
+	__le32	cdw10[6];
+	__u8	cdb[32];
+	__le64	sense_addr;
+	__le32	cdw26[6];
+};
+
+struct spraid_rw_command {
+	__u8	opcode;
+	__u8	flags;
+	__u16	command_id;
+	__le32	hdid;
+	__le16	sense_len;
+	__u8	cdb_len;
+	__u8	rsvd2;
+	__u32	rsvd3[3];
+	union spraid_data_ptr	dptr;
+	__le64	slba;
+	__le16	nlb;
+	__le16	control;
+	__u32	rsvd13[3];
+	__u8	cdb[32];
+	__le64	sense_addr;
+	__u32	rsvd26[6];
+};
+
+struct spraid_scsi_nonio {
+	__u8	opcode;
+	__u8	flags;
+	__u16	command_id;
+	__le32	hdid;
+	__le16	sense_len;
+	__u8	cdb_length;
+	__u8	rsvd2;
+	__u32	rsvd3[3];
+	union spraid_data_ptr	dptr;
+	__u32	rsvd10[5];
+	__le32	buffer_len;
+	__u8	cdb[32];
+	__le64	sense_addr;
+	__u32	rsvd26[6];
+};
+
+struct spraid_ioq_command {
+	union {
+		struct spraid_ioq_common_command common;
+		struct spraid_rw_command rw;
+		struct spraid_scsi_nonio scsi_nonio;
+	};
+};
+
+struct spraid_passthru_common_cmd {
+	__u8	opcode;
+	__u8	flags;
+	__u16	rsvd0;
+	__u32	nsid;
+	union {
+		struct {
+			__u16 subopcode;
+			__u16 rsvd1;
+		} info_0;
+		__u32 cdw2;
+	};
+	union {
+		struct {
+			__u16 data_len;
+			__u16 param_len;
+		} info_1;
+		__u32 cdw3;
+	};
+	__u64 metadata;
+
+	__u64 addr;
+	__u64 prp2;
+
+	__u32 cdw10;
+	__u32 cdw11;
+	__u32 cdw12;
+	__u32 cdw13;
+	__u32 cdw14;
+	__u32 cdw15;
+	__u32 timeout_ms;
+	__u32 result0;
+	__u32 result1;
+};
+
+struct spraid_ioq_passthru_cmd {
+	__u8  opcode;
+	__u8  flags;
+	__u16 rsvd0;
+	__u32 nsid;
+	union {
+		struct {
+			__u16 res_sense_len;
+			__u8  cdb_len;
+			__u8  rsvd0;
+		} info_0;
+		__u32 cdw2;
+	};
+	union {
+		struct {
+			__u16 subopcode;
+			__u16 rsvd1;
+		} info_1;
+		__u32 cdw3;
+	};
+	union {
+		struct {
+			__u16 rsvd;
+			__u16 param_len;
+		} info_2;
+		__u32 cdw4;
+	};
+	__u32 cdw5;
+	__u64 addr;
+	__u64 prp2;
+	union {
+		struct {
+			__u16 eid;
+			__u16 sid;
+		} info_3;
+		__u32 cdw10;
+	};
+	union {
+		struct {
+			__u16 did;
+			__u8  did_flag;
+			__u8  rsvd2;
+		} info_4;
+		__u32 cdw11;
+	};
+	__u32 cdw12;
+	__u32 cdw13;
+	__u32 cdw14;
+	__u32 data_len;
+	__u32 cdw16;
+	__u32 cdw17;
+	__u32 cdw18;
+	__u32 cdw19;
+	__u32 cdw20;
+	__u32 cdw21;
+	__u32 cdw22;
+	__u32 cdw23;
+	__u64 sense_addr;
+	__u32 cdw26[4];
+	__u32 timeout_ms;
+	__u32 result0;
+	__u32 result1;
+};
+
+struct spraid_bsg_request {
+	u32  msgcode;
+	u32 control;
+	union {
+		struct spraid_passthru_common_cmd admcmd;
+		struct spraid_ioq_passthru_cmd    ioqcmd;
+	};
+};
+
+enum {
+	SPRAID_BSG_ADM,
+	SPRAID_BSG_IOQ,
+};
+
+struct spraid_cmd {
+	int qid;
+	int cid;
+	u32 result0;
+	u32 result1;
+	u16 status;
+	void *priv;
+	enum spraid_cmd_state state;
+	struct completion cmd_done;
+	struct list_head list;
+};
+
+struct spraid_queue {
+	struct spraid_dev *hdev;
+	spinlock_t sq_lock;
+
+	spinlock_t cq_lock ____cacheline_aligned_in_smp;
+
+	void *sq_cmds;
+
+	struct spraid_completion *cqes;
+
+	dma_addr_t sq_dma_addr;
+	dma_addr_t cq_dma_addr;
+	u32 __iomem *q_db;
+	u8 cq_phase;
+	u8 sqes;
+	u16 qid;
+	u16 sq_tail;
+	u16 cq_head;
+	u16 last_cq_head;
+	u16 q_depth;
+	s16 cq_vector;
+	void *sense;
+	dma_addr_t sense_dma_addr;
+	struct dma_pool *prp_small_pool;
+};
+
+struct spraid_iod {
+	struct spraid_queue *spraidq;
+	enum spraid_cmd_state state;
+	int npages;
+	u32 nsge;
+	u32 length;
+	bool use_sgl;
+	bool sg_drv_mgmt;
+	dma_addr_t first_dma;
+	void *sense;
+	dma_addr_t sense_dma;
+	struct scatterlist *sg;
+	struct scatterlist inline_sg[0];
+};
+
+#define SPRAID_DEV_INFO_ATTR_BOOT(attr) ((attr) & 0x01)
+#define SPRAID_DEV_INFO_ATTR_VD(attr) (((attr) & 0x02) == 0x0)
+#define SPRAID_DEV_INFO_ATTR_PT(attr) (((attr) & 0x22) == 0x02)
+#define SPRAID_DEV_INFO_ATTR_RAWDISK(attr) ((attr) & 0x20)
+
+#define SPRAID_DEV_INFO_FLAG_VALID(flag) ((flag) & 0x01)
+#define SPRAID_DEV_INFO_FLAG_CHANGE(flag) ((flag) & 0x02)
+
+#define BGTASK_TYPE_REBUILD 4
+#define USR_CMD_READ 0xc2
+#define USR_CMD_RDLEN 0x1000
+#define USR_CMD_VDINFO 0x704
+#define USR_CMD_BGTASK 0x504
+#define VDINFO_PARAM_LEN 0x04
+
+struct spraid_vd_info {
+	__u8 name[32];
+	__le16 id;
+	__u8 rg_id;
+	__u8 rg_level;
+	__u8 sg_num;
+	__u8 sg_disk_num;
+	__u8 vd_status;
+	__u8 vd_type;
+	__u8 rsvd1[4056];
+};
+
+#define MAX_REALTIME_BGTASK_NUM 32
+
+struct bgtask_info {
+	__u8 type;
+	__u8 progress;
+	__u8 rate;
+	__u8 rsvd0;
+	__le16 vd_id;
+	__le16 time_left;
+	__u8 rsvd1[4];
+};
+
+struct spraid_bgtask {
+	__u8 sw;
+	__u8 task_num;
+	__u8 rsvd[6];
+	struct bgtask_info bgtask[MAX_REALTIME_BGTASK_NUM];
+};
+
+struct spraid_dev_info {
+	__le32	hdid;
+	__le16	target;
+	__u8	channel;
+	__u8	lun;
+	__u8	attr;
+	__u8	flag;
+	__le16	max_io_kb;
+};
+
+#define MAX_DEV_ENTRY_PER_PAGE_4K	340
+struct spraid_dev_list {
+	__le32	dev_num;
+	__u32	rsvd0[3];
+	struct spraid_dev_info	devices[MAX_DEV_ENTRY_PER_PAGE_4K];
+};
+
+struct spraid_sdev_hostdata {
+	u32 hdid;
+	u16 max_io_kb;
+	u8 attr;
+	u8 flag;
+	u8 rg_id;
+	u8 rsvd[3];
+};
+
+#endif
+
diff --git a/drivers/scsi/spraid/spraid_main.c b/drivers/scsi/spraid/spraid_main.c
new file mode 100644
index 000000000000..519b39f44e91
--- /dev/null
+++ b/drivers/scsi/spraid/spraid_main.c
@@ -0,0 +1,3875 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+/* Ramaxel Raid SPXXX Series Linux Driver */
+
+#define pr_fmt(fmt) "spraid: " fmt
+
+#include <linux/sched/signal.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/sysfs.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/ratelimit.h>
+#include <linux/once.h>
+#include <linux/debugfs.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/blkdev.h>
+#include <linux/bsg-lib.h>
+#include <asm/unaligned.h>
+#include <linux/sort.h>
+#include <target/target_core_backend.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_dbg.h>
+
+
+#include "spraid.h"
+
+static u32 admin_tmout = 60;
+module_param(admin_tmout, uint, 0644);
+MODULE_PARM_DESC(admin_tmout, "admin commands timeout (seconds)");
+
+static u32 scmd_tmout_nonpt = 180;
+module_param(scmd_tmout_nonpt, uint, 0644);
+MODULE_PARM_DESC(scmd_tmout_nonpt,
+		 "scsi command timeout for rawdisk & raid (seconds)");
+
+static int ioq_depth_set(const char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops ioq_depth_ops = {
+	.set = ioq_depth_set,
+	.get = param_get_uint,
+};
+
+static u32 io_queue_depth = 1024;
+module_param_cb(io_queue_depth, &ioq_depth_ops, &io_queue_depth, 0644);
+MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should be >= 2");
+
+static int log_debug_switch_set(const char *val, const struct kernel_param *kp)
+{
+	u8 n = 0;
+	int ret;
+
+	ret = kstrtou8(val, 10, &n);
+	if (ret != 0)
+		return -EINVAL;
+
+	return param_set_byte(val, kp);
+}
+
+static const struct kernel_param_ops log_debug_switch_ops = {
+	.set = log_debug_switch_set,
+	.get = param_get_byte,
+};
+
+static unsigned char log_debug_switch;
+module_param_cb(log_debug_switch, &log_debug_switch_ops,
+		&log_debug_switch, 0644);
+MODULE_PARM_DESC(log_debug_switch,
+		 "set log state, non-zero to switch debug logging on");
+
+static int small_pool_num_set(const char *val, const struct kernel_param *kp)
+{
+	u8 n = 0;
+	int ret;
+
+	ret = kstrtou8(val, 10, &n);
+	if (ret != 0)
+		return -EINVAL;
+	if (n > MAX_SMALL_POOL_NUM)
+		n = MAX_SMALL_POOL_NUM;
+	if (n < 1)
+		n = 1;
+	*((u8 *)kp->arg) = n;
+
+	return 0;
+}
+
+static const struct kernel_param_ops small_pool_num_ops = {
+	.set = small_pool_num_set,
+	.get = param_get_byte,
+};
+
+/* It was found that the spinlock of a single pool is contended
+ * heavily by multiple CPUs, so multiple pools are introduced
+ * to reduce the contention.
+ */
+static unsigned char small_pool_num = 4;
+module_param_cb(small_pool_num, &small_pool_num_ops, &small_pool_num, 0644);
+MODULE_PARM_DESC(small_pool_num, "set prp small pool num, default 4, MAX 16");
+
+static void spraid_free_queue(struct spraid_queue *spraidq);
+static void spraid_handle_aen_notice(struct spraid_dev *hdev, u32 result);
+static void spraid_handle_aen_vs(struct spraid_dev *hdev,
+				 u32 result, u32 result1);
+
+static DEFINE_IDA(spraid_instance_ida);
+
+static struct class *spraid_class;
+
+#define SPRAID_CAP_TIMEOUT_UNIT_MS	(HZ / 2)
+
+static struct workqueue_struct *spraid_wq;
+
+#define dev_log_dbg(dev, fmt, ...)	do { \
+	if (unlikely(log_debug_switch))	\
+		dev_info(dev, "[%s] [%d] " fmt,	\
+			__func__, __LINE__, ##__VA_ARGS__);	\
+} while (0)
+
+#define SPRAID_DRV_VERSION	"1.0.0.0"
+
+#define ADMIN_TIMEOUT		(admin_tmout * HZ)
+#define ADMIN_ERR_TIMEOUT	32757
+
+#define SPRAID_WAIT_ABNL_CMD_TIMEOUT	(3 * 2)
+
+#define SPRAID_DMA_MSK_BIT_MAX	64
+
+enum FW_STAT_CODE {
+	FW_STAT_OK = 0,
+	FW_STAT_NEED_CHECK,
+	FW_STAT_ERROR,
+	FW_STAT_EP_PCIE_ERROR,
+	FW_STAT_NAC_DMA_ERROR,
+	FW_STAT_ABORTED,
+	FW_STAT_NEED_RETRY
+};
+
+static const char * const raid_levels[] = {"0", "1", "5", "6", "10", "50", "60",
+					   "NA"};
+
+static const char * const raid_states[] = {
+	"NA", "NORMAL", "FAULT", "DEGRADE", "NOT_FORMATTED",
+	"FORMATTING", "SANITIZING", "INITIALIZING", "INITIALIZE_FAIL",
+	"DELETING", "DELETE_FAIL", "WRITE_PROTECT"
+};
+
+static int ioq_depth_set(const char *val, const struct kernel_param *kp)
+{
+	int n = 0;
+	int ret;
+
+	ret = kstrtoint(val, 10, &n);
+	if (ret != 0 || n < 2)
+		return -EINVAL;
+
+	return param_set_int(val, kp);
+}
+
+static int spraid_remap_bar(struct spraid_dev *hdev, u32 size)
+{
+	struct pci_dev *pdev = hdev->pdev;
+
+	if (size > pci_resource_len(pdev, 0)) {
+		dev_err(hdev->dev, "Input size[%u] exceeds bar0 length[%llu]\n",
+			size, pci_resource_len(pdev, 0));
+		return -ENOMEM;
+	}
+
+	if (hdev->bar)
+		iounmap(hdev->bar);
+
+	hdev->bar = ioremap(pci_resource_start(pdev, 0), size);
+	if (!hdev->bar) {
+		dev_err(hdev->dev, "ioremap for bar0 failed\n");
+		return -ENOMEM;
+	}
+	hdev->dbs = hdev->bar + SPRAID_REG_DBS;
+
+	return 0;
+}
+
+static int spraid_dev_map(struct spraid_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+	int ret;
+
+	ret = pci_request_mem_regions(pdev, "spraid");
+	if (ret) {
+		dev_err(hdev->dev, "Failed to request memory regions\n");
+		return ret;
+	}
+
+	ret = spraid_remap_bar(hdev, SPRAID_REG_DBS + 4096);
+	if (ret) {
+		pci_release_mem_regions(pdev);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void spraid_dev_unmap(struct spraid_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+
+	if (hdev->bar) {
+		iounmap(hdev->bar);
+		hdev->bar = NULL;
+	}
+	pci_release_mem_regions(pdev);
+}
+
+static int spraid_pci_enable(struct spraid_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+	int ret = -ENOMEM;
+	u64 maskbit = SPRAID_DMA_MSK_BIT_MAX;
+
+	if (pci_enable_device_mem(pdev)) {
+		dev_err(hdev->dev,
+			"Enable pci device memory resources failed\n");
+		return ret;
+	}
+	pci_set_master(pdev);
+
+	if (readl(hdev->bar + SPRAID_REG_CSTS) == U32_MAX) {
+		ret = -ENODEV;
+		dev_err(hdev->dev, "Read csts register failed\n");
+		goto disable;
+	}
+
+	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+	if (ret < 0) {
+		dev_err(hdev->dev,
+			"Allocate one IRQ for setup admin channel failed\n");
+		goto disable;
+	}
+
+	hdev->cap = lo_hi_readq(hdev->bar + SPRAID_REG_CAP);
+	hdev->ioq_depth = min_t(u32, SPRAID_CAP_MQES(hdev->cap) + 1,
+				io_queue_depth);
+	hdev->db_stride = 1 << SPRAID_CAP_STRIDE(hdev->cap);
+
+	maskbit = SPRAID_CAP_DMAMASK(hdev->cap);
+	if (maskbit < 32 || maskbit > SPRAID_DMA_MSK_BIT_MAX) {
+		dev_err(hdev->dev,
+			"err, dma mask invalid[%llu], set to default\n",
+			maskbit);
+		maskbit = SPRAID_DMA_MSK_BIT_MAX;
+	}
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(maskbit))) {
+		dev_err(hdev->dev, "set dma mask and coherent failed\n");
+		goto disable;
+	}
+
+	dev_info(hdev->dev, "set dma mask[%llu] success\n", maskbit);
+
+	pci_enable_pcie_error_reporting(pdev);
+	pci_save_state(pdev);
+
+	return 0;
+
+disable:
+	pci_disable_device(pdev);
+	return ret;
+}
+
+static int spraid_npages_prp(u32 size, struct spraid_dev *hdev)
+{
+	u32 nprps = DIV_ROUND_UP(size + hdev->page_size, hdev->page_size);
+
+	return DIV_ROUND_UP(PRP_ENTRY_SIZE * nprps, PAGE_SIZE - PRP_ENTRY_SIZE);
+}
+
+static int spraid_npages_sgl(u32 nseg)
+{
+	return DIV_ROUND_UP(nseg * sizeof(struct spraid_sgl_desc), PAGE_SIZE);
+}
+
+static void **spraid_iod_list(struct spraid_iod *iod)
+{
+	return (void **)(iod->inline_sg + (iod->sg_drv_mgmt ? iod->nsge : 0));
+}
+
+static u32 spraid_iod_ext_size(struct spraid_dev *hdev, u32 size, u32 nsge,
+			       bool sg_drv_mgmt, bool use_sgl)
+{
+	size_t alloc_size, sg_size;
+
+	if (use_sgl)
+		alloc_size = sizeof(__le64 *) * spraid_npages_sgl(nsge);
+	else
+		alloc_size = sizeof(__le64 *) * spraid_npages_prp(size, hdev);
+
+	sg_size = sg_drv_mgmt ? (sizeof(struct scatterlist) * nsge) : 0;
+	return sg_size + alloc_size;
+}
+
+static u32 spraid_cmd_size(struct spraid_dev *hdev,
+			   bool sg_drv_mgmt, bool use_sgl)
+{
+	u32 alloc_size = spraid_iod_ext_size(hdev, SPRAID_INT_BYTES(hdev),
+				SPRAID_INT_PAGES, sg_drv_mgmt, use_sgl);
+
+	dev_info(hdev->dev,
+		 "sg_drv_mgmt: %s, use_sgl: %s, iod size: %lu, alloc_size: %u\n",
+		 sg_drv_mgmt ? "true" : "false", use_sgl ? "true" : "false",
+		 sizeof(struct spraid_iod), alloc_size);
+
+	return sizeof(struct spraid_iod) + alloc_size;
+}
+
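+/*
+ * Build the PRP data pointer for a mapped scatterlist: prp1 (set by
+ * the caller) covers the first page, and this helper yields prp2 as
+ * either a direct second-page pointer or a chain of PRP list pages
+ * whose last entry links each full page to the next one.
+ */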
+static int spraid_setup_prps(struct spraid_dev *hdev, struct spraid_iod *iod)
+{
+	struct scatterlist *sg = iod->sg;
+	u64 dma_addr = sg_dma_address(sg);
+	int dma_len = sg_dma_len(sg);
+	__le64 *prp_list, *old_prp_list;
+	u32 page_size = hdev->page_size;
+	int offset = dma_addr & (page_size - 1);
+	void **list = spraid_iod_list(iod);
+	int length = iod->length;
+	struct dma_pool *pool;
+	dma_addr_t prp_dma;
+	int nprps, i;
+
+	length -= (page_size - offset);
+	if (length <= 0) {
+		iod->first_dma = 0;
+		return 0;
+	}
+
+	dma_len -= (page_size - offset);
+	if (dma_len) {
+		dma_addr += (page_size - offset);
+	} else {
+		sg = sg_next(sg);
+		dma_addr = sg_dma_address(sg);
+		dma_len = sg_dma_len(sg);
+	}
+
+	if (length <= page_size) {
+		iod->first_dma = dma_addr;
+		return 0;
+	}
+
+	nprps = DIV_ROUND_UP(length, page_size);
+	if (nprps <= (SMALL_POOL_SIZE / PRP_ENTRY_SIZE)) {
+		pool = iod->spraidq->prp_small_pool;
+		iod->npages = 0;
+	} else {
+		pool = hdev->prp_page_pool;
+		iod->npages = 1;
+	}
+
+	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+	if (!prp_list) {
+		dev_err_ratelimited(hdev->dev,
+				    "Allocate first prp_list memory failed\n");
+		iod->first_dma = dma_addr;
+		iod->npages = -1;
+		return -ENOMEM;
+	}
+	list[0] = prp_list;
+	iod->first_dma = prp_dma;
+	i = 0;
+	for (;;) {
+		if (i == page_size / PRP_ENTRY_SIZE) {
+			old_prp_list = prp_list;
+
+			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+			if (!prp_list) {
+				dev_err_ratelimited(hdev->dev,
+						    "Allocate %dth prp_list memory failed\n",
+						    iod->npages + 1);
+				return -ENOMEM;
+			}
+			list[iod->npages++] = prp_list;
+			prp_list[0] = old_prp_list[i - 1];
+			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+			i = 1;
+		}
+		prp_list[i++] = cpu_to_le64(dma_addr);
+		dma_len -= page_size;
+		dma_addr += page_size;
+		length -= page_size;
+		if (length <= 0)
+			break;
+		if (dma_len > 0)
+			continue;
+		if (unlikely(dma_len < 0))
+			goto bad_sgl;
+		sg = sg_next(sg);
+		dma_addr = sg_dma_address(sg);
+		dma_len = sg_dma_len(sg);
+	}
+
+	return 0;
+
+bad_sgl:
+	dev_err(hdev->dev,
+		"Setup prps, invalid SGL for payload: %d nents: %d\n",
+		iod->length, iod->nsge);
+	return -EIO;
+}
+
+#define SGES_PER_PAGE    (PAGE_SIZE / sizeof(struct spraid_sgl_desc))
+
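+/*
+ * Copy one submission queue entry at sq_tail under sq_lock and ring
+ * the queue doorbell so the controller picks up the new command.
+ */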
+static void spraid_submit_cmd(struct spraid_queue *spraidq, const void *cmd)
+{
+	u32 sqes = SQE_SIZE(spraidq->qid);
+	unsigned long flags;
+	struct spraid_admin_common_command *acd =
+		(struct spraid_admin_common_command *)cmd;
+
+	spin_lock_irqsave(&spraidq->sq_lock, flags);
+	memcpy((spraidq->sq_cmds + sqes * spraidq->sq_tail), cmd, sqes);
+	if (++spraidq->sq_tail == spraidq->q_depth)
+		spraidq->sq_tail = 0;
+
+	writel(spraidq->sq_tail, spraidq->q_db);
+	spin_unlock_irqrestore(&spraidq->sq_lock, flags);
+
+	dev_log_dbg(spraidq->hdev->dev,
+		    "cid[%d] qid[%d], opcode[0x%x], flags[0x%x], hdid[%u]\n",
+		    acd->command_id, spraidq->qid, acd->opcode,
+		    acd->flags, le32_to_cpu(acd->hdid));
+}
+
+static u32 spraid_mod64(u64 dividend, u32 divisor)
+{
+	u64 d;
+	u32 remainder;
+
+	if (!divisor)
+		pr_err("divisor is zero in %s\n", __func__);
+
+	d = dividend;
+	remainder = do_div(d, divisor);
+	return remainder;
+}
+
+static inline bool spraid_is_rw_scmd(struct scsi_cmnd *scmd)
+{
+	switch (scmd->cmnd[0]) {
+	case READ_6:
+	case READ_10:
+	case READ_12:
+	case READ_16:
+	case READ_32:
+	case WRITE_6:
+	case WRITE_10:
+	case WRITE_12:
+	case WRITE_16:
+	case WRITE_32:
+		return true;
+	default:
+		return false;
+	}
+}
+
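+/*
+ * Decide whether a scatterlist can be described with PRPs: middle
+ * elements must be page aligned and a multiple of the page size, the
+ * first element must end on a page boundary and the last must start
+ * on one; otherwise the IO falls back to SGL descriptors.
+ */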
+static bool spraid_is_prp(struct spraid_dev *hdev,
+			  struct scsi_cmnd *scmd, u32 nsge)
+{
+	struct scatterlist *sg = scsi_sglist(scmd);
+	u32 page_size = hdev->page_size;
+	bool is_prp = true;
+	int i = 0;
+
+	scsi_for_each_sg(scmd, sg, nsge, i) {
+		if (i != 0 && i != nsge - 1) {
+			if (spraid_mod64(sg_dma_len(sg), page_size) ||
+			    spraid_mod64(sg_dma_address(sg), page_size)) {
+				is_prp = false;
+				break;
+			}
+		}
+
+		if (nsge > 1 && i == 0) {
+			if ((spraid_mod64((sg_dma_address(sg) + sg_dma_len(sg)),
+					  page_size))) {
+				is_prp = false;
+				break;
+			}
+		}
+
+		if (nsge > 1 && i == (nsge - 1)) {
+			if (spraid_mod64(sg_dma_address(sg), page_size)) {
+				is_prp = false;
+				break;
+			}
+		}
+	}
+
+	return is_prp;
+}
+
+enum {
+	SPRAID_SGL_FMT_DATA_DESC     = 0x00,
+	SPRAID_SGL_FMT_SEG_DESC      = 0x02,
+	SPRAID_SGL_FMT_LAST_SEG_DESC    = 0x03,
+	SPRAID_KEY_SGL_FMT_DATA_DESC    = 0x04,
+	SPRAID_TRANSPORT_SGL_DATA_DESC  = 0x05
+};
+
+static void spraid_sgl_set_data(struct spraid_sgl_desc *sge,
+				struct scatterlist *sg)
+{
+	sge->addr = cpu_to_le64(sg_dma_address(sg));
+	sge->length = cpu_to_le32(sg_dma_len(sg));
+	sge->type = SPRAID_SGL_FMT_DATA_DESC << 4;
+}
+
+static void spraid_sgl_set_seg(struct spraid_sgl_desc *sge,
+			       dma_addr_t dma_addr, int entries)
+{
+	sge->addr = cpu_to_le64(dma_addr);
+	if (entries <= SGES_PER_PAGE) {
+		sge->length = cpu_to_le32(entries * sizeof(*sge));
+		sge->type = SPRAID_SGL_FMT_LAST_SEG_DESC << 4;
+	} else {
+		sge->length = cpu_to_le32(PAGE_SIZE);
+		sge->type = SPRAID_SGL_FMT_SEG_DESC << 4;
+	}
+}
+
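+/*
+ * Build the SGL data pointer: a single element is embedded directly
+ * in the command, larger lists go to descriptor pages where the last
+ * slot of each full page is rewritten as a segment descriptor that
+ * chains to the next page.
+ */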
+static int spraid_setup_ioq_cmd_sgl(struct spraid_dev *hdev,
+				    struct scsi_cmnd *scmd,
+				    struct spraid_ioq_command *ioq_cmd,
+				    struct spraid_iod *iod)
+{
+	struct spraid_sgl_desc *sg_list, *link, *old_sg_list;
+	struct scatterlist *sg = scsi_sglist(scmd);
+	void **list = spraid_iod_list(iod);
+	struct dma_pool *pool;
+	int nsge = iod->nsge;
+	dma_addr_t sgl_dma;
+	int i = 0;
+
+	ioq_cmd->common.flags |= SPRAID_CMD_FLAG_SGL_METABUF;
+
+	if (nsge == 1) {
+		spraid_sgl_set_data(&ioq_cmd->common.dptr.sgl, sg);
+		return 0;
+	}
+
+	if (nsge <= (SMALL_POOL_SIZE / sizeof(struct spraid_sgl_desc))) {
+		pool = iod->spraidq->prp_small_pool;
+		iod->npages = 0;
+	} else {
+		pool = hdev->prp_page_pool;
+		iod->npages = 1;
+	}
+
+	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+	if (!sg_list) {
+		dev_err_ratelimited(hdev->dev,
+				    "Allocate first sgl_list failed\n");
+		iod->npages = -1;
+		return -ENOMEM;
+	}
+
+	list[0] = sg_list;
+	iod->first_dma = sgl_dma;
+	spraid_sgl_set_seg(&ioq_cmd->common.dptr.sgl, sgl_dma, nsge);
+	do {
+		if (i == SGES_PER_PAGE) {
+			old_sg_list = sg_list;
+			link = &old_sg_list[SGES_PER_PAGE - 1];
+
+			sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+			if (!sg_list) {
+				dev_err_ratelimited(hdev->dev,
+						    "Allocate %dth sgl_list failed\n",
+						    iod->npages + 1);
+				return -ENOMEM;
+			}
+			list[iod->npages++] = sg_list;
+
+			i = 0;
+			memcpy(&sg_list[i++], link, sizeof(*link));
+			spraid_sgl_set_seg(link, sgl_dma, nsge);
+		}
+
+		spraid_sgl_set_data(&sg_list[i++], sg);
+		sg = sg_next(sg);
+	} while (--nsge > 0);
+
+	return 0;
+}
+
+#define SPRAID_RW_FUA	BIT(14)
+
+static void spraid_setup_rw_cmd(struct spraid_dev *hdev,
+				struct spraid_rw_command *rw,
+				struct scsi_cmnd *scmd)
+{
+	u32 start_lba_lo, start_lba_hi;
+	u32 datalength = 0;
+	u16 control = 0;
+
+	start_lba_lo = 0;
+	start_lba_hi = 0;
+
+	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+		rw->opcode = SPRAID_CMD_WRITE;
+	} else if (scmd->sc_data_direction == DMA_FROM_DEVICE) {
+		rw->opcode = SPRAID_CMD_READ;
+	} else {
+		dev_err(hdev->dev,
+			"Invalid IO for unsupported data direction: %d\n",
+			scmd->sc_data_direction);
+		WARN_ON(1);
+	}
+
+	/* 6-byte READ(0x08) or WRITE(0x0A) cdb */
+	if (scmd->cmd_len == 6) {
+		datalength = (u32)(scmd->cmnd[4] == 0 ?
+				IO_6_DEFAULT_TX_LEN : scmd->cmnd[4]);
+		start_lba_lo = (u32)get_unaligned_be24(&scmd->cmnd[1]);
+
+		start_lba_lo &= 0x1FFFFF;
+	}
+
+	/* 10-byte READ(0x28) or WRITE(0x2A) cdb */
+	else if (scmd->cmd_len == 10) {
+		datalength = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+		start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+		if (scmd->cmnd[1] & FUA_MASK)
+			control |= SPRAID_RW_FUA;
+	}
+
+	/* 12-byte READ(0xA8) or WRITE(0xAA) cdb */
+	else if (scmd->cmd_len == 12) {
+		datalength = get_unaligned_be32(&scmd->cmnd[6]);
+		start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+		if (scmd->cmnd[1] & FUA_MASK)
+			control |= SPRAID_RW_FUA;
+	}
+	/* 16-byte READ(0x88) or WRITE(0x8A) cdb */
+	else if (scmd->cmd_len == 16) {
+		datalength = get_unaligned_be32(&scmd->cmnd[10]);
+		start_lba_lo = get_unaligned_be32(&scmd->cmnd[6]);
+		start_lba_hi = get_unaligned_be32(&scmd->cmnd[2]);
+
+		if (scmd->cmnd[1] & FUA_MASK)
+			control |= SPRAID_RW_FUA;
+	}
+	/* 32-byte READ(32) or WRITE(32) cdb (variable-length opcode 0x7F) */
+	else if (scmd->cmd_len == 32) {
+		datalength = get_unaligned_be32(&scmd->cmnd[28]);
+		start_lba_lo = get_unaligned_be32(&scmd->cmnd[16]);
+		start_lba_hi = get_unaligned_be32(&scmd->cmnd[12]);
+
+		if (scmd->cmnd[10] & FUA_MASK)
+			control |= SPRAID_RW_FUA;
+	}
+
+	if (unlikely(datalength > U16_MAX || datalength == 0)) {
+		dev_err(hdev->dev,
+			"Invalid IO for illegal transfer data length: %u\n",
+			datalength);
+		WARN_ON(1);
+	}
+
+	rw->slba = cpu_to_le64(((u64)start_lba_hi << 32) | start_lba_lo);
+	/* 0base for nlb */
+	rw->nlb = cpu_to_le16((u16)(datalength - 1));
+	rw->control = cpu_to_le16(control);
+}
+
+static void spraid_setup_nonio_cmd(struct spraid_dev *hdev,
+				   struct spraid_scsi_nonio *scsi_nonio,
+				   struct scsi_cmnd *scmd)
+{
+	scsi_nonio->buffer_len = cpu_to_le32(scsi_bufflen(scmd));
+
+	switch (scmd->sc_data_direction) {
+	case DMA_NONE:
+		scsi_nonio->opcode = SPRAID_CMD_NONIO_NONE;
+		break;
+	case DMA_TO_DEVICE:
+		scsi_nonio->opcode = SPRAID_CMD_NONIO_TODEV;
+		break;
+	case DMA_FROM_DEVICE:
+		scsi_nonio->opcode = SPRAID_CMD_NONIO_FROMDEV;
+		break;
+	default:
+		dev_err(hdev->dev,
+			"Invalid IO for unsupported data direction: %d\n",
+			scmd->sc_data_direction);
+		WARN_ON(1);
+	}
+}
+
+static void spraid_setup_ioq_cmd(struct spraid_dev *hdev,
+				 struct spraid_ioq_command *ioq_cmd,
+				 struct scsi_cmnd *scmd)
+{
+	memcpy(ioq_cmd->common.cdb, scmd->cmnd, scmd->cmd_len);
+	ioq_cmd->common.cdb_len = scmd->cmd_len;
+
+	if (spraid_is_rw_scmd(scmd))
+		spraid_setup_rw_cmd(hdev, &ioq_cmd->rw, scmd);
+	else
+		spraid_setup_nonio_cmd(hdev, &ioq_cmd->scsi_nonio, scmd);
+}
+
+static int spraid_init_iod(struct spraid_dev *hdev, struct spraid_iod *iod,
+			   struct spraid_ioq_command *ioq_cmd,
+			   struct scsi_cmnd *scmd)
+{
+	if (unlikely(!iod->sense)) {
+		dev_err(hdev->dev, "Allocate sense data buffer failed\n");
+		return -ENOMEM;
+	}
+	ioq_cmd->common.sense_addr = cpu_to_le64(iod->sense_dma);
+	ioq_cmd->common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+
+	iod->nsge = 0;
+	iod->npages = -1;
+	iod->use_sgl = 0;
+	iod->sg_drv_mgmt = false;
+	WRITE_ONCE(iod->state, SPRAID_CMD_IDLE);
+
+	return 0;
+}
+
+static void spraid_free_iod_res(struct spraid_dev *hdev, struct spraid_iod *iod)
+{
+	const int last_prp = hdev->page_size / sizeof(__le64) - 1;
+	dma_addr_t dma_addr, next_dma_addr;
+	struct spraid_sgl_desc *sg_list;
+	__le64 *prp_list;
+	void *addr;
+	int i;
+
+	dma_addr = iod->first_dma;
+	if (iod->npages == 0)
+		dma_pool_free(iod->spraidq->prp_small_pool,
+			      spraid_iod_list(iod)[0], dma_addr);
+
+	for (i = 0; i < iod->npages; i++) {
+		addr = spraid_iod_list(iod)[i];
+
+		if (iod->use_sgl) {
+			sg_list = addr;
+			next_dma_addr =
+				le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
+		} else {
+			prp_list = addr;
+			next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+		}
+
+		dma_pool_free(hdev->prp_page_pool, addr, dma_addr);
+		dma_addr = next_dma_addr;
+	}
+
+	if (iod->sg_drv_mgmt && iod->sg != iod->inline_sg) {
+		iod->sg_drv_mgmt = false;
+		mempool_free(iod->sg, hdev->iod_mempool);
+	}
+
+	iod->sense = NULL;
+	iod->npages = -1;
+}
+
+static int spraid_io_map_data(struct spraid_dev *hdev, struct spraid_iod *iod,
+			      struct scsi_cmnd *scmd,
+			      struct spraid_ioq_command *ioq_cmd)
+{
+	int ret;
+
+	iod->nsge = scsi_dma_map(scmd);
+
+	/* No data to DMA; may be a non-rw SCSI command */
+	if (unlikely(iod->nsge == 0))
+		return 0;
+
+	iod->length = scsi_bufflen(scmd);
+	iod->sg = scsi_sglist(scmd);
+	iod->use_sgl = !spraid_is_prp(hdev, scmd, iod->nsge);
+
+	if (iod->use_sgl) {
+		ret = spraid_setup_ioq_cmd_sgl(hdev, scmd, ioq_cmd, iod);
+	} else {
+		ret = spraid_setup_prps(hdev, iod);
+		ioq_cmd->common.dptr.prp1 =
+				cpu_to_le64(sg_dma_address(iod->sg));
+		ioq_cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma);
+	}
+
+	if (ret)
+		scsi_dma_unmap(scmd);
+
+	return ret;
+}
+
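+/*
+ * Translate the firmware completion status into SCSI result bytes,
+ * copying sense data back to the midlayer on CHECK CONDITION.
+ */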
+static void spraid_map_status(struct spraid_iod *iod, struct scsi_cmnd *scmd,
+			      struct spraid_completion *cqe)
+{
+	scsi_set_resid(scmd, 0);
+
+	switch ((le16_to_cpu(cqe->status) >> 1) & 0x7f) {
+	case FW_STAT_OK:
+		set_host_byte(scmd, DID_OK);
+		break;
+	case FW_STAT_NEED_CHECK:
+		set_host_byte(scmd, DID_OK);
+		scmd->result |= le16_to_cpu(cqe->status) >> 8;
+		if (scmd->result & SAM_STAT_CHECK_CONDITION) {
+			memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+			memcpy(scmd->sense_buffer, iod->sense,
+			       SCSI_SENSE_BUFFERSIZE);
+			scmd->result =
+			(scmd->result & 0x00ffffff) | (DRIVER_SENSE << 24);
+		}
+		break;
+	case FW_STAT_ABORTED:
+		set_host_byte(scmd, DID_ABORT);
+		break;
+	case FW_STAT_NEED_RETRY:
+		set_host_byte(scmd, DID_REQUEUE);
+		break;
+	default:
+		set_host_byte(scmd, DID_BAD_TARGET);
+		break;
+	}
+}
+
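+/*
+ * Derive the hardware queue index and command id from the request's
+ * blk-mq unique tag; the queue index is offset by one because
+ * hdev->queues[0] is the admin queue.
+ */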
+static inline void spraid_get_tag_from_scmd(struct scsi_cmnd *scmd,
+					    u16 *qid, u16 *cid)
+{
+	u32 tag = blk_mq_unique_tag(scmd->request);
+
+	*qid = blk_mq_unique_tag_to_hwq(tag) + 1;
+	*cid = blk_mq_unique_tag_to_tag(tag);
+}
+
+static int spraid_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
+	struct spraid_iod *iod = scsi_cmd_priv(scmd);
+	struct spraid_dev *hdev = shost_priv(shost);
+	struct scsi_device *sdev = scmd->device;
+	struct spraid_sdev_hostdata *hostdata;
+	struct spraid_ioq_command ioq_cmd;
+	struct spraid_queue *ioq;
+	unsigned long elapsed;
+	u16 hwq, cid;
+	int ret;
+
+	if (unlikely(!scmd)) {
+		dev_err(hdev->dev, "err, scmd is null\n");
+		return 0;
+	}
+
+	if (unlikely(hdev->state != SPRAID_LIVE)) {
+		set_host_byte(scmd, DID_NO_CONNECT);
+		scmd->scsi_done(scmd);
+		return 0;
+	}
+
+	if (log_debug_switch)
+		scsi_print_command(scmd);
+
+	spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+	hostdata = sdev->hostdata;
+	ioq = &hdev->queues[hwq];
+	memset(&ioq_cmd, 0, sizeof(ioq_cmd));
+	ioq_cmd.rw.hdid = cpu_to_le32(hostdata->hdid);
+	ioq_cmd.rw.command_id = cid;
+
+	spraid_setup_ioq_cmd(hdev, &ioq_cmd, scmd);
+
+	ret = cid * SCSI_SENSE_BUFFERSIZE;
+	iod->sense = ioq->sense + ret;
+	iod->sense_dma = ioq->sense_dma_addr + ret;
+
+	ret = spraid_init_iod(hdev, iod, &ioq_cmd, scmd);
+	if (unlikely(ret))
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	iod->spraidq = ioq;
+	ret = spraid_io_map_data(hdev, iod, scmd, &ioq_cmd);
+	if (unlikely(ret)) {
+		dev_err(hdev->dev, "spraid_io_map_data failed\n");
+		set_host_byte(scmd, DID_ERROR);
+		scmd->scsi_done(scmd);
+		ret = 0;
+		goto deinit_iod;
+	}
+
+	WRITE_ONCE(iod->state, SPRAID_CMD_IN_FLIGHT);
+	spraid_submit_cmd(ioq, &ioq_cmd);
+	elapsed = jiffies - scmd->jiffies_at_alloc;
+	dev_log_dbg(hdev->dev,
+		    "cid[%d] qid[%d] submit IO cost %3ld.%3ld seconds\n",
+		    cid, hwq, elapsed / HZ, elapsed % HZ);
+	return 0;
+
+deinit_iod:
+	spraid_free_iod_res(hdev, iod);
+	return ret;
+}
+
+static int spraid_match_dev(struct spraid_dev *hdev, u16 idx,
+			    struct scsi_device *sdev)
+{
+	if (SPRAID_DEV_INFO_FLAG_VALID(hdev->devices[idx].flag)) {
+		if (sdev->channel == hdev->devices[idx].channel &&
+		    sdev->id == le16_to_cpu(hdev->devices[idx].target) &&
+		    sdev->lun < hdev->devices[idx].lun) {
+			dev_info(hdev->dev,
+				 "Match device success, channel:target:lun[%d:%d:%d]\n",
+				 hdev->devices[idx].channel,
+				 le16_to_cpu(hdev->devices[idx].target),
+				 hdev->devices[idx].lun);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+static int spraid_slave_alloc(struct scsi_device *sdev)
+{
+	struct spraid_sdev_hostdata *hostdata;
+	struct spraid_dev *hdev;
+	u16 idx;
+
+	hdev = shost_priv(sdev->host);
+	hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
+	if (!hostdata) {
+		dev_err(hdev->dev, "Alloc scsi host data memory failed\n");
+		return -ENOMEM;
+	}
+
+	down_read(&hdev->devices_rwsem);
+	for (idx = 0; idx < le32_to_cpu(hdev->ctrl_info->nd); idx++) {
+		if (spraid_match_dev(hdev, idx, sdev))
+			goto scan_host;
+	}
+	up_read(&hdev->devices_rwsem);
+
+	kfree(hostdata);
+	return -ENXIO;
+
+scan_host:
+	hostdata->hdid = le32_to_cpu(hdev->devices[idx].hdid);
+	hostdata->max_io_kb = le16_to_cpu(hdev->devices[idx].max_io_kb);
+	hostdata->attr = hdev->devices[idx].attr;
+	hostdata->flag = hdev->devices[idx].flag;
+	hostdata->rg_id = 0xff;
+	sdev->hostdata = hostdata;
+	up_read(&hdev->devices_rwsem);
+	return 0;
+}
+
+static void spraid_slave_destroy(struct scsi_device *sdev)
+{
+	kfree(sdev->hostdata);
+	sdev->hostdata = NULL;
+}
+
+static int spraid_slave_configure(struct scsi_device *sdev)
+{
+	u16 idx;
+	unsigned int timeout = scmd_tmout_nonpt * HZ;
+	struct spraid_dev *hdev = shost_priv(sdev->host);
+	struct spraid_sdev_hostdata *hostdata = sdev->hostdata;
+	u32 max_sec = sdev->host->max_sectors;
+
+	if (hostdata) {
+		idx = hostdata->hdid - 1;
+		if (sdev->channel == hdev->devices[idx].channel &&
+		    sdev->id == le16_to_cpu(hdev->devices[idx].target) &&
+		    sdev->lun < hdev->devices[idx].lun) {
+			if (SPRAID_DEV_INFO_ATTR_PT(hdev->devices[idx].attr))
+				timeout = 30 * HZ;
+			else
+				timeout = scmd_tmout_nonpt * HZ;
+			max_sec = le16_to_cpu(hdev->devices[idx].max_io_kb)
+				  << 1;
+		} else {
+			dev_err(hdev->dev,
+				"[%s] err, sdev->channel:id:lun[%d:%d:%lld], devices[%d] channel:target:lun[%d:%d:%d]\n",
+				__func__, sdev->channel, sdev->id, sdev->lun,
+				idx, hdev->devices[idx].channel,
+				le16_to_cpu(hdev->devices[idx].target),
+				hdev->devices[idx].lun);
+		}
+	} else {
+		dev_err(hdev->dev, "[%s] err, sdev->hostdata is null\n",
+			__func__);
+	}
+
+	blk_queue_rq_timeout(sdev->request_queue, timeout);
+	sdev->eh_timeout = timeout;
+
+	if ((max_sec == 0) || (max_sec > sdev->host->max_sectors))
+		max_sec = sdev->host->max_sectors;
+
+	dev_info(hdev->dev,
+		 "[%s] sdev->channel:id:lun[%d:%d:%lld], scmd_timeout[%d]s, maxsec[%d]\n",
+		 __func__, sdev->channel, sdev->id,
+		 sdev->lun, timeout / HZ, max_sec);
+
+	return 0;
+}
+
+static void spraid_shost_init(struct spraid_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+	u8 domain, bus;
+	u32 dev_func;
+
+	domain = pci_domain_nr(pdev->bus);
+	bus = pdev->bus->number;
+	dev_func = pdev->devfn;
+
+	hdev->shost->nr_hw_queues = hdev->online_queues - 1;
+	hdev->shost->can_queue = (hdev->ioq_depth - SPRAID_PTCMDS_PERQ);
+
+	hdev->shost->sg_tablesize = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+	/* 512B per sector */
+	hdev->shost->max_sectors =
+		(1U << ((hdev->ctrl_info->mdts) * 1U) << 12) / 512;
+	hdev->shost->cmd_per_lun = MAX_CMD_PER_DEV;
+	hdev->shost->max_channel =
+		le16_to_cpu(hdev->ctrl_info->max_channel) - 1;
+	hdev->shost->max_id = le32_to_cpu(hdev->ctrl_info->max_tgt_id);
+	hdev->shost->max_lun = le16_to_cpu(hdev->ctrl_info->max_lun);
+
+	hdev->shost->this_id = -1;
+	hdev->shost->unique_id = (domain << 16) | (bus << 8) | dev_func;
+	hdev->shost->max_cmd_len = MAX_CDB_LEN;
+	hdev->shost->hostt->cmd_size = max(spraid_cmd_size(hdev, false, true),
+					   spraid_cmd_size(hdev, false, false));
+}
+
+static inline void spraid_host_deinit(struct spraid_dev *hdev)
+{
+	ida_free(&spraid_instance_ida, hdev->instance);
+}
+
+static int spraid_alloc_queue(struct spraid_dev *hdev, u16 qid, u16 depth)
+{
+	struct spraid_queue *spraidq = &hdev->queues[qid];
+	int ret = 0;
+
+	if (hdev->queue_count > qid) {
+		dev_info(hdev->dev, "[%s] warn: queue[%d] already exists\n",
+			 __func__, qid);
+		return 0;
+	}
+
+	spraidq->cqes = dma_alloc_coherent(hdev->dev, CQ_SIZE(depth),
+					   &spraidq->cq_dma_addr,
+					   GFP_KERNEL | __GFP_ZERO);
+	if (!spraidq->cqes)
+		return -ENOMEM;
+
+	spraidq->sq_cmds = dma_alloc_coherent(hdev->dev, SQ_SIZE(qid, depth),
+					      &spraidq->sq_dma_addr,
+					      GFP_KERNEL);
+	if (!spraidq->sq_cmds) {
+		ret = -ENOMEM;
+		goto free_cqes;
+	}
+
+	spin_lock_init(&spraidq->sq_lock);
+	spin_lock_init(&spraidq->cq_lock);
+	spraidq->hdev = hdev;
+	spraidq->q_depth = depth;
+	spraidq->qid = qid;
+	spraidq->cq_vector = -1;
+	hdev->queue_count++;
+
+	/* alloc sense buffer */
+	spraidq->sense = dma_alloc_coherent(hdev->dev, SENSE_SIZE(depth),
+					    &spraidq->sense_dma_addr,
+					    GFP_KERNEL | __GFP_ZERO);
+	if (!spraidq->sense) {
+		ret = -ENOMEM;
+		goto free_sq_cmds;
+	}
+
+	return 0;
+
+free_sq_cmds:
+	dma_free_coherent(hdev->dev, SQ_SIZE(qid, depth),
+			  (void *)spraidq->sq_cmds, spraidq->sq_dma_addr);
+free_cqes:
+	dma_free_coherent(hdev->dev, CQ_SIZE(depth), (void *)spraidq->cqes,
+			  spraidq->cq_dma_addr);
+	return ret;
+}
+
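+/*
+ * Poll CSTS.RDY until it reaches the requested enable state; the
+ * timeout is taken from CAP.TO in SPRAID_CAP_TIMEOUT_UNIT_MS (500 ms)
+ * units.
+ */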
+static int spraid_wait_ready(struct spraid_dev *hdev, u64 cap, bool enabled)
+{
+	unsigned long timeout =
+	((SPRAID_CAP_TIMEOUT(cap) + 1) * SPRAID_CAP_TIMEOUT_UNIT_MS) + jiffies;
+	u32 bit = enabled ? SPRAID_CSTS_RDY : 0;
+
+	while ((readl(hdev->bar + SPRAID_REG_CSTS) & SPRAID_CSTS_RDY) != bit) {
+		usleep_range(1000, 2000);
+		if (fatal_signal_pending(current))
+			return -EINTR;
+
+		if (time_after(jiffies, timeout)) {
+			dev_err(hdev->dev, "Device not ready; aborting %s\n",
+				enabled ? "initialisation" : "reset");
+			return -ENODEV;
+		}
+	}
+	return 0;
+}
+
+static int spraid_shutdown_ctrl(struct spraid_dev *hdev)
+{
+	unsigned long timeout = hdev->ctrl_info->rtd3e + jiffies;
+
+	hdev->ctrl_config &= ~SPRAID_CC_SHN_MASK;
+	hdev->ctrl_config |= SPRAID_CC_SHN_NORMAL;
+	writel(hdev->ctrl_config, hdev->bar + SPRAID_REG_CC);
+
+	while ((readl(hdev->bar + SPRAID_REG_CSTS) & SPRAID_CSTS_SHST_MASK) !=
+		SPRAID_CSTS_SHST_CMPLT) {
+		msleep(100);
+		if (fatal_signal_pending(current))
+			return -EINTR;
+		if (time_after(jiffies, timeout)) {
+			dev_err(hdev->dev,
+				"Device shutdown incomplete; abort shutdown\n");
+			return -ENODEV;
+		}
+	}
+	return 0;
+}
+
+static int spraid_disable_ctrl(struct spraid_dev *hdev)
+{
+	hdev->ctrl_config &= ~SPRAID_CC_SHN_MASK;
+	hdev->ctrl_config &= ~SPRAID_CC_ENABLE;
+	writel(hdev->ctrl_config, hdev->bar + SPRAID_REG_CC);
+
+	return spraid_wait_ready(hdev, hdev->cap, false);
+}
+
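+/*
+ * Negotiate the device page size from CAP.MPSMIN/MPSMAX against the
+ * host PAGE_SHIFT, program CC (page size, arbitration, queue entry
+ * sizes, enable bit) and wait for the controller to become ready.
+ */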
+static int spraid_enable_ctrl(struct spraid_dev *hdev)
+{
+	u64 cap = hdev->cap;
+	u32 dev_page_min = SPRAID_CAP_MPSMIN(cap) + 12;
+	u32 page_shift = PAGE_SHIFT;
+
+	if (page_shift < dev_page_min) {
+		dev_err(hdev->dev,
+			"Minimum device page size[%u], too large for host[%u]\n",
+			1U << dev_page_min, 1U << page_shift);
+		return -ENODEV;
+	}
+
+	page_shift = min_t(unsigned int, SPRAID_CAP_MPSMAX(cap) + 12,
+			   PAGE_SHIFT);
+	hdev->page_size = 1U << page_shift;
+
+	hdev->ctrl_config = SPRAID_CC_CSS_NVM;
+	hdev->ctrl_config |= (page_shift - 12) << SPRAID_CC_MPS_SHIFT;
+	hdev->ctrl_config |= SPRAID_CC_AMS_RR | SPRAID_CC_SHN_NONE;
+	hdev->ctrl_config |= SPRAID_CC_IOSQES | SPRAID_CC_IOCQES;
+	hdev->ctrl_config |= SPRAID_CC_ENABLE;
+	writel(hdev->ctrl_config, hdev->bar + SPRAID_REG_CC);
+
+	return spraid_wait_ready(hdev, cap, true);
+}
+
+static void spraid_init_queue(struct spraid_queue *spraidq, u16 qid)
+{
+	struct spraid_dev *hdev = spraidq->hdev;
+
+	memset((void *)spraidq->cqes, 0, CQ_SIZE(spraidq->q_depth));
+
+	spraidq->sq_tail = 0;
+	spraidq->cq_head = 0;
+	spraidq->cq_phase = 1;
+	spraidq->q_db = &hdev->dbs[qid * 2 * hdev->db_stride];
+	spraidq->prp_small_pool = hdev->prp_small_pool[qid % small_pool_num];
+	hdev->online_queues++;
+}
+
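+/*
+ * A CQE is new when its phase bit (status bit 0) matches the queue's
+ * cq_phase; cq_phase flips every time cq_head wraps around.
+ */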
+static inline bool spraid_cqe_pending(struct spraid_queue *spraidq)
+{
+	return (le16_to_cpu(spraidq->cqes[spraidq->cq_head].status) & 1) ==
+		spraidq->cq_phase;
+}
+
+static void spraid_sata_report_zone_handle(struct scsi_cmnd *scmd,
+					   struct spraid_iod *iod)
+{
+	int i = 0;
+	unsigned int bytes = 0;
+	struct scatterlist *sg = scsi_sglist(scmd);
+
+	scsi_for_each_sg(scmd, sg, iod->nsge, i) {
+		unsigned int offset = 0;
+
+		if (bytes == 0) {
+			char *hdr;
+			u32 list_length;
+			u64 max_lba, opt_lba;
+			u16 same;
+
+			hdr = sg_virt(sg);
+
+			list_length = get_unaligned_le32(&hdr[0]);
+			same = get_unaligned_le16(&hdr[4]);
+			max_lba = get_unaligned_le64(&hdr[8]);
+			opt_lba = get_unaligned_le64(&hdr[16]);
+			put_unaligned_be32(list_length, &hdr[0]);
+			hdr[4] = same & 0xf;
+			put_unaligned_be64(max_lba, &hdr[8]);
+			put_unaligned_be64(opt_lba, &hdr[16]);
+			offset += 64;
+			bytes += 64;
+		}
+		while (offset < sg_dma_len(sg)) {
+			char *rec;
+			u8 cond, type, non_seq, reset;
+			u64 size, start, wp;
+
+			rec = sg_virt(sg) + offset;
+			type = rec[0] & 0xf;
+			cond = (rec[1] >> 4) & 0xf;
+			non_seq = (rec[1] & 2);
+			reset = (rec[1] & 1);
+			size = get_unaligned_le64(&rec[8]);
+			start = get_unaligned_le64(&rec[16]);
+			wp = get_unaligned_le64(&rec[24]);
+			rec[0] = type;
+			rec[1] = (cond << 4) | non_seq | reset;
+			put_unaligned_be64(size, &rec[8]);
+			put_unaligned_be64(start, &rec[16]);
+			put_unaligned_be64(wp, &rec[24]);
+			WARN_ON(offset + 64 > sg_dma_len(sg));
+			offset += 64;
+			bytes += 64;
+		}
+	}
+}
+
+static inline void spraid_handle_ata_cmd(struct spraid_dev *hdev,
+					 struct scsi_cmnd *scmd,
+					 struct spraid_iod *iod)
+{
+	if (hdev->ctrl_info->card_type != SPRAID_CARD_HBA)
+		return;
+
+	switch (scmd->cmnd[0]) {
+	case ZBC_IN:
+		dev_info(hdev->dev, "[%s] process report zone\n", __func__);
+		spraid_sata_report_zone_handle(scmd, iod);
+		break;
+	default:
+		break;
+	}
+}
+
+static void spraid_complete_ioq_cmnd(struct spraid_queue *ioq,
+				     struct spraid_completion *cqe)
+{
+	struct spraid_dev *hdev = ioq->hdev;
+	struct blk_mq_tags *tags;
+	struct scsi_cmnd *scmd;
+	struct spraid_iod *iod;
+	struct request *req;
+	unsigned long elapsed;
+
+	tags = hdev->shost->tag_set.tags[ioq->qid - 1];
+	req = blk_mq_tag_to_rq(tags, cqe->cmd_id);
+	if (unlikely(!req || !blk_mq_request_started(req))) {
+		dev_warn(hdev->dev, "Invalid id %d completed on queue %d\n",
+			 cqe->cmd_id, ioq->qid);
+		return;
+	}
+
+	scmd = blk_mq_rq_to_pdu(req);
+	iod = scsi_cmd_priv(scmd);
+
+	elapsed = jiffies - scmd->jiffies_at_alloc;
+	dev_log_dbg(hdev->dev,
+		    "cid[%d] qid[%d] finish IO cost %3ld.%3ld seconds\n",
+		    cqe->cmd_id, ioq->qid, elapsed / HZ, elapsed % HZ);
+
+	if (cmpxchg(&iod->state, SPRAID_CMD_IN_FLIGHT, SPRAID_CMD_COMPLETE) !=
+		SPRAID_CMD_IN_FLIGHT) {
+		dev_warn(hdev->dev,
+			 "cid[%d] qid[%d] enters abnormal handler, cost %3ld.%3ld seconds\n",
+			 cqe->cmd_id, ioq->qid, elapsed / HZ, elapsed % HZ);
+		WRITE_ONCE(iod->state, SPRAID_CMD_TMO_COMPLETE);
+
+		if (iod->nsge) {
+			iod->nsge = 0;
+			scsi_dma_unmap(scmd);
+		}
+		spraid_free_iod_res(hdev, iod);
+
+		return;
+	}
+
+	spraid_handle_ata_cmd(hdev, scmd, iod);
+
+	spraid_map_status(iod, scmd, cqe);
+	if (iod->nsge) {
+		iod->nsge = 0;
+		scsi_dma_unmap(scmd);
+	}
+	spraid_free_iod_res(hdev, iod);
+	scmd->scsi_done(scmd);
+}
+
+static void spraid_complete_adminq_cmnd(struct spraid_queue *adminq,
+					struct spraid_completion *cqe)
+{
+	struct spraid_dev *hdev = adminq->hdev;
+	struct spraid_cmd *adm_cmd;
+
+	adm_cmd = hdev->adm_cmds + cqe->cmd_id;
+	if (unlikely(adm_cmd->state == SPRAID_CMD_IDLE)) {
+		dev_warn(adminq->hdev->dev,
+			"Invalid id %d completed on queue %d\n",
+			 cqe->cmd_id, le16_to_cpu(cqe->sq_id));
+		return;
+	}
+
+	adm_cmd->status = le16_to_cpu(cqe->status) >> 1;
+	adm_cmd->result0 = le32_to_cpu(cqe->result);
+	adm_cmd->result1 = le32_to_cpu(cqe->result1);
+
+	complete(&adm_cmd->cmd_done);
+}
+
+static void spraid_send_aen(struct spraid_dev *hdev, u16 cid);
+
+static void spraid_complete_aen(struct spraid_queue *spraidq,
+				struct spraid_completion *cqe)
+{
+	struct spraid_dev *hdev = spraidq->hdev;
+	u32 result = le32_to_cpu(cqe->result);
+
+	dev_info(hdev->dev, "rcv aen, cid[%d], status[0x%x], result[0x%x]\n",
+		 cqe->cmd_id, le16_to_cpu(cqe->status) >> 1, result);
+
+	spraid_send_aen(hdev, cqe->cmd_id);
+
+	if ((le16_to_cpu(cqe->status) >> 1) != SPRAID_SC_SUCCESS)
+		return;
+	switch (result & 0x7) {
+	case SPRAID_AEN_NOTICE:
+		spraid_handle_aen_notice(hdev, result);
+		break;
+	case SPRAID_AEN_VS:
+		spraid_handle_aen_vs(hdev, result, le32_to_cpu(cqe->result1));
+		break;
+	default:
+		dev_warn(hdev->dev, "Unsupported async event type: %u\n",
+			 result & 0x7);
+		break;
+	}
+}
+
+static void spraid_complete_ioq_sync_cmnd(struct spraid_queue *ioq,
+					  struct spraid_completion *cqe)
+{
+	struct spraid_dev *hdev = ioq->hdev;
+	struct spraid_cmd *ptcmd;
+
+	ptcmd = hdev->ioq_ptcmds + (ioq->qid - 1) * SPRAID_PTCMDS_PERQ +
+		cqe->cmd_id - SPRAID_IO_BLK_MQ_DEPTH;
+
+	ptcmd->status = le16_to_cpu(cqe->status) >> 1;
+	ptcmd->result0 = le32_to_cpu(cqe->result);
+	ptcmd->result1 = le32_to_cpu(cqe->result1);
+
+	complete(&ptcmd->cmd_done);
+}
+
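+/*
+ * Dispatch one CQE by command id: on the admin queue, ids at or above
+ * SPRAID_AQ_BLK_MQ_DEPTH are async events; on IO queues, ids at or
+ * above SPRAID_IO_BLK_MQ_DEPTH are internal passthrough commands;
+ * anything else completes a blk-mq request.
+ */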
+static inline void spraid_handle_cqe(struct spraid_queue *spraidq, u16 idx)
+{
+	struct spraid_completion *cqe = &spraidq->cqes[idx];
+	struct spraid_dev *hdev = spraidq->hdev;
+
+	if (unlikely(cqe->cmd_id >= spraidq->q_depth)) {
+		dev_err(hdev->dev,
+			"Invalid command id[%d] completed on queue %d\n",
+			cqe->cmd_id, cqe->sq_id);
+		return;
+	}
+
+	dev_log_dbg(hdev->dev,
+		    "cid[%d] qid[%d], result[0x%x], sq_id[%d], status[0x%x]\n",
+		    cqe->cmd_id, spraidq->qid, le32_to_cpu(cqe->result),
+		    le16_to_cpu(cqe->sq_id), le16_to_cpu(cqe->status));
+
+	if (unlikely(spraidq->qid == 0 &&
+		     cqe->cmd_id >= SPRAID_AQ_BLK_MQ_DEPTH)) {
+		spraid_complete_aen(spraidq, cqe);
+		return;
+	}
+
+	if (unlikely(spraidq->qid && cqe->cmd_id >= SPRAID_IO_BLK_MQ_DEPTH)) {
+		spraid_complete_ioq_sync_cmnd(spraidq, cqe);
+		return;
+	}
+
+	if (spraidq->qid)
+		spraid_complete_ioq_cmnd(spraidq, cqe);
+	else
+		spraid_complete_adminq_cmnd(spraidq, cqe);
+}
+
+static void spraid_complete_cqes(struct spraid_queue *spraidq,
+				 u16 start, u16 end)
+{
+	while (start != end) {
+		spraid_handle_cqe(spraidq, start);
+		if (++start == spraidq->q_depth)
+			start = 0;
+	}
+}
+
+static inline void spraid_update_cq_head(struct spraid_queue *spraidq)
+{
+	if (++spraidq->cq_head == spraidq->q_depth) {
+		spraidq->cq_head = 0;
+		spraidq->cq_phase = !spraidq->cq_phase;
+	}
+}
+
+static inline bool spraid_process_cq(struct spraid_queue *spraidq,
+				     u16 *start, u16 *end, int tag)
+{
+	bool found = false;
+
+	*start = spraidq->cq_head;
+	while (!found && spraid_cqe_pending(spraidq)) {
+		if (spraidq->cqes[spraidq->cq_head].cmd_id == tag)
+			found = true;
+		spraid_update_cq_head(spraidq);
+	}
+	*end = spraidq->cq_head;
+
+	if (*start != *end)
+		writel(spraidq->cq_head,
+		       spraidq->q_db + spraidq->hdev->db_stride);
+
+	return found;
+}
+
+static bool spraid_poll_cq(struct spraid_queue *spraidq, int cid)
+{
+	u16 start, end;
+	bool found;
+
+	if (!spraid_cqe_pending(spraidq))
+		return false;
+
+	spin_lock_irq(&spraidq->cq_lock);
+	found = spraid_process_cq(spraidq, &start, &end, cid);
+	spin_unlock_irq(&spraidq->cq_lock);
+
+	spraid_complete_cqes(spraidq, start, end);
+	return found;
+}
+
+static irqreturn_t spraid_irq(int irq, void *data)
+{
+	struct spraid_queue *spraidq = data;
+	irqreturn_t ret = IRQ_NONE;
+	u16 start, end;
+
+	spin_lock(&spraidq->cq_lock);
+	if (spraidq->cq_head != spraidq->last_cq_head)
+		ret = IRQ_HANDLED;
+
+	spraid_process_cq(spraidq, &start, &end, -1);
+	spraidq->last_cq_head = spraidq->cq_head;
+	spin_unlock(&spraidq->cq_lock);
+
+	if (start != end) {
+		spraid_complete_cqes(spraidq, start, end);
+		ret = IRQ_HANDLED;
+	}
+	return ret;
+}
+
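+/*
+ * Bring up the admin queue pair: disable the controller, program the
+ * zero-based queue depth into both halves of AQA along with the SQ/CQ
+ * base addresses, re-enable the controller and request the admin IRQ.
+ */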
+static int spraid_setup_admin_queue(struct spraid_dev *hdev)
+{
+	struct spraid_queue *adminq = &hdev->queues[0];
+	u32 aqa;
+	int ret;
+
+	dev_info(hdev->dev, "[%s] start disable ctrl\n", __func__);
+
+	ret = spraid_disable_ctrl(hdev);
+	if (ret)
+		return ret;
+
+	ret = spraid_alloc_queue(hdev, 0, SPRAID_AQ_DEPTH);
+	if (ret)
+		return ret;
+
+	aqa = adminq->q_depth - 1;
+	aqa |= aqa << 16;
+	writel(aqa, hdev->bar + SPRAID_REG_AQA);
+	lo_hi_writeq(adminq->sq_dma_addr, hdev->bar + SPRAID_REG_ASQ);
+	lo_hi_writeq(adminq->cq_dma_addr, hdev->bar + SPRAID_REG_ACQ);
+
+	dev_info(hdev->dev, "[%s] start enable ctrl\n", __func__);
+
+	ret = spraid_enable_ctrl(hdev);
+	if (ret) {
+		ret = -ENODEV;
+		goto free_queue;
+	}
+
+	adminq->cq_vector = 0;
+	spraid_init_queue(adminq, 0);
+	ret = pci_request_irq(hdev->pdev, adminq->cq_vector, spraid_irq, NULL,
+			      adminq, "spraid%d_q%d",
+			      hdev->instance, adminq->qid);
+
+	if (ret) {
+		adminq->cq_vector = -1;
+		hdev->online_queues--;
+		goto free_queue;
+	}
+
+	dev_info(hdev->dev, "[%s] success, queuecount:[%d], onlinequeue:[%d]\n",
+		 __func__, hdev->queue_count, hdev->online_queues);
+
+	return 0;
+
+free_queue:
+	spraid_free_queue(adminq);
+	return ret;
+}
+
+static u32 spraid_bar_size(struct spraid_dev *hdev, u32 nr_ioqs)
+{
+	return (SPRAID_REG_DBS + ((nr_ioqs + 1) * 8 * hdev->db_stride));
+}
+
+static int spraid_alloc_admin_cmds(struct spraid_dev *hdev)
+{
+	int i;
+
+	INIT_LIST_HEAD(&hdev->adm_cmd_list);
+	spin_lock_init(&hdev->adm_cmd_lock);
+
+	hdev->adm_cmds = kcalloc_node(SPRAID_AQ_BLK_MQ_DEPTH,
+				      sizeof(struct spraid_cmd),
+				      GFP_KERNEL, hdev->numa_node);
+
+	if (!hdev->adm_cmds) {
+		dev_err(hdev->dev, "Alloc admin cmds failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < SPRAID_AQ_BLK_MQ_DEPTH; i++) {
+		hdev->adm_cmds[i].qid = 0;
+		hdev->adm_cmds[i].cid = i;
+		list_add_tail(&(hdev->adm_cmds[i].list), &hdev->adm_cmd_list);
+	}
+
+	dev_info(hdev->dev, "Alloc admin cmds success, num[%d]\n",
+		 SPRAID_AQ_BLK_MQ_DEPTH);
+
+	return 0;
+}
+
+static void spraid_free_admin_cmds(struct spraid_dev *hdev)
+{
+	kfree(hdev->adm_cmds);
+	hdev->adm_cmds = NULL;
+	INIT_LIST_HEAD(&hdev->adm_cmd_list);
+}
+
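+/*
+ * Driver-internal commands are preallocated and kept on free lists
+ * (one for admin commands, one for IO passthrough commands); getting
+ * a command simply pops the list head and marks it in-flight.
+ */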
+static struct spraid_cmd *spraid_get_cmd(struct spraid_dev *hdev,
+					 enum spraid_cmd_type type)
+{
+	struct spraid_cmd *cmd = NULL;
+	unsigned long flags;
+	struct list_head *head = &hdev->adm_cmd_list;
+	spinlock_t *slock = &hdev->adm_cmd_lock;
+
+	if (type == SPRAID_CMD_IOPT) {
+		head = &hdev->ioq_pt_list;
+		slock = &hdev->ioq_pt_lock;
+	}
+
+	spin_lock_irqsave(slock, flags);
+	if (list_empty(head)) {
+		spin_unlock_irqrestore(slock, flags);
+		dev_err(hdev->dev, "err, cmd[%d] list empty\n", type);
+		return NULL;
+	}
+	cmd = list_entry(head->next, struct spraid_cmd, list);
+	list_del_init(&cmd->list);
+	spin_unlock_irqrestore(slock, flags);
+
+	WRITE_ONCE(cmd->state, SPRAID_CMD_IN_FLIGHT);
+
+	return cmd;
+}
+
+static void spraid_put_cmd(struct spraid_dev *hdev, struct spraid_cmd *cmd,
+			   enum spraid_cmd_type type)
+{
+	unsigned long flags;
+	struct list_head *head = &hdev->adm_cmd_list;
+	spinlock_t *slock = &hdev->adm_cmd_lock;
+
+	if (type == SPRAID_CMD_IOPT) {
+		head = &hdev->ioq_pt_list;
+		slock = &hdev->ioq_pt_lock;
+	}
+
+	spin_lock_irqsave(slock, flags);
+	WRITE_ONCE(cmd->state, SPRAID_CMD_IDLE);
+	list_add_tail(&cmd->list, head);
+	spin_unlock_irqrestore(slock, flags);
+}
+
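+/*
+ * Submit an admin command and wait synchronously for its completion.
+ * A zero @timeout falls back to ADMIN_TIMEOUT; on timeout the command
+ * is marked SPRAID_CMD_TIMEOUT, returned to the free list and -EINVAL
+ * is reported to the caller.
+ */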
+static int spraid_submit_admin_sync_cmd(struct spraid_dev *hdev,
+					struct spraid_admin_command *cmd,
+					u32 *result0, u32 *result1, u32 timeout)
+{
+	struct spraid_cmd *adm_cmd = spraid_get_cmd(hdev, SPRAID_CMD_ADM);
+
+	if (!adm_cmd) {
+		dev_err(hdev->dev, "err, get admin cmd failed\n");
+		return -EFAULT;
+	}
+
+	timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+	init_completion(&adm_cmd->cmd_done);
+
+	cmd->common.command_id = adm_cmd->cid;
+	spraid_submit_cmd(&hdev->queues[0], cmd);
+
+	if (!wait_for_completion_timeout(&adm_cmd->cmd_done, timeout)) {
+		dev_err(hdev->dev, "[%s] cid[%d] qid[%d] timeout;"
+			" opcode[0x%x] subopcode[0x%x]\n",
+			__func__, adm_cmd->cid, adm_cmd->qid,
+			cmd->usr_cmd.opcode, cmd->usr_cmd.info_0.subopcode);
+		WRITE_ONCE(adm_cmd->state, SPRAID_CMD_TIMEOUT);
+		spraid_put_cmd(hdev, adm_cmd, SPRAID_CMD_ADM);
+		return -EINVAL;
+	}
+
+	if (result0)
+		*result0 = adm_cmd->result0;
+	if (result1)
+		*result1 = adm_cmd->result1;
+
+	spraid_put_cmd(hdev, adm_cmd, SPRAID_CMD_ADM);
+
+	return adm_cmd->status;
+}
+
+static int spraid_create_cq(struct spraid_dev *hdev, u16 qid,
+			    struct spraid_queue *spraidq, u16 cq_vector)
+{
+	struct spraid_admin_command admin_cmd;
+	int flags = SPRAID_QUEUE_PHYS_CONTIG | SPRAID_CQ_IRQ_ENABLED;
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.create_cq.opcode = SPRAID_ADMIN_CREATE_CQ;
+	admin_cmd.create_cq.prp1 = cpu_to_le64(spraidq->cq_dma_addr);
+	admin_cmd.create_cq.cqid = cpu_to_le16(qid);
+	admin_cmd.create_cq.qsize = cpu_to_le16(spraidq->q_depth - 1);
+	admin_cmd.create_cq.cq_flags = cpu_to_le16(flags);
+	admin_cmd.create_cq.irq_vector = cpu_to_le16(cq_vector);
+
+	return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static int spraid_create_sq(struct spraid_dev *hdev, u16 qid,
+			    struct spraid_queue *spraidq)
+{
+	struct spraid_admin_command admin_cmd;
+	int flags = SPRAID_QUEUE_PHYS_CONTIG;
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.create_sq.opcode = SPRAID_ADMIN_CREATE_SQ;
+	admin_cmd.create_sq.prp1 = cpu_to_le64(spraidq->sq_dma_addr);
+	admin_cmd.create_sq.sqid = cpu_to_le16(qid);
+	admin_cmd.create_sq.qsize = cpu_to_le16(spraidq->q_depth - 1);
+	admin_cmd.create_sq.sq_flags = cpu_to_le16(flags);
+	admin_cmd.create_sq.cqid = cpu_to_le16(qid);
+
+	return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static void spraid_free_queue(struct spraid_queue *spraidq)
+{
+	struct spraid_dev *hdev = spraidq->hdev;
+
+	hdev->queue_count--;
+	dma_free_coherent(hdev->dev, CQ_SIZE(spraidq->q_depth),
+			  (void *)spraidq->cqes, spraidq->cq_dma_addr);
+	dma_free_coherent(hdev->dev, SQ_SIZE(spraidq->qid, spraidq->q_depth),
+			  spraidq->sq_cmds, spraidq->sq_dma_addr);
+	dma_free_coherent(hdev->dev, SENSE_SIZE(spraidq->q_depth),
+			  spraidq->sense, spraidq->sense_dma_addr);
+}
+
+static void spraid_free_admin_queue(struct spraid_dev *hdev)
+{
+	spraid_free_queue(&hdev->queues[0]);
+}
+
+static void spraid_free_io_queues(struct spraid_dev *hdev)
+{
+	int i;
+
+	for (i = hdev->queue_count - 1; i >= 1; i--)
+		spraid_free_queue(&hdev->queues[i]);
+}
+
+static int spraid_delete_queue(struct spraid_dev *hdev, u8 op, u16 id)
+{
+	struct spraid_admin_command admin_cmd;
+	int ret;
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.delete_queue.opcode = op;
+	admin_cmd.delete_queue.qid = cpu_to_le16(id);
+
+	ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+
+	if (ret)
+		dev_err(hdev->dev, "Delete %s:[%d] failed\n",
+			(op == SPRAID_ADMIN_DELETE_CQ) ? "cq" : "sq", id);
+
+	return ret;
+}
+
+static int spraid_delete_cq(struct spraid_dev *hdev, u16 cqid)
+{
+	return spraid_delete_queue(hdev, SPRAID_ADMIN_DELETE_CQ, cqid);
+}
+
+static int spraid_delete_sq(struct spraid_dev *hdev, u16 sqid)
+{
+	return spraid_delete_queue(hdev, SPRAID_ADMIN_DELETE_SQ, sqid);
+}
+
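+/*
+ * Create one IO queue pair. The CQ must exist before the SQ that posts
+ * completions to it, so the error path tears down in reverse order:
+ * SQ first, then CQ.
+ */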
+static int spraid_create_queue(struct spraid_queue *spraidq, u16 qid)
+{
+	struct spraid_dev *hdev = spraidq->hdev;
+	u16 cq_vector;
+	int ret;
+
+	cq_vector = (hdev->num_vecs == 1) ? 0 : qid;
+	ret = spraid_create_cq(hdev, qid, spraidq, cq_vector);
+	if (ret)
+		return ret;
+
+	ret = spraid_create_sq(hdev, qid, spraidq);
+	if (ret)
+		goto delete_cq;
+
+	spraid_init_queue(spraidq, qid);
+	spraidq->cq_vector = cq_vector;
+
+	ret = pci_request_irq(hdev->pdev, cq_vector, spraid_irq, NULL,
+			      spraidq, "spraid%d_q%d", hdev->instance, qid);
+
+	if (ret) {
+		dev_err(hdev->dev, "Request queue[%d] irq failed\n", qid);
+		goto delete_sq;
+	}
+
+	return 0;
+
+delete_sq:
+	spraidq->cq_vector = -1;
+	hdev->online_queues--;
+	spraid_delete_sq(hdev, qid);
+delete_cq:
+	spraid_delete_cq(hdev, qid);
+
+	return ret;
+}
+
+static int spraid_create_io_queues(struct spraid_dev *hdev)
+{
+	u32 i, max;
+	int ret = 0;
+
+	max = min(hdev->max_qid, hdev->queue_count - 1);
+	for (i = hdev->online_queues; i <= max; i++) {
+		ret = spraid_create_queue(&hdev->queues[i], i);
+		if (ret) {
+			dev_err(hdev->dev, "Create queue[%d] failed\n", i);
+			break;
+		}
+	}
+
+	dev_info(hdev->dev, "[%s] queue_count[%d], online_queue[%d]",
+		 __func__, hdev->queue_count, hdev->online_queues);
+
+	return ret >= 0 ? 0 : ret;
+}
+
+static int spraid_set_features(struct spraid_dev *hdev, u32 fid,
+			       u32 dword11, void *buffer,
+			       size_t buflen, u32 *result)
+{
+	struct spraid_admin_command admin_cmd;
+	int ret;
+	u8 *data_ptr = NULL;
+	dma_addr_t data_dma = 0;
+
+	if (buffer && buflen) {
+		data_ptr = dma_alloc_coherent(hdev->dev, buflen,
+					      &data_dma, GFP_KERNEL);
+		if (!data_ptr)
+			return -ENOMEM;
+
+		memcpy(data_ptr, buffer, buflen);
+	}
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.features.opcode = SPRAID_ADMIN_SET_FEATURES;
+	admin_cmd.features.fid = cpu_to_le32(fid);
+	admin_cmd.features.dword11 = cpu_to_le32(dword11);
+	admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+	ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, result, NULL, 0);
+
+	if (data_ptr)
+		dma_free_coherent(hdev->dev, buflen, data_ptr, data_dma);
+
+	return ret;
+}
+
+static int spraid_configure_timestamp(struct spraid_dev *hdev)
+{
+	__le64 ts;
+	int ret;
+
+	ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
+	ret = spraid_set_features(hdev, SPRAID_FEAT_TIMESTAMP,
+				  0, &ts, sizeof(ts), NULL);
+
+	if (ret)
+		dev_err(hdev->dev, "set timestamp failed: %d\n", ret);
+	return ret;
+}
+
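+/*
+ * Negotiate the IO queue count: both halves of dword11 carry the
+ * requested zero-based count, and the controller returns the granted
+ * SQ/CQ counts in the low/high 16 bits of the result.
+ */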
+static int spraid_set_queue_cnt(struct spraid_dev *hdev, u32 *cnt)
+{
+	u32 q_cnt = (*cnt - 1) | ((*cnt - 1) << 16);
+	u32 nr_ioqs, result;
+	int status;
+
+	status = spraid_set_features(hdev, SPRAID_FEAT_NUM_QUEUES,
+				     q_cnt, NULL, 0, &result);
+	if (status) {
+		dev_err(hdev->dev, "Set queue count failed, status: %d\n",
+			status);
+		return -EIO;
+	}
+
+	nr_ioqs = min(result & 0xffff, result >> 16) + 1;
+	*cnt = min(*cnt, nr_ioqs);
+	if (*cnt == 0) {
+		dev_err(hdev->dev, "Illegal queue count: zero\n");
+		return -EIO;
+	}
+	return 0;
+}
+
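+/*
+ * Resize the doorbell BAR mapping for the granted queue count, then
+ * re-allocate the IRQ vectors with affinity spreading; pre_vectors = 1
+ * keeps vector 0 for the admin queue, whose IRQ has to be re-requested
+ * after the vectors change.
+ */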
+static int spraid_setup_io_queues(struct spraid_dev *hdev)
+{
+	struct spraid_queue *adminq = &hdev->queues[0];
+	struct pci_dev *pdev = hdev->pdev;
+	u32 nr_ioqs = num_online_cpus();
+	u32 i, size;
+	int ret;
+
+	struct irq_affinity affd = {
+		.pre_vectors = 1
+	};
+
+	ret = spraid_set_queue_cnt(hdev, &nr_ioqs);
+	if (ret < 0)
+		return ret;
+
+	size = spraid_bar_size(hdev, nr_ioqs);
+	ret = spraid_remap_bar(hdev, size);
+	if (ret)
+		return -ENOMEM;
+
+	adminq->q_db = hdev->dbs;
+
+	pci_free_irq(pdev, 0, adminq);
+	pci_free_irq_vectors(pdev);
+
+	ret = pci_alloc_irq_vectors_affinity(pdev, 1, (nr_ioqs + 1),
+		PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+	if (ret <= 0)
+		return -EIO;
+
+	hdev->num_vecs = ret;
+
+	hdev->max_qid = max(ret - 1, 1);
+
+	ret = pci_request_irq(pdev, adminq->cq_vector, spraid_irq, NULL,
+			      adminq, "spraid%d_q%d",
+			      hdev->instance, adminq->qid);
+	if (ret) {
+		dev_err(hdev->dev, "Request admin irq failed\n");
+		adminq->cq_vector = -1;
+		return ret;
+	}
+
+	for (i = hdev->queue_count; i <= hdev->max_qid; i++) {
+		ret = spraid_alloc_queue(hdev, i, hdev->ioq_depth);
+		if (ret)
+			break;
+	}
+	dev_info(hdev->dev, "[%s] max_qid: %d, queue_count: %d;"
+		 " online_queue: %d, ioq_depth: %d\n",
+		 __func__, hdev->max_qid, hdev->queue_count,
+		 hdev->online_queues, hdev->ioq_depth);
+
+	return spraid_create_io_queues(hdev);
+}
+
+static void spraid_delete_io_queues(struct spraid_dev *hdev)
+{
+	u16 queues = hdev->online_queues - 1;
+	u8 opcode = SPRAID_ADMIN_DELETE_SQ;
+	u16 i, pass;
+
+	if (!pci_device_is_present(hdev->pdev)) {
+		dev_err(hdev->dev,
+			"pci_device is not present, skip disable io queues\n");
+		return;
+	}
+
+	if (hdev->online_queues < 2) {
+		dev_err(hdev->dev, "[%s] err, io queue has been delete\n",
+			__func__);
+		return;
+	}
+
+	for (pass = 0; pass < 2; pass++) {
+		for (i = queues; i > 0; i--)
+			if (spraid_delete_queue(hdev, opcode, i))
+				break;
+
+		opcode = SPRAID_ADMIN_DELETE_CQ;
+	}
+}
+
+static void spraid_remove_io_queues(struct spraid_dev *hdev)
+{
+	spraid_delete_io_queues(hdev);
+	spraid_free_io_queues(hdev);
+}
+
+static void spraid_pci_disable(struct spraid_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+	u32 i;
+
+	for (i = 0; i < hdev->online_queues; i++)
+		pci_free_irq(pdev, hdev->queues[i].cq_vector, &hdev->queues[i]);
+	pci_free_irq_vectors(pdev);
+	if (pci_is_enabled(pdev)) {
+		pci_disable_pcie_error_reporting(pdev);
+		pci_disable_device(pdev);
+	}
+	hdev->online_queues = 0;
+}
+
+static void spraid_disable_admin_queue(struct spraid_dev *hdev, bool shutdown)
+{
+	struct spraid_queue *adminq = &hdev->queues[0];
+	u16 start, end;
+
+	if (pci_device_is_present(hdev->pdev)) {
+		if (shutdown)
+			spraid_shutdown_ctrl(hdev);
+		else
+			spraid_disable_ctrl(hdev);
+	}
+
+	if (hdev->queue_count == 0) {
+		dev_err(hdev->dev, "[%s] err, admin queue has been delete\n",
+			__func__);
+		return;
+	}
+
+	spin_lock_irq(&adminq->cq_lock);
+	spraid_process_cq(adminq, &start, &end, -1);
+	spin_unlock_irq(&adminq->cq_lock);
+
+	spraid_complete_cqes(adminq, start, end);
+	spraid_free_admin_queue(hdev);
+}
+
+static int spraid_create_dma_pools(struct spraid_dev *hdev)
+{
+	int i;
+	char poolname[20] = { 0 };
+
+	hdev->prp_page_pool = dma_pool_create("prp list page", hdev->dev,
+					      PAGE_SIZE, PAGE_SIZE, 0);
+
+	if (!hdev->prp_page_pool) {
+		dev_err(hdev->dev, "create prp_page_pool failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < small_pool_num; i++) {
+		sprintf(poolname, "prp_list_256_%d", i);
+		hdev->prp_small_pool[i] =
+			dma_pool_create(poolname, hdev->dev, SMALL_POOL_SIZE,
+					SMALL_POOL_SIZE, 0);
+
+		if (!hdev->prp_small_pool[i]) {
+			dev_err(hdev->dev, "create prp_small_pool %d failed\n",
+				i);
+			goto destroy_prp_small_pool;
+		}
+	}
+
+	return 0;
+
+destroy_prp_small_pool:
+	while (i > 0)
+		dma_pool_destroy(hdev->prp_small_pool[--i]);
+	dma_pool_destroy(hdev->prp_page_pool);
+
+	return -ENOMEM;
+}
+
+static void spraid_destroy_dma_pools(struct spraid_dev *hdev)
+{
+	int i;
+
+	for (i = 0; i < small_pool_num; i++)
+		dma_pool_destroy(hdev->prp_small_pool[i]);
+	dma_pool_destroy(hdev->prp_page_pool);
+}
+
+static int spraid_get_dev_list(struct spraid_dev *hdev,
+			       struct spraid_dev_info *devices)
+{
+	u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+	struct spraid_admin_command admin_cmd;
+	struct spraid_dev_list *list_buf;
+	dma_addr_t data_dma = 0;
+	u32 i, idx, hdid, ndev;
+	int ret = 0;
+
+	list_buf = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+				      &data_dma, GFP_KERNEL);
+	if (!list_buf)
+		return -ENOMEM;
+
+	for (idx = 0; idx < nd;) {
+		memset(&admin_cmd, 0, sizeof(admin_cmd));
+		admin_cmd.get_info.opcode = SPRAID_ADMIN_GET_INFO;
+		admin_cmd.get_info.type = SPRAID_GET_INFO_DEV_LIST;
+		admin_cmd.get_info.cdw11 = cpu_to_le32(idx);
+		admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+		ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd,
+						   NULL, NULL, 0);
+
+		if (ret) {
+			dev_err(hdev->dev, "Get device list failed, nd: %u;"
+				"idx: %u, ret: %d\n",
+				nd, idx, ret);
+			goto out;
+		}
+		ndev = le32_to_cpu(list_buf->dev_num);
+
+		dev_info(hdev->dev, "ndev numbers: %u\n", ndev);
+
+		for (i = 0; i < ndev; i++) {
+			hdid = le32_to_cpu(list_buf->devices[i].hdid);
+			dev_info(hdev->dev, "list_buf->devices[%d], hdid: %u;"
+				 "target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+				 i, hdid,
+				 le16_to_cpu(list_buf->devices[i].target),
+				 list_buf->devices[i].channel,
+				 list_buf->devices[i].lun,
+				 list_buf->devices[i].attr);
+			if (hdid > nd || hdid == 0) {
+				dev_err(hdev->dev, "err, hdid[%d] invalid\n",
+					hdid);
+				continue;
+			}
+			memcpy(&devices[hdid - 1], &list_buf->devices[i],
+			       sizeof(struct spraid_dev_info));
+		}
+		idx += ndev;
+
+		if (idx < MAX_DEV_ENTRY_PER_PAGE_4K)
+			break;
+	}
+
+out:
+	dma_free_coherent(hdev->dev, PAGE_SIZE, list_buf, data_dma);
+	return ret;
+}
+
+static void spraid_send_aen(struct spraid_dev *hdev, u16 cid)
+{
+	struct spraid_queue *adminq = &hdev->queues[0];
+	struct spraid_admin_command admin_cmd;
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.common.opcode = SPRAID_ADMIN_ASYNC_EVENT;
+	admin_cmd.common.command_id = cid;
+
+	spraid_submit_cmd(adminq, &admin_cmd);
+	dev_info(hdev->dev, "send aen, cid[%d]\n", cid);
+}
+
+static inline void spraid_send_all_aen(struct spraid_dev *hdev)
+{
+	u16 i;
+
+	for (i = 0; i < hdev->ctrl_info->aerl; i++)
+		spraid_send_aen(hdev, i + SPRAID_AQ_BLK_MQ_DEPTH);
+}
+
+static int spraid_add_device(struct spraid_dev *hdev,
+			     struct spraid_dev_info *device)
+{
+	struct Scsi_Host *shost = hdev->shost;
+	struct scsi_device *sdev;
+
+	dev_info(hdev->dev, "add device, hdid: %u target: %d, channel: %d;"
+		 " lun: %d, attr[0x%x]\n",
+		 le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+		 device->channel, device->lun, device->attr);
+
+	sdev = scsi_device_lookup(shost, device->channel,
+				  le16_to_cpu(device->target), 0);
+	if (sdev) {
+		dev_warn(hdev->dev, "Device is already exist, channel: %d;"
+			 " target_id: %d, lun: %d\n",
+			 device->channel, le16_to_cpu(device->target), 0);
+		scsi_device_put(sdev);
+		return -EEXIST;
+	}
+	scsi_add_device(shost, device->channel, le16_to_cpu(device->target), 0);
+	return 0;
+}
+
+static int spraid_rescan_device(struct spraid_dev *hdev,
+				struct spraid_dev_info *device)
+{
+	struct Scsi_Host *shost = hdev->shost;
+	struct scsi_device *sdev;
+
+	dev_info(hdev->dev, "rescan device, hdid: %u target: %d, channel: %d;"
+			" lun: %d, attr[0x%x]\n",
+			le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+			device->channel, device->lun, device->attr);
+
+	sdev = scsi_device_lookup(shost, device->channel,
+				  le16_to_cpu(device->target), 0);
+	if (!sdev) {
+		dev_warn(hdev->dev, "device is not exit rescan it, channel: %d;"
+			 " target_id: %d, lun: %d\n",
+			 device->channel, le16_to_cpu(device->target), 0);
+		return -ENODEV;
+	}
+
+	scsi_rescan_device(&sdev->sdev_gendev);
+	scsi_device_put(sdev);
+	return 0;
+}
+
+static int spraid_remove_device(struct spraid_dev *hdev,
+				struct spraid_dev_info *org_device)
+{
+	struct Scsi_Host *shost = hdev->shost;
+	struct scsi_device *sdev;
+
+	dev_info(hdev->dev, "remove device, hdid: %u target: %d, channel: %d;"
+		 " lun: %d, attr[0x%x]\n",
+		 le32_to_cpu(org_device->hdid), le16_to_cpu(org_device->target),
+		 org_device->channel, org_device->lun, org_device->attr);
+
+	sdev = scsi_device_lookup(shost, org_device->channel,
+				  le16_to_cpu(org_device->target), 0);
+	if (!sdev) {
+		dev_warn(hdev->dev, "device is not exit remove it, channel: %d;"
+			 " target_id: %d, lun: %d\n",
+			 org_device->channel,
+			 le16_to_cpu(org_device->target), 0);
+		return -ENODEV;
+	}
+
+	scsi_remove_device(sdev);
+	scsi_device_put(sdev);
+	return 0;
+}
+
+static int spraid_dev_list_init(struct spraid_dev *hdev)
+{
+	u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+	int i, ret;
+
+	hdev->devices = kzalloc_node(nd * sizeof(struct spraid_dev_info),
+				     GFP_KERNEL, hdev->numa_node);
+	if (!hdev->devices)
+		return -ENOMEM;
+
+	ret = spraid_get_dev_list(hdev, hdev->devices);
+	if (ret) {
+		dev_err(hdev->dev,
+			"Ignore failure of getting device list;"
+			" within initialization\n");
+		return 0;
+	}
+
+	for (i = 0; i < nd; i++) {
+		if (SPRAID_DEV_INFO_FLAG_VALID(hdev->devices[i].flag) &&
+		    SPRAID_DEV_INFO_ATTR_BOOT(hdev->devices[i].attr)) {
+			spraid_add_device(hdev, &hdev->devices[i]);
+			break;
+		}
+	}
+	return 0;
+}
+
+static int luntarget_cmp_func(const void *l, const void *r)
+{
+	const struct spraid_dev_info *ln = l;
+	const struct spraid_dev_info *rn = r;
+
+	if (ln->channel == rn->channel)
+		return le16_to_cpu(ln->target) - le16_to_cpu(rn->target);
+
+	return ln->channel - rn->channel;
+}
+
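+/*
+ * Rescan worker: fetch a fresh device list and diff it against the
+ * cached one. Newly valid entries are collected, sorted by channel and
+ * target, then added; entries flagged as changed are rescanned; entries
+ * that lost their valid bit are removed (the cached flag is cleared
+ * under devices_rwsem).
+ */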
+static void spraid_scan_work(struct work_struct *work)
+{
+	struct spraid_dev *hdev =
+		container_of(work, struct spraid_dev, scan_work);
+	struct spraid_dev_info *devices, *org_devices;
+	struct spraid_dev_info *sortdevice;
+	u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+	u8 flag, org_flag;
+	int i, ret;
+	int count = 0;
+
+	devices = kcalloc(nd, sizeof(struct spraid_dev_info), GFP_KERNEL);
+	if (!devices)
+		return;
+
+	sortdevice = kcalloc(nd, sizeof(struct spraid_dev_info), GFP_KERNEL);
+	if (!sortdevice)
+		goto free_list;
+
+	ret = spraid_get_dev_list(hdev, devices);
+	if (ret)
+		goto free_all;
+	org_devices = hdev->devices;
+	for (i = 0; i < nd; i++) {
+		org_flag = org_devices[i].flag;
+		flag = devices[i].flag;
+
+		dev_log_dbg(hdev->dev, "i: %d, org_flag: 0x%x, flag: 0x%x\n",
+			    i, org_flag, flag);
+
+		if (SPRAID_DEV_INFO_FLAG_VALID(flag)) {
+			if (!SPRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+				down_write(&hdev->devices_rwsem);
+				memcpy(&org_devices[i], &devices[i],
+						sizeof(struct spraid_dev_info));
+				memcpy(&sortdevice[count++], &devices[i],
+						sizeof(struct spraid_dev_info));
+				up_write(&hdev->devices_rwsem);
+			} else if (SPRAID_DEV_INFO_FLAG_CHANGE(flag)) {
+				spraid_rescan_device(hdev, &devices[i]);
+			}
+		} else {
+			if (SPRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+				down_write(&hdev->devices_rwsem);
+				org_devices[i].flag &= 0xfe;
+				up_write(&hdev->devices_rwsem);
+				spraid_remove_device(hdev, &org_devices[i]);
+			}
+		}
+	}
+
+	dev_info(hdev->dev, "scan work add device count = %d\n", count);
+
+	sort(sortdevice, count, sizeof(sortdevice[0]),
+	     luntarget_cmp_func, NULL);
+
+	for (i = 0; i < count; i++)
+		spraid_add_device(hdev, &sortdevice[i]);
+
+free_all:
+	kfree(sortdevice);
+free_list:
+	kfree(devices);
+}
+
+static void spraid_timesyn_work(struct work_struct *work)
+{
+	struct spraid_dev *hdev =
+		container_of(work, struct spraid_dev, timesyn_work);
+
+	spraid_configure_timestamp(hdev);
+}
+
+static int spraid_init_ctrl_info(struct spraid_dev *hdev);
+static void spraid_fw_act_work(struct work_struct *work)
+{
+	struct spraid_dev *hdev =
+		container_of(work, struct spraid_dev, fw_act_work);
+
+	if (spraid_init_ctrl_info(hdev))
+		dev_err(hdev->dev, "get ctrl info failed after fw act\n");
+}
+
+static void spraid_queue_scan(struct spraid_dev *hdev)
+{
+	queue_work(spraid_wq, &hdev->scan_work);
+}
+
+static void spraid_handle_aen_notice(struct spraid_dev *hdev, u32 result)
+{
+	switch ((result & 0xff00) >> 8) {
+	case SPRAID_AEN_DEV_CHANGED:
+		spraid_queue_scan(hdev);
+		break;
+	case SPRAID_AEN_FW_ACT_START:
+		dev_info(hdev->dev, "fw activation starting\n");
+		break;
+	case SPRAID_AEN_HOST_PROBING:
+		break;
+	default:
+		dev_warn(hdev->dev, "async event result %08x\n", result);
+	}
+}
+
+static void spraid_handle_aen_vs(struct spraid_dev *hdev,
+				 u32 result, u32 result1)
+{
+	switch ((result & 0xff00) >> 8) {
+	case SPRAID_AEN_TIMESYN:
+		queue_work(spraid_wq, &hdev->timesyn_work);
+		break;
+	case SPRAID_AEN_FW_ACT_FINISH:
+		dev_info(hdev->dev, "fw activation finish\n");
+		queue_work(spraid_wq, &hdev->fw_act_work);
+		break;
+	case SPRAID_AEN_EVENT_MIN ... SPRAID_AEN_EVENT_MAX:
+		dev_info(hdev->dev, "rcv card event[%d];"
+			 " param1[0x%x] param2[0x%x]\n",
+			 (result & 0xff00) >> 8, result, result1);
+		break;
+	default:
+		dev_warn(hdev->dev, "async event result: 0x%x\n", result);
+	}
+}
+
+static int spraid_alloc_resources(struct spraid_dev *hdev)
+{
+	int ret, nqueue;
+
+	ret = ida_alloc(&spraid_instance_ida, GFP_KERNEL);
+	if (ret < 0) {
+		dev_err(hdev->dev, "Get instance id failed\n");
+		return ret;
+	}
+	hdev->instance = ret;
+
+	hdev->ctrl_info = kzalloc_node(sizeof(*hdev->ctrl_info),
+				       GFP_KERNEL, hdev->numa_node);
+	if (!hdev->ctrl_info) {
+		ret = -ENOMEM;
+		goto release_instance;
+	}
+
+	ret = spraid_create_dma_pools(hdev);
+	if (ret)
+		goto free_ctrl_info;
+
+	nqueue = num_possible_cpus() + 1;
+	hdev->queues = kcalloc_node(nqueue, sizeof(struct spraid_queue),
+				    GFP_KERNEL, hdev->numa_node);
+	if (!hdev->queues) {
+		ret = -ENOMEM;
+		goto destroy_dma_pools;
+	}
+
+	ret = spraid_alloc_admin_cmds(hdev);
+	if (ret)
+		goto free_queues;
+
+	dev_info(hdev->dev, "[%s] queues num: %d\n", __func__, nqueue);
+
+	return 0;
+
+free_queues:
+	kfree(hdev->queues);
+destroy_dma_pools:
+	spraid_destroy_dma_pools(hdev);
+free_ctrl_info:
+	kfree(hdev->ctrl_info);
+release_instance:
+	ida_free(&spraid_instance_ida, hdev->instance);
+	return ret;
+}
+
+static void spraid_free_resources(struct spraid_dev *hdev)
+{
+	spraid_free_admin_cmds(hdev);
+	kfree(hdev->queues);
+	spraid_destroy_dma_pools(hdev);
+	kfree(hdev->ctrl_info);
+	ida_free(&spraid_instance_ida, hdev->instance);
+}
+
+static void spraid_bsg_unmap_data(struct spraid_dev *hdev, struct bsg_job *job)
+{
+	struct request *rq = blk_mq_rq_from_pdu(job);
+	struct spraid_iod *iod = job->dd_data;
+	enum dma_data_direction dma_dir =
+		rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+	if (iod->nsge)
+		dma_unmap_sg(hdev->dev, iod->sg, iod->nsge, dma_dir);
+
+	spraid_free_iod_res(hdev, iod);
+}
+
+static int spraid_bsg_map_data(struct spraid_dev *hdev, struct bsg_job *job,
+			       struct spraid_admin_command *cmd)
+{
+	struct request *rq = blk_mq_rq_from_pdu(job);
+	struct spraid_iod *iod = job->dd_data;
+	enum dma_data_direction dma_dir =
+		rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+	int ret = 0;
+
+	iod->sg = job->request_payload.sg_list;
+	iod->nsge = job->request_payload.sg_cnt;
+	iod->length = job->request_payload.payload_len;
+	iod->use_sgl = false;
+	iod->npages = -1;
+	iod->sg_drv_mgmt = false;
+
+	if (!iod->nsge)
+		goto out;
+
+	ret = dma_map_sg_attrs(hdev->dev, iod->sg, iod->nsge,
+			       dma_dir, DMA_ATTR_NO_WARN);
+	if (!ret) {
+		/* dma_map_sg_attrs() returns 0 on failure; report an error */
+		ret = -EIO;
+		goto out;
+	}
+
+	ret = spraid_setup_prps(hdev, iod);
+	if (ret)
+		goto unmap;
+
+	cmd->common.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+	cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma);
+
+	return 0;
+
+unmap:
+	dma_unmap_sg(hdev->dev, iod->sg, iod->nsge, dma_dir);
+out:
+	return ret;
+}
+
+static int spraid_get_ctrl_info(struct spraid_dev *hdev,
+				struct spraid_ctrl_info *ctrl_info)
+{
+	struct spraid_admin_command admin_cmd;
+	u8 *data_ptr = NULL;
+	dma_addr_t data_dma = 0;
+	int ret;
+
+	data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+				      &data_dma, GFP_KERNEL);
+	if (!data_ptr)
+		return -ENOMEM;
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.get_info.opcode = SPRAID_ADMIN_GET_INFO;
+	admin_cmd.get_info.type = SPRAID_GET_INFO_CTRL;
+	admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+	ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+	if (!ret)
+		memcpy(ctrl_info, data_ptr, sizeof(struct spraid_ctrl_info));
+
+	dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+	return ret;
+}
+
+static int spraid_init_ctrl_info(struct spraid_dev *hdev)
+{
+	int ret;
+
+	hdev->ctrl_info->nd = cpu_to_le32(240);
+	hdev->ctrl_info->mdts = 8;
+	hdev->ctrl_info->max_cmds = cpu_to_le16(4096);
+	hdev->ctrl_info->max_num_sge = cpu_to_le16(128);
+	hdev->ctrl_info->max_channel = cpu_to_le16(4);
+	hdev->ctrl_info->max_tgt_id = cpu_to_le32(3239);
+	hdev->ctrl_info->max_lun = cpu_to_le16(2);
+
+	ret = spraid_get_ctrl_info(hdev, hdev->ctrl_info);
+	if (ret)
+		dev_err(hdev->dev, "get controller info failed: %d\n", ret);
+
+	dev_info(hdev->dev, "[%s]nd = %d\n", __func__, hdev->ctrl_info->nd);
+	dev_info(hdev->dev, "[%s]max_cmd = %d\n",
+		 __func__, hdev->ctrl_info->max_cmds);
+	dev_info(hdev->dev, "[%s]max_channel = %d\n",
+		 __func__, hdev->ctrl_info->max_channel);
+	dev_info(hdev->dev, "[%s]max_tgt_id = %d\n",
+		 __func__, hdev->ctrl_info->max_tgt_id);
+	dev_info(hdev->dev, "[%s]max_lun = %d\n",
+		 __func__, hdev->ctrl_info->max_lun);
+	dev_info(hdev->dev, "[%s]max_num_sge = %d\n",
+		 __func__, hdev->ctrl_info->max_num_sge);
+	dev_info(hdev->dev, "[%s]lun_num_boot = %d\n",
+		 __func__, hdev->ctrl_info->lun_num_in_boot);
+	dev_info(hdev->dev, "[%s]mdts = %d\n", __func__, hdev->ctrl_info->mdts);
+	dev_info(hdev->dev, "[%s]acl = %d\n", __func__, hdev->ctrl_info->acl);
+	dev_info(hdev->dev, "[%s]aer1 = %d\n", __func__, hdev->ctrl_info->aerl);
+	dev_info(hdev->dev, "[%s]card_type = %d\n",
+		 __func__, hdev->ctrl_info->card_type);
+	dev_info(hdev->dev, "[%s]rtd3e = %d\n",
+		 __func__, hdev->ctrl_info->rtd3e);
+	dev_info(hdev->dev, "[%s]sn = %s\n", __func__, hdev->ctrl_info->sn);
+	dev_info(hdev->dev, "[%s]fr = %s\n", __func__, hdev->ctrl_info->fr);
+
+	if (!hdev->ctrl_info->aerl)
+		hdev->ctrl_info->aerl = 1;
+	if (hdev->ctrl_info->aerl > SPRAID_NR_AEN_COMMANDS)
+		hdev->ctrl_info->aerl = SPRAID_NR_AEN_COMMANDS;
+
+	return 0;
+}
+
+#define SPRAID_MAX_ADMIN_PAYLOAD_SIZE	BIT(16)
+static int spraid_alloc_iod_ext_mem_pool(struct spraid_dev *hdev)
+{
+	u16 max_sge = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+	size_t alloc_size;
+
+	alloc_size = spraid_iod_ext_size(hdev, SPRAID_MAX_ADMIN_PAYLOAD_SIZE,
+					 max_sge, true, false);
+	if (alloc_size > PAGE_SIZE)
+		dev_warn(hdev->dev, "It is unreasonable ;"
+			 " sg allocation more than one page\n");
+	hdev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
+						mempool_kfree,
+						(void *)alloc_size, GFP_KERNEL,
+						hdev->numa_node);
+	if (!hdev->iod_mempool) {
+		dev_err(hdev->dev, "Create iod extension memory pool failed\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void spraid_free_iod_ext_mem_pool(struct spraid_dev *hdev)
+{
+	mempool_destroy(hdev->iod_mempool);
+}
+
+static int spraid_user_admin_cmd(struct spraid_dev *hdev, struct bsg_job *job)
+{
+	struct spraid_bsg_request *bsg_req = job->request;
+	struct spraid_passthru_common_cmd *cmd = &(bsg_req->admcmd);
+	struct spraid_admin_command admin_cmd;
+	u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+	u32 result[2] = {0};
+	int status;
+
+	if (hdev->state >= SPRAID_RESETTING) {
+		dev_err(hdev->dev, "[%s] err, host state:[%d] is not right\n",
+			__func__, hdev->state);
+		return -EBUSY;
+	}
+
+	dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x] init\n",
+		 __func__, cmd->opcode, cmd->info_0.subopcode);
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.common.opcode = cmd->opcode;
+	admin_cmd.common.flags = cmd->flags;
+	admin_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+	admin_cmd.common.cdw2[0] = cpu_to_le32(cmd->cdw2);
+	admin_cmd.common.cdw2[1] = cpu_to_le32(cmd->cdw3);
+	admin_cmd.common.cdw10 = cpu_to_le32(cmd->cdw10);
+	admin_cmd.common.cdw11 = cpu_to_le32(cmd->cdw11);
+	admin_cmd.common.cdw12 = cpu_to_le32(cmd->cdw12);
+	admin_cmd.common.cdw13 = cpu_to_le32(cmd->cdw13);
+	admin_cmd.common.cdw14 = cpu_to_le32(cmd->cdw14);
+	admin_cmd.common.cdw15 = cpu_to_le32(cmd->cdw15);
+
+	status = spraid_bsg_map_data(hdev, job, &admin_cmd);
+	if (status) {
+		dev_err(hdev->dev, "[%s] err, map data failed\n", __func__);
+		return status;
+	}
+
+	status = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, &result[0],
+					      &result[1], timeout);
+	if (status >= 0) {
+		job->reply_len = sizeof(result);
+		memcpy(job->reply, result, sizeof(result));
+	}
+
+	dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x];"
+		 " status[0x%x] result0[0x%x] result1[0x%x]\n",
+		 __func__, cmd->opcode, cmd->info_0.subopcode,
+		 status, result[0], result[1]);
+
+	spraid_bsg_unmap_data(hdev, job);
+
+	return status;
+}
+
+static int spraid_alloc_ioq_ptcmds(struct spraid_dev *hdev)
+{
+	int i;
+	int ptnum = SPRAID_NR_IOQ_PTCMDS;
+
+	INIT_LIST_HEAD(&hdev->ioq_pt_list);
+	spin_lock_init(&hdev->ioq_pt_lock);
+
+	hdev->ioq_ptcmds = kcalloc_node(ptnum, sizeof(struct spraid_cmd),
+					GFP_KERNEL, hdev->numa_node);
+
+	if (!hdev->ioq_ptcmds) {
+		dev_err(hdev->dev, "Alloc ioq_ptcmds failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < ptnum; i++) {
+		hdev->ioq_ptcmds[i].qid = i / SPRAID_PTCMDS_PERQ + 1;
+		hdev->ioq_ptcmds[i].cid = i % SPRAID_PTCMDS_PERQ
+					  + SPRAID_IO_BLK_MQ_DEPTH;
+		list_add_tail(&(hdev->ioq_ptcmds[i].list), &hdev->ioq_pt_list);
+	}
+
+	dev_info(hdev->dev, "Alloc ioq_ptcmds success, ptnum[%d]\n", ptnum);
+
+	return 0;
+}
+
+static void spraid_free_ioq_ptcmds(struct spraid_dev *hdev)
+{
+	kfree(hdev->ioq_ptcmds);
+	hdev->ioq_ptcmds = NULL;
+
+	INIT_LIST_HEAD(&hdev->ioq_pt_list);
+}
+
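+/*
+ * Sync IO passthrough: cids at and above SPRAID_IO_BLK_MQ_DEPTH are
+ * reserved on each IO queue for these commands, and each cid selects a
+ * fixed SCSI_SENSE_BUFFERSIZE slice of the queue's DMA-mapped sense
+ * buffer.
+ */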
+static int spraid_submit_ioq_sync_cmd(struct spraid_dev *hdev,
+				      struct spraid_ioq_command *cmd,
+				      u32 *result, u32 *reslen, u32 timeout)
+{
+	int ret;
+	dma_addr_t sense_dma;
+	struct spraid_queue *ioq;
+	void *sense_addr = NULL;
+	struct spraid_cmd *pt_cmd = spraid_get_cmd(hdev, SPRAID_CMD_IOPT);
+
+	if (!pt_cmd) {
+		dev_err(hdev->dev, "err, get ioq cmd failed\n");
+		return -EFAULT;
+	}
+
+	timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+	init_completion(&pt_cmd->cmd_done);
+
+	ioq = &hdev->queues[pt_cmd->qid];
+	ret = pt_cmd->cid * SCSI_SENSE_BUFFERSIZE;
+	sense_addr = ioq->sense + ret;
+	sense_dma = ioq->sense_dma_addr + ret;
+
+	cmd->common.sense_addr = cpu_to_le64(sense_dma);
+	cmd->common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+	cmd->common.command_id = pt_cmd->cid;
+
+	spraid_submit_cmd(ioq, cmd);
+
+	if (!wait_for_completion_timeout(&pt_cmd->cmd_done, timeout)) {
+		dev_err(hdev->dev, "[%s] cid[%d] qid[%d] timeout;"
+			" opcode[0x%x] subopcode[0x%x]\n",
+			__func__, pt_cmd->cid, pt_cmd->qid, cmd->common.opcode,
+			(le32_to_cpu(cmd->common.cdw3[0]) & 0xffff));
+		WRITE_ONCE(pt_cmd->state, SPRAID_CMD_TIMEOUT);
+		spraid_put_cmd(hdev, pt_cmd, SPRAID_CMD_IOPT);
+		return -EINVAL;
+	}
+
+	if (result && reslen) {
+		if ((pt_cmd->status & 0x17f) == 0x101) {
+			memcpy(result, sense_addr, SCSI_SENSE_BUFFERSIZE);
+			*reslen = SCSI_SENSE_BUFFERSIZE;
+		}
+	}
+
+	spraid_put_cmd(hdev, pt_cmd, SPRAID_CMD_IOPT);
+
+	return pt_cmd->status;
+}
+
+static int spraid_user_ioq_cmd(struct spraid_dev *hdev, struct bsg_job *job)
+{
+	struct spraid_bsg_request *bsg_req =
+		(struct spraid_bsg_request *)(job->request);
+	struct spraid_ioq_passthru_cmd *cmd = &(bsg_req->ioqcmd);
+	struct spraid_ioq_command ioq_cmd;
+	int status = 0;
+	u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+
+	if (cmd->data_len > PAGE_SIZE) {
+		dev_err(hdev->dev, "[%s] data len bigger than 4k\n", __func__);
+		return -EFAULT;
+	}
+
+	if (hdev->state != SPRAID_LIVE) {
+		dev_err(hdev->dev, "[%s] err, host state:[%d] is not live\n",
+			__func__, hdev->state);
+		return -EBUSY;
+	}
+
+	dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x] init;"
+		 " datalen[%d]\n",
+		 __func__, cmd->opcode, cmd->info_1.subopcode, cmd->data_len);
+
+	memset(&ioq_cmd, 0, sizeof(ioq_cmd));
+	ioq_cmd.common.opcode = cmd->opcode;
+	ioq_cmd.common.flags = cmd->flags;
+	ioq_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+	ioq_cmd.common.sense_len = cpu_to_le16(cmd->info_0.res_sense_len);
+	ioq_cmd.common.cdb_len = cmd->info_0.cdb_len;
+	ioq_cmd.common.rsvd2 = cmd->info_0.rsvd0;
+	ioq_cmd.common.cdw3[0] = cpu_to_le32(cmd->cdw3);
+	ioq_cmd.common.cdw3[1] = cpu_to_le32(cmd->cdw4);
+	ioq_cmd.common.cdw3[2] = cpu_to_le32(cmd->cdw5);
+
+	ioq_cmd.common.cdw10[0] = cpu_to_le32(cmd->cdw10);
+	ioq_cmd.common.cdw10[1] = cpu_to_le32(cmd->cdw11);
+	ioq_cmd.common.cdw10[2] = cpu_to_le32(cmd->cdw12);
+	ioq_cmd.common.cdw10[3] = cpu_to_le32(cmd->cdw13);
+	ioq_cmd.common.cdw10[4] = cpu_to_le32(cmd->cdw14);
+	ioq_cmd.common.cdw10[5] = cpu_to_le32(cmd->data_len);
+
+	memcpy(ioq_cmd.common.cdb, &cmd->cdw16, cmd->info_0.cdb_len);
+
+	ioq_cmd.common.cdw26[0] = cpu_to_le32(cmd->cdw26[0]);
+	ioq_cmd.common.cdw26[1] = cpu_to_le32(cmd->cdw26[1]);
+	ioq_cmd.common.cdw26[2] = cpu_to_le32(cmd->cdw26[2]);
+	ioq_cmd.common.cdw26[3] = cpu_to_le32(cmd->cdw26[3]);
+
+	status = spraid_bsg_map_data(hdev, job,
+				     (struct spraid_admin_command *)&ioq_cmd);
+	if (status) {
+		dev_err(hdev->dev, "[%s] err, map data failed\n", __func__);
+		return status;
+	}
+
+	status = spraid_submit_ioq_sync_cmd(hdev, &ioq_cmd, job->reply,
+					    &job->reply_len, timeout);
+
+	dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x], status[0x%x];"
+		 " reply_len[%d]\n",
+		 __func__, cmd->opcode, cmd->info_1.subopcode,
+		 status, job->reply_len);
+
+	spraid_bsg_unmap_data(hdev, job);
+
+	return status;
+}
+
+static bool spraid_check_scmd_completed(struct scsi_cmnd *scmd)
+{
+	struct spraid_dev *hdev = shost_priv(scmd->device->host);
+	struct spraid_iod *iod = scsi_cmd_priv(scmd);
+	struct spraid_queue *spraidq;
+	u16 hwq, cid;
+
+	spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+	spraidq = &hdev->queues[hwq];
+	if (READ_ONCE(iod->state) == SPRAID_CMD_COMPLETE ||
+	    spraid_poll_cq(spraidq, cid)) {
+		dev_warn(hdev->dev, "cid[%d] qid[%d] has been completed\n",
+			 cid, spraidq->qid);
+		return true;
+	}
+	return false;
+}
+
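+/*
+ * blk-mq timeout hook: if the command has actually completed (or a CQ
+ * poll finds it), keep the timer running; otherwise flip the iod state
+ * to SPRAID_CMD_TIMEOUT with cmpxchg so that exactly one of the timeout
+ * and completion paths wins.
+ */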
+static enum blk_eh_timer_return spraid_scmd_timeout(struct scsi_cmnd *scmd)
+{
+	struct spraid_iod *iod = scsi_cmd_priv(scmd);
+	unsigned int timeout = scmd->device->request_queue->rq_timeout;
+
+	if (spraid_check_scmd_completed(scmd))
+		goto out;
+
+	if (time_after(jiffies, scmd->jiffies_at_alloc + timeout)) {
+		if (cmpxchg(&iod->state,
+			    SPRAID_CMD_IN_FLIGHT,
+			    SPRAID_CMD_TIMEOUT) == SPRAID_CMD_IN_FLIGHT) {
+			return BLK_EH_DONE;
+		}
+	}
+out:
+	return BLK_EH_RESET_TIMER;
+}
+
+/* send abort command via the admin queue for now */
+static int spraid_send_abort_cmd(struct spraid_dev *hdev,
+				 u32 hdid, u16 qid, u16 cid)
+{
+	struct spraid_admin_command admin_cmd;
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.abort.opcode = SPRAID_ADMIN_ABORT_CMD;
+	admin_cmd.abort.hdid = cpu_to_le32(hdid);
+	admin_cmd.abort.sqid = cpu_to_le16(qid);
+	admin_cmd.abort.cid = cpu_to_le16(cid);
+
+	return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+/* send reset command via the admin queue for now */
+static int spraid_send_reset_cmd(struct spraid_dev *hdev, int type, u32 hdid)
+{
+	struct spraid_admin_command admin_cmd;
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.reset.opcode = SPRAID_ADMIN_RESET;
+	admin_cmd.reset.hdid = cpu_to_le32(hdid);
+	admin_cmd.reset.type = type;
+
+	return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
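+/*
+ * Host state machine. Legal transitions: NEW/RESETTING -> LIVE,
+ * LIVE -> RESETTING, NEW/LIVE/RESETTING -> DEAD, and anything except
+ * DELETING -> DELETING. Returns true only if the transition was made.
+ */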
+static bool spraid_change_host_state(struct spraid_dev *hdev,
+				     enum spraid_state newstate)
+{
+	unsigned long flags;
+	enum spraid_state oldstate;
+	bool change = false;
+
+	spin_lock_irqsave(&hdev->state_lock, flags);
+
+	oldstate = hdev->state;
+	switch (newstate) {
+	case SPRAID_LIVE:
+		switch (oldstate) {
+		case SPRAID_NEW:
+		case SPRAID_RESETTING:
+			change = true;
+			break;
+		default:
+			break;
+		}
+		break;
+	case SPRAID_RESETTING:
+		switch (oldstate) {
+		case SPRAID_LIVE:
+			change = true;
+			break;
+		default:
+			break;
+		}
+		break;
+	case SPRAID_DELETING:
+		if (oldstate != SPRAID_DELETING)
+			change = true;
+		break;
+	case SPRAID_DEAD:
+		switch (oldstate) {
+		case SPRAID_NEW:
+		case SPRAID_LIVE:
+		case SPRAID_RESETTING:
+			change = true;
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+	if (change)
+		hdev->state = newstate;
+	spin_unlock_irqrestore(&hdev->state_lock, flags);
+
+	dev_info(hdev->dev, "[%s][%d]->[%d], change[%d]\n",
+		 __func__, oldstate, newstate, change);
+
+	return change;
+}
+
+static void spraid_back_fault_cqe(struct spraid_queue *ioq,
+				  struct spraid_completion *cqe)
+{
+	struct spraid_dev *hdev = ioq->hdev;
+	struct blk_mq_tags *tags;
+	struct scsi_cmnd *scmd;
+	struct spraid_iod *iod;
+	struct request *req;
+
+	tags = hdev->shost->tag_set.tags[ioq->qid - 1];
+	req = blk_mq_tag_to_rq(tags, cqe->cmd_id);
+	if (unlikely(!req || !blk_mq_request_started(req)))
+		return;
+
+	scmd = blk_mq_rq_to_pdu(req);
+	iod = scsi_cmd_priv(scmd);
+
+	set_host_byte(scmd, DID_NO_CONNECT);
+	if (iod->nsge)
+		scsi_dma_unmap(scmd);
+	spraid_free_iod_res(hdev, iod);
+	scmd->scsi_done(scmd);
+	dev_warn(hdev->dev, "Back fault CQE, cid[%d] qid[%d]\n",
+		 cqe->cmd_id, ioq->qid);
+}
+
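+/*
+ * Fail all outstanding IO with DID_NO_CONNECT by synthesizing a fault
+ * CQE for every possible tag on every hardware queue; requests that
+ * were never started are skipped in spraid_back_fault_cqe().
+ */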
+static void spraid_back_all_io(struct spraid_dev *hdev)
+{
+	int i, j;
+	struct spraid_queue *ioq;
+	struct spraid_completion cqe = { 0 };
+
+	scsi_block_requests(hdev->shost);
+
+	for (i = 1; i <= hdev->shost->nr_hw_queues; i++) {
+		ioq = &hdev->queues[i];
+		for (j = 0; j < hdev->shost->can_queue; j++) {
+			cqe.cmd_id = j;
+			spraid_back_fault_cqe(ioq, &cqe);
+		}
+	}
+
+	scsi_unblock_requests(hdev->shost);
+}
+
+static void spraid_dev_disable(struct spraid_dev *hdev, bool shutdown)
+{
+	struct spraid_queue *adminq = &hdev->queues[0];
+	u16 start, end;
+	unsigned long timeout = jiffies + 600 * HZ;
+
+	if (pci_device_is_present(hdev->pdev)) {
+		if (shutdown)
+			spraid_shutdown_ctrl(hdev);
+		else
+			spraid_disable_ctrl(hdev);
+	}
+
+	while (!time_after(jiffies, timeout)) {
+		if (!pci_device_is_present(hdev->pdev)) {
+			dev_info(hdev->dev, "[%s] pci_device not present;"
+				 " skip wait\n", __func__);
+			break;
+		}
+		if (!spraid_wait_ready(hdev, hdev->cap, false)) {
+			dev_info(hdev->dev,
+				"[%s] wait ready success after reset\n",
+				__func__);
+			break;
+		}
+		dev_info(hdev->dev, "[%s] waiting csts_rdy ready\n", __func__);
+	}
+
+	if (hdev->queue_count == 0) {
+		dev_err(hdev->dev, "[%s] warn, queue has been delete\n",
+			__func__);
+		return;
+	}
+
+	spin_lock_irq(&adminq->cq_lock);
+	spraid_process_cq(adminq, &start, &end, -1);
+	spin_unlock_irq(&adminq->cq_lock);
+	spraid_complete_cqes(adminq, start, end);
+
+	spraid_pci_disable(hdev);
+
+	spraid_back_all_io(hdev);
+}
+
+static void spraid_reset_work(struct work_struct *work)
+{
+	int ret;
+	struct spraid_dev *hdev =
+	       container_of(work, struct spraid_dev, reset_work);
+
+	if (hdev->state != SPRAID_RESETTING) {
+		dev_err(hdev->dev, "[%s] err, host is not reset state\n",
+			__func__);
+		return;
+	}
+
+	dev_info(hdev->dev, "[%s] enter host reset\n", __func__);
+
+	if (hdev->ctrl_config & SPRAID_CC_ENABLE) {
+		dev_info(hdev->dev, "[%s] start dev_disable\n", __func__);
+		spraid_dev_disable(hdev, false);
+	}
+
+	ret = spraid_pci_enable(hdev);
+	if (ret)
+		goto out;
+
+	ret = spraid_setup_admin_queue(hdev);
+	if (ret)
+		goto pci_disable;
+
+	ret = spraid_setup_io_queues(hdev);
+	if (ret || hdev->online_queues <= hdev->shost->nr_hw_queues)
+		goto pci_disable;
+
+	spraid_change_host_state(hdev, SPRAID_LIVE);
+
+	spraid_send_all_aen(hdev);
+
+	return;
+
+pci_disable:
+	spraid_pci_disable(hdev);
+out:
+	spraid_change_host_state(hdev, SPRAID_DEAD);
+	dev_err(hdev->dev, "[%s] err, host reset failed\n", __func__);
+}
+
+static int spraid_reset_work_sync(struct spraid_dev *hdev)
+{
+	if (!spraid_change_host_state(hdev, SPRAID_RESETTING)) {
+		dev_info(hdev->dev, "[%s] can't change to reset state\n",
+			 __func__);
+		return -EBUSY;
+	}
+
+	if (!queue_work(spraid_wq, &hdev->reset_work)) {
+		dev_err(hdev->dev, "[%s] err, host is already in reset state\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	flush_work(&hdev->reset_work);
+	if (hdev->state != SPRAID_LIVE)
+		return -ENODEV;
+
+	return 0;
+}
+
+static int spraid_wait_abnl_cmd_done(struct spraid_iod *iod)
+{
+	u16 times = 0;
+
+	do {
+		if (READ_ONCE(iod->state) == SPRAID_CMD_TMO_COMPLETE)
+			break;
+		msleep(500);
+		times++;
+	} while (times <= SPRAID_WAIT_ABNL_CMD_TIMEOUT);
+
+	/* the command still has not completed after a successful abort/reset */
+	if (times >= SPRAID_WAIT_ABNL_CMD_TIMEOUT)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int spraid_abort_handler(struct scsi_cmnd *scmd)
+{
+	struct spraid_dev *hdev = shost_priv(scmd->device->host);
+	struct spraid_iod *iod = scsi_cmd_priv(scmd);
+	struct spraid_sdev_hostdata *hostdata;
+	u16 hwq, cid;
+	int ret;
+
+	scsi_print_command(scmd);
+
+	if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+		spraid_check_scmd_completed(scmd))
+		return SUCCESS;
+
+	hostdata = scmd->device->hostdata;
+	spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+
+	dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, aborting\n", cid, hwq);
+	ret = spraid_send_abort_cmd(hdev, hostdata->hdid, hwq, cid);
+	if (ret != ADMIN_ERR_TIMEOUT) {
+		ret = spraid_wait_abnl_cmd_done(iod);
+		if (ret) {
+			dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed;"
+				" not found\n", cid, hwq);
+			return FAILED;
+		}
+		dev_warn(hdev->dev, "cid[%d] qid[%d] abort succ\n", cid, hwq);
+		return SUCCESS;
+	}
+	dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed, timeout\n",
+		 cid, hwq);
+	return FAILED;
+}
+
+static int spraid_tgt_reset_handler(struct scsi_cmnd *scmd)
+{
+	struct spraid_dev *hdev = shost_priv(scmd->device->host);
+	struct spraid_iod *iod = scsi_cmd_priv(scmd);
+	struct spraid_sdev_hostdata *hostdata;
+	u16 hwq, cid;
+	int ret;
+
+	scsi_print_command(scmd);
+
+	if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+		spraid_check_scmd_completed(scmd))
+		return SUCCESS;
+
+	hostdata = scmd->device->hostdata;
+	spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+
+	dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, target reset\n",
+		 cid, hwq);
+	ret = spraid_send_reset_cmd(hdev, SPRAID_RESET_TARGET, hostdata->hdid);
+	if (ret == 0) {
+		ret = spraid_wait_abnl_cmd_done(iod);
+		if (ret) {
+			dev_warn(hdev->dev,
+				"cid[%d] qid[%d]target reset failed;"
+				" not found\n", cid, hwq);
+			return FAILED;
+		}
+
+		dev_warn(hdev->dev, "cid[%d] qid[%d] target reset success\n",
+			 cid, hwq);
+		return SUCCESS;
+	}
+
+	dev_warn(hdev->dev, "cid[%d] qid[%d] ret[%d] target reset failed\n",
+		 cid, hwq, ret);
+	return FAILED;
+}
+
+static int spraid_bus_reset_handler(struct scsi_cmnd *scmd)
+{
+	struct spraid_dev *hdev = shost_priv(scmd->device->host);
+	struct spraid_iod *iod = scsi_cmd_priv(scmd);
+	struct spraid_sdev_hostdata *hostdata;
+	u16 hwq, cid;
+	int ret;
+
+	scsi_print_command(scmd);
+
+	if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+		spraid_check_scmd_completed(scmd))
+		return SUCCESS;
+
+	hostdata = scmd->device->hostdata;
+	spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+
+	dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, bus reset\n", cid, hwq);
+	ret = spraid_send_reset_cmd(hdev, SPRAID_RESET_BUS, hostdata->hdid);
+	if (ret == 0) {
+		ret = spraid_wait_abnl_cmd_done(iod);
+		if (ret) {
+			dev_warn(hdev->dev,
+				"cid[%d] qid[%d] bus reset failed;"
+				 " not found\n", cid, hwq);
+			return FAILED;
+		}
+
+		dev_warn(hdev->dev, "cid[%d] qid[%d] bus reset succ\n",
+			 cid, hwq);
+		return SUCCESS;
+	}
+
+	dev_warn(hdev->dev, "cid[%d] qid[%d] ret[%d] bus reset failed\n",
+		 cid, hwq, ret);
+	return FAILED;
+}
+
+static int spraid_shost_reset_handler(struct scsi_cmnd *scmd)
+{
+	u16 hwq, cid;
+	struct spraid_dev *hdev = shost_priv(scmd->device->host);
+
+	scsi_print_command(scmd);
+	if (hdev->state != SPRAID_LIVE || spraid_check_scmd_completed(scmd))
+		return SUCCESS;
+
+	spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+	dev_warn(hdev->dev, "cid[%d] qid[%d] host reset\n", cid, hwq);
+
+	if (spraid_reset_work_sync(hdev)) {
+		dev_warn(hdev->dev, "cid[%d] qid[%d] host reset failed\n",
+			 cid, hwq);
+		return FAILED;
+	}
+
+	dev_warn(hdev->dev, "cid[%d] qid[%d] host reset success\n", cid, hwq);
+
+	return SUCCESS;
+}
+
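+/*
+ * PCI AER callbacks: a frozen channel blocks the host and asks for a
+ * slot reset, which restores config space and re-runs reset_work; a
+ * permanent failure requests disconnect.
+ */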
+static pci_ers_result_t spraid_pci_error_detected(struct pci_dev *pdev,
+						  pci_channel_state_t state)
+{
+	struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+	dev_info(hdev->dev, "enter pci error detect, state:%d\n", state);
+
+	switch (state) {
+	case pci_channel_io_normal:
+		dev_warn(hdev->dev, "channel is normal, do nothing\n");
+
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	case pci_channel_io_frozen:
+		dev_warn(hdev->dev,
+			"channel io frozen, need reset controller\n");
+
+		scsi_block_requests(hdev->shost);
+
+		spraid_change_host_state(hdev, SPRAID_RESETTING);
+
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		dev_warn(hdev->dev, "channel io failure, request disconnect\n");
+
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t spraid_pci_slot_reset(struct pci_dev *pdev)
+{
+	struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+	dev_info(hdev->dev, "restart after slot reset\n");
+
+	pci_restore_state(pdev);
+
+	if (!queue_work(spraid_wq, &hdev->reset_work)) {
+		dev_err(hdev->dev, "[%s] err, the device is resetting state\n",
+			__func__);
+		return PCI_ERS_RESULT_NONE;
+	}
+
+	flush_work(&hdev->reset_work);
+
+	scsi_unblock_requests(hdev->shost);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void spraid_reset_done(struct pci_dev *pdev)
+{
+	struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+	dev_info(hdev->dev, "enter spraid reset done\n");
+}
+
+static ssize_t csts_pp_show(struct device *cdev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct spraid_dev *hdev = shost_priv(shost);
+	int ret = -1;
+
+	if (pci_device_is_present(hdev->pdev)) {
+		ret = (readl(hdev->bar + SPRAID_REG_CSTS)
+		       & SPRAID_CSTS_PP_MASK);
+		ret >>= SPRAID_CSTS_PP_SHIFT;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_shst_show(struct device *cdev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct spraid_dev *hdev = shost_priv(shost);
+	int ret = -1;
+
+	if (pci_device_is_present(hdev->pdev)) {
+		ret = (readl(hdev->bar + SPRAID_REG_CSTS)
+		       & SPRAID_CSTS_SHST_MASK);
+		ret >>= SPRAID_CSTS_SHST_SHIFT;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_cfs_show(struct device *cdev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct spraid_dev *hdev = shost_priv(shost);
+	int ret = -1;
+
+	if (pci_device_is_present(hdev->pdev)) {
+		ret = (readl(hdev->bar + SPRAID_REG_CSTS)
+		       & SPRAID_CSTS_CFS_MASK);
+		ret >>= SPRAID_CSTS_CFS_SHIFT;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_rdy_show(struct device *cdev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct spraid_dev *hdev = shost_priv(shost);
+	int ret = -1;
+
+	if (pci_device_is_present(hdev->pdev))
+		ret = (readl(hdev->bar + SPRAID_REG_CSTS) & SPRAID_CSTS_RDY);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t fw_version_show(struct device *cdev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct spraid_dev *hdev = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", hdev->ctrl_info->fr);
+}
+
+static DEVICE_ATTR_RO(csts_pp);
+static DEVICE_ATTR_RO(csts_shst);
+static DEVICE_ATTR_RO(csts_cfs);
+static DEVICE_ATTR_RO(csts_rdy);
+static DEVICE_ATTR_RO(fw_version);
+
+static struct device_attribute *spraid_host_attrs[] = {
+	&dev_attr_csts_pp,
+	&dev_attr_csts_shst,
+	&dev_attr_csts_cfs,
+	&dev_attr_csts_rdy,
+	&dev_attr_fw_version,
+	NULL,
+};
+
+static int spraid_get_vd_info(struct spraid_dev *hdev,
+			      struct spraid_vd_info *vd_info, u16 vid)
+{
+	struct spraid_admin_command admin_cmd;
+	u8 *data_ptr = NULL;
+	dma_addr_t data_dma = 0;
+	int ret;
+
+	data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+				      &data_dma, GFP_KERNEL);
+	if (!data_ptr)
+		return -ENOMEM;
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+	admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_VDINFO);
+	admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+	admin_cmd.usr_cmd.info_1.param_len = cpu_to_le16(VDINFO_PARAM_LEN);
+	admin_cmd.usr_cmd.cdw10 = cpu_to_le32(vid);
+	admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+	ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+	if (!ret)
+		memcpy(vd_info, data_ptr, sizeof(struct spraid_vd_info));
+
+	dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+	return ret;
+}
+
+static int spraid_get_bgtask(struct spraid_dev *hdev,
+			     struct spraid_bgtask *bgtask)
+{
+	struct spraid_admin_command admin_cmd;
+	u8 *data_ptr = NULL;
+	dma_addr_t data_dma = 0;
+	int ret;
+
+	data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+				      &data_dma, GFP_KERNEL);
+	if (!data_ptr)
+		return -ENOMEM;
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+	admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_BGTASK);
+	admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+	admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+	ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+	if (!ret)
+		memcpy(bgtask, data_ptr, sizeof(struct spraid_bgtask));
+
+	dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+	return ret;
+}
+
+static ssize_t raid_level_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev;
+	struct spraid_dev *hdev;
+	struct spraid_vd_info *vd_info;
+	struct spraid_sdev_hostdata *hostdata;
+	int ret;
+
+	sdev = to_scsi_device(dev);
+	hdev = shost_priv(sdev->host);
+	hostdata = sdev->hostdata;
+
+	vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+	if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr)) {
+		/* kfree(NULL) is a no-op; avoid leaking vd_info for non-VD */
+		kfree(vd_info);
+		return snprintf(buf, PAGE_SIZE, "NA\n");
+	}
+
+	ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+	if (ret)
+		vd_info->rg_level = ARRAY_SIZE(raid_levels) - 1;
+
+	ret = (vd_info->rg_level < ARRAY_SIZE(raid_levels)) ?
+	       vd_info->rg_level : (ARRAY_SIZE(raid_levels) - 1);
+
+	kfree(vd_info);
+
+	return snprintf(buf, PAGE_SIZE, "RAID-%s\n", raid_levels[ret]);
+}
+
+static ssize_t raid_state_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev;
+	struct spraid_dev *hdev;
+	struct spraid_vd_info *vd_info;
+	struct spraid_sdev_hostdata *hostdata;
+	int ret;
+
+	sdev = to_scsi_device(dev);
+	hdev = shost_priv(sdev->host);
+	hostdata = sdev->hostdata;
+
+	vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+	if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr)) {
+		/* avoid leaking vd_info for non-VD devices */
+		kfree(vd_info);
+		return snprintf(buf, PAGE_SIZE, "NA\n");
+	}
+
+	ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+	if (ret) {
+		vd_info->vd_status = 0;
+		vd_info->rg_id = 0xff;
+	}
+
+	ret = (vd_info->vd_status < ARRAY_SIZE(raid_states)) ?
+	       vd_info->vd_status : 0;
+
+	kfree(vd_info);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", raid_states[ret]);
+}
+
+static ssize_t raid_resync_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev;
+	struct spraid_dev *hdev;
+	struct spraid_vd_info *vd_info;
+	struct spraid_bgtask *bgtask;
+	struct spraid_sdev_hostdata *hostdata;
+	u8 rg_id, i, progress = 0;
+	int ret;
+
+	sdev = to_scsi_device(dev);
+	hdev = shost_priv(sdev->host);
+	hostdata = sdev->hostdata;
+
+	vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+	if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr)) {
+		/* avoid leaking vd_info for non-VD devices */
+		kfree(vd_info);
+		return snprintf(buf, PAGE_SIZE, "NA\n");
+	}
+
+	ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+	if (ret)
+		goto out;
+
+	rg_id = vd_info->rg_id;
+
+	bgtask = (struct spraid_bgtask *)vd_info;
+	ret = spraid_get_bgtask(hdev, bgtask);
+	if (ret)
+		goto out;
+	for (i = 0; i < bgtask->task_num; i++) {
+		if ((bgtask->bgtask[i].type == BGTASK_TYPE_REBUILD) &&
+		    (le16_to_cpu(bgtask->bgtask[i].vd_id) == rg_id))
+			progress = bgtask->bgtask[i].progress;
+	}
+
+out:
+	kfree(vd_info);
+	return snprintf(buf, PAGE_SIZE, "%d\n", progress);
+}
+
+static DEVICE_ATTR_RO(raid_level);
+static DEVICE_ATTR_RO(raid_state);
+static DEVICE_ATTR_RO(raid_resync);
+
+static struct device_attribute *spraid_dev_attrs[] = {
+	&dev_attr_raid_level,
+	&dev_attr_raid_state,
+	&dev_attr_raid_resync,
+	NULL,
+};
+
+static struct pci_error_handlers spraid_err_handler = {
+	.error_detected = spraid_pci_error_detected,
+	.slot_reset = spraid_pci_slot_reset,
+	.reset_done = spraid_reset_done,
+};
+
+static int spraid_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+	int ret;
+	struct spraid_dev *hdev = shost_priv(shost);
+
+	dev_info(hdev->dev, "[%s] start sysfs host reset cmd\n", __func__);
+	ret = spraid_reset_work_sync(hdev);
+	dev_info(hdev->dev, "[%s] stop sysfs host reset cmd[%d]\n",
+		 __func__, ret);
+
+	return ret;
+}
+
+static struct scsi_host_template spraid_driver_template = {
+	.module			= THIS_MODULE,
+	.name			= "Ramaxel Logic spraid driver",
+	.proc_name		= "spraid",
+	.queuecommand		= spraid_queue_command,
+	.slave_alloc		= spraid_slave_alloc,
+	.slave_destroy		= spraid_slave_destroy,
+	.slave_configure	= spraid_slave_configure,
+	.eh_timed_out		= spraid_scmd_timeout,
+	.eh_abort_handler	= spraid_abort_handler,
+	.eh_target_reset_handler	= spraid_tgt_reset_handler,
+	.eh_bus_reset_handler		= spraid_bus_reset_handler,
+	.eh_host_reset_handler		= spraid_shost_reset_handler,
+	.change_queue_depth		= scsi_change_queue_depth,
+	.this_id			= -1,
+	.shost_attrs			= spraid_host_attrs,
+	.sdev_attrs			= spraid_dev_attrs,
+	.host_reset			= spraid_sysfs_host_reset,
+};
+
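+/* Quiesce on shutdown: remove the I/O queues, then disable the admin queue. */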
+static void spraid_shutdown(struct pci_dev *pdev)
+{
+	struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+	spraid_remove_io_queues(hdev);
+	spraid_disable_admin_queue(hdev, true);
+}
+
+/* Dispatch user commands submitted through the bsg interface. */
+static int spraid_bsg_host_dispatch(struct bsg_job *job)
+{
+	struct Scsi_Host *shost = dev_to_shost(job->dev);
+	struct spraid_dev *hdev = shost_priv(shost);
+	struct request *rq = blk_mq_rq_from_pdu(job);
+	struct spraid_bsg_request *bsg_req = job->request;
+	int ret = 0;
+
+	dev_info(hdev->dev, "[%s] msgcode[%d], msglen[%d], timeout[%d];"
+		 " req_nsge[%d], req_len[%d]\n",
+		 __func__, bsg_req->msgcode, job->request_len,
+		 rq->timeout, job->request_payload.sg_cnt,
+		 job->request_payload.payload_len);
+
+	job->reply_len = 0;
+
+	switch (bsg_req->msgcode) {
+	case SPRAID_BSG_ADM:
+		ret = spraid_user_admin_cmd(hdev, job);
+		break;
+	case SPRAID_BSG_IOQ:
+		ret = spraid_user_ioq_cmd(hdev, job);
+		break;
+	default:
+		dev_info(hdev->dev, "[%s] unsupport msgcode[%d]\n",
+			 __func__, bsg_req->msgcode);
+		break;
+	}
+
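+	/* Duplicate a positive completion code into bits 8-15 of the bsg result. */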
+	if (ret > 0)
+		ret = ret | (ret << 8);
+
+	bsg_job_done(job, ret, 0);
+	return 0;
+}
+
+static inline void spraid_remove_bsg(struct spraid_dev *hdev)
+{
+	if (hdev->bsg_queue) {
+		bsg_unregister_queue(hdev->bsg_queue);
+		blk_cleanup_queue(hdev->bsg_queue);
+	}
+}
+
+static int spraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct spraid_dev *hdev;
+	struct Scsi_Host *shost;
+	int node, ret;
+	char bsg_name[15];
+
+	shost = scsi_host_alloc(&spraid_driver_template, sizeof(*hdev));
+	if (!shost) {
+		dev_err(&pdev->dev, "Failed to allocate scsi host\n");
+		return -ENOMEM;
+	}
+	hdev = shost_priv(shost);
+	hdev->pdev = pdev;
+	hdev->dev = get_device(&pdev->dev);
+
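+	/* Bind the device to the first memory node if it has no NUMA affinity. */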
+	node = dev_to_node(hdev->dev);
+	if (node == NUMA_NO_NODE) {
+		node = first_memory_node;
+		set_dev_node(hdev->dev, node);
+	}
+	hdev->numa_node = node;
+	hdev->shost = shost;
+	pci_set_drvdata(pdev, hdev);
+
+	ret = spraid_dev_map(hdev);
+	if (ret)
+		goto put_dev;
+
+	init_rwsem(&hdev->devices_rwsem);
+	INIT_WORK(&hdev->scan_work, spraid_scan_work);
+	INIT_WORK(&hdev->timesyn_work, spraid_timesyn_work);
+	INIT_WORK(&hdev->reset_work, spraid_reset_work);
+	INIT_WORK(&hdev->fw_act_work, spraid_fw_act_work);
+	spin_lock_init(&hdev->state_lock);
+
+	ret = spraid_alloc_resources(hdev);
+	if (ret)
+		goto dev_unmap;
+
+	ret = spraid_pci_enable(hdev);
+	if (ret)
+		goto resources_free;
+
+	ret = spraid_setup_admin_queue(hdev);
+	if (ret)
+		goto pci_disable;
+
+	ret = spraid_init_ctrl_info(hdev);
+	if (ret)
+		goto disable_admin_q;
+
+	ret = spraid_alloc_iod_ext_mem_pool(hdev);
+	if (ret)
+		goto disable_admin_q;
+
+	ret = spraid_setup_io_queues(hdev);
+	if (ret)
+		goto free_iod_mempool;
+
+	spraid_shost_init(hdev);
+
+	ret = scsi_add_host(hdev->shost, hdev->dev);
+	if (ret) {
+		dev_err(hdev->dev, "Add shost to system failed, ret: %d\n",
+			ret);
+		goto remove_io_queues;
+	}
+
+	snprintf(bsg_name, sizeof(bsg_name), "spraid%d", shost->host_no);
+	hdev->bsg_queue = bsg_setup_queue(&shost->shost_gendev, bsg_name,
+					  spraid_bsg_host_dispatch,
+					  spraid_cmd_size(hdev, true, false));
+	if (IS_ERR(hdev->bsg_queue)) {
+		dev_err(hdev->dev, "err, setup bsg failed\n");
+		hdev->bsg_queue = NULL;
+		goto remove_io_queues;
+	}
+
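+	/* With only the admin queue online, skip I/O setup and run degraded. */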
+	if (hdev->online_queues == SPRAID_ADMIN_QUEUE_NUM) {
+		dev_warn(hdev->dev, "Only the admin queue is usable\n");
+		return 0;
+	}
+
+	hdev->state = SPRAID_LIVE;
+
+	spraid_send_all_aen(hdev);
+
+	ret = spraid_dev_list_init(hdev);
+	if (ret)
+		goto remove_bsg;
+
+	ret = spraid_configure_timestamp(hdev);
+	if (ret)
+		dev_warn(hdev->dev, "init set timestamp failed\n");
+
+	ret = spraid_alloc_ioq_ptcmds(hdev);
+	if (ret)
+		goto remove_bsg;
+
+	scsi_scan_host(hdev->shost);
+
+	return 0;
+
+remove_bsg:
+	spraid_remove_bsg(hdev);
+remove_io_queues:
+	spraid_remove_io_queues(hdev);
+free_iod_mempool:
+	spraid_free_iod_ext_mem_pool(hdev);
+disable_admin_q:
+	spraid_disable_admin_queue(hdev, false);
+pci_disable:
+	spraid_pci_disable(hdev);
+resources_free:
+	spraid_free_resources(hdev);
+dev_unmap:
+	spraid_dev_unmap(hdev);
+put_dev:
+	put_device(hdev->dev);
+	scsi_host_put(shost);
+
+	return -ENODEV;
+}
+
+static void spraid_remove(struct pci_dev *pdev)
+{
+	struct spraid_dev *hdev = pci_get_drvdata(pdev);
+	struct Scsi_Host *shost = hdev->shost;
+
+	dev_info(hdev->dev, "enter spraid remove\n");
+
+	spraid_change_host_state(hdev, SPRAID_DELETING);
+	flush_work(&hdev->reset_work);
+
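+	/* If the device was surprise removed, fail all outstanding I/O first. */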
+	if (!pci_device_is_present(pdev))
+		spraid_back_all_io(hdev);
+
+	spraid_remove_bsg(hdev);
+	scsi_remove_host(shost);
+	spraid_free_ioq_ptcmds(hdev);
+	kfree(hdev->devices);
+	spraid_remove_io_queues(hdev);
+	spraid_free_iod_ext_mem_pool(hdev);
+	spraid_disable_admin_queue(hdev, false);
+	spraid_pci_disable(hdev);
+	spraid_free_resources(hdev);
+	spraid_dev_unmap(hdev);
+	put_device(hdev->dev);
+	scsi_host_put(shost);
+
+	dev_info(hdev->dev, "exit spraid remove\n");
+}
+
+static const struct pci_device_id spraid_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_RAMAXEL_LOGIC,
+		     SPRAID_SERVER_DEVICE_HBA_DID) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_RAMAXEL_LOGIC,
+		     SPRAID_SERVER_DEVICE_RAID_DID) },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, spraid_id_table);
+
+static struct pci_driver spraid_driver = {
+	.name		= "spraid",
+	.id_table	= spraid_id_table,
+	.probe		= spraid_probe,
+	.remove		= spraid_remove,
+	.shutdown	= spraid_shutdown,
+	.err_handler	= &spraid_err_handler,
+};
+
+static int __init spraid_init(void)
+{
+	int ret;
+
+	spraid_wq = alloc_workqueue("spraid-wq",
+				    WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+	if (!spraid_wq)
+		return -ENOMEM;
+
+	spraid_class = class_create(THIS_MODULE, "spraid");
+	if (IS_ERR(spraid_class)) {
+		ret = PTR_ERR(spraid_class);
+		goto destroy_wq;
+	}
+
+	ret = pci_register_driver(&spraid_driver);
+	if (ret < 0)
+		goto destroy_class;
+
+	return 0;
+
+destroy_class:
+	class_destroy(spraid_class);
+destroy_wq:
+	destroy_workqueue(spraid_wq);
+
+	return ret;
+}
+
+static void __exit spraid_exit(void)
+{
+	pci_unregister_driver(&spraid_driver);
+	class_destroy(spraid_class);
+	destroy_workqueue(spraid_wq);
+	ida_destroy(&spraid_instance_ida);
+}
+
+MODULE_AUTHOR("songyl(a)ramaxel.com");
+MODULE_DESCRIPTION("Ramaxel Memory Technology SPraid Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SPRAID_DRV_VERSION);
+module_init(spraid_init);
+module_exit(spraid_exit);
-- 
2.27.0