Kernel
Baokun Li (3):
  ext4: remove unnecessary check from alloc_flex_gd()
  ext4: unify the type of flexbg_size to unsigned int
  ext4: avoid online resizing failures due to oversized flex bg
 fs/ext4/resize.c | 37 ++++++++++++++++++++++---------------
 1 file changed, 22 insertions(+), 15 deletions(-)
-- 
2.39.2
                    
                  
                  
                          
                            
                     
                        
                    
                        
                            
                                
                            
[openeuler:OLK-6.6] BUILD REGRESSION c8e916a3b26e752d8992dff4fa320353e1f0ec84
by kernel test robot 28 Mar '24
                    
tree/branch: https://gitee.com/openeuler/kernel.git OLK-6.6
branch HEAD: c8e916a3b26e752d8992dff4fa320353e1f0ec84  !5524 [OLK-6.6] fix 0day bugs reported by CI robot for Mont-TSSE
Error/Warning reports:
https://lore.kernel.org/oe-kbuild-all/202403281003.L0CYgKLz-lkp@intel.com
Error/Warning: (recently discovered and may have been fixed)
drivers/net/ethernet/huawei/hinic/hinic_api_cmd.c:413: warning: expecting prototype for prepare_cell(). Prototype was for wait_for_resp_polling() instead
drivers/net/ethernet/huawei/hinic/hinic_eqs.c:369: warning: expecting prototype for hinic_aeq_register_sw_cb(). Prototype was for hinic_aeq_register_swe_cb() instead
drivers/net/ethernet/huawei/hinic/hinic_eqs.c:391: warning: expecting prototype for hinic_aeq_unregister_sw_cb(). Prototype was for hinic_aeq_unregister_swe_cb() instead
drivers/net/ethernet/huawei/hinic/hinic_eqs.c:417: warning: expecting prototype for hinic_ceq_register_sw_cb(). Prototype was for hinic_ceq_register_cb() instead
drivers/net/ethernet/huawei/hinic/hinic_hwif.c:717: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst
drivers/net/ethernet/huawei/hinic/hinic_mbox.c:373: warning: expecting prototype for hinic_unregister_ppf_mbox_cb(). Prototype was for hinic_unregister_pf_mbox_cb() instead
drivers/net/ethernet/huawei/hinic/hinic_mbox.c:411: warning: expecting prototype for hinic_unregister_ppf_mbox_cb(). Prototype was for hinic_unregister_ppf_to_pf_mbox_cb() instead
drivers/net/ethernet/huawei/hinic/hinic_mgmt.c:939: warning: This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst
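Most of the warnings above are kernel-doc name mismatches: a '/**'
comment documents one function name while the definition that follows
carries another. A minimal sketch of the pattern, using a hypothetical
function rather than one from the tree above:

#include <linux/device.h>

/**
 * foo_register_sw_cb() - register a software event callback
 * @dev: device owning the callback
 *
 * The name in the kernel-doc line above must match the definition
 * below; because the function is actually named foo_register_swe_cb(),
 * scripts/kernel-doc warns: "expecting prototype for
 * foo_register_sw_cb(). Prototype was for foo_register_swe_cb() instead".
 */
int foo_register_swe_cb(struct device *dev)
{
	return 0;
}

The fix is to make the two names agree; for the "This comment starts
with '/**', but isn't a kernel-doc comment" cases, opening the comment
with a plain '/*' instead is enough.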
Error/Warning ids grouped by kconfigs:
gcc_recent_errors
`-- arm64-defconfig
    `-- drivers-irqchip-irq-mbigen.c:warning:expecting-prototype-for-Due-to-the-existence-of-hyper().-Prototype-was-for-GICR_LENGTH()-instead
clang_recent_errors
|-- arm64-allmodconfig
|   |-- drivers-i2c-busses-i2c-hisi.c:warning:expecting-prototype-for-i2c_dw_acpi_pin_mux_change().-Prototype-was-for-i2c_hisi_pin_mux_change()-instead
|   |-- drivers-irqchip-irq-mbigen.c:warning:expecting-prototype-for-Due-to-the-existence-of-hyper().-Prototype-was-for-GICR_LENGTH()-instead
|   |-- drivers-net-ethernet-huawei-hinic-hinic_api_cmd.c:warning:expecting-prototype-for-prepare_cell().-Prototype-was-for-wait_for_resp_polling()-instead
|   |-- drivers-net-ethernet-huawei-hinic-hinic_cfg.c:warning:arithmetic-between-different-enumeration-types-(-enum-hinic_node_id-and-enum-hinic_fault_err_level-)
|   |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_aeq_register_sw_cb().-Prototype-was-for-hinic_aeq_register_swe_cb()-instead
|   |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_aeq_unregister_sw_cb().-Prototype-was-for-hinic_aeq_unregister_swe_cb()-instead
|   |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_ceq_register_sw_cb().-Prototype-was-for-hinic_ceq_register_cb()-instead
|   |-- drivers-net-ethernet-huawei-hinic-hinic_hwdev.c:warning:arithmetic-between-different-enumeration-types-(-enum-hinic_node_id-and-enum-hinic_fault_err_level-)
|   |-- drivers-net-ethernet-huawei-hinic-hinic_hwif.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
|   |-- drivers-net-ethernet-huawei-hinic-hinic_mbox.c:warning:expecting-prototype-for-hinic_unregister_ppf_mbox_cb().-Prototype-was-for-hinic_unregister_pf_mbox_cb()-instead
|   |-- drivers-net-ethernet-huawei-hinic-hinic_mbox.c:warning:expecting-prototype-for-hinic_unregister_ppf_mbox_cb().-Prototype-was-for-hinic_unregister_ppf_to_pf_mbox_cb()-instead
|   |-- drivers-net-ethernet-huawei-hinic-hinic_mgmt.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
|   |-- drivers-net-ethernet-huawei-hinic-hinic_nic_dbg.c:warning:arithmetic-between-different-enumeration-types-(-enum-hinic_node_id-and-enum-hinic_fault_err_level-)
|   |-- drivers-net-ethernet-mucse-rnp-rnp_ethtool.c:warning:expecting-prototype-for-rnp_set_rxfh().-Prototype-was-for-rnp_set_rxnfc()-instead
|   |-- drivers-net-ethernet-mucse-rnp-rnp_ethtool.c:warning:expecting-prototype-for-rnp_tet_rxfh().-Prototype-was-for-rnp_get_rxfh()-instead
|   |-- drivers-net-ethernet-mucse-rnp-rnp_main.c:warning:expecting-prototype-for-rnp_write_eitr().-Prototype-was-for-rnp_write_eitr_rx()-instead
|   |-- drivers-net-ethernet-mucse-rnp-rnp_mbx_fw.c:warning:expecting-prototype-for-rnp_mbx_link_event_eanble().-Prototype-was-for-rnp_mbx_link_event_enable()-instead
|   |-- drivers-net-ethernet-mucse-rnp-rnp_mbx_fw.c:warning:expecting-prototype-for-rnp_mbx_stat_mark().-Prototype-was-for-rnp_link_stat_mark()-instead
|   |-- drivers-net-ethernet-mucse-rnp-rnp_mbx_fw.c:warning:no-previous-prototype-for-function-rnp_mbx_lldp_all_ports_enable
|   |-- drivers-net-ethernet-mucse-rnp-rnp_n10.c:warning:expecting-prototype-for-rnp_fc_mode_n10().-Prototype-was-for-rnp_mac_fc_mode_n10()-instead
|   |-- drivers-net-ethernet-mucse-rnp-rnp_n10.c:warning:expecting-prototype-for-rnp_init_hw_n10().-Prototype-was-for-rnp_init_hw_ops_n10()-instead
|   |-- drivers-net-ethernet-mucse-rnp-rnp_n10.c:warning:expecting-prototype-for-rnp_write_uc_addr_list().-Prototype-was-for-rnp_write_uc_addr_list_n10()-instead
|   |-- drivers-net-ethernet-mucse-rnp-rnp_n10.c:warning:expecting-prototype-for-rnpm_device_supports_autoneg_fc().-Prototype-was-for-rnp_device_supports_autoneg_fc()-instead
|   |-- drivers-net-ethernet-mucse-rnpm-rnpm_common.c:warning:expecting-prototype-for-rnpm_enable_rx_buff().-Prototype-was-for-rnpm_enable_rx_buff_generic()-instead
|   |-- drivers-net-ethernet-mucse-rnpm-rnpm_common.c:warning:expecting-prototype-for-rnpm_update_mc_addr_list_generic().-Prototype-was-for-rnpm_update_mutiport_mc_addr_list_generic()-instead
|   |-- drivers-net-ethernet-mucse-rnpm-rnpm_debugfs.c:warning:expecting-prototype-for-rnpm_dbg_reg_ops_write().-Prototype-was-for-rnpm_dbg_phy_ops_write()-instead
|   |-- drivers-net-ethernet-mucse-rnpm-rnpm_ethtool.c:warning:no-previous-prototype-for-function-rnpm_get_phy_statistics
|   |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-ixgbe_write_eitr().-Prototype-was-for-rnpm_write_eitr()-instead
|   |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_irq_affinity_notify().-Prototype-was-for-rnpm_irq_affinity_notify()-instead
|   |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_irq_affinity_release().-Prototype-was-for-rnpm_irq_affinity_release()-instead
|   |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_is_non_eop().-Prototype-was-for-rnpm_is_non_eop()-instead
|   |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnpm_set_ivar().-Prototype-was-for-rnpm_set_ring_vector()-instead
|   |-- drivers-net-ethernet-mucse-rnpm-rnpm_mbx_fw.c:warning:Cannot-understand-speed:
|   |-- drivers-net-ethernet-mucse-rnpm-rnpm_n10.c:warning:expecting-prototype-for-rnpm_atr_add_signature_filter_n10().-Prototype-was-for-rnpm_fdir_add_signature_filter_n10()-instead
|   |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:no-previous-prototype-for-function-rnpm_get_vf_ringnum
|   |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:no-previous-prototype-for-function-rnpm_setup_ring_maxrate
|   |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:variable-y-set-but-not-used
|   `-- mm-mem_reliable.c:warning:arithmetic-between-different-enumeration-types-(-enum-node_stat_item-and-enum-lru_list-)
`-- x86_64-allyesconfig
    |-- drivers-i2c-busses-i2c-hisi.c:warning:expecting-prototype-for-i2c_dw_acpi_pin_mux_change().-Prototype-was-for-i2c_hisi_pin_mux_change()-instead
    |-- drivers-net-ethernet-huawei-hinic-hinic_api_cmd.c:warning:expecting-prototype-for-prepare_cell().-Prototype-was-for-wait_for_resp_polling()-instead
    |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_aeq_register_sw_cb().-Prototype-was-for-hinic_aeq_register_swe_cb()-instead
    |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_aeq_unregister_sw_cb().-Prototype-was-for-hinic_aeq_unregister_swe_cb()-instead
    |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_ceq_register_sw_cb().-Prototype-was-for-hinic_ceq_register_cb()-instead
    |-- drivers-net-ethernet-huawei-hinic-hinic_hwif.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
    |-- drivers-net-ethernet-huawei-hinic-hinic_mbox.c:warning:expecting-prototype-for-hinic_unregister_ppf_mbox_cb().-Prototype-was-for-hinic_unregister_pf_mbox_cb()-instead
    |-- drivers-net-ethernet-huawei-hinic-hinic_mbox.c:warning:expecting-prototype-for-hinic_unregister_ppf_mbox_cb().-Prototype-was-for-hinic_unregister_ppf_to_pf_mbox_cb()-instead
    |-- drivers-net-ethernet-huawei-hinic-hinic_mgmt.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
    |-- drivers-net-ethernet-mucse-rnp-rnp_ethtool.c:warning:expecting-prototype-for-rnp_set_rxfh().-Prototype-was-for-rnp_set_rxnfc()-instead
    |-- drivers-net-ethernet-mucse-rnp-rnp_ethtool.c:warning:expecting-prototype-for-rnp_tet_rxfh().-Prototype-was-for-rnp_get_rxfh()-instead
    |-- drivers-net-ethernet-mucse-rnp-rnp_main.c:warning:expecting-prototype-for-rnp_write_eitr().-Prototype-was-for-rnp_write_eitr_rx()-instead
    |-- drivers-net-ethernet-mucse-rnp-rnp_mbx_fw.c:warning:expecting-prototype-for-rnp_mbx_link_event_eanble().-Prototype-was-for-rnp_mbx_link_event_enable()-instead
    |-- drivers-net-ethernet-mucse-rnp-rnp_mbx_fw.c:warning:expecting-prototype-for-rnp_mbx_stat_mark().-Prototype-was-for-rnp_link_stat_mark()-instead
    |-- drivers-net-ethernet-mucse-rnp-rnp_mbx_fw.c:warning:no-previous-prototype-for-function-rnp_mbx_lldp_all_ports_enable
    |-- drivers-net-ethernet-mucse-rnp-rnp_n10.c:warning:expecting-prototype-for-rnp_fc_mode_n10().-Prototype-was-for-rnp_mac_fc_mode_n10()-instead
    |-- drivers-net-ethernet-mucse-rnp-rnp_n10.c:warning:expecting-prototype-for-rnp_init_hw_n10().-Prototype-was-for-rnp_init_hw_ops_n10()-instead
    |-- drivers-net-ethernet-mucse-rnp-rnp_n10.c:warning:expecting-prototype-for-rnp_write_uc_addr_list().-Prototype-was-for-rnp_write_uc_addr_list_n10()-instead
    |-- drivers-net-ethernet-mucse-rnp-rnp_n10.c:warning:expecting-prototype-for-rnpm_device_supports_autoneg_fc().-Prototype-was-for-rnp_device_supports_autoneg_fc()-instead
    |-- drivers-net-ethernet-mucse-rnpm-rnpm_common.c:warning:expecting-prototype-for-rnpm_enable_rx_buff().-Prototype-was-for-rnpm_enable_rx_buff_generic()-instead
    |-- drivers-net-ethernet-mucse-rnpm-rnpm_common.c:warning:expecting-prototype-for-rnpm_update_mc_addr_list_generic().-Prototype-was-for-rnpm_update_mutiport_mc_addr_list_generic()-instead
    |-- drivers-net-ethernet-mucse-rnpm-rnpm_debugfs.c:warning:expecting-prototype-for-rnpm_dbg_reg_ops_write().-Prototype-was-for-rnpm_dbg_phy_ops_write()-instead
    |-- drivers-net-ethernet-mucse-rnpm-rnpm_ethtool.c:warning:no-previous-prototype-for-function-rnpm_get_phy_statistics
    |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-ixgbe_write_eitr().-Prototype-was-for-rnpm_write_eitr()-instead
    |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_irq_affinity_notify().-Prototype-was-for-rnpm_irq_affinity_notify()-instead
    |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_irq_affinity_release().-Prototype-was-for-rnpm_irq_affinity_release()-instead
    |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_is_non_eop().-Prototype-was-for-rnpm_is_non_eop()-instead
    |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnpm_set_ivar().-Prototype-was-for-rnpm_set_ring_vector()-instead
    |-- drivers-net-ethernet-mucse-rnpm-rnpm_mbx_fw.c:warning:Cannot-understand-speed:
    |-- drivers-net-ethernet-mucse-rnpm-rnpm_n10.c:warning:expecting-prototype-for-rnpm_atr_add_signature_filter_n10().-Prototype-was-for-rnpm_fdir_add_signature_filter_n10()-instead
    |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:no-previous-prototype-for-function-rnpm_get_vf_ringnum
    |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:no-previous-prototype-for-function-rnpm_setup_ring_maxrate
    |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:variable-y-set-but-not-used
    |-- ld.lld:error:duplicate-symbol:__cfi_check_ari_mode
    |-- ld.lld:error:duplicate-symbol:__cfi_mbx_cookie_zalloc
    |-- ld.lld:error:duplicate-symbol:check_ari_mode
    |-- ld.lld:error:duplicate-symbol:mbx_cookie_zalloc
    `-- ld.lld:error:duplicate-symbol:rnp10_netdev_ops
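The ld.lld duplicate-symbol errors are link-time collisions: under
allyesconfig, two objects that each define the same global helper end
up linked into one image (here presumably the rnp and rnpm drivers
both carrying copies of check_ari_mode(), mbx_cookie_zalloc() and so
on; the __cfi_* twins appear to be the Clang CFI symbols generated for
the same functions). A minimal sketch of the failure mode, with
hypothetical files:

/* a.c */
int helper(void) { return 1; }

/* b.c */
int helper(void) { return 2; }

/*
 * $ clang -fuse-ld=lld a.c b.c
 * ld.lld: error: duplicate symbol: helper
 *
 * Making each copy static, or keeping one shared definition in a
 * common object, resolves the collision.
 */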
elapsed time: 1057m
configs tested: 16
configs skipped: 142
tested configs:
arm64                            allmodconfig   clang
arm64                             allnoconfig   gcc  
arm64                               defconfig   gcc  
arm64                 randconfig-001-20240328   gcc  
arm64                 randconfig-002-20240328   gcc  
arm64                 randconfig-003-20240328   gcc  
arm64                 randconfig-004-20240328   gcc  
loongarch                        allmodconfig   gcc  
loongarch                         allnoconfig   gcc  
loongarch                           defconfig   gcc  
loongarch             randconfig-001-20240328   gcc  
loongarch             randconfig-002-20240328   gcc  
x86_64                            allnoconfig   clang
x86_64                           allyesconfig   clang
x86_64                              defconfig   gcc  
x86_64                          rhel-8.3-rust   clang
-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
                    
                  
                  
                          
                            
[PATCH openEuler-1.0-LTS] mm: ksm: fix use-after-free kasan report in ksm_might_need_to_copy
by Jinjiang Tu 28 Mar '24
                    
From: Nanyong Sun <sunnanyong(a)huawei.com>
mainline inclusion
from mainline-v5.17-rc1
commit e1c63e110f977205ab9dfb38989c54e6e7b52a7b
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9BWES
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
Under the stress of swapping in/out with KSM enabled, there is a
low probability that KASAN reports a use-after-free BUG in
ksm_might_need_to_copy() during swap-in.  The freed object is the
anon_vma obtained from page_anon_vma(page).
This happens when a swapcache page associated with one anon_vma is
needed for another anon_vma, but the page's original vma was unmapped
and its anon_vma freed.  In this case the if condition below always
returns false, so a new page is allocated for the copy.  The swap-in
process then uses the new page and continues to run fine, so this is
actually harmless.
      } else if (anon_vma->root == vma->anon_vma->root &&
                 page->index == linear_page_index(vma, address)) {
This patch swaps the order of the two judgment statements above to
avoid the KASAN warning: the CPU evaluates "page->index ==
linear_page_index(vma, address)" first, which in the problem case
returns false and short-circuits the read of anon_vma->root that may
trigger the use-after-free warning:
    ==================================================================
    BUG: KASAN: use-after-free in ksm_might_need_to_copy+0x12e/0x5b0
    Read of size 8 at addr ffff88be9977dbd0 by task khugepaged/694
     CPU: 8 PID: 694 Comm: khugepaged Kdump: loaded Tainted: G OE - 4.18.0.x86_64
     Hardware name: 1288H V5/BC11SPSC0, BIOS 7.93 01/14/2021
    Call Trace:
     dump_stack+0xf1/0x19b
     print_address_description+0x70/0x360
     kasan_report+0x1b2/0x330
     ksm_might_need_to_copy+0x12e/0x5b0
     do_swap_page+0x452/0xe70
     __collapse_huge_page_swapin+0x24b/0x720
     khugepaged_scan_pmd+0xcae/0x1ff0
     khugepaged+0x8ee/0xd70
     kthread+0x1a2/0x1d0
     ret_from_fork+0x1f/0x40
    Allocated by task 2306153:
     kasan_kmalloc+0xa0/0xd0
     kmem_cache_alloc+0xc0/0x1c0
     anon_vma_clone+0xf7/0x380
     anon_vma_fork+0xc0/0x390
     copy_process+0x447b/0x4810
     _do_fork+0x118/0x620
     do_syscall_64+0x112/0x360
     entry_SYSCALL_64_after_hwframe+0x65/0xca
    Freed by task 2306242:
     __kasan_slab_free+0x130/0x180
     kmem_cache_free+0x78/0x1d0
     unlink_anon_vmas+0x19c/0x4a0
     free_pgtables+0x137/0x1b0
     exit_mmap+0x133/0x320
     mmput+0x15e/0x390
     do_exit+0x8c5/0x1210
     do_group_exit+0xb5/0x1b0
     __x64_sys_exit_group+0x21/0x30
     do_syscall_64+0x112/0x360
     entry_SYSCALL_64_after_hwframe+0x65/0xca
    The buggy address belongs to the object at ffff88be9977dba0
     which belongs to the cache anon_vma_chain of size 64
    The buggy address is located 48 bytes inside of
     64-byte region [ffff88be9977dba0, ffff88be9977dbe0)
    The buggy address belongs to the page:
    page:ffffea00fa65df40 count:1 mapcount:0 mapping:ffff888107717800 index:0x0
    flags: 0x17ffffc0000100(slab)
    ==================================================================
Link: https://lkml.kernel.org/r/20211202102940.1069634-1-sunnanyong@huawei.com
Signed-off-by: Nanyong Sun <sunnanyong(a)huawei.com>
Cc: Hugh Dickins <hughd(a)google.com>
Cc: Kefeng Wang <wangkefeng.wang(a)huawei.com>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds(a)linux-foundation.org>
Signed-off-by: Jinjiang Tu <tujinjiang(a)huawei.com>
---
 mm/ksm.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index b656fa77f92f..341e09c376e6 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2596,8 +2596,8 @@ struct page *ksm_might_need_to_copy(struct page *page,
 			return page;	/* no need to copy it */
 	} else if (!anon_vma) {
 		return page;		/* no need to copy it */
-	} else if (anon_vma->root == vma->anon_vma->root &&
-		 page->index == linear_page_index(vma, address)) {
+	} else if (page->index == linear_page_index(vma, address) &&
+			anon_vma->root == vma->anon_vma->root) {
 		return page;		/* still no need to copy it */
 	}
 	if (!PageUptodate(page))
-- 
2.25.1
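The reordering works because C's && operator evaluates left to right
and short-circuits, so once the page->index comparison fails, the
possibly stale anon_vma->root is never read. A standalone sketch of
the idea, with illustrative values rather than kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a read that is only safe while the object is alive. */
static bool risky_read(const int *root)
{
	return *root == 42;
}

/*
 * The cheap, always-safe check goes first: when it is false, "&&"
 * short-circuits and risky_read() is never called.
 */
static bool matches(bool safe_check, const int *root)
{
	return safe_check && risky_read(root);
}

int main(void)
{
	int root = 42;

	printf("%d\n", matches(false, &root));	/* 0, risky_read() skipped */
	printf("%d\n", matches(true, &root));	/* 1 */
	return 0;
}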
                    
                  
                  
                          
                            
[openeuler:OLK-5.10] BUILD SUCCESS WITH WARNING e9ed7f8615cefdeaa780b8a38f20f7740e30abbd
by kernel test robot 28 Mar '24
                    
tree/branch: https://gitee.com/openeuler/kernel.git OLK-5.10
branch HEAD: e9ed7f8615cefdeaa780b8a38f20f7740e30abbd  !5553  mm: ksm: fix use-after-free kasan report in ksm_might_need_to_copy
Warning: (recently discovered and may have been fixed)
arch/arm64/kernel/vdso-ilp32/../vdso/vgettimeofday.c:15:5: warning: no previous prototype for '__kernel_gettimeofday' [-Wmissing-prototypes]
arch/arm64/kernel/vdso-ilp32/../vdso/vgettimeofday.c:21:5: warning: no previous prototype for '__kernel_clock_getres' [-Wmissing-prototypes]
arch/arm64/kernel/vdso-ilp32/../vdso/vgettimeofday.c:9:5: warning: no previous prototype for '__kernel_clock_gettime' [-Wmissing-prototypes]
Warning ids grouped by kconfigs:
gcc_recent_errors
`-- arm64-randconfig-003-20240327
    |-- arch-arm64-kernel-vdso-ilp32-..-vdso-vgettimeofday.c:warning:no-previous-prototype-for-__kernel_clock_getres
    |-- arch-arm64-kernel-vdso-ilp32-..-vdso-vgettimeofday.c:warning:no-previous-prototype-for-__kernel_clock_gettime
    `-- arch-arm64-kernel-vdso-ilp32-..-vdso-vgettimeofday.c:warning:no-previous-prototype-for-__kernel_gettimeofday
clang_recent_errors
`-- x86_64-buildonly-randconfig-002-20240328
    `-- kernel-workqueue.o:warning:objtool:pwq_dec_nr_in_flight:unreachable-instruction
elapsed time: 1082m
configs tested: 26
configs skipped: 148
tested configs:
arm64                            allmodconfig   clang
arm64                             allnoconfig   gcc  
arm64                               defconfig   gcc  
arm64                 randconfig-001-20240328   gcc  
arm64                 randconfig-002-20240328   gcc  
arm64                 randconfig-003-20240328   gcc  
arm64                 randconfig-004-20240328   gcc  
x86_64                            allnoconfig   clang
x86_64                           allyesconfig   clang
x86_64       buildonly-randconfig-001-20240328   gcc  
x86_64       buildonly-randconfig-002-20240328   clang
x86_64       buildonly-randconfig-003-20240328   gcc  
x86_64       buildonly-randconfig-004-20240328   gcc  
x86_64       buildonly-randconfig-005-20240328   gcc  
x86_64       buildonly-randconfig-006-20240328   gcc  
x86_64                              defconfig   gcc  
x86_64                randconfig-001-20240328   clang
x86_64                randconfig-002-20240328   gcc  
x86_64                randconfig-003-20240328   clang
x86_64                randconfig-004-20240328   gcc  
x86_64                randconfig-005-20240328   clang
x86_64                randconfig-006-20240328   clang
x86_64                randconfig-011-20240328   clang
x86_64                randconfig-012-20240328   clang
x86_64                randconfig-013-20240328   gcc  
x86_64                          rhel-8.3-rust   clang
-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
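The vgettimeofday.c lines are gcc's -Wmissing-prototypes: a function
with external linkage is defined without a previous declaration in
scope, typically because the declaring header is not pulled in by that
build variant (here, apparently, the ilp32 vDSO recompiling the shared
vdso source). A minimal sketch with a hypothetical function:

/* foo.c -- build with: gcc -Wmissing-prototypes -c foo.c */

/*
 * External-linkage definition with no prior declaration: gcc warns
 * "no previous prototype for 'foo_global' [-Wmissing-prototypes]".
 * Declaring it first (normally via the shared header) or making the
 * function static silences the warning.
 */
int foo_global(int x)
{
	return x + 1;
}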
                    
                  
                  
                          
                            
28 Mar '24
                    
From: Pavel Skripkin <paskripkin(a)gmail.com>
mainline inclusion
from mainline-v5.13-rc4
commit 46a8b29c6306d8bbfd92b614ef65a47c900d8e70
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I9BHNM
CVE: CVE-2021-47171
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
Syzbot reported a memory leak in smsc75xx_bind().
The problem was memory that was not freed on error
paths after a successful allocation.
backtrace:
  [<ffffffff84245b62>] kmalloc include/linux/slab.h:556 [inline]
  [<ffffffff84245b62>] kzalloc include/linux/slab.h:686 [inline]
  [<ffffffff84245b62>] smsc75xx_bind+0x7a/0x334 drivers/net/usb/smsc75xx.c:1460
  [<ffffffff82b5b2e6>] usbnet_probe+0x3b6/0xc30 drivers/net/usb/usbnet.c:1728
Fixes: d0cad871703b ("smsc75xx: SMSC LAN75xx USB gigabit ethernet adapter driver")
Cc: stable(a)vger.kernel.org
Reported-and-tested-by: syzbot+b558506ba8165425fee2(a)syzkaller.appspotmail.com
Signed-off-by: Pavel Skripkin <paskripkin(a)gmail.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Luo Gengkun <luogengkun2(a)huawei.com>
---
 drivers/net/usb/smsc75xx.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index ef8589e3a6d1..15cc5e4acaa5 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1497,7 +1497,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
 	ret = smsc75xx_wait_ready(dev, 0);
 	if (ret < 0) {
 		netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
-		return ret;
+		goto err;
 	}
 
 	smsc75xx_init_mac_address(dev);
@@ -1506,7 +1506,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
 	ret = smsc75xx_reset(dev);
 	if (ret < 0) {
 		netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
-		return ret;
+		goto err;
 	}
 
 	dev->net->netdev_ops = &smsc75xx_netdev_ops;
@@ -1516,6 +1516,10 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
 	dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE;
 	return 0;
+
+err:
+	kfree(pdata);
+	return ret;
 }
 
 static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
-- 
2.34.1
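The shape of the fix is the kernel's usual centralized error unwind:
once the first allocation has succeeded, every later failure jumps to
a single label that frees it, instead of returning directly and
leaking. A minimal userspace sketch of the pattern, with hypothetical
names rather than the driver code:

#include <stdlib.h>

struct priv { int dummy; };

/* Stand-ins for the hardware steps that may fail. */
static int wait_ready(void) { return 0; }
static int reset_hw(void)   { return 0; }

int bind_device(struct priv **out)
{
	struct priv *pdata = calloc(1, sizeof(*pdata));
	int ret;

	if (!pdata)
		return -1;

	ret = wait_ready();
	if (ret < 0)
		goto err;	/* a bare "return ret;" here would leak pdata */

	ret = reset_hw();
	if (ret < 0)
		goto err;

	*out = pdata;
	return 0;

err:
	free(pdata);		/* single unwind point releases the allocation */
	return ret;
}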
                    
                  
                  
                          
                            
                     
                        
27 Mar '24
                    
hisiraid inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9AWXA
CVE: NA
------------------------------------------
This commit adds support for SPxxx RAID/HBA controllers.
RAID controllers support RAID 0/1/5/6/10/50/60 modes.
HBA controllers support RAID 0/1/10 modes.
RAID/HBA controllers support SAS/SATA HDDs/SSDs.
Signed-off-by: zhanglei <zhanglei48(a)huawei.com>
---
 Documentation/scsi/hisi_raid.rst       |   84 +
 MAINTAINERS                            |    7 +
 arch/arm64/configs/openeuler_defconfig |    1 +
 arch/x86/configs/openeuler_defconfig   |    1 +
 drivers/scsi/Kconfig                   |    1 +
 drivers/scsi/Makefile                  |    1 +
 drivers/scsi/hisi_raid/Kconfig         |   14 +
 drivers/scsi/hisi_raid/Makefile        |    7 +
 drivers/scsi/hisi_raid/hiraid.h        |  764 +++++
 drivers/scsi/hisi_raid/hiraid_main.c   | 4034 ++++++++++++++++++++++++
 10 files changed, 4914 insertions(+)
 create mode 100644 Documentation/scsi/hisi_raid.rst
 create mode 100644 drivers/scsi/hisi_raid/Kconfig
 create mode 100644 drivers/scsi/hisi_raid/Makefile
 create mode 100644 drivers/scsi/hisi_raid/hiraid.h
 create mode 100644 drivers/scsi/hisi_raid/hiraid_main.c
diff --git a/Documentation/scsi/hisi_raid.rst b/Documentation/scsi/hisi_raid.rst
new file mode 100644
index 000000000000..523a6763a7fd
--- /dev/null
+++ b/Documentation/scsi/hisi_raid.rst
@@ -0,0 +1,84 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================================
+hisi_raid - HUAWEI SCSI RAID Controller driver
+==============================================
+
+This file describes the hisi_raid SCSI driver for HUAWEI
+RAID controllers. The hisi_raid driver is the first
+generation RAID driver.
+
+For hisi_raid controller support, enable the hisi_raid driver
+when configuring the kernel.
+
+hisi_raid specific entries in /sys
+==================================
+
+hisi_raid host attributes
+-------------------------
+  - /sys/class/scsi_host/host*/csts_pp
+  - /sys/class/scsi_host/host*/csts_shst
+  - /sys/class/scsi_host/host*/csts_cfs
+  - /sys/class/scsi_host/host*/csts_rdy
+  - /sys/class/scsi_host/host*/fw_version
+
+  The host csts_pp attribute is a read only attribute. This attribute
+  indicates whether the controller is processing commands. If this attribute
+  is set to ‘1’, then the controller is processing commands normally. If
+  this attribute is cleared to ‘0’, then the controller has temporarily stopped
+  processing commands in order to handle an event (e.g., firmware activation).
+
+  The host csts_shst attribute is a read only attribute. This attribute
+  indicates the status of shutdown processing. The shutdown status values are defined
+  as:
+        ======     ==============================
+        Value      Definition
+        ======     ==============================
+        00b        Normal operation
+        01b        Shutdown processing occurring
+        10b        Shutdown processing complete
+        11b        Reserved
+        ======     ==============================
+  The host csts_cfs attribute is a read only attribute. This attribute is set to
+  ’1’ when a fatal controller error occurred that could not be communicated in the
+  appropriate Completion Queue. This bit is cleared to ‘0’ when a fatal controller
+  error has not occurred.
+
+  The host csts_rdy attribute is a read only attribute. This attribute is set to
+  ‘1’ when the controller is ready to process submission queue entries.
+
+  The fw_version attribute is read-only and will return the driver version and the
+  controller firmware version.
+
+hisi_raid scsi device attributes
+--------------------------------
+  - /sys/class/scsi_device/X\:X\:X\:X/device/raid_level
+  - /sys/class/scsi_device/X\:X\:X\:X/device/raid_state
+  - /sys/class/scsi_device/X\:X\:X\:X/device/raid_resync
+
+  The device raid_level attribute is a read only attribute. This attribute indicates the
+  RAID level of the scsi device (will display "NA" if the scsi device is not a virtual disk).
+
+  The device raid_state attribute is read-only and indicates the RAID status of the
+  scsi device (will display "NA" if the scsi device is not a virtual disk).
+
+  The device raid_resync attribute is read-only and indicates the RAID rebuild progress
+  of the scsi device (will display "NA" if the scsi device is not a virtual disk).
+
+Supported devices
+=================
+
+        ===================     ======= =======================================
+        PCI ID (pci.ids)        OEM     Product
+        ===================     ======= =======================================
+        19E5:3858               HUAWEI 	SP186-M-8i(HBA:8Ports)
+        19E5:3858               HUAWEI 	SP186-M-16i(HBA:16Ports)
+        19E5:3858               HUAWEI 	SP186-M-32i(HBA:32Ports)
+        19E5:3858               HUAWEI 	SP186-M-40i(HBA:40Ports)
+        19E5:3758               HUAWEI 	SP686C-M-16i(RAID:16Ports,2G cache)
+        19E5:3758               HUAWEI 	SP686C-M-16i(RAID:16Ports,4G cache)
+        19E5:3758               HUAWEI 	SP686C-MH-32i(RAID:32Ports,4G cache)
+        19E5:3758               HUAWEI 	SP686C-M-40i(RAID:40Ports,2G cache)
+        19E5:3758               HUAWEI 	SP686C-M-40i(RAID:40Ports,4G cache)
+        ===================     ======= =======================================
+
diff --git a/MAINTAINERS b/MAINTAINERS
index a38244365f38..a327a4913519 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9531,6 +9531,13 @@ S:	Maintained
 F:	Documentation/ABI/testing/debugfs-hisi-zip
 F:	drivers/crypto/hisilicon/zip/
 
+HISI_RAID SCSI RAID DRIVERS
+M:	Zhang Lei <zhanglei48(a)huawei.com>
+L:	kernel(a)openeuler.org
+S:	Maintained
+F:	Documentation/scsi/hisi_raid.rst
+F:	drivers/scsi/hisi_raid/
+
 HMM - Heterogeneous Memory Management
 M:	Jérôme Glisse <jglisse(a)redhat.com>
 L:	linux-mm(a)kvack.org
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 00e8677d750b..51b5a3c213b4 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -2538,6 +2538,7 @@ CONFIG_SCSI_MPT3SAS_MAX_SGE=128
 CONFIG_SCSI_MPT2SAS=m
 # CONFIG_SCSI_MPI3MR is not set
 CONFIG_SCSI_SMARTPQI=m
+CONFIG_SCSI_HISI_RAID=m
 # CONFIG_SCSI_HPTIOP is not set
 # CONFIG_SCSI_BUSLOGIC is not set
 # CONFIG_SCSI_MYRB is not set
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index 3d7d62ffb0c1..c60390a7973e 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -2525,6 +2525,7 @@ CONFIG_SCSI_MPT3SAS_MAX_SGE=128
 CONFIG_SCSI_MPT2SAS=m
 # CONFIG_SCSI_MPI3MR is not set
 CONFIG_SCSI_SMARTPQI=m
+CONFIG_SCSI_HISI_RAID=m
 # CONFIG_SCSI_HPTIOP is not set
 # CONFIG_SCSI_BUSLOGIC is not set
 # CONFIG_SCSI_MYRB is not set
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 23bce8995a55..57779fca308a 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -490,6 +490,7 @@ source "drivers/scsi/megaraid/Kconfig.megaraid"
 source "drivers/scsi/mpt3sas/Kconfig"
 source "drivers/scsi/mpi3mr/Kconfig"
 source "drivers/scsi/smartpqi/Kconfig"
+source "drivers/scsi/hisi_raid/Kconfig"
 
 config SCSI_HPTIOP
 	tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index f055bfd54a68..9eea59fa75a8 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_MEGARAID_LEGACY)	+= megaraid.o
 obj-$(CONFIG_MEGARAID_NEWGEN)	+= megaraid/
 obj-$(CONFIG_MEGARAID_SAS)	+= megaraid/
 obj-$(CONFIG_SCSI_MPT3SAS)	+= mpt3sas/
+obj-$(CONFIG_SCSI_HISI_RAID)	+= hisi_raid/
 obj-$(CONFIG_SCSI_MPI3MR)	+= mpi3mr/
 obj-$(CONFIG_SCSI_ACARD)	+= atp870u.o
 obj-$(CONFIG_SCSI_SUNESP)	+= esp_scsi.o	sun_esp.o
diff --git a/drivers/scsi/hisi_raid/Kconfig b/drivers/scsi/hisi_raid/Kconfig
new file mode 100644
index 000000000000..05a060682193
--- /dev/null
+++ b/drivers/scsi/hisi_raid/Kconfig
@@ -0,0 +1,14 @@
+#
+# Kernel configuration file for the hisi_raid
+#
+
+config SCSI_HISI_RAID
+	tristate "Huawei Hisi_Raid Adapter"
+	depends on PCI && SCSI
+	select BLK_DEV_BSGLIB
+	depends on ARM64 || X86_64
+	help
+	This driver supports the hisi_raid SPxx series RAID controllers, which
+	have a PCI Express Gen4 host interface and support SAS/SATA HDDs/SSDs.
+	To compile this driver as a module, choose M here: the module will
+	be called hisi_raid.
diff --git a/drivers/scsi/hisi_raid/Makefile b/drivers/scsi/hisi_raid/Makefile
new file mode 100644
index 000000000000..b71a675f4190
--- /dev/null
+++ b/drivers/scsi/hisi_raid/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the hisi_raid drivers.
+#
+
+obj-$(CONFIG_SCSI_HISI_RAID) += hiraid.o
+
+hiraid-objs := hiraid_main.o
diff --git a/drivers/scsi/hisi_raid/hiraid.h b/drivers/scsi/hisi_raid/hiraid.h
new file mode 100644
index 000000000000..a9d2b150aac1
--- /dev/null
+++ b/drivers/scsi/hisi_raid/hiraid.h
@@ -0,0 +1,764 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 Huawei Technologies Co., Ltd */
+
+#ifndef __HIRAID_H_
+#define __HIRAID_H_
+
+#define HIRAID_HDD_PD_QD 64
+#define HIRAID_HDD_VD_QD 256
+#define HIRAID_SSD_PD_QD 64
+#define HIRAID_SSD_VD_QD 256
+
+#define BGTASK_TYPE_REBUILD 4
+#define USR_CMD_READ 0xc2
+#define USR_CMD_RDLEN 0x1000
+#define USR_CMD_VDINFO 0x704
+#define USR_CMD_BGTASK 0x504
+#define VDINFO_PARAM_LEN 0x04
+
+#define HIRAID_DEFAULT_MAX_CHANNEL 4
+#define HIRAID_DEFAULT_MAX_ID 240
+#define HIRAID_DEFAULT_MAX_LUN_PER_HOST 8
+
+#define FUA_MASK 0x08
+
+#define HIRAID_IO_SQES 7
+#define HIRAID_IO_CQES 4
+#define PRP_ENTRY_SIZE 8
+
+#define EXTRA_POOL_SIZE 256
+#define MAX_EXTRA_POOL_NUM 16
+#define MAX_CMD_PER_DEV 64
+#define MAX_CDB_LEN 16
+
+#define HIRAID_AQ_DEPTH 128
+#define HIRAID_ASYN_COMMANDS 16
+#define HIRAID_AQ_BLK_MQ_DEPTH (HIRAID_AQ_DEPTH - HIRAID_ASYN_COMMANDS)
+#define HIRAID_AQ_MQ_TAG_DEPTH (HIRAID_AQ_BLK_MQ_DEPTH - 1)
+
+#define HIRAID_ADMIN_QUEUE_NUM 1
+#define HIRAID_PTHRU_CMDS_PERQ 1
+#define HIRAID_TOTAL_PTCMDS(qn) (HIRAID_PTHRU_CMDS_PERQ * (qn))
+
+#define HIRAID_DEV_INFO_ATTR_BOOT(attr) ((attr) & 0x01)
+#define HIRAID_DEV_INFO_ATTR_VD(attr) (((attr) & 0x02) == 0x0)
+#define HIRAID_DEV_INFO_ATTR_PT(attr) (((attr) & 0x22) == 0x02)
+#define HIRAID_DEV_INFO_ATTR_RAWDISK(attr) ((attr) & 0x20)
+#define HIRAID_DEV_DISK_TYPE(attr) ((attr) & 0x1e)
+
+#define HIRAID_DEV_INFO_FLAG_VALID(flag) ((flag) & 0x01)
+#define HIRAID_DEV_INFO_FLAG_CHANGE(flag) ((flag) & 0x02)
+
+#define HIRAID_CAP_MQES(cap) ((cap) & 0xffff)
+#define HIRAID_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
+#define HIRAID_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
+#define HIRAID_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+#define HIRAID_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
+#define HIRAID_CAP_DMAMASK(cap) (((cap) >> 37) & 0xff)
+
+#define IO_SQE_SIZE sizeof(struct hiraid_scsi_io_cmd)
+#define ADMIN_SQE_SIZE sizeof(struct hiraid_admin_command)
+#define SQE_SIZE(qid) (((qid) > 0) ? IO_SQE_SIZE : ADMIN_SQE_SIZE)
+#define CQ_SIZE(depth) ((depth) * sizeof(struct hiraid_completion))
+#define SQ_SIZE(qid, depth) ((depth) * SQE_SIZE(qid))
+
+#define SENSE_SIZE(depth)	((depth) * SCSI_SENSE_BUFFERSIZE)
+
+#define IO_6_DEFAULT_TX_LEN 256
+
+#define MAX_DEV_ENTRY_PER_PAGE_4K	340
+
+#define MAX_REALTIME_BGTASK_NUM 32
+
+#define PCI_VENDOR_ID_HUAWEI_LOGIC 0x19E5
+#define HIRAID_SERVER_DEVICE_HBA_DID	0x3858
+#define HIRAID_SERVER_DEVICE_HBAS_DID	0x3918
+#define HIRAID_SERVER_DEVICE_RAID_DID	0x3758
+#define HIRAID_SERVER_DEVICE_RAIDS_DID	0x38D8
+
+
+enum {
+	HIRAID_SC_SUCCESS = 0x0,
+	HIRAID_SC_INVALID_OPCODE = 0x1,
+	HIRAID_SC_INVALID_FIELD  = 0x2,
+
+	HIRAID_SC_ABORT_LIMIT = 0x103,
+	HIRAID_SC_ABORT_MISSING = 0x104,
+	HIRAID_SC_ASYNC_LIMIT = 0x105,
+
+	HIRAID_SC_DNR = 0x4000,
+};
+
+enum {
+	HIRAID_REG_CAP  = 0x0000,
+	HIRAID_REG_CC   = 0x0014,
+	HIRAID_REG_CSTS = 0x001c,
+	HIRAID_REG_AQA  = 0x0024,
+	HIRAID_REG_ASQ  = 0x0028,
+	HIRAID_REG_ACQ  = 0x0030,
+	HIRAID_REG_DBS  = 0x1000,
+};
+
+enum {
+	HIRAID_CC_ENABLE     = 1 << 0,
+	HIRAID_CC_CSS_NVM    = 0 << 4,
+	HIRAID_CC_MPS_SHIFT  = 7,
+	HIRAID_CC_AMS_SHIFT  = 11,
+	HIRAID_CC_SHN_SHIFT  = 14,
+	HIRAID_CC_IOSQES_SHIFT = 16,
+	HIRAID_CC_IOCQES_SHIFT = 20,
+	HIRAID_CC_AMS_RR       = 0 << HIRAID_CC_AMS_SHIFT,
+	HIRAID_CC_SHN_NONE     = 0 << HIRAID_CC_SHN_SHIFT,
+	HIRAID_CC_IOSQES       = HIRAID_IO_SQES << HIRAID_CC_IOSQES_SHIFT,
+	HIRAID_CC_IOCQES       = HIRAID_IO_CQES << HIRAID_CC_IOCQES_SHIFT,
+	HIRAID_CC_SHN_NORMAL   = 1 << HIRAID_CC_SHN_SHIFT,
+	HIRAID_CC_SHN_MASK     = 3 << HIRAID_CC_SHN_SHIFT,
+	HIRAID_CSTS_CFS_SHIFT  = 1,
+	HIRAID_CSTS_SHST_SHIFT = 2,
+	HIRAID_CSTS_PP_SHIFT   = 5,
+	HIRAID_CSTS_RDY	       = 1 << 0,
+	HIRAID_CSTS_SHST_CMPLT = 2 << 2,
+	HIRAID_CSTS_SHST_MASK  = 3 << 2,
+	HIRAID_CSTS_CFS_MASK   = 1 << HIRAID_CSTS_CFS_SHIFT,
+	HIRAID_CSTS_PP_MASK    = 1 << HIRAID_CSTS_PP_SHIFT,
+};
+
+enum {
+	HIRAID_ADMIN_DELETE_SQ = 0x00,
+	HIRAID_ADMIN_CREATE_SQ = 0x01,
+	HIRAID_ADMIN_DELETE_CQ = 0x04,
+	HIRAID_ADMIN_CREATE_CQ = 0x05,
+	HIRAID_ADMIN_ABORT_CMD = 0x08,
+	HIRAID_ADMIN_SET_FEATURES = 0x09,
+	HIRAID_ADMIN_ASYNC_EVENT = 0x0c,
+	HIRAID_ADMIN_GET_INFO = 0xc6,
+	HIRAID_ADMIN_RESET = 0xc8,
+};
+
+enum {
+	HIRAID_GET_CTRL_INFO = 0,
+	HIRAID_GET_DEVLIST_INFO = 1,
+};
+
+enum hiraid_rst_type {
+	HIRAID_RESET_TARGET = 0,
+	HIRAID_RESET_BUS = 1,
+};
+
+enum {
+	HIRAID_ASYN_EVENT_ERROR = 0,
+	HIRAID_ASYN_EVENT_NOTICE = 2,
+	HIRAID_ASYN_EVENT_VS = 7,
+};
+
+enum {
+	HIRAID_ASYN_DEV_CHANGED = 0x00,
+	HIRAID_ASYN_FW_ACT_START = 0x01,
+	HIRAID_ASYN_HOST_PROBING = 0x10,
+};
+
+enum {
+	HIRAID_ASYN_TIMESYN = 0x00,
+	HIRAID_ASYN_FW_ACT_FINISH = 0x02,
+	HIRAID_ASYN_EVENT_MIN = 0x80,
+	HIRAID_ASYN_EVENT_MAX = 0xff,
+};
+
+enum {
+	HIRAID_CMD_WRITE = 0x01,
+	HIRAID_CMD_READ = 0x02,
+
+	HIRAID_CMD_NONRW_NONE = 0x80,
+	HIRAID_CMD_NONRW_TODEV = 0x81,
+	HIRAID_CMD_NONRW_FROMDEV = 0x82,
+};
+
+enum {
+	HIRAID_QUEUE_PHYS_CONTIG = (1 << 0),
+	HIRAID_CQ_IRQ_ENABLED = (1 << 1),
+
+	HIRAID_FEATURE_NUM_QUEUES = 0x07,
+	HIRAID_FEATURE_ASYNC_EVENT = 0x0b,
+	HIRAID_FEATURE_TIMESTAMP = 0x0e,
+};
+
+enum hiraid_dev_state {
+	DEV_NEW,
+	DEV_LIVE,
+	DEV_RESETTING,
+	DEV_DELETING,
+	DEV_DEAD,
+};
+
+enum {
+	HIRAID_CARD_HBA,
+	HIRAID_CARD_RAID,
+};
+
+enum hiraid_cmd_type {
+	HIRAID_CMD_ADMIN,
+	HIRAID_CMD_PTHRU,
+};
+
+enum {
+	SQE_FLAG_SGL_METABUF = (1 << 6),
+	SQE_FLAG_SGL_METASEG = (1 << 7),
+	SQE_FLAG_SGL_ALL     = SQE_FLAG_SGL_METABUF | SQE_FLAG_SGL_METASEG,
+};
+
+enum hiraid_cmd_state {
+	CMD_IDLE = 0,
+	CMD_FLIGHT = 1,
+	CMD_COMPLETE = 2,
+	CMD_TIMEOUT = 3,
+	CMD_TMO_COMPLETE = 4,
+};
+
+enum {
+	HIRAID_BSG_ADMIN,
+	HIRAID_BSG_IOPTHRU,
+};
+
+enum {
+	HIRAID_SAS_HDD_VD  = 0x04,
+	HIRAID_SATA_HDD_VD = 0x08,
+	HIRAID_SAS_SSD_VD  = 0x0c,
+	HIRAID_SATA_SSD_VD = 0x10,
+	HIRAID_NVME_SSD_VD = 0x14,
+	HIRAID_SAS_HDD_PD  = 0x06,
+	HIRAID_SATA_HDD_PD = 0x0a,
+	HIRAID_SAS_SSD_PD  = 0x0e,
+	HIRAID_SATA_SSD_PD = 0x12,
+	HIRAID_NVME_SSD_PD = 0x16,
+};
+
+enum {
+	DISPATCH_BY_CPU,
+	DISPATCH_BY_DISK,
+};
+
+struct hiraid_completion {
+	__le32 result;
+	union {
+		struct {
+			__u8	sense_len;
+			__u8	resv[3];
+		};
+		__le32	result1;
+	};
+	__le16 sq_head;
+	__le16 sq_id;
+	__le16 cmd_id;
+	__le16 status;
+};
+
+struct hiraid_ctrl_info {
+	__le32 nd;
+	__le16 max_cmds;
+	__le16 max_channel;
+	__le32 max_tgt_id;
+	__le16 max_lun;
+	__le16 max_num_sge;
+	__le16 lun_num_boot;
+	__u8   mdts;
+	__u8   acl;
+	__u8   asynevent;
+	__u8   card_type;
+	__u8   pt_use_sgl;
+	__u8   rsvd;
+	__le32 rtd3e;
+	__u8   sn[32];
+	__u8   fw_version[16];
+	__u8   rsvd1[4020];
+};
+
+struct hiraid_dev {
+	struct pci_dev *pdev;
+	struct device *dev;
+	struct Scsi_Host *shost;
+	struct hiraid_queue *queues;
+	struct dma_pool *prp_page_pool;
+	struct dma_pool *prp_extra_pool[MAX_EXTRA_POOL_NUM];
+	void __iomem *bar;
+	u32 max_qid;
+	u32 num_vecs;
+	u32 queue_count;
+	u32 ioq_depth;
+	u32 db_stride;
+	u32 __iomem *dbs;
+	struct rw_semaphore dev_rwsem;
+	int numa_node;
+	u32 page_size;
+	u32 ctrl_config;
+	u32 online_queues;
+	u64 cap;
+	u32 scsi_qd;
+	u32 instance;
+	struct hiraid_ctrl_info *ctrl_info;
+	struct hiraid_dev_info *dev_info;
+
+	struct hiraid_cmd *adm_cmds;
+	struct list_head adm_cmd_list;
+	spinlock_t adm_cmd_lock;
+
+	struct hiraid_cmd *io_ptcmds;
+	struct list_head io_pt_list;
+	spinlock_t io_pt_lock;
+
+	struct work_struct scan_work;
+	struct work_struct timesyn_work;
+	struct work_struct reset_work;
+	struct work_struct fwact_work;
+
+	enum hiraid_dev_state state;
+	spinlock_t state_lock;
+
+	void *sense_buffer_virt;
+	dma_addr_t sense_buffer_phy;
+	u32 last_qcnt;
+	u8 hdd_dispatch;
+
+	struct request_queue *bsg_queue;
+};
+
+struct hiraid_sgl_desc {
+	__le64 addr;
+	__le32 length;
+	__u8   rsvd[3];
+	__u8   type;
+};
+
+union hiraid_data_ptr {
+	struct {
+		__le64 prp1;
+		__le64 prp2;
+	};
+	struct hiraid_sgl_desc sgl;
+};
+
+struct hiraid_admin_com_cmd {
+	__u8	opcode;
+	__u8	flags;
+	__le16	cmd_id;
+	__le32	hdid;
+	__le32	cdw2[4];
+	union hiraid_data_ptr	dptr;
+	__le32	cdw10;
+	__le32	cdw11;
+	__le32	cdw12;
+	__le32	cdw13;
+	__le32	cdw14;
+	__le32	cdw15;
+};
+
+struct hiraid_features {
+	__u8	opcode;
+	__u8	flags;
+	__le16	cmd_id;
+	__le32	hdid;
+	__u64	rsvd2[2];
+	union hiraid_data_ptr dptr;
+	__le32	fid;
+	__le32	dword11;
+	__le32	dword12;
+	__le32	dword13;
+	__le32	dword14;
+	__le32	dword15;
+};
+
+struct hiraid_create_cq {
+	__u8	opcode;
+	__u8	flags;
+	__le16	cmd_id;
+	__u32	rsvd1[5];
+	__le64	prp1;
+	__u64	rsvd8;
+	__le16	cqid;
+	__le16	qsize;
+	__le16	cq_flags;
+	__le16	irq_vector;
+	__u32	rsvd12[4];
+};
+
+struct hiraid_create_sq {
+	__u8	opcode;
+	__u8	flags;
+	__le16	cmd_id;
+	__u32	rsvd1[5];
+	__le64	prp1;
+	__u64	rsvd8;
+	__le16	sqid;
+	__le16	qsize;
+	__le16	sq_flags;
+	__le16	cqid;
+	__u32	rsvd12[4];
+};
+
+struct hiraid_delete_queue {
+	__u8	opcode;
+	__u8	flags;
+	__le16	cmd_id;
+	__u32	rsvd1[9];
+	__le16	qid;
+	__u16	rsvd10;
+	__u32	rsvd11[5];
+};
+
+struct hiraid_get_info {
+	__u8	opcode;
+	__u8	flags;
+	__le16	cmd_id;
+	__le32	hdid;
+	__u32	rsvd2[4];
+	union hiraid_data_ptr	dptr;
+	__u8	type;
+	__u8	rsvd10[3];
+	__le32	cdw11;
+	__u32	rsvd12[4];
+};
+
+struct hiraid_usr_cmd {
+	__u8	opcode;
+	__u8	flags;
+	__le16	cmd_id;
+	__le32	hdid;
+	union {
+		struct {
+			__le16 subopcode;
+			__le16 rsvd1;
+		} info_0;
+		__le32 cdw2;
+	};
+	union {
+		struct {
+			__le16 data_len;
+			__le16 param_len;
+		} info_1;
+		__le32 cdw3;
+	};
+	__u64 metadata;
+	union hiraid_data_ptr	dptr;
+	__le32 cdw10;
+	__le32 cdw11;
+	__le32 cdw12;
+	__le32 cdw13;
+	__le32 cdw14;
+	__le32 cdw15;
+};
+
+struct hiraid_abort_cmd {
+	__u8	opcode;
+	__u8	flags;
+	__le16	cmd_id;
+	__le32	hdid;
+	__u64	rsvd2[4];
+	__le16	sqid;
+	__le16	cid;
+	__u32	rsvd11[5];
+};
+
+struct hiraid_reset_cmd {
+	__u8	opcode;
+	__u8	flags;
+	__le16	cmd_id;
+	__le32	hdid;
+	__u64	rsvd2[4];
+	__u8	type;
+	__u8	rsvd10[3];
+	__u32	rsvd11[5];
+};
+
+struct hiraid_admin_command {
+	union {
+		struct hiraid_admin_com_cmd common;
+		struct hiraid_features features;
+		struct hiraid_create_cq create_cq;
+		struct hiraid_create_sq create_sq;
+		struct hiraid_delete_queue delete_queue;
+		struct hiraid_get_info get_info;
+		struct hiraid_abort_cmd abort;
+		struct hiraid_reset_cmd reset;
+		struct hiraid_usr_cmd usr_cmd;
+	};
+};
+
+struct hiraid_scsi_io_com_cmd {
+	__u8	opcode;
+	__u8	flags;
+	__le16	cmd_id;
+	__le32	hdid;
+	__le16	sense_len;
+	__u8	cdb_len;
+	__u8	rsvd2;
+	__le32	cdw3[3];
+	union hiraid_data_ptr	dptr;
+	__le32	cdw10[6];
+	__u8	cdb[32];
+	__le64	sense_addr;
+	__le32	cdw26[6];
+};
+
+struct hiraid_scsi_rw_cmd {
+	__u8	opcode;
+	__u8	flags;
+	__le16	cmd_id;
+	__le32	hdid;
+	__le16	sense_len;
+	__u8	cdb_len;
+	__u8	rsvd2;
+	__u32	rsvd3[3];
+	union hiraid_data_ptr	dptr;
+	__le64	slba;
+	__le16	nlb;
+	__le16	control;
+	__u32	rsvd13[3];
+	__u8	cdb[32];
+	__le64	sense_addr;
+	__u32	rsvd26[6];
+};
+
+struct hiraid_scsi_nonrw_cmd {
+	__u8	opcode;
+	__u8	flags;
+	__le16	cmd_id;
+	__le32	hdid;
+	__le16	sense_len;
+	__u8	cdb_length;
+	__u8	rsvd2;
+	__u32	rsvd3[3];
+	union hiraid_data_ptr	dptr;
+	__u32	rsvd10[5];
+	__le32	buf_len;
+	__u8	cdb[32];
+	__le64	sense_addr;
+	__u32	rsvd26[6];
+};
+
+struct hiraid_scsi_io_cmd {
+	union {
+		struct hiraid_scsi_io_com_cmd common;
+		struct hiraid_scsi_rw_cmd rw;
+		struct hiraid_scsi_nonrw_cmd nonrw;
+	};
+};
+
+struct hiraid_passthru_common_cmd {
+	__u8	opcode;
+	__u8	flags;
+	__u16	rsvd0;
+	__u32	nsid;
+	union {
+		struct {
+			__u16 subopcode;
+			__u16 rsvd1;
+		} info_0;
+		__u32 cdw2;
+	};
+	union {
+		struct {
+			__u16 data_len;
+			__u16 param_len;
+		} info_1;
+		__u32 cdw3;
+	};
+	__u64 metadata;
+
+	__u64 addr;
+	__u64 prp2;
+
+	__u32 cdw10;
+	__u32 cdw11;
+	__u32 cdw12;
+	__u32 cdw13;
+	__u32 cdw14;
+	__u32 cdw15;
+	__u32 timeout_ms;
+	__u32 result0;
+	__u32 result1;
+};
+
+struct hiraid_passthru_io_cmd {
+	__u8  opcode;
+	__u8  flags;
+	__u16 rsvd0;
+	__u32 nsid;
+	union {
+		struct {
+			__u16 res_sense_len;
+			__u8  cdb_len;
+			__u8  rsvd0;
+		} info_0;
+		__u32 cdw2;
+	};
+	union {
+		struct {
+			__u16 subopcode;
+			__u16 rsvd1;
+		} info_1;
+		__u32 cdw3;
+	};
+	union {
+		struct {
+			__u16 rsvd;
+			__u16 param_len;
+		} info_2;
+		__u32 cdw4;
+	};
+	__u32 cdw5;
+	__u64 addr;
+	__u64 prp2;
+	union {
+		struct {
+			__u16 eid;
+			__u16 sid;
+		} info_3;
+		__u32 cdw10;
+	};
+	union {
+		struct {
+			__u16 did;
+			__u8  did_flag;
+			__u8  rsvd2;
+		} info_4;
+		__u32 cdw11;
+	};
+	__u32 cdw12;
+	__u32 cdw13;
+	__u32 cdw14;
+	__u32 data_len;
+	__u32 cdw16;
+	__u32 cdw17;
+	__u32 cdw18;
+	__u32 cdw19;
+	__u32 cdw20;
+	__u32 cdw21;
+	__u32 cdw22;
+	__u32 cdw23;
+	__u64 sense_addr;
+	__u32 cdw26[4];
+	__u32 timeout_ms;
+	__u32 result0;
+	__u32 result1;
+};
+
+struct hiraid_bsg_request {
+	u32  msgcode;
+	u32 control;
+	union {
+		struct hiraid_passthru_common_cmd admcmd;
+		struct hiraid_passthru_io_cmd   pthrucmd;
+	};
+};
+
+struct hiraid_cmd {
+	u16 qid;
+	u16 cid;
+	u32 result0;
+	u32 result1;
+	u16 status;
+	void *priv;
+	enum hiraid_cmd_state state;
+	struct completion cmd_done;
+	struct list_head list;
+};
+
+struct hiraid_queue {
+	struct hiraid_dev *hdev;
+	spinlock_t sq_lock;
+
+	spinlock_t cq_lock ____cacheline_aligned_in_smp;
+
+	void *sq_cmds;
+
+	struct hiraid_completion *cqes;
+
+	dma_addr_t sq_buffer_phy;
+	dma_addr_t cq_buffer_phy;
+	u32 __iomem *q_db;
+	u8 cq_phase;
+	u8 sqes;
+	u16 qid;
+	u16 sq_tail;
+	u16 cq_head;
+	u16 last_cq_head;
+	u16 q_depth;
+	s16 cq_vector;
+	atomic_t inflight;
+	void *sense_buffer_virt;
+	dma_addr_t sense_buffer_phy;
+	struct dma_pool *prp_small_pool;
+};
+
+struct hiraid_mapmange {
+	struct hiraid_queue *hiraidq;
+	enum hiraid_cmd_state state;
+	u16 cid;
+	int page_cnt;
+	u32 sge_cnt;
+	u32 len;
+	bool use_sgl;
+	u32 cdb_data_len;
+	dma_addr_t first_dma;
+	void *sense_buffer_virt;
+	dma_addr_t sense_buffer_phy;
+	struct scatterlist *sgl;
+	void *list[];
+};
+
+struct hiraid_vd_info {
+	__u8 name[32];
+	__le16 id;
+	__u8 rg_id;
+	__u8 rg_level;
+	__u8 sg_num;
+	__u8 sg_disk_num;
+	__u8 vd_status;
+	__u8 vd_type;
+	__u8 rsvd1[4056];
+};
+
+struct bgtask_info {
+	__u8 type;
+	__u8 progress;
+	__u8 rate;
+	__u8 rsvd0;
+	__le16 vd_id;
+	__le16 time_left;
+	__u8 rsvd1[4];
+};
+
+struct hiraid_bgtask {
+	__u8 sw;
+	__u8 task_num;
+	__u8 rsvd[6];
+	struct bgtask_info bgtask[MAX_REALTIME_BGTASK_NUM];
+};
+
+struct hiraid_dev_info {
+	__le32	hdid;
+	__le16	target;
+	__u8	channel;
+	__u8	lun;
+	__u8	attr;
+	__u8	flag;
+	__le16	max_io_kb;
+};
+
+struct hiraid_dev_list {
+	__le32	dev_num;
+	__u32	rsvd0[3];
+	struct hiraid_dev_info	devinfo[MAX_DEV_ENTRY_PER_PAGE_4K];
+};
+
+struct hiraid_sdev_hostdata {
+	u32 hdid;
+	u16 max_io_kb;
+	u8 attr;
+	u8 flag;
+	u8 rg_id;
+	u8 hwq;
+	u16 pend_count;
+};
+
+#endif
+
diff --git a/drivers/scsi/hisi_raid/hiraid_main.c b/drivers/scsi/hisi_raid/hiraid_main.c
new file mode 100644
index 000000000000..f687c93ad917
--- /dev/null
+++ b/drivers/scsi/hisi_raid/hiraid_main.c
@@ -0,0 +1,4034 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 Huawei Technologies Co., Ltd */
+
+/* Huawei Raid Series Linux Driver */
+
+#define pr_fmt(fmt) "hiraid: " fmt
+
+#include <linux/sched/signal.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/sysfs.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/ratelimit.h>
+#include <linux/once.h>
+#include <linux/debugfs.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/blkdev.h>
+#include <linux/bsg-lib.h>
+#include <asm/unaligned.h>
+#include <linux/sort.h>
+#include <target/target_core_backend.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/sg.h>
+
+#include "hiraid.h"
+
+static u32 admin_tmout = 60;
+module_param(admin_tmout, uint, 0644);
+MODULE_PARM_DESC(admin_tmout, "admin commands timeout (seconds)");
+
+static u32 scmd_tmout_rawdisk = 180;
+module_param(scmd_tmout_rawdisk, uint, 0644);
+MODULE_PARM_DESC(scmd_tmout_rawdisk, "scsi commands timeout for rawdisk (seconds)");
+
+static u32 scmd_tmout_vd = 180;
+module_param(scmd_tmout_vd, uint, 0644);
+MODULE_PARM_DESC(scmd_tmout_vd, "scsi commands timeout for vd(seconds)");
+
+static bool max_io_force;
+module_param(max_io_force, bool, 0644);
+MODULE_PARM_DESC(max_io_force, "force max_hw_sectors_kb = 1024, default false(performance first)");
+
+static bool work_mode;
+module_param(work_mode, bool, 0444);
+MODULE_PARM_DESC(work_mode, "work mode switch, default false for multi hw queues");
+
+#define MAX_IO_QUEUES		128
+#define MIN_IO_QUEUES		1
+
+static int ioq_num_set(const char *val, const struct kernel_param *kp)
+{
+	int n = 0;
+	int ret;
+
+	ret = kstrtoint(val, 10, &n);
+	if (ret != 0 || n < MIN_IO_QUEUES || n > MAX_IO_QUEUES)
+		return -EINVAL;
+
+	return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops max_hwq_num_ops = {
+	.set = ioq_num_set,
+	.get = param_get_uint,
+};
+
+static u32 max_hwq_num = 128;
+module_param_cb(max_hwq_num, &max_hwq_num_ops, &max_hwq_num, 0444);
+MODULE_PARM_DESC(max_hwq_num, "max num of hw io queues, should be >= 1, default 128");
+
+static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
+{
+	int n = 0;
+	int ret;
+
+	ret = kstrtoint(val, 10, &n);
+	if (ret != 0 || n < 2)
+		return -EINVAL;
+
+	return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops io_queue_depth_ops = {
+	.set = io_queue_depth_set,
+	.get = param_get_uint,
+};
+
+static u32 io_queue_depth = 1024;
+module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
+MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should be >= 2, default 1024");
+
+static u32 log_debug_switch;
+module_param(log_debug_switch, uint, 0644);
+MODULE_PARM_DESC(log_debug_switch, "set log state, default zero for switch off");
+
+static int extra_pool_num_set(const char *val, const struct kernel_param *kp)
+{
+	u8 n = 0;
+	int ret;
+
+	ret = kstrtou8(val, 10, &n);
+	if (ret != 0)
+		return -EINVAL;
+	if (n > MAX_EXTRA_POOL_NUM)
+		n = MAX_EXTRA_POOL_NUM;
+	if (n < 1)
+		n = 1;
+	*((u8 *)kp->arg) = n;
+
+	return 0;
+}
+
+static const struct kernel_param_ops small_pool_num_ops = {
+	.set = extra_pool_num_set,
+	.get = param_get_byte,
+};
+
+/*
+ * The spinlock of a single pool was found to be heavily contended
+ * across multiple CPUs, so several pools are used to reduce the
+ * contention.
+ */
+static unsigned char extra_pool_num = 4;
+module_param_cb(extra_pool_num, &small_pool_num_ops, &extra_pool_num, 0644);
+MODULE_PARM_DESC(extra_pool_num, "set prp extra pool num, default 4, MAX 16");
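+
+/*
+ * Example (illustrative values only): loading the driver with
+ *   modprobe hiraid max_hwq_num=64 io_queue_depth=2048 extra_pool_num=8
+ * caps the hw io queues at 64, deepens each io queue to 2048 entries
+ * and spreads small PRP/SGL allocations across 8 extra pools.
+ */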
+
+static void hiraid_handle_async_notice(struct hiraid_dev *hdev, u32 result);
+static void hiraid_handle_async_vs(struct hiraid_dev *hdev, u32 result, u32 result1);
+
+static struct class *hiraid_class;
+
+#define HIRAID_CAP_TIMEOUT_UNIT_MS	(HZ / 2)
+
+static struct workqueue_struct *work_queue;
+
+#define dev_log_dbg(dev, fmt, ...)	do { \
+	if (unlikely(log_debug_switch))	\
+		dev_info(dev, "[%s] " fmt,	\
+			__func__, ##__VA_ARGS__);	\
+} while (0)
+
+#define HIRAID_DRV_VERSION	"1.1.0.0"
+
+#define ADMIN_TIMEOUT		(admin_tmout * HZ)
+#define USRCMD_TIMEOUT		(180 * HZ)
+#define CTL_RST_TIME		(600 * HZ)
+
+#define HIRAID_WAIT_ABNL_CMD_TIMEOUT	6
+#define HIRAID_WAIT_RST_IO_TIMEOUT		10
+
+#define HIRAID_DMA_MSK_BIT_MAX	64
+
+#define HIRAID_ALLOC_SIZE_MAX 8
+
+#define IOQ_PT_DATA_LEN		    4096
+#define IOQ_PT_SGL_DATA_LEN		(1024 * 1024)
+
+#define MAX_CAN_QUEUE		(4096 - 1)
+#define MIN_CAN_QUEUE		(1024 - 1)
+
+enum SENSE_STATE_CODE {
+	SENSE_STATE_OK = 0,
+	SENSE_STATE_NEED_CHECK,
+	SENSE_STATE_ERROR,
+	SENSE_STATE_EP_PCIE_ERROR,
+	SENSE_STATE_NAC_DMA_ERROR,
+	SENSE_STATE_ABORTED,
+	SENSE_STATE_NEED_RETRY
+};
+
+enum {
+	FW_EH_OK = 0,
+	FW_EH_DEV_NONE = 0x701
+};
+
+static const char * const raid_levels[] = {"0", "1", "5", "6", "10", "50", "60", "NA"};
+
+static const char * const raid_states[] = {
+	"NA", "NORMAL", "FAULT", "DEGRADE", "NOT_FORMATTED", "FORMATTING", "SANITIZING",
+	"INITIALIZING", "INITIALIZE_FAIL", "DELETING", "DELETE_FAIL", "WRITE_PROTECT"
+};
+
+static int hiraid_remap_bar(struct hiraid_dev *hdev, u32 size)
+{
+	struct pci_dev *pdev = hdev->pdev;
+
+	if (size > pci_resource_len(pdev, 0)) {
+		dev_err(hdev->dev, "input size[%u] exceed bar0 length[%llu]\n",
+			size, pci_resource_len(pdev, 0));
+		return -ENOMEM;
+	}
+
+	if (hdev->bar)
+		iounmap(hdev->bar);
+
+	hdev->bar = ioremap(pci_resource_start(pdev, 0), size);
+	if (!hdev->bar) {
+		dev_err(hdev->dev, "ioremap for bar0 failed\n");
+		return -ENOMEM;
+	}
+	hdev->dbs = hdev->bar + HIRAID_REG_DBS;
+
+	return 0;
+}
+
+static int hiraid_dev_map(struct hiraid_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+	int ret;
+
+	ret = pci_request_mem_regions(pdev, "hiraid");
+	if (ret) {
+		dev_err(hdev->dev, "fail to request memory regions\n");
+		return ret;
+	}
+
+	ret = hiraid_remap_bar(hdev, HIRAID_REG_DBS + 4096);
+	if (ret) {
+		pci_release_mem_regions(pdev);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void hiraid_dev_unmap(struct hiraid_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+
+	if (hdev->bar) {
+		iounmap(hdev->bar);
+		hdev->bar = NULL;
+	}
+	pci_release_mem_regions(pdev);
+}
+
+static int hiraid_pci_enable(struct hiraid_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+	int ret = -ENOMEM;
+	u64 maskbit = HIRAID_DMA_MSK_BIT_MAX;
+
+	if (pci_enable_device_mem(pdev)) {
+		dev_err(hdev->dev, "enable pci device memory resources failed\n");
+		return ret;
+	}
+	pci_set_master(pdev);
+
+	if (readl(hdev->bar + HIRAID_REG_CSTS) == U32_MAX) {
+		ret = -ENODEV;
+		dev_err(hdev->dev, "read CSTS register failed\n");
+		goto disable;
+	}
+
+	hdev->cap = lo_hi_readq(hdev->bar + HIRAID_REG_CAP);
+	hdev->ioq_depth = min_t(u32, HIRAID_CAP_MQES(hdev->cap) + 1, io_queue_depth);
+	hdev->db_stride = 1 << HIRAID_CAP_STRIDE(hdev->cap);
+
+	maskbit = HIRAID_CAP_DMAMASK(hdev->cap);
+	if (maskbit < 32 || maskbit > HIRAID_DMA_MSK_BIT_MAX) {
+		dev_err(hdev->dev, "err, dma mask invalid[%llu], set to default\n", maskbit);
+		maskbit = HIRAID_DMA_MSK_BIT_MAX;
+	}
+
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(maskbit))) {
+		if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+			dev_err(hdev->dev, "set dma mask[32] and coherent failed\n");
+			goto disable;
+		}
+		dev_info(hdev->dev, "set dma mask[32] success\n");
+	} else {
+		dev_info(hdev->dev, "set dma mask[%llu] success\n", maskbit);
+	}
+
+	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+	if (ret < 0) {
+		dev_err(hdev->dev, "allocate one IRQ for setup admin queue failed\n");
+		goto disable;
+	}
+
+	pci_save_state(pdev);
+
+	return 0;
+
+disable:
+	pci_disable_device(pdev);
+	return ret;
+}
+
+
+/*
+ * The first and the last PRP may not cover a full page, so counting
+ * nprps from size + page_size slightly overestimates the total PRPs
+ * for the io.
+ *
+ * Each PRP entry is 8 bytes, and each PRP list page may spend one
+ * entry chaining to the next list page; that chain entry carries no
+ * io data. Using PAGE_SIZE - 8 as the divisor therefore also errs
+ * slightly on the high side.
+ */
+static int hiraid_prp_pagenum(struct hiraid_dev *hdev)
+{
+	u32 size = 1U << ((hdev->ctrl_info->mdts) * 1U) << 12;
+	u32 nprps = DIV_ROUND_UP(size + hdev->page_size, hdev->page_size);
+
+	return DIV_ROUND_UP(PRP_ENTRY_SIZE * nprps, hdev->page_size - PRP_ENTRY_SIZE);
+}
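+
+/*
+ * Worked example (illustrative): with a 4K page size and mdts = 5 the
+ * max transfer is 1 << 5 << 12 = 128K, so nprps = (128K + 4K) / 4K = 33.
+ * The 33 entries need 33 * 8 = 264 bytes, which fit in
+ * DIV_ROUND_UP(264, 4096 - 8) = 1 PRP list page.
+ */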
+
+/*
+ * Calculates the number of pages needed for the SGL segments. For example a 4k
+ * page can accommodate 256 SGL descriptors.
+ */
+static int hiraid_sgl_pagenum(struct hiraid_dev *hdev)
+{
+	u32 nsge = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+
+	return DIV_ROUND_UP(nsge * sizeof(struct hiraid_sgl_desc), hdev->page_size);
+}
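+
+/*
+ * Worked example (illustrative, assuming the 16-byte descriptor size
+ * implied above): max_num_sge = 1024 needs
+ * DIV_ROUND_UP(1024 * 16, 4096) = 4 descriptor pages.
+ */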
+
+static inline void **hiraid_mapbuf_list(struct hiraid_mapmange *mapbuf)
+{
+	return mapbuf->list;
+}
+
+static u32 hiraid_get_max_cmd_size(struct hiraid_dev *hdev)
+{
+	u32 alloc_size = sizeof(__le64 *) * max(hiraid_prp_pagenum(hdev), hiraid_sgl_pagenum(hdev));
+
+	dev_info(hdev->dev, "mapbuf size[%lu], alloc_size[%u]\n",
+		 sizeof(struct hiraid_mapmange), alloc_size);
+
+	return sizeof(struct hiraid_mapmange) + alloc_size;
+}
+
+static int hiraid_build_passthru_prp(struct hiraid_dev *hdev, struct hiraid_mapmange *mapbuf)
+{
+	struct scatterlist *sg = mapbuf->sgl;
+	__le64 *phy_regpage, *prior_list;
+	u64 buf_addr = sg_dma_address(sg);
+	int buf_length = sg_dma_len(sg);
+	u32 page_size = hdev->page_size;
+	int offset = buf_addr & (page_size - 1);
+	void **list = hiraid_mapbuf_list(mapbuf);
+	int maplen = mapbuf->len;
+	struct dma_pool *pool;
+	dma_addr_t buffer_phy;
+	int i;
+
+	maplen -= (page_size - offset);
+	if (maplen <= 0) {
+		mapbuf->first_dma = 0;
+		return 0;
+	}
+
+	buf_length -= (page_size - offset);
+	if (buf_length) {
+		buf_addr += (page_size - offset);
+	} else {
+		sg = sg_next(sg);
+		buf_addr = sg_dma_address(sg);
+		buf_length = sg_dma_len(sg);
+	}
+
+	if (maplen <= page_size) {
+		mapbuf->first_dma = buf_addr;
+		return 0;
+	}
+
+	pool = hdev->prp_page_pool;
+	mapbuf->page_cnt = 1;
+
+	phy_regpage = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+	if (!phy_regpage) {
+		dev_err_ratelimited(hdev->dev, "allocate first admin prp_list memory failed\n");
+		mapbuf->first_dma = buf_addr;
+		mapbuf->page_cnt = -1;
+		return -ENOMEM;
+	}
+	list[0] = phy_regpage;
+	mapbuf->first_dma = buffer_phy;
+	i = 0;
+	for (;;) {
+		if (i == page_size / PRP_ENTRY_SIZE) {
+			prior_list = phy_regpage;
+
+			phy_regpage = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+			if (!phy_regpage) {
+				dev_err_ratelimited(hdev->dev, "allocate [%d]th admin prp list memory failed\n",
+						mapbuf->page_cnt + 1);
+				return -ENOMEM;
+			}
+			list[mapbuf->page_cnt++] = phy_regpage;
+			phy_regpage[0] = prior_list[i - 1];
+			prior_list[i - 1] = cpu_to_le64(buffer_phy);
+			i = 1;
+		}
+		phy_regpage[i++] = cpu_to_le64(buf_addr);
+		buf_addr += page_size;
+		buf_length -= page_size;
+		maplen -= page_size;
+		if (maplen <= 0)
+			break;
+		if (buf_length > 0)
+			continue;
+		if (unlikely(buf_length < 0))
+			goto bad_admin_sgl;
+		sg = sg_next(sg);
+		buf_addr = sg_dma_address(sg);
+		buf_length = sg_dma_len(sg);
+	}
+
+	return 0;
+
+bad_admin_sgl:
+	dev_err(hdev->dev, "setup prps, invalid admin SGL for payload[%d] nents[%d]\n",
+		mapbuf->len, mapbuf->sge_cnt);
+	return -EIO;
+}
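+
+/*
+ * PRP list chaining note: when a list page fills up in the loop above,
+ * its last slot is repurposed as a pointer to a newly allocated page
+ * and the displaced data entry moves to slot 0 of the new page,
+ * mirroring the NVMe PRP list layout.
+ */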
+
+static int hiraid_build_prp(struct hiraid_dev *hdev, struct hiraid_mapmange *mapbuf)
+{
+	struct scatterlist *sg = mapbuf->sgl;
+	__le64 *phy_regpage, *prior_list;
+	u64 buf_addr = sg_dma_address(sg);
+	int buf_length = sg_dma_len(sg);
+	u32 page_size = hdev->page_size;
+	int offset = buf_addr & (page_size - 1);
+	void **list = hiraid_mapbuf_list(mapbuf);
+	int maplen = mapbuf->len;
+	struct dma_pool *pool;
+	dma_addr_t buffer_phy;
+	int nprps, i;
+
+	maplen -= (page_size - offset);
+	if (maplen <= 0) {
+		mapbuf->first_dma = 0;
+		return 0;
+	}
+
+	buf_length -= (page_size - offset);
+	if (buf_length) {
+		buf_addr += (page_size - offset);
+	} else {
+		sg = sg_next(sg);
+		buf_addr = sg_dma_address(sg);
+		buf_length = sg_dma_len(sg);
+	}
+
+	if (maplen <= page_size) {
+		mapbuf->first_dma = buf_addr;
+		return 0;
+	}
+
+	nprps = DIV_ROUND_UP(maplen, page_size);
+	if (nprps <= (EXTRA_POOL_SIZE / PRP_ENTRY_SIZE)) {
+		pool = mapbuf->hiraidq->prp_small_pool;
+		mapbuf->page_cnt = 0;
+	} else {
+		pool = hdev->prp_page_pool;
+		mapbuf->page_cnt = 1;
+	}
+
+	phy_regpage = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+	if (!phy_regpage) {
+		dev_err_ratelimited(hdev->dev, "allocate first prp_list memory failed\n");
+		mapbuf->first_dma = buf_addr;
+		mapbuf->page_cnt = -1;
+		return -ENOMEM;
+	}
+	list[0] = phy_regpage;
+	mapbuf->first_dma = buffer_phy;
+	i = 0;
+	for (;;) {
+		if (i == page_size / PRP_ENTRY_SIZE) {
+			prior_list = phy_regpage;
+
+			phy_regpage = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+			if (!phy_regpage) {
+				dev_err_ratelimited(hdev->dev, "allocate [%d]th prp list memory failed\n",
+						    mapbuf->page_cnt + 1);
+				return -ENOMEM;
+			}
+			list[mapbuf->page_cnt++] = phy_regpage;
+			phy_regpage[0] = prior_list[i - 1];
+			prior_list[i - 1] = cpu_to_le64(buffer_phy);
+			i = 1;
+		}
+		phy_regpage[i++] = cpu_to_le64(buf_addr);
+		buf_addr += page_size;
+		buf_length -= page_size;
+		maplen -= page_size;
+		if (maplen <= 0)
+			break;
+		if (buf_length > 0)
+			continue;
+		if (unlikely(buf_length < 0))
+			goto bad_sgl;
+		sg = sg_next(sg);
+		buf_addr = sg_dma_address(sg);
+		buf_length = sg_dma_len(sg);
+	}
+
+	return 0;
+
+bad_sgl:
+	dev_err(hdev->dev, "setup prps, invalid SGL for payload[%d] nents[%d]\n",
+		mapbuf->len, mapbuf->sge_cnt);
+	return -EIO;
+}
+
+#define SGES_PER_PAGE    (PAGE_SIZE / sizeof(struct hiraid_sgl_desc))
+
+static void hiraid_submit_cmd(struct hiraid_queue *hiraidq, const void *cmd)
+{
+	u32 sqes = SQE_SIZE(hiraidq->qid);
+	unsigned long flags;
+	struct hiraid_admin_com_cmd *acd = (struct hiraid_admin_com_cmd *)cmd;
+
+	spin_lock_irqsave(&hiraidq->sq_lock, flags);
+	memcpy((hiraidq->sq_cmds + sqes * hiraidq->sq_tail), cmd, sqes);
+	if (++hiraidq->sq_tail == hiraidq->q_depth)
+		hiraidq->sq_tail = 0;
+
+	writel(hiraidq->sq_tail, hiraidq->q_db);
+	spin_unlock_irqrestore(&hiraidq->sq_lock, flags);
+
+	dev_log_dbg(hiraidq->hdev->dev, "cid[%d] qid[%d] opcode[0x%x] flags[0x%x] hdid[%u]\n",
+		    le16_to_cpu(acd->cmd_id), hiraidq->qid, acd->opcode, acd->flags,
+		    le32_to_cpu(acd->hdid));
+}
+
+static inline bool hiraid_is_rw_scmd(struct scsi_cmnd *scmd)
+{
+	switch (scmd->cmnd[0]) {
+	case READ_6:
+	case READ_10:
+	case READ_12:
+	case READ_16:
+	case WRITE_6:
+	case WRITE_10:
+	case WRITE_12:
+	case WRITE_16:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/*
+ * Check whether the scatterlist of the io command can be mapped with
+ * PRPs; otherwise an SGL must be built.
+ */
+static bool hiraid_is_prp(struct hiraid_dev *hdev, struct scatterlist *sgl, u32 nsge)
+{
+	struct scatterlist *sg = sgl;
+	u32 page_mask = hdev->page_size - 1;
+	bool is_prp = true;
+	u32 i = 0;
+
+	for_each_sg(sgl, sg, nsge, i) {
+		/*
+		 * Data length of the middle sge multiple of page_size,
+		 * address page_size aligned.
+		 */
+		if (i != 0 && i != nsge - 1) {
+			if ((sg_dma_len(sg) & page_mask) ||
+			    (sg_dma_address(sg) & page_mask)) {
+				is_prp = false;
+				break;
+			}
+		}
+
+		/*
+		 * The first sge addr plus the data length meets
+		 * the page_size alignment.
+		 */
+		if (nsge > 1 && i == 0) {
+			if ((sg_dma_address(sg) + sg_dma_len(sg)) & page_mask) {
+				is_prp = false;
+				break;
+			}
+		}
+
+		/* The last sge addr meets the page_size alignment. */
+		if (nsge > 1 && i == (nsge - 1)) {
+			if (sg_dma_address(sg) & page_mask) {
+				is_prp = false;
+				break;
+			}
+		}
+	}
+
+	return is_prp;
+}
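+
+/*
+ * Illustrative example with a 4K page: the scatterlist
+ * {0x1000, 12K} + {0x10000, 4K} satisfies the PRP rules (the first
+ * element ends page aligned, the last one starts page aligned), while
+ * {0x1000, 12K} + {0x10200, 4K} does not, so it takes the SGL path.
+ */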
+
+enum {
+	HIRAID_SGL_FMT_DATA_DESC     = 0x00,
+	HIRAID_SGL_FMT_SEG_DESC      = 0x02,
+	HIRAID_SGL_FMT_LAST_SEG_DESC    = 0x03,
+	HIRAID_KEY_SGL_FMT_DATA_DESC    = 0x04,
+	HIRAID_TRANSPORT_SGL_DATA_DESC  = 0x05
+};
+
+static void hiraid_sgl_set_data(struct hiraid_sgl_desc *sge, struct scatterlist *sg)
+{
+	sge->addr = cpu_to_le64(sg_dma_address(sg));
+	sge->length = cpu_to_le32(sg_dma_len(sg));
+	sge->type = HIRAID_SGL_FMT_DATA_DESC << 4;
+}
+
+static void hiraid_sgl_set_seg(struct hiraid_sgl_desc *sge, dma_addr_t buffer_phy, int entries)
+{
+	sge->addr = cpu_to_le64(buffer_phy);
+	if (entries <= SGES_PER_PAGE) {
+		sge->length = cpu_to_le32(entries * sizeof(*sge));
+		sge->type = HIRAID_SGL_FMT_LAST_SEG_DESC << 4;
+	} else {
+		sge->length = cpu_to_le32(PAGE_SIZE);
+		sge->type = HIRAID_SGL_FMT_SEG_DESC << 4;
+	}
+}
+
+static int hiraid_build_passthru_sgl(struct hiraid_dev *hdev,
+					struct hiraid_admin_command *admin_cmd,
+					struct hiraid_mapmange *mapbuf)
+{
+	struct hiraid_sgl_desc *sg_list, *link, *old_sg_list;
+	struct scatterlist *sg = mapbuf->sgl;
+	void **list = hiraid_mapbuf_list(mapbuf);
+	struct dma_pool *pool;
+	int nsge = mapbuf->sge_cnt;
+	dma_addr_t buffer_phy;
+	int i = 0;
+
+	admin_cmd->common.flags |= SQE_FLAG_SGL_METABUF;
+
+	if (nsge == 1) {
+		hiraid_sgl_set_data(&admin_cmd->common.dptr.sgl, sg);
+		return 0;
+	}
+
+	pool = hdev->prp_page_pool;
+	mapbuf->page_cnt = 1;
+
+	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+	if (!sg_list) {
+		dev_err_ratelimited(hdev->dev, "allocate first admin sgl_list failed\n");
+		mapbuf->page_cnt = -1;
+		return -ENOMEM;
+	}
+
+	list[0] = sg_list;
+	mapbuf->first_dma = buffer_phy;
+	hiraid_sgl_set_seg(&admin_cmd->common.dptr.sgl, buffer_phy, nsge);
+	do {
+		if (i == SGES_PER_PAGE) {
+			old_sg_list = sg_list;
+			link = &old_sg_list[SGES_PER_PAGE - 1];
+
+			sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+			if (!sg_list) {
+				dev_err_ratelimited(hdev->dev, "allocate [%d]th admin sgl_list failed\n",
+						    mapbuf->page_cnt + 1);
+				return -ENOMEM;
+			}
+			list[mapbuf->page_cnt++] = sg_list;
+
+			i = 0;
+			memcpy(&sg_list[i++], link, sizeof(*link));
+			hiraid_sgl_set_seg(link, buffer_phy, nsge);
+		}
+
+		hiraid_sgl_set_data(&sg_list[i++], sg);
+		sg = sg_next(sg);
+	} while (--nsge > 0);
+
+	return 0;
+}
+
+
+static int hiraid_build_sgl(struct hiraid_dev *hdev, struct hiraid_scsi_io_cmd *io_cmd,
+				struct hiraid_mapmange *mapbuf)
+{
+	struct hiraid_sgl_desc *sg_list, *link, *old_sg_list;
+	struct scatterlist *sg = mapbuf->sgl;
+	void **list = hiraid_mapbuf_list(mapbuf);
+	struct dma_pool *pool;
+	int nsge = mapbuf->sge_cnt;
+	dma_addr_t buffer_phy;
+	int i = 0;
+
+	io_cmd->common.flags |= SQE_FLAG_SGL_METABUF;
+
+	if (nsge == 1) {
+		hiraid_sgl_set_data(&io_cmd->common.dptr.sgl, sg);
+		return 0;
+	}
+
+	if (nsge <= (EXTRA_POOL_SIZE / sizeof(struct hiraid_sgl_desc))) {
+		pool = mapbuf->hiraidq->prp_small_pool;
+		mapbuf->page_cnt = 0;
+	} else {
+		pool = hdev->prp_page_pool;
+		mapbuf->page_cnt = 1;
+	}
+
+	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+	if (!sg_list) {
+		dev_err_ratelimited(hdev->dev, "allocate first sgl_list failed\n");
+		mapbuf->page_cnt = -1;
+		return -ENOMEM;
+	}
+
+	list[0] = sg_list;
+	mapbuf->first_dma = buffer_phy;
+	hiraid_sgl_set_seg(&io_cmd->common.dptr.sgl, buffer_phy, nsge);
+	do {
+		if (i == SGES_PER_PAGE) {
+			old_sg_list = sg_list;
+			link = &old_sg_list[SGES_PER_PAGE - 1];
+
+			sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+			if (!sg_list) {
+				dev_err_ratelimited(hdev->dev, "allocate [%d]th sgl_list failed\n",
+						mapbuf->page_cnt + 1);
+				return -ENOMEM;
+			}
+			list[mapbuf->page_cnt++] = sg_list;
+
+			i = 0;
+			memcpy(&sg_list[i++], link, sizeof(*link));
+			hiraid_sgl_set_seg(link, buffer_phy, nsge);
+		}
+
+		hiraid_sgl_set_data(&sg_list[i++], sg);
+		sg = sg_next(sg);
+	} while (--nsge > 0);
+
+	return 0;
+}
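+
+/*
+ * Resulting layout (illustrative): for nsge > SGES_PER_PAGE the last
+ * descriptor of each full page is rewritten as a segment descriptor
+ * chaining to the next page, so the controller walks
+ * seg -> data ... data -> seg -> ... -> last seg -> data ... data.
+ */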
+
+#define HIRAID_RW_FUA	BIT(14)
+#define RW_LENGTH_ZERO	(67)
+
+static int hiraid_setup_rw_cmd(struct hiraid_dev *hdev,
+				struct hiraid_scsi_rw_cmd *io_cmd,
+				struct scsi_cmnd *scmd,
+				struct hiraid_mapmange *mapbuf)
+{
+	u32 ret = 0;
+	u32 start_lba_lo, start_lba_hi;
+	u32 datalength = 0;
+	u16 control = 0;
+	struct scsi_device *sdev = scmd->device;
+	u32 buf_len = scsi_bufflen(scmd);
+
+	start_lba_lo = 0;
+	start_lba_hi = 0;
+
+	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+		io_cmd->opcode = HIRAID_CMD_WRITE;
+	} else if (scmd->sc_data_direction == DMA_FROM_DEVICE) {
+		io_cmd->opcode = HIRAID_CMD_READ;
+	} else if (scmd->sc_data_direction == DMA_NONE) {
+		ret = RW_LENGTH_ZERO;
+	} else {
+		dev_err(hdev->dev, "invalid RW_IO for unsupported data direction[%d]\n",
+			scmd->sc_data_direction);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	if (ret == RW_LENGTH_ZERO)
+		return ret;
+
+	/* 6-byte READ(0x08) or WRITE(0x0A) cdb */
+	if (scmd->cmd_len == 6) {
+		datalength = (u32)(scmd->cmnd[4] == 0 ?
+				IO_6_DEFAULT_TX_LEN : scmd->cmnd[4]);
+		start_lba_lo = (u32)get_unaligned_be24(&scmd->cmnd[1]);
+
+		start_lba_lo &= 0x1FFFFF;
+	}
+
+	/* 10-byte READ(0x28) or WRITE(0x2A) cdb */
+	else if (scmd->cmd_len == 10) {
+		datalength = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+		start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+		if (scmd->cmnd[1] & FUA_MASK)
+			control |= HIRAID_RW_FUA;
+	}
+
+	/* 12-byte READ(0xA8) or WRITE(0xAA) cdb */
+	else if (scmd->cmd_len == 12) {
+		datalength = get_unaligned_be32(&scmd->cmnd[6]);
+		start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+		if (scmd->cmnd[1] & FUA_MASK)
+			control |= HIRAID_RW_FUA;
+	}
+	/* 16-byte READ(0x88) or WRITE(0x8A) cdb */
+	else if (scmd->cmd_len == 16) {
+		datalength = get_unaligned_be32(&scmd->cmnd[10]);
+		start_lba_lo = get_unaligned_be32(&scmd->cmnd[6]);
+		start_lba_hi = get_unaligned_be32(&scmd->cmnd[2]);
+
+		if (scmd->cmnd[1] & FUA_MASK)
+			control |= HIRAID_RW_FUA;
+	}
+
+	if (unlikely(datalength > U16_MAX)) {
+		dev_err(hdev->dev, "invalid IO for illegal transfer data length[%u]\n", datalength);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	if (unlikely(datalength == 0))
+		return RW_LENGTH_ZERO;
+
+	io_cmd->slba = cpu_to_le64(((u64)start_lba_hi << 32) | start_lba_lo);
+	/* 0base for nlb */
+	io_cmd->nlb = cpu_to_le16((u16)(datalength - 1));
+	io_cmd->control = cpu_to_le16(control);
+
+	mapbuf->cdb_data_len = datalength * sdev->sector_size;
+	if (mapbuf->cdb_data_len > buf_len) {
+		/* return DID_ERROR */
+		dev_err(hdev->dev, "error: buf len[0x%x] is smaller than actual length[0x%x] sectorsize[0x%x]\n",
+			buf_len, mapbuf->cdb_data_len, sdev->sector_size);
+		return -EINVAL;
+	} else if (mapbuf->cdb_data_len < buf_len) {
+		dev_warn(hdev->dev, "warn: buf_len[0x%x] cdb_data_len[0x%x] nlb[0x%x] sectorsize[0x%x]\n",
+			buf_len, mapbuf->cdb_data_len, le16_to_cpu(io_cmd->nlb), sdev->sector_size);
+	}
+	return 0;
+}
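+
+/*
+ * Worked example (illustrative): the READ(10) cdb
+ * 28 00 00 01 00 00 00 00 08 00 decodes to start_lba_lo = 0x10000 and
+ * datalength = 8, giving slba = 0x10000 and nlb = 7 (0-based).
+ */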
+
+static int hiraid_setup_nonrw_cmd(struct hiraid_dev *hdev,
+				struct hiraid_scsi_nonrw_cmd *io_cmd, struct scsi_cmnd *scmd)
+{
+	io_cmd->buf_len = cpu_to_le32(scsi_bufflen(scmd));
+
+	switch (scmd->sc_data_direction) {
+	case DMA_NONE:
+		io_cmd->opcode = HIRAID_CMD_NONRW_NONE;
+		break;
+	case DMA_TO_DEVICE:
+		io_cmd->opcode = HIRAID_CMD_NONRW_TODEV;
+		break;
+	case DMA_FROM_DEVICE:
+		io_cmd->opcode = HIRAID_CMD_NONRW_FROMDEV;
+		break;
+	default:
+		dev_err(hdev->dev, "invalid NON_IO for unsupported data direction[%d]\n",
+			scmd->sc_data_direction);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int hiraid_setup_io_cmd(struct hiraid_dev *hdev,
+				struct hiraid_scsi_io_cmd *io_cmd, struct scsi_cmnd *scmd,
+				struct hiraid_mapmange *mapbuf)
+{
+	memcpy(io_cmd->common.cdb, scmd->cmnd, scmd->cmd_len);
+	io_cmd->common.cdb_len = scmd->cmd_len;
+
+	/* init cdb_data_len */
+	mapbuf->cdb_data_len = scsi_bufflen(scmd);
+
+	if (hiraid_is_rw_scmd(scmd))
+		return hiraid_setup_rw_cmd(hdev, &io_cmd->rw, scmd, mapbuf);
+	else
+		return hiraid_setup_nonrw_cmd(hdev, &io_cmd->nonrw, scmd);
+}
+
+static inline void hiraid_init_mapbuff(struct hiraid_mapmange *mapbuf)
+{
+	mapbuf->sge_cnt = 0;
+	mapbuf->page_cnt = -1;
+	mapbuf->use_sgl = false;
+	WRITE_ONCE(mapbuf->state, CMD_IDLE);
+}
+
+static void hiraid_free_mapbuf(struct hiraid_dev *hdev, struct hiraid_mapmange *mapbuf)
+{
+	const int last_prp = hdev->page_size / sizeof(__le64) - 1;
+	dma_addr_t buffer_phy, next_buffer_phy;
+	struct hiraid_sgl_desc *sg_list;
+	__le64 *prp_list;
+	void *addr;
+	int i;
+
+	buffer_phy = mapbuf->first_dma;
+	if (mapbuf->page_cnt == 0)
+		dma_pool_free(mapbuf->hiraidq->prp_small_pool,
+			hiraid_mapbuf_list(mapbuf)[0], buffer_phy);
+
+	for (i = 0; i < mapbuf->page_cnt; i++) {
+		addr = hiraid_mapbuf_list(mapbuf)[i];
+
+		if (mapbuf->use_sgl) {
+			sg_list = addr;
+			next_buffer_phy =
+				le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
+		} else {
+			prp_list = addr;
+			next_buffer_phy = le64_to_cpu(prp_list[last_prp]);
+		}
+
+		dma_pool_free(hdev->prp_page_pool, addr, buffer_phy);
+		buffer_phy = next_buffer_phy;
+	}
+
+	mapbuf->sense_buffer_virt = NULL;
+	mapbuf->page_cnt = -1;
+}
+
+static int hiraid_io_map_data(struct hiraid_dev *hdev, struct hiraid_mapmange *mapbuf,
+				struct scsi_cmnd *scmd, struct hiraid_scsi_io_cmd *io_cmd)
+{
+	int ret;
+
+	ret = scsi_dma_map(scmd);
+	if (unlikely(ret < 0))
+		return ret;
+	mapbuf->sge_cnt = ret;
+
+	/* No data to DMA, it may be scsi no-rw command */
+	if (unlikely(mapbuf->sge_cnt == 0))
+		return 0;
+
+	mapbuf->len = scsi_bufflen(scmd);
+	mapbuf->sgl = scsi_sglist(scmd);
+	mapbuf->use_sgl = !hiraid_is_prp(hdev, mapbuf->sgl, mapbuf->sge_cnt);
+
+	if (mapbuf->use_sgl) {
+		ret = hiraid_build_sgl(hdev, io_cmd, mapbuf);
+	} else {
+		ret = hiraid_build_prp(hdev, mapbuf);
+		io_cmd->common.dptr.prp1 =
+				cpu_to_le64(sg_dma_address(mapbuf->sgl));
+		io_cmd->common.dptr.prp2 = cpu_to_le64(mapbuf->first_dma);
+	}
+
+	if (ret)
+		scsi_dma_unmap(scmd);
+
+	return ret;
+}
+
+static void hiraid_check_status(struct hiraid_mapmange *mapbuf, struct scsi_cmnd *scmd,
+				struct hiraid_completion *cqe)
+{
+	u32 datalength = scsi_bufflen(scmd);
+
+	if (datalength > mapbuf->cdb_data_len)
+		scsi_set_resid(scmd, datalength - mapbuf->cdb_data_len);
+	else
+		scsi_set_resid(scmd, 0);
+
+	switch ((le16_to_cpu(cqe->status) >> 1) & 0x7f) {
+	case SENSE_STATE_OK:
+		set_host_byte(scmd, DID_OK);
+		break;
+	case SENSE_STATE_NEED_CHECK:
+		set_host_byte(scmd, DID_OK);
+		scmd->result |= le16_to_cpu(cqe->status) >> 8;
+		if (scmd->result & SAM_STAT_CHECK_CONDITION) {
+			memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+			memcpy(scmd->sense_buffer,
+				mapbuf->sense_buffer_virt, SCSI_SENSE_BUFFERSIZE);
+			scmd->result = (scmd->result & 0x00ffffff) | (DRIVER_SENSE << 24);
+		}
+		break;
+	case SENSE_STATE_ABORTED:
+		set_host_byte(scmd, DID_ABORT);
+		break;
+	case SENSE_STATE_NEED_RETRY:
+		set_host_byte(scmd, DID_REQUEUE);
+		break;
+	default:
+		set_host_byte(scmd, DID_BAD_TARGET);
+		dev_warn_ratelimited(mapbuf->hiraidq->hdev->dev, "cid[%d] qid[%d] sdev[%d:%d] opcode[%.2x] bad status[0x%x]\n",
+			le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->sq_id), scmd->device->channel,
+			scmd->device->id, scmd->cmnd[0], le16_to_cpu(cqe->status));
+		break;
+	}
+}
+
+static inline void hiraid_query_scmd_tag(struct scsi_cmnd *scmd, u16 *qid, u16 *cid,
+				struct hiraid_dev *hdev, struct hiraid_sdev_hostdata *hostdata)
+{
+	u32 tag = blk_mq_unique_tag(blk_mq_rq_from_pdu((void *)scmd));
+
+	if (work_mode) {
+		if ((hdev->hdd_dispatch == DISPATCH_BY_DISK) && (hostdata->hwq != 0))
+			*qid = hostdata->hwq;
+		else
+			*qid = raw_smp_processor_id() % (hdev->online_queues - 1) + 1;
+	} else {
+		*qid = blk_mq_unique_tag_to_hwq(tag) + 1;
+	}
+	*cid = blk_mq_unique_tag_to_tag(tag);
+}
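+
+/*
+ * Note: blk_mq_unique_tag() packs the hw queue index into the upper
+ * 16 bits and the per-queue tag into the lower 16 bits, which is what
+ * allows the split into qid and cid above.
+ */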
+
+static int hiraid_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
+	struct hiraid_mapmange *mapbuf = scsi_cmd_priv(scmd);
+	struct hiraid_dev *hdev = shost_priv(shost);
+	struct scsi_device *sdev = scmd->device;
+	struct hiraid_sdev_hostdata *hostdata;
+	struct hiraid_scsi_io_cmd io_cmd;
+	struct hiraid_queue *ioq;
+	u16 hwq, cid;
+	int ret;
+
+	if (unlikely(hdev->state == DEV_RESETTING))
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	if (unlikely(hdev->state != DEV_LIVE)) {
+		set_host_byte(scmd, DID_NO_CONNECT);
+		scsi_done(scmd);
+		return 0;
+	}
+
+	if (log_debug_switch)
+		scsi_print_command(scmd);
+
+	hostdata = sdev->hostdata;
+	hiraid_query_scmd_tag(scmd, &hwq, &cid, hdev, hostdata);
+	ioq = &hdev->queues[hwq];
+
+	if (unlikely(atomic_inc_return(&ioq->inflight) >
+		(hdev->ioq_depth - HIRAID_PTHRU_CMDS_PERQ))) {
+		atomic_dec(&ioq->inflight);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	memset(&io_cmd, 0, sizeof(io_cmd));
+	io_cmd.rw.hdid = cpu_to_le32(hostdata->hdid);
+	io_cmd.rw.cmd_id = cpu_to_le16(cid);
+
+	ret = hiraid_setup_io_cmd(hdev, &io_cmd, scmd, mapbuf);
+	if (unlikely(ret)) {
+		if (ret == RW_LENGTH_ZERO) {
+			scsi_set_resid(scmd, scsi_bufflen(scmd));
+			set_host_byte(scmd, DID_OK);
+		} else {
+			set_host_byte(scmd, DID_ERROR);
+		}
+		scsi_done(scmd);
+		atomic_dec(&ioq->inflight);
+		return 0;
+	}
+
+	ret = cid * SCSI_SENSE_BUFFERSIZE;
+	if (work_mode) {
+		mapbuf->sense_buffer_virt = hdev->sense_buffer_virt + ret;
+		mapbuf->sense_buffer_phy = hdev->sense_buffer_phy + ret;
+	} else {
+		mapbuf->sense_buffer_virt = ioq->sense_buffer_virt + ret;
+		mapbuf->sense_buffer_phy = ioq->sense_buffer_phy + ret;
+	}
+	io_cmd.common.sense_addr = cpu_to_le64(mapbuf->sense_buffer_phy);
+	io_cmd.common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+
+	hiraid_init_mapbuff(mapbuf);
+
+	mapbuf->hiraidq = ioq;
+	mapbuf->cid = cid;
+	ret = hiraid_io_map_data(hdev, mapbuf, scmd, &io_cmd);
+	if (unlikely(ret)) {
+		dev_err(hdev->dev, "io map data err\n");
+		set_host_byte(scmd, DID_ERROR);
+		scsi_done(scmd);
+		ret = 0;
+		goto deinit_iobuf;
+	}
+
+	WRITE_ONCE(mapbuf->state, CMD_FLIGHT);
+	hiraid_submit_cmd(ioq, &io_cmd);
+
+	return 0;
+
+deinit_iobuf:
+	atomic_dec(&ioq->inflight);
+	hiraid_free_mapbuf(hdev, mapbuf);
+	return ret;
+}
+
+static int hiraid_match_dev(struct hiraid_dev *hdev, u16 idx, struct scsi_device *sdev)
+{
+	if (HIRAID_DEV_INFO_FLAG_VALID(hdev->dev_info[idx].flag)) {
+		if (sdev->channel == hdev->dev_info[idx].channel &&
+		sdev->id == le16_to_cpu(hdev->dev_info[idx].target) &&
+		sdev->lun < hdev->dev_info[idx].lun) {
+			dev_info(hdev->dev, "match device success, channel:target:lun[%d:%d:%d]\n",
+				 hdev->dev_info[idx].channel,
+				 hdev->dev_info[idx].target,
+				 hdev->dev_info[idx].lun);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+static int hiraid_disk_qd(u8 attr)
+{
+	switch (HIRAID_DEV_DISK_TYPE(attr)) {
+	case HIRAID_SAS_HDD_VD:
+	case HIRAID_SATA_HDD_VD:
+		return HIRAID_HDD_VD_QD;
+	case HIRAID_SAS_SSD_VD:
+	case HIRAID_SATA_SSD_VD:
+	case HIRAID_NVME_SSD_VD:
+		return HIRAID_SSD_VD_QD;
+	case HIRAID_SAS_HDD_PD:
+	case HIRAID_SATA_HDD_PD:
+		return HIRAID_HDD_PD_QD;
+	case HIRAID_SAS_SSD_PD:
+	case HIRAID_SATA_SSD_PD:
+	case HIRAID_NVME_SSD_PD:
+		return HIRAID_SSD_PD_QD;
+	default:
+		return MAX_CMD_PER_DEV;
+	}
+}
+
+static bool hiraid_disk_is_hdd(u8 attr)
+{
+	switch (HIRAID_DEV_DISK_TYPE(attr)) {
+	case HIRAID_SAS_HDD_VD:
+	case HIRAID_SATA_HDD_VD:
+	case HIRAID_SAS_HDD_PD:
+	case HIRAID_SATA_HDD_PD:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static int hiraid_slave_alloc(struct scsi_device *sdev)
+{
+	struct hiraid_sdev_hostdata *hostdata;
+	struct hiraid_dev *hdev;
+	u16 idx;
+
+	hdev = shost_priv(sdev->host);
+	hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
+	if (!hostdata) {
+		dev_err(hdev->dev, "alloc scsi host data memory failed\n");
+		return -ENOMEM;
+	}
+
+	down_read(&hdev->dev_rwsem);
+	for (idx = 0; idx < le32_to_cpu(hdev->ctrl_info->nd); idx++) {
+		if (hiraid_match_dev(hdev, idx, sdev))
+			goto scan_host;
+	}
+	up_read(&hdev->dev_rwsem);
+
+	kfree(hostdata);
+	return -ENXIO;
+
+scan_host:
+	hostdata->hdid = le32_to_cpu(hdev->dev_info[idx].hdid);
+	hostdata->max_io_kb = le16_to_cpu(hdev->dev_info[idx].max_io_kb);
+	hostdata->attr = hdev->dev_info[idx].attr;
+	hostdata->flag = hdev->dev_info[idx].flag;
+	hostdata->rg_id = 0xff;
+	sdev->hostdata = hostdata;
+	up_read(&hdev->dev_rwsem);
+	return 0;
+}
+
+static void hiraid_slave_destroy(struct scsi_device *sdev)
+{
+	kfree(sdev->hostdata);
+	sdev->hostdata = NULL;
+}
+
+static int hiraid_slave_configure(struct scsi_device *sdev)
+{
+	unsigned int timeout = scmd_tmout_rawdisk * HZ;
+	struct hiraid_dev *hdev = shost_priv(sdev->host);
+	struct hiraid_sdev_hostdata *hostdata = sdev->hostdata;
+	u32 max_sec = sdev->host->max_sectors;
+	int qd = MAX_CMD_PER_DEV;
+
+	if (hostdata) {
+		if (HIRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+			timeout = scmd_tmout_vd * HZ;
+		else if (HIRAID_DEV_INFO_ATTR_RAWDISK(hostdata->attr))
+			timeout = scmd_tmout_rawdisk * HZ;
+		max_sec = hostdata->max_io_kb << 1;
+		qd = hiraid_disk_qd(hostdata->attr);
+
+		if (hiraid_disk_is_hdd(hostdata->attr))
+			hostdata->hwq = hostdata->hdid % (hdev->online_queues - 1) + 1;
+		else
+			hostdata->hwq = 0;
+	} else {
+		dev_err(hdev->dev, "err, sdev->hostdata is null\n");
+	}
+
+	blk_queue_rq_timeout(sdev->request_queue, timeout);
+	sdev->eh_timeout = timeout;
+	scsi_change_queue_depth(sdev, qd);
+
+	if ((max_sec == 0) || (max_sec > sdev->host->max_sectors))
+		max_sec = sdev->host->max_sectors;
+
+	if (!max_io_force)
+		blk_queue_max_hw_sectors(sdev->request_queue, max_sec);
+
+	dev_info(hdev->dev, "sdev->channel:id:lun[%d:%d:%lld] scmd_timeout[%d]s maxsec[%d]\n",
+		 sdev->channel, sdev->id, sdev->lun, timeout / HZ, max_sec);
+
+	return 0;
+}
+
+static void hiraid_shost_init(struct hiraid_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+	u8 domain, bus;
+	u32 dev_func;
+
+	domain = pci_domain_nr(pdev->bus);
+	bus = pdev->bus->number;
+	dev_func = pdev->devfn;
+
+	hdev->shost->nr_hw_queues = work_mode ? 1 : hdev->online_queues - 1;
+	hdev->shost->can_queue = hdev->scsi_qd;
+
+	hdev->shost->sg_tablesize = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+	/* 512B per sector */
+	hdev->shost->max_sectors = (1U << ((hdev->ctrl_info->mdts) * 1U) << 12) / 512;
+	hdev->shost->cmd_per_lun = MAX_CMD_PER_DEV;
+	hdev->shost->max_channel = le16_to_cpu(hdev->ctrl_info->max_channel) - 1;
+	hdev->shost->max_id = le32_to_cpu(hdev->ctrl_info->max_tgt_id);
+	hdev->shost->max_lun = le16_to_cpu(hdev->ctrl_info->max_lun);
+
+	hdev->shost->this_id = -1;
+	hdev->shost->unique_id = (domain << 16) | (bus << 8) | dev_func;
+	hdev->shost->max_cmd_len = MAX_CDB_LEN;
+}
+
+static int hiraid_alloc_queue(struct hiraid_dev *hdev, u16 qid, u16 depth)
+{
+	struct hiraid_queue *hiraidq = &hdev->queues[qid];
+	int ret = 0;
+
+	if (hdev->queue_count > qid) {
+		dev_info(hdev->dev, "warn: queue[%d] is exist\n", qid);
+		return 0;
+	}
+
+	hiraidq->cqes = dma_alloc_coherent(hdev->dev, CQ_SIZE(depth),
+					&hiraidq->cq_buffer_phy, GFP_KERNEL | __GFP_ZERO);
+	if (!hiraidq->cqes)
+		return -ENOMEM;
+
+	hiraidq->sq_cmds = dma_alloc_coherent(hdev->dev, SQ_SIZE(qid, depth),
+					&hiraidq->sq_buffer_phy, GFP_KERNEL);
+	if (!hiraidq->sq_cmds) {
+		ret = -ENOMEM;
+		goto free_cqes;
+	}
+
+	/*
+	 * In single hw queue mode there is no need to allocate a sense
+	 * buffer per queue; one buffer covering all queues was already
+	 * allocated in hiraid_alloc_resources.
+	 */
+	if (work_mode)
+		goto initq;
+
+	/* alloc sense buffer */
+	hiraidq->sense_buffer_virt = dma_alloc_coherent(hdev->dev, SENSE_SIZE(depth),
+					&hiraidq->sense_buffer_phy, GFP_KERNEL | __GFP_ZERO);
+	if (!hiraidq->sense_buffer_virt) {
+		ret = -ENOMEM;
+		goto free_sq_cmds;
+	}
+
+initq:
+	spin_lock_init(&hiraidq->sq_lock);
+	spin_lock_init(&hiraidq->cq_lock);
+	hiraidq->hdev = hdev;
+	hiraidq->q_depth = depth;
+	hiraidq->qid = qid;
+	hiraidq->cq_vector = -1;
+	hdev->queue_count++;
+
+	return 0;
+
+free_sq_cmds:
+	dma_free_coherent(hdev->dev, SQ_SIZE(qid, depth), (void *)hiraidq->sq_cmds,
+			  hiraidq->sq_buffer_phy);
+free_cqes:
+	dma_free_coherent(hdev->dev, CQ_SIZE(depth), (void *)hiraidq->cqes,
+			  hiraidq->cq_buffer_phy);
+	return ret;
+}
+
+static int hiraid_wait_control_ready(struct hiraid_dev *hdev, u64 cap, bool enabled)
+{
+	unsigned long timeout =
+		((HIRAID_CAP_TIMEOUT(cap) + 1) * HIRAID_CAP_TIMEOUT_UNIT_MS) + jiffies;
+	u32 bit = enabled ? HIRAID_CSTS_RDY : 0;
+
+	while ((readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_RDY) != bit) {
+		usleep_range(1000, 2000);
+		if (fatal_signal_pending(current))
+			return -EINTR;
+
+		if (time_after(jiffies, timeout)) {
+			dev_err(hdev->dev, "device not ready; aborting %s\n",
+				enabled ? "initialisation" : "reset");
+			return -ENODEV;
+		}
+	}
+	return 0;
+}
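+
+/*
+ * Timeout math (illustrative): the CAP timeout field counts 500 ms
+ * units (HIRAID_CAP_TIMEOUT_UNIT_MS is HZ / 2 jiffies), so a field
+ * value of 30 waits up to (30 + 1) * 0.5 = 15.5 seconds for RDY.
+ */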
+
+static int hiraid_shutdown_control(struct hiraid_dev *hdev)
+{
+	unsigned long timeout = le32_to_cpu(hdev->ctrl_info->rtd3e) / 1000000 * HZ + jiffies;
+
+	hdev->ctrl_config &= ~HIRAID_CC_SHN_MASK;
+	hdev->ctrl_config |= HIRAID_CC_SHN_NORMAL;
+	writel(hdev->ctrl_config, hdev->bar + HIRAID_REG_CC);
+
+	while ((readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_SHST_MASK) !=
+		HIRAID_CSTS_SHST_CMPLT) {
+		msleep(100);
+		if (fatal_signal_pending(current))
+			return -EINTR;
+		if (time_after(jiffies, timeout)) {
+			dev_err(hdev->dev, "device shutdown incomplete, abort shutdown\n");
+			return -ENODEV;
+		}
+	}
+	return 0;
+}
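+
+/*
+ * rtd3e appears to follow the NVMe RTD3E convention of microseconds,
+ * so e.g. rtd3e = 8000000 yields an 8 second shutdown deadline above.
+ */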
+
+static int hiraid_disable_control(struct hiraid_dev *hdev)
+{
+	hdev->ctrl_config &= ~HIRAID_CC_SHN_MASK;
+	hdev->ctrl_config &= ~HIRAID_CC_ENABLE;
+	writel(hdev->ctrl_config, hdev->bar + HIRAID_REG_CC);
+
+	return hiraid_wait_control_ready(hdev, hdev->cap, false);
+}
+
+static int hiraid_enable_control(struct hiraid_dev *hdev)
+{
+	u64 cap = hdev->cap;
+	u32 dev_page_min = HIRAID_CAP_MPSMIN(cap) + 12;
+	u32 page_shift = PAGE_SHIFT;
+
+	if (page_shift < dev_page_min) {
+		dev_err(hdev->dev, "minimum device page size[%u], too large for host[%u]\n",
+			1U << dev_page_min, 1U << page_shift);
+		return -ENODEV;
+	}
+
+	page_shift = min_t(unsigned int, HIRAID_CAP_MPSMAX(cap) + 12, PAGE_SHIFT);
+	hdev->page_size = 1U << page_shift;
+
+	hdev->ctrl_config = HIRAID_CC_CSS_NVM;
+	hdev->ctrl_config |= (page_shift - 12) << HIRAID_CC_MPS_SHIFT;
+	hdev->ctrl_config |= HIRAID_CC_AMS_RR | HIRAID_CC_SHN_NONE;
+	hdev->ctrl_config |= HIRAID_CC_IOSQES | HIRAID_CC_IOCQES;
+	hdev->ctrl_config |= HIRAID_CC_ENABLE;
+	writel(hdev->ctrl_config, hdev->bar + HIRAID_REG_CC);
+
+	return hiraid_wait_control_ready(hdev, cap, true);
+}
+
+static void hiraid_init_queue(struct hiraid_queue *hiraidq, u16 qid)
+{
+	struct hiraid_dev *hdev = hiraidq->hdev;
+
+	memset((void *)hiraidq->cqes, 0, CQ_SIZE(hiraidq->q_depth));
+
+	hiraidq->sq_tail = 0;
+	hiraidq->cq_head = 0;
+	hiraidq->cq_phase = 1;
+	hiraidq->q_db = &hdev->dbs[qid * 2 * hdev->db_stride];
+	hiraidq->prp_small_pool = hdev->prp_extra_pool[qid % extra_pool_num];
+	hdev->online_queues++;
+	atomic_set(&hiraidq->inflight, 0);
+}
+
+static inline bool hiraid_cqe_pending(struct hiraid_queue *hiraidq)
+{
+	return (le16_to_cpu(hiraidq->cqes[hiraidq->cq_head].status) & 1) ==
+		hiraidq->cq_phase;
+}
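+
+/*
+ * Phase-bit protocol: the CQ is zeroed and cq_phase starts at 1, so a
+ * completion is new when its status phase bit equals cq_phase; each
+ * wrap of cq_head flips the expected phase (see
+ * hiraid_update_cq_head()), so no separate valid flag is needed.
+ */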
+
+static void hiraid_complete_io_cmnd(struct hiraid_queue *ioq, struct hiraid_completion *cqe)
+{
+	struct hiraid_dev *hdev = ioq->hdev;
+	struct blk_mq_tags *tags;
+	struct scsi_cmnd *scmd;
+	struct hiraid_mapmange *mapbuf;
+	struct request *req;
+	unsigned long elapsed;
+
+	atomic_dec(&ioq->inflight);
+
+	if (work_mode)
+		tags = hdev->shost->tag_set.tags[0];
+	else
+		tags = hdev->shost->tag_set.tags[ioq->qid - 1];
+	req = blk_mq_tag_to_rq(tags, le16_to_cpu(cqe->cmd_id));
+	if (unlikely(!req || !blk_mq_request_started(req))) {
+		dev_warn(hdev->dev, "invalid id[%d] completed on queue[%d]\n",
+			 le16_to_cpu(cqe->cmd_id), ioq->qid);
+		return;
+	}
+
+	scmd = blk_mq_rq_to_pdu(req);
+	mapbuf = scsi_cmd_priv(scmd);
+
+	elapsed = jiffies - scmd->jiffies_at_alloc;
+	dev_log_dbg(hdev->dev, "cid[%d] qid[%d] finish IO cost %3ld.%3ld seconds\n",
+		    le16_to_cpu(cqe->cmd_id), ioq->qid, elapsed / HZ, elapsed % HZ);
+
+	if (cmpxchg(&mapbuf->state, CMD_FLIGHT, CMD_COMPLETE) != CMD_FLIGHT) {
+		dev_warn(hdev->dev, "cid[%d] qid[%d] enters abnormal handler, cost %3ld.%3ld seconds\n",
+			 le16_to_cpu(cqe->cmd_id), ioq->qid, elapsed / HZ, elapsed % HZ);
+		WRITE_ONCE(mapbuf->state, CMD_TMO_COMPLETE);
+
+		if (mapbuf->sge_cnt) {
+			mapbuf->sge_cnt = 0;
+			scsi_dma_unmap(scmd);
+		}
+		hiraid_free_mapbuf(hdev, mapbuf);
+
+		return;
+	}
+
+	hiraid_check_status(mapbuf, scmd, cqe);
+	if (mapbuf->sge_cnt) {
+		mapbuf->sge_cnt = 0;
+		scsi_dma_unmap(scmd);
+	}
+	hiraid_free_mapbuf(hdev, mapbuf);
+	scsi_done(scmd);
+}
+
+static void hiraid_complete_admin_cmnd(struct hiraid_queue *adminq, struct hiraid_completion *cqe)
+{
+	struct hiraid_dev *hdev = adminq->hdev;
+	struct hiraid_cmd *adm_cmd;
+
+	adm_cmd = hdev->adm_cmds + le16_to_cpu(cqe->cmd_id);
+	if (unlikely(adm_cmd->state == CMD_IDLE)) {
+		dev_warn(adminq->hdev->dev, "invalid id[%d] completed on queue[%d]\n",
+			 le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->sq_id));
+		return;
+	}
+
+	adm_cmd->status = le16_to_cpu(cqe->status) >> 1;
+	adm_cmd->result0 = le32_to_cpu(cqe->result);
+	adm_cmd->result1 = le32_to_cpu(cqe->result1);
+
+	complete(&adm_cmd->cmd_done);
+}
+
+static void hiraid_send_async_event(struct hiraid_dev *hdev, u16 cid);
+
+static void hiraid_complete_async_event(struct hiraid_queue *hiraidq, struct hiraid_completion *cqe)
+{
+	struct hiraid_dev *hdev = hiraidq->hdev;
+	u32 result = le32_to_cpu(cqe->result);
+
+	dev_info(hdev->dev, "recv async event, cid[%d] status[0x%x] result[0x%x]\n",
+		 le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->status) >> 1, result);
+
+	hiraid_send_async_event(hdev, le16_to_cpu(cqe->cmd_id));
+
+	if ((le16_to_cpu(cqe->status) >> 1) != HIRAID_SC_SUCCESS)
+		return;
+	switch (result & 0x7) {
+	case HIRAID_ASYN_EVENT_NOTICE:
+		hiraid_handle_async_notice(hdev, result);
+		break;
+	case HIRAID_ASYN_EVENT_VS:
+		hiraid_handle_async_vs(hdev, result, le32_to_cpu(cqe->result1));
+		break;
+	default:
+		dev_warn(hdev->dev, "unsupported async event type[%u]\n", result & 0x7);
+		break;
+	}
+}
+
+static void hiraid_complete_pthru_cmnd(struct hiraid_queue *ioq, struct hiraid_completion *cqe)
+{
+	struct hiraid_dev *hdev = ioq->hdev;
+	struct hiraid_cmd *ptcmd;
+
+	ptcmd = hdev->io_ptcmds + (ioq->qid - 1) * HIRAID_PTHRU_CMDS_PERQ +
+		le16_to_cpu(cqe->cmd_id) - hdev->scsi_qd;
+
+	ptcmd->status = le16_to_cpu(cqe->status) >> 1;
+	ptcmd->result0 = le32_to_cpu(cqe->result);
+	ptcmd->result1 = le32_to_cpu(cqe->result1);
+
+	complete(&ptcmd->cmd_done);
+}
+
+static inline void hiraid_handle_cqe(struct hiraid_queue *hiraidq, u16 idx)
+{
+	struct hiraid_completion *cqe = &hiraidq->cqes[idx];
+	struct hiraid_dev *hdev = hiraidq->hdev;
+	u16 cid = le16_to_cpu(cqe->cmd_id);
+
+	if (unlikely(!work_mode && (cid >= hiraidq->q_depth))) {
+		dev_err(hdev->dev, "invalid command id[%d] completed on queue[%d]\n",
+			cid, cqe->sq_id);
+		return;
+	}
+
+	dev_log_dbg(hdev->dev, "cid[%d] qid[%d] result[0x%x] sqid[%d] status[0x%x]\n",
+		    cid, hiraidq->qid, le32_to_cpu(cqe->result),
+		    le16_to_cpu(cqe->sq_id), le16_to_cpu(cqe->status));
+
+	if (unlikely(hiraidq->qid == 0 && cid >= HIRAID_AQ_BLK_MQ_DEPTH)) {
+		hiraid_complete_async_event(hiraidq, cqe);
+		return;
+	}
+
+	if (unlikely(hiraidq->qid && cid >= hdev->scsi_qd)) {
+		hiraid_complete_pthru_cmnd(hiraidq, cqe);
+		return;
+	}
+
+	if (hiraidq->qid)
+		hiraid_complete_io_cmnd(hiraidq, cqe);
+	else
+		hiraid_complete_admin_cmnd(hiraidq, cqe);
+}
+
+static void hiraid_complete_cqes(struct hiraid_queue *hiraidq, u16 start, u16 end)
+{
+	while (start != end) {
+		hiraid_handle_cqe(hiraidq, start);
+		if (++start == hiraidq->q_depth)
+			start = 0;
+	}
+}
+
+static inline void hiraid_update_cq_head(struct hiraid_queue *hiraidq)
+{
+	if (++hiraidq->cq_head == hiraidq->q_depth) {
+		hiraidq->cq_head = 0;
+		hiraidq->cq_phase = !hiraidq->cq_phase;
+	}
+}
+
+static inline bool hiraid_process_cq(struct hiraid_queue *hiraidq, u16 *start, u16 *end, int tag)
+{
+	bool found = false;
+
+	*start = hiraidq->cq_head;
+	while (!found && hiraid_cqe_pending(hiraidq)) {
+		if (le16_to_cpu(hiraidq->cqes[hiraidq->cq_head].cmd_id) == tag)
+			found = true;
+		hiraid_update_cq_head(hiraidq);
+	}
+	*end = hiraidq->cq_head;
+
+	if (*start != *end)
+		writel(hiraidq->cq_head, hiraidq->q_db + hiraidq->hdev->db_stride);
+
+	return found;
+}
+
+static bool hiraid_poll_cq(struct hiraid_queue *hiraidq, int cid)
+{
+	u16 start, end;
+	bool found;
+
+	if (!hiraid_cqe_pending(hiraidq))
+		return false;
+
+	spin_lock_irq(&hiraidq->cq_lock);
+	found = hiraid_process_cq(hiraidq, &start, &end, cid);
+	spin_unlock_irq(&hiraidq->cq_lock);
+
+	hiraid_complete_cqes(hiraidq, start, end);
+	return found;
+}
+
+static irqreturn_t hiraid_handle_irq(int irq, void *data)
+{
+	struct hiraid_queue *hiraidq = data;
+	irqreturn_t ret = IRQ_NONE;
+	u16 start, end;
+
+	spin_lock(&hiraidq->cq_lock);
+	if (hiraidq->cq_head != hiraidq->last_cq_head)
+		ret = IRQ_HANDLED;
+
+	hiraid_process_cq(hiraidq, &start, &end, -1);
+	hiraidq->last_cq_head = hiraidq->cq_head;
+	spin_unlock(&hiraidq->cq_lock);
+
+	if (start != end) {
+		hiraid_complete_cqes(hiraidq, start, end);
+		ret = IRQ_HANDLED;
+	}
+	return ret;
+}
+
+static int hiraid_setup_admin_queue(struct hiraid_dev *hdev)
+{
+	struct hiraid_queue *adminq = &hdev->queues[0];
+	u32 aqa;
+	int ret;
+
+	dev_info(hdev->dev, "start disable controller\n");
+
+	ret = hiraid_disable_control(hdev);
+	if (ret)
+		return ret;
+
+	ret = hiraid_alloc_queue(hdev, 0, HIRAID_AQ_DEPTH);
+	if (ret)
+		return ret;
+
+	aqa = adminq->q_depth - 1;
+	aqa |= aqa << 16;
+	writel(aqa, hdev->bar + HIRAID_REG_AQA);
+	lo_hi_writeq(adminq->sq_buffer_phy, hdev->bar + HIRAID_REG_ASQ);
+	lo_hi_writeq(adminq->cq_buffer_phy, hdev->bar + HIRAID_REG_ACQ);
+
+	dev_info(hdev->dev, "start enable controller\n");
+
+	ret = hiraid_enable_control(hdev);
+	if (ret) {
+		ret = -ENODEV;
+		return ret;
+	}
+
+	adminq->cq_vector = 0;
+	ret = pci_request_irq(hdev->pdev, adminq->cq_vector, hiraid_handle_irq, NULL,
+			      adminq, "hiraid%d_q%d", hdev->instance, adminq->qid);
+	if (ret) {
+		adminq->cq_vector = -1;
+		return ret;
+	}
+
+	hiraid_init_queue(adminq, 0);
+
+	dev_info(hdev->dev, "setup admin queue success, queuecount[%d] online[%d] pagesize[%d]\n",
+		 hdev->queue_count, hdev->online_queues, hdev->page_size);
+
+	return 0;
+}
+
+static u32 hiraid_get_bar_size(struct hiraid_dev *hdev, u32 nr_ioqs)
+{
+	return (HIRAID_REG_DBS + ((nr_ioqs + 1) * 8 * hdev->db_stride));
+}
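+
+/*
+ * Illustrative sizing: each queue owns a pair of 4-byte doorbells, so
+ * with db_stride = 1 and nr_ioqs = 128 the remap must cover
+ * HIRAID_REG_DBS + (128 + 1) * 8 bytes of BAR0.
+ */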
+
+static int hiraid_create_admin_cmds(struct hiraid_dev *hdev)
+{
+	u16 i;
+
+	INIT_LIST_HEAD(&hdev->adm_cmd_list);
+	spin_lock_init(&hdev->adm_cmd_lock);
+
+	hdev->adm_cmds = kcalloc_node(HIRAID_AQ_BLK_MQ_DEPTH, sizeof(struct hiraid_cmd),
+				      GFP_KERNEL, hdev->numa_node);
+
+	if (!hdev->adm_cmds) {
+		dev_err(hdev->dev, "alloc admin cmds failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < HIRAID_AQ_BLK_MQ_DEPTH; i++) {
+		hdev->adm_cmds[i].qid = 0;
+		hdev->adm_cmds[i].cid = i;
+		list_add_tail(&(hdev->adm_cmds[i].list), &hdev->adm_cmd_list);
+	}
+
+	dev_info(hdev->dev, "alloc admin cmds success, num[%d]\n", HIRAID_AQ_BLK_MQ_DEPTH);
+
+	return 0;
+}
+
+static void hiraid_free_admin_cmds(struct hiraid_dev *hdev)
+{
+	kfree(hdev->adm_cmds);
+	hdev->adm_cmds = NULL;
+	INIT_LIST_HEAD(&hdev->adm_cmd_list);
+}
+
+static struct hiraid_cmd *hiraid_get_cmd(struct hiraid_dev *hdev, enum hiraid_cmd_type type)
+{
+	struct hiraid_cmd *cmd = NULL;
+	unsigned long flags;
+	struct list_head *head = &hdev->adm_cmd_list;
+	spinlock_t *slock = &hdev->adm_cmd_lock;
+
+	if (type == HIRAID_CMD_PTHRU) {
+		head = &hdev->io_pt_list;
+		slock = &hdev->io_pt_lock;
+	}
+
+	spin_lock_irqsave(slock, flags);
+	if (list_empty(head)) {
+		spin_unlock_irqrestore(slock, flags);
+		dev_err(hdev->dev, "err, cmd[%d] list empty\n", type);
+		return NULL;
+	}
+	cmd = list_entry(head->next, struct hiraid_cmd, list);
+	list_del_init(&cmd->list);
+	spin_unlock_irqrestore(slock, flags);
+
+	WRITE_ONCE(cmd->state, CMD_FLIGHT);
+
+	return cmd;
+}
+
+static void hiraid_put_cmd(struct hiraid_dev *hdev, struct hiraid_cmd *cmd,
+				enum hiraid_cmd_type type)
+{
+	unsigned long flags;
+	struct list_head *head = &hdev->adm_cmd_list;
+	spinlock_t *slock = &hdev->adm_cmd_lock;
+
+	if (type == HIRAID_CMD_PTHRU) {
+		head = &hdev->io_pt_list;
+		slock = &hdev->io_pt_lock;
+	}
+
+	spin_lock_irqsave(slock, flags);
+	WRITE_ONCE(cmd->state, CMD_IDLE);
+	list_add_tail(&cmd->list, head);
+	spin_unlock_irqrestore(slock, flags);
+}
+
+static bool hiraid_admin_need_reset(struct hiraid_admin_command *cmd)
+{
+	switch (cmd->common.opcode) {
+	case HIRAID_ADMIN_DELETE_SQ:
+	case HIRAID_ADMIN_CREATE_SQ:
+	case HIRAID_ADMIN_DELETE_CQ:
+	case HIRAID_ADMIN_CREATE_CQ:
+	case HIRAID_ADMIN_SET_FEATURES:
+		return false;
+	default:
+		return true;
+	}
+}
+
+static int hiraid_reset_work_sync(struct hiraid_dev *hdev);
+static inline void hiraid_admin_timeout(struct hiraid_dev *hdev, struct hiraid_cmd *cmd)
+{
+	/* command may be returned because controller reset */
+	if (READ_ONCE(cmd->state) == CMD_COMPLETE)
+		return;
+	if (hiraid_reset_work_sync(hdev) == -EBUSY)
+		flush_work(&hdev->reset_work);
+}
+
+static int hiraid_put_admin_sync_request(struct hiraid_dev *hdev, struct hiraid_admin_command *cmd,
+						u32 *result0, u32 *result1, u32 timeout)
+{
+	struct hiraid_cmd *adm_cmd = hiraid_get_cmd(hdev, HIRAID_CMD_ADMIN);
+
+	if (!adm_cmd) {
+		dev_err(hdev->dev, "err, get admin cmd failed\n");
+		return -EFAULT;
+	}
+
+	timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+	init_completion(&adm_cmd->cmd_done);
+
+	cmd->common.cmd_id = cpu_to_le16(adm_cmd->cid);
+	hiraid_submit_cmd(&hdev->queues[0], cmd);
+
+	if (!wait_for_completion_timeout(&adm_cmd->cmd_done, timeout)) {
+		dev_err(hdev->dev, "cid[%d] qid[%d] timeout, opcode[0x%x] subopcode[0x%x]\n",
+			 adm_cmd->cid, adm_cmd->qid, cmd->usr_cmd.opcode,
+			cmd->usr_cmd.info_0.subopcode);
+
+		/* reset controller if admin timeout */
+		if (hiraid_admin_need_reset(cmd))
+			hiraid_admin_timeout(hdev, adm_cmd);
+
+		hiraid_put_cmd(hdev, adm_cmd, HIRAID_CMD_ADMIN);
+		return -ETIME;
+	}
+
+	if (result0)
+		*result0 = adm_cmd->result0;
+	if (result1)
+		*result1 = adm_cmd->result1;
+
+	hiraid_put_cmd(hdev, adm_cmd, HIRAID_CMD_ADMIN);
+
+	return adm_cmd->status;
+}
+
+/**
+ * hiraid_create_complete_queue - send a command to the controller to create a completion queue
+ */
+static int hiraid_create_complete_queue(struct hiraid_dev *hdev, u16 qid,
+						struct hiraid_queue *hiraidq, u16 cq_vector)
+{
+	struct hiraid_admin_command admin_cmd;
+	int flags = HIRAID_QUEUE_PHYS_CONTIG | HIRAID_CQ_IRQ_ENABLED;
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.create_cq.opcode = HIRAID_ADMIN_CREATE_CQ;
+	admin_cmd.create_cq.prp1 = cpu_to_le64(hiraidq->cq_buffer_phy);
+	admin_cmd.create_cq.cqid = cpu_to_le16(qid);
+	admin_cmd.create_cq.qsize = cpu_to_le16(hiraidq->q_depth - 1);
+	admin_cmd.create_cq.cq_flags = cpu_to_le16(flags);
+	admin_cmd.create_cq.irq_vector = cpu_to_le16(cq_vector);
+
+	return hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+/**
+ * hiraid_create_send_queue - send a command to the controller to create a submission queue
+ */
+static int hiraid_create_send_queue(struct hiraid_dev *hdev, u16 qid,
+					struct hiraid_queue *hiraidq)
+{
+	struct hiraid_admin_command admin_cmd;
+	int flags = HIRAID_QUEUE_PHYS_CONTIG;
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.create_sq.opcode = HIRAID_ADMIN_CREATE_SQ;
+	admin_cmd.create_sq.prp1 = cpu_to_le64(hiraidq->sq_buffer_phy);
+	admin_cmd.create_sq.sqid = cpu_to_le16(qid);
+	admin_cmd.create_sq.qsize = cpu_to_le16(hiraidq->q_depth - 1);
+	admin_cmd.create_sq.sq_flags = cpu_to_le16(flags);
+	admin_cmd.create_sq.cqid = cpu_to_le16(qid);
+
+	return hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static void hiraid_free_all_queues(struct hiraid_dev *hdev)
+{
+	int i;
+	struct hiraid_queue *hq;
+
+	for (i = 0; i < hdev->queue_count; i++) {
+		hq = &hdev->queues[i];
+		dma_free_coherent(hdev->dev, CQ_SIZE(hq->q_depth),
+				  (void *)hq->cqes, hq->cq_buffer_phy);
+		dma_free_coherent(hdev->dev, SQ_SIZE(hq->qid, hq->q_depth),
+				  hq->sq_cmds, hq->sq_buffer_phy);
+		if (!work_mode)
+			dma_free_coherent(hdev->dev, SENSE_SIZE(hq->q_depth),
+					  hq->sense_buffer_virt, hq->sense_buffer_phy);
+	}
+
+	hdev->queue_count = 0;
+}
+
+static void hiraid_free_sense_buffer(struct hiraid_dev *hdev)
+{
+	if (hdev->sense_buffer_virt) {
+		dma_free_coherent(hdev->dev,
+			SENSE_SIZE(hdev->scsi_qd + max_hwq_num * HIRAID_PTHRU_CMDS_PERQ),
+			hdev->sense_buffer_virt, hdev->sense_buffer_phy);
+		hdev->sense_buffer_virt = NULL;
+	}
+}
+
+static int hiraid_delete_queue(struct hiraid_dev *hdev, u8 opcode, u16 qid)
+{
+	struct hiraid_admin_command admin_cmd;
+	int ret;
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.delete_queue.opcode = opcode;
+	admin_cmd.delete_queue.qid = cpu_to_le16(qid);
+
+	ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+
+	if (ret)
+		dev_err(hdev->dev, "delete %s:[%d] failed\n",
+			(opcode == HIRAID_ADMIN_DELETE_CQ) ? "cq" : "sq", qid);
+
+	return ret;
+}
+
+static int hiraid_delete_complete_queue(struct hiraid_dev *hdev, u16 cqid)
+{
+	return hiraid_delete_queue(hdev, HIRAID_ADMIN_DELETE_CQ, cqid);
+}
+
+static int hiraid_delete_send_queue(struct hiraid_dev *hdev, u16 sqid)
+{
+	return hiraid_delete_queue(hdev, HIRAID_ADMIN_DELETE_SQ, sqid);
+}
+
+static int hiraid_create_queue(struct hiraid_queue *hiraidq, u16 qid)
+{
+	struct hiraid_dev *hdev = hiraidq->hdev;
+	u16 cq_vector;
+	int ret;
+
+	cq_vector = (hdev->num_vecs == 1) ? 0 : qid;
+	ret = hiraid_create_complete_queue(hdev, qid, hiraidq, cq_vector);
+	if (ret)
+		return ret;
+
+	ret = hiraid_create_send_queue(hdev, qid, hiraidq);
+	if (ret)
+		goto delete_cq;
+
+	hiraidq->cq_vector = cq_vector;
+	ret = pci_request_irq(hdev->pdev, cq_vector, hiraid_handle_irq, NULL,
+			      hiraidq, "hiraid%d_q%d", hdev->instance, qid);
+	if (ret) {
+		hiraidq->cq_vector = -1;
+		dev_err(hdev->dev, "request queue[%d] irq failed\n", qid);
+		goto delete_sq;
+	}
+
+	hiraid_init_queue(hiraidq, qid);
+
+	return 0;
+
+delete_sq:
+	hiraid_delete_send_queue(hdev, qid);
+delete_cq:
+	hiraid_delete_complete_queue(hdev, qid);
+
+	return ret;
+}
+
+static int hiraid_create_io_queues(struct hiraid_dev *hdev)
+{
+	u32 i, max;
+	int ret = 0;
+
+	max = min(hdev->max_qid, hdev->queue_count - 1);
+	for (i = hdev->online_queues; i <= max; i++) {
+		ret = hiraid_create_queue(&hdev->queues[i], i);
+		if (ret) {
+			dev_err(hdev->dev, "create queue[%d] failed\n", i);
+			break;
+		}
+	}
+
+	if (!hdev->last_qcnt)
+		hdev->last_qcnt = hdev->online_queues;
+
+	dev_info(hdev->dev, "queue_count[%d] online_queue[%d] last_online[%d]",
+		 hdev->queue_count, hdev->online_queues, hdev->last_qcnt);
+
+	return ret >= 0 ? 0 : ret;
+}
+
+static int hiraid_set_features(struct hiraid_dev *hdev, u32 fid, u32 dword11, void *buffer,
+				size_t buflen, u32 *result)
+{
+	struct hiraid_admin_command admin_cmd;
+	int ret;
+	u8 *data_ptr = NULL;
+	dma_addr_t buffer_phy = 0;
+
+	if (buffer && buflen) {
+		data_ptr = dma_alloc_coherent(hdev->dev, buflen, &buffer_phy, GFP_KERNEL);
+		if (!data_ptr)
+			return -ENOMEM;
+
+		memcpy(data_ptr, buffer, buflen);
+	}
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.features.opcode = HIRAID_ADMIN_SET_FEATURES;
+	admin_cmd.features.fid = cpu_to_le32(fid);
+	admin_cmd.features.dword11 = cpu_to_le32(dword11);
+	admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy);
+
+	ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, result, NULL, 0);
+
+	if (data_ptr)
+		dma_free_coherent(hdev->dev, buflen, data_ptr, buffer_phy);
+
+	return ret;
+}
+
+static int hiraid_configure_timestamp(struct hiraid_dev *hdev)
+{
+	__le64 timestamp;
+	int ret;
+
+	timestamp = cpu_to_le64(ktime_to_ms(ktime_get_real()));
+	ret = hiraid_set_features(hdev, HIRAID_FEATURE_TIMESTAMP, 0,
+				  &timestamp, sizeof(timestamp), NULL);
+
+	if (ret)
+		dev_err(hdev->dev, "set timestamp failed[%d]\n", ret);
+	return ret;
+}
+
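+/*
+ * Negotiate the IO queue count with the firmware. Apparently following the
+ * NVMe set-features convention, the requested count is encoded zero-based in
+ * both the low (submission queues) and high (completion queues) 16 bits of
+ * dword11, the controller answers in the same format, and the smaller of the
+ * two supported counts bounds the final queue count.
+ */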
+static int hiraid_get_queue_cnt(struct hiraid_dev *hdev, u32 *cnt)
+{
+	u32 q_cnt = (*cnt - 1) | ((*cnt - 1) << 16);
+	u32 nr_ioqs, result;
+	int status;
+
+	status = hiraid_set_features(hdev, HIRAID_FEATURE_NUM_QUEUES, q_cnt, NULL, 0, &result);
+	if (status) {
+		dev_err(hdev->dev, "set queue count failed, status[%d]\n",
+			status);
+		return -EIO;
+	}
+
+	nr_ioqs = min(result & 0xffff, result >> 16) + 1;
+	*cnt = min(*cnt, nr_ioqs);
+	if (*cnt == 0) {
+		dev_err(hdev->dev, "illegal qcount: zero, nr_ioqs[%d], cnt[%d]\n", nr_ioqs, *cnt);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int hiraid_setup_io_queues(struct hiraid_dev *hdev)
+{
+	struct hiraid_queue *adminq = &hdev->queues[0];
+	struct pci_dev *pdev = hdev->pdev;
+	u32 i, size, nr_ioqs;
+	int ret;
+
+	struct irq_affinity affd = {
+		.pre_vectors = 1
+	};
+
+	/* alloc IO sense buffer for single hw queue mode */
+	if (work_mode && !hdev->sense_buffer_virt) {
+		hdev->sense_buffer_virt = dma_alloc_coherent(hdev->dev,
+			SENSE_SIZE(hdev->scsi_qd + max_hwq_num * HIRAID_PTHRU_CMDS_PERQ),
+			&hdev->sense_buffer_phy, GFP_KERNEL | __GFP_ZERO);
+		if (!hdev->sense_buffer_virt)
+			return -ENOMEM;
+	}
+
+	nr_ioqs = min(num_online_cpus(), max_hwq_num);
+	ret = hiraid_get_queue_cnt(hdev, &nr_ioqs);
+	if (ret < 0)
+		return ret;
+
+	size = hiraid_get_bar_size(hdev, nr_ioqs);
+	ret = hiraid_remap_bar(hdev, size);
+	if (ret)
+		return -ENOMEM;
+
+	adminq->q_db = hdev->dbs;
+
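+	/*
+	 * Drop the admin vector and free all allocated vectors so the full
+	 * set can be re-allocated below with IRQ affinity spread across the
+	 * IO queues.
+	 */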
+	pci_free_irq(pdev, 0, adminq);
+	pci_free_irq_vectors(pdev);
+	hdev->online_queues--;
+
+	ret = pci_alloc_irq_vectors_affinity(pdev, 1, (nr_ioqs + 1),
+					PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+	if (ret <= 0)
+		return -EIO;
+
+	hdev->num_vecs = ret;
+	hdev->max_qid = max(ret - 1, 1);
+
+	ret = pci_request_irq(pdev, adminq->cq_vector, hiraid_handle_irq, NULL,
+			adminq, "hiraid%d_q%d", hdev->instance, adminq->qid);
+	if (ret) {
+		dev_err(hdev->dev, "request admin irq failed\n");
+		adminq->cq_vector = -1;
+		return ret;
+	}
+
+	hdev->online_queues++;
+
+	for (i = hdev->queue_count; i <= hdev->max_qid; i++) {
+		ret = hiraid_alloc_queue(hdev, i, hdev->ioq_depth);
+		if (ret)
+			break;
+	}
+	dev_info(hdev->dev, "max_qid[%d] queuecount[%d] onlinequeue[%d] ioqdepth[%d]\n",
+		 hdev->max_qid, hdev->queue_count, hdev->online_queues, hdev->ioq_depth);
+
+	return hiraid_create_io_queues(hdev);
+}
+
+static void hiraid_delete_io_queues(struct hiraid_dev *hdev)
+{
+	u16 queues = hdev->online_queues - 1;
+	u8 opcode = HIRAID_ADMIN_DELETE_SQ;
+	u16 i, pass;
+
+	if (!pci_device_is_present(hdev->pdev)) {
+		dev_err(hdev->dev, "pci_device is not present, skip disable io queues\n");
+		return;
+	}
+
+	if (hdev->online_queues < 2) {
+		dev_err(hdev->dev, "err, io queue has been delete\n");
+		return;
+	}
+
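+	/* first pass deletes the submission queues, second pass the completion queues */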
+	for (pass = 0; pass < 2; pass++) {
+		for (i = queues; i > 0; i--)
+			if (hiraid_delete_queue(hdev, opcode, i))
+				break;
+
+		opcode = HIRAID_ADMIN_DELETE_CQ;
+	}
+}
+
+static void hiraid_pci_disable(struct hiraid_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+	u32 i;
+
+	for (i = 0; i < hdev->online_queues; i++)
+		pci_free_irq(pdev, hdev->queues[i].cq_vector, &hdev->queues[i]);
+	pci_free_irq_vectors(pdev);
+	if (pci_is_enabled(pdev))
+		pci_disable_device(pdev);
+	hdev->online_queues = 0;
+}
+
+static void hiraid_disable_admin_queue(struct hiraid_dev *hdev, bool shutdown)
+{
+	struct hiraid_queue *adminq = &hdev->queues[0];
+	u16 start, end;
+
+	if (pci_device_is_present(hdev->pdev)) {
+		if (shutdown)
+			hiraid_shutdown_control(hdev);
+		else
+			hiraid_disable_control(hdev);
+	}
+
+	if (hdev->queue_count == 0) {
+		dev_err(hdev->dev, "err, admin queue has been delete\n");
+		return;
+	}
+
+	spin_lock_irq(&adminq->cq_lock);
+	hiraid_process_cq(adminq, &start, &end, -1);
+	spin_unlock_irq(&adminq->cq_lock);
+	hiraid_complete_cqes(adminq, start, end);
+}
+
+static int hiraid_create_prp_pools(struct hiraid_dev *hdev)
+{
+	int i;
+	char poolname[20] = { 0 };
+
+	hdev->prp_page_pool = dma_pool_create("prp list page", hdev->dev,
+					PAGE_SIZE, PAGE_SIZE, 0);
+
+	if (!hdev->prp_page_pool) {
+		dev_err(hdev->dev, "create prp_page_pool failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < extra_pool_num; i++) {
+		snprintf(poolname, sizeof(poolname), "prp_list_256_%d", i);
+		hdev->prp_extra_pool[i] = dma_pool_create(poolname, hdev->dev, EXTRA_POOL_SIZE,
+							EXTRA_POOL_SIZE, 0);
+
+		if (!hdev->prp_extra_pool[i]) {
+			dev_err(hdev->dev, "create prp extra pool[%d] failed\n", i);
+			goto destroy_prp_extra_pool;
+		}
+	}
+
+	return 0;
+
+destroy_prp_extra_pool:
+	while (i > 0)
+		dma_pool_destroy(hdev->prp_extra_pool[--i]);
+	dma_pool_destroy(hdev->prp_page_pool);
+
+	return -ENOMEM;
+}
+
+static void hiraid_free_prp_pools(struct hiraid_dev *hdev)
+{
+	int i;
+
+	for (i = 0; i < extra_pool_num; i++)
+		dma_pool_destroy(hdev->prp_extra_pool[i]);
+	dma_pool_destroy(hdev->prp_page_pool);
+}
+
+static int hiraid_request_devices(struct hiraid_dev *hdev, struct hiraid_dev_info *dev)
+{
+	u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+	struct hiraid_admin_command admin_cmd;
+	struct hiraid_dev_list *list_buf;
+	dma_addr_t buffer_phy = 0;
+	u32 i, idx, hdid, ndev;
+	int ret = 0;
+
+	list_buf = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &buffer_phy, GFP_KERNEL);
+	if (!list_buf)
+		return -ENOMEM;
+
+	for (idx = 0; idx < nd;) {
+		memset(&admin_cmd, 0, sizeof(admin_cmd));
+		admin_cmd.get_info.opcode = HIRAID_ADMIN_GET_INFO;
+		admin_cmd.get_info.type = HIRAID_GET_DEVLIST_INFO;
+		admin_cmd.get_info.cdw11 = cpu_to_le32(idx);
+		admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy);
+
+		ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+
+		if (ret) {
+			dev_err(hdev->dev, "get device list failed, nd[%u] idx[%u] ret[%d]\n",
+				nd, idx, ret);
+			goto out;
+		}
+		ndev = le32_to_cpu(list_buf->dev_num);
+
+		dev_info(hdev->dev, "get dev list ndev num[%u]\n", ndev);
+
+		for (i = 0; i < ndev; i++) {
+			hdid = le32_to_cpu(list_buf->devinfo[i].hdid);
+			dev_info(hdev->dev, "devices[%d], hdid[%u] target[%d] channel[%d] lun[%d] attr[0x%x]\n",
+				 i, hdid, le16_to_cpu(list_buf->devinfo[i].target),
+				 list_buf->devinfo[i].channel,
+				 list_buf->devinfo[i].lun,
+				 list_buf->devinfo[i].attr);
+			if (hdid > nd || hdid == 0) {
+				dev_err(hdev->dev, "err, hdid[%d] invalid\n", hdid);
+				continue;
+			}
+			memcpy(&dev[hdid - 1], &list_buf->devinfo[i],
+			       sizeof(struct hiraid_dev_info));
+		}
+		idx += ndev;
+
+		if (ndev < MAX_DEV_ENTRY_PER_PAGE_4K)
+			break;
+	}
+
+out:
+	dma_free_coherent(hdev->dev, PAGE_SIZE, list_buf, buffer_phy);
+	return ret;
+}
+
+static void hiraid_send_async_event(struct hiraid_dev *hdev, u16 cid)
+{
+	struct hiraid_queue *adminq = &hdev->queues[0];
+	struct hiraid_admin_command admin_cmd;
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.common.opcode = HIRAID_ADMIN_ASYNC_EVENT;
+	admin_cmd.common.cmd_id = cpu_to_le16(cid);
+
+	hiraid_submit_cmd(adminq, &admin_cmd);
+	dev_info(hdev->dev, "send async event to controller, cid[%d]\n", cid);
+}
+
+static inline void hiraid_init_async_event(struct hiraid_dev *hdev)
+{
+	u16 i;
+
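+	/* async event cids sit above the admin tag range to avoid collisions */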
+	for (i = 0; i < hdev->ctrl_info->asynevent; i++)
+		hiraid_send_async_event(hdev, i + HIRAID_AQ_BLK_MQ_DEPTH);
+}
+
+static int hiraid_add_device(struct hiraid_dev *hdev, struct hiraid_dev_info *devinfo)
+{
+	struct Scsi_Host *shost = hdev->shost;
+	struct scsi_device *sdev;
+
+	dev_info(hdev->dev, "add device, hdid[%u] target[%d] channel[%d] lun[%d] attr[0x%x]\n",
+			le32_to_cpu(devinfo->hdid), le16_to_cpu(devinfo->target),
+			devinfo->channel, devinfo->lun, devinfo->attr);
+
+	sdev = scsi_device_lookup(shost, devinfo->channel, le16_to_cpu(devinfo->target), 0);
+	if (sdev) {
+		dev_warn(hdev->dev, "device is already exist, channel[%d] targetid[%d] lun[%d]\n",
+			 devinfo->channel, le16_to_cpu(devinfo->target), 0);
+		scsi_device_put(sdev);
+		return -EEXIST;
+	}
+	scsi_add_device(shost, devinfo->channel, le16_to_cpu(devinfo->target), 0);
+	return 0;
+}
+
+static int hiraid_rescan_device(struct hiraid_dev *hdev, struct hiraid_dev_info *devinfo)
+{
+	struct Scsi_Host *shost = hdev->shost;
+	struct scsi_device *sdev;
+
+	dev_info(hdev->dev, "rescan device, hdid[%u] target[%d] channel[%d] lun[%d] attr[0x%x]\n",
+			le32_to_cpu(devinfo->hdid), le16_to_cpu(devinfo->target),
+			devinfo->channel, devinfo->lun, devinfo->attr);
+
+	sdev = scsi_device_lookup(shost, devinfo->channel, le16_to_cpu(devinfo->target), 0);
+	if (!sdev) {
+		dev_warn(hdev->dev, "device is not exit rescan it, channel[%d] target_id[%d] lun[%d]\n",
+			 devinfo->channel, le16_to_cpu(devinfo->target), 0);
+		return -ENODEV;
+	}
+
+	scsi_rescan_device(sdev);
+	scsi_device_put(sdev);
+	return 0;
+}
+
+static int hiraid_delete_device(struct hiraid_dev *hdev, struct hiraid_dev_info *devinfo)
+{
+	struct Scsi_Host *shost = hdev->shost;
+	struct scsi_device *sdev;
+
+	dev_info(hdev->dev, "remove device, hdid[%u] target[%d] channel[%d] lun[%d] attr[0x%x]\n",
+			le32_to_cpu(devinfo->hdid), le16_to_cpu(devinfo->target),
+			devinfo->channel, devinfo->lun, devinfo->attr);
+
+	sdev = scsi_device_lookup(shost, devinfo->channel, le16_to_cpu(devinfo->target), 0);
+	if (!sdev) {
+		dev_warn(hdev->dev, "device is not exit remove it, channel[%d] target_id[%d] lun[%d]\n",
+			 devinfo->channel, le16_to_cpu(devinfo->target), 0);
+		return -ENODEV;
+	}
+
+	scsi_remove_device(sdev);
+	scsi_device_put(sdev);
+	return 0;
+}
+
+static int hiraid_dev_list_init(struct hiraid_dev *hdev)
+{
+	u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+
+	hdev->dev_info = kcalloc_node(nd, sizeof(struct hiraid_dev_info),
+				     GFP_KERNEL, hdev->numa_node);
+	if (!hdev->dev_info)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int hiraid_luntarget_sort(const void *l, const void *r)
+{
+	const struct hiraid_dev_info *ln = l;
+	const struct hiraid_dev_info *rn = r;
+	int l_attr = HIRAID_DEV_INFO_ATTR_BOOT(ln->attr);
+	int r_attr = HIRAID_DEV_INFO_ATTR_BOOT(rn->attr);
+
+	/* boot first */
+	if (l_attr != r_attr)
+		return (r_attr - l_attr);
+
+	if (ln->channel == rn->channel)
+		return le16_to_cpu(ln->target) - le16_to_cpu(rn->target);
+
+	return ln->channel - rn->channel;
+}
+
+static void hiraid_scan_work(struct work_struct *work)
+{
+	struct hiraid_dev *hdev =
+		container_of(work, struct hiraid_dev, scan_work);
+	struct hiraid_dev_info *dev, *old_dev, *new_dev;
+	u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+	u8 flag, org_flag;
+	int i, ret;
+	int count = 0;
+
+	dev = kcalloc(nd, sizeof(struct hiraid_dev_info), GFP_KERNEL);
+	if (!dev)
+		return;
+
+	new_dev = kcalloc(nd, sizeof(struct hiraid_dev_info), GFP_KERNEL);
+	if (!new_dev)
+		goto free_list;
+
+	ret = hiraid_request_devices(hdev, dev);
+	if (ret)
+		goto free_all;
+	old_dev = hdev->dev_info;
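+	/*
+	 * Diff the firmware device table against the cached copy: a newly
+	 * valid entry is queued for addition, a valid entry with the change
+	 * bit set is rescanned, and an entry that turned invalid is removed.
+	 */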
+	for (i = 0; i < nd; i++) {
+		org_flag = old_dev[i].flag;
+		flag = dev[i].flag;
+
+		dev_log_dbg(hdev->dev, "i[%d] org_flag[0x%x] flag[0x%x]\n", i, org_flag, flag);
+
+		if (HIRAID_DEV_INFO_FLAG_VALID(flag)) {
+			if (!HIRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+				down_write(&hdev->dev_rwsem);
+				memcpy(&old_dev[i], &dev[i],
+						sizeof(struct hiraid_dev_info));
+				memcpy(&new_dev[count++], &dev[i],
+						sizeof(struct hiraid_dev_info));
+				up_write(&hdev->dev_rwsem);
+			} else if (HIRAID_DEV_INFO_FLAG_CHANGE(flag)) {
+				hiraid_rescan_device(hdev, &dev[i]);
+			}
+		} else {
+			if (HIRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+				down_write(&hdev->dev_rwsem);
+				old_dev[i].flag &= 0xfe;
+				up_write(&hdev->dev_rwsem);
+				hiraid_delete_device(hdev, &old_dev[i]);
+			}
+		}
+	}
+
+	dev_info(hdev->dev, "scan work add device num[%d]\n", count);
+
+	sort(new_dev, count, sizeof(new_dev[0]), hiraid_luntarget_sort, NULL);
+
+	for (i = 0; i < count; i++)
+		hiraid_add_device(hdev, &new_dev[i]);
+
+free_all:
+	kfree(new_dev);
+free_list:
+	kfree(dev);
+}
+
+static void hiraid_timesyn_work(struct work_struct *work)
+{
+	struct hiraid_dev *hdev =
+		container_of(work, struct hiraid_dev, timesyn_work);
+
+	hiraid_configure_timestamp(hdev);
+}
+
+static int hiraid_init_control_info(struct hiraid_dev *hdev);
+static void hiraid_fwactive_work(struct work_struct *work)
+{
+	struct hiraid_dev *hdev = container_of(work, struct hiraid_dev, fwact_work);
+
+	if (hiraid_init_control_info(hdev))
+		dev_err(hdev->dev, "get controller info failed after fw activation\n");
+}
+
+static void hiraid_queue_scan(struct hiraid_dev *hdev)
+{
+	queue_work(work_queue, &hdev->scan_work);
+}
+
+static void hiraid_handle_async_notice(struct hiraid_dev *hdev, u32 result)
+{
+	switch ((result & 0xff00) >> 8) {
+	case HIRAID_ASYN_DEV_CHANGED:
+		hiraid_queue_scan(hdev);
+		break;
+	case HIRAID_ASYN_FW_ACT_START:
+		dev_info(hdev->dev, "fw activation starting\n");
+		break;
+	case HIRAID_ASYN_HOST_PROBING:
+		break;
+	default:
+		dev_warn(hdev->dev, "async event result[%08x]\n", result);
+	}
+}
+
+static void hiraid_handle_async_vs(struct hiraid_dev *hdev, u32 result, u32 result1)
+{
+	switch ((result & 0xff00) >> 8) {
+	case HIRAID_ASYN_TIMESYN:
+		queue_work(work_queue, &hdev->timesyn_work);
+		break;
+	case HIRAID_ASYN_FW_ACT_FINISH:
+		dev_info(hdev->dev, "fw activation finish\n");
+		queue_work(work_queue, &hdev->fwact_work);
+		break;
+	case HIRAID_ASYN_EVENT_MIN ... HIRAID_ASYN_EVENT_MAX:
+		dev_info(hdev->dev, "recv card event[%d] param1[0x%x] param2[0x%x]\n",
+			 (result & 0xff00) >> 8, result, result1);
+		break;
+	default:
+		dev_warn(hdev->dev, "async event result[0x%x]\n", result);
+	}
+}
+
+static int hiraid_alloc_resources(struct hiraid_dev *hdev)
+{
+	int ret, nqueue;
+
+	hdev->ctrl_info = kzalloc_node(sizeof(*hdev->ctrl_info), GFP_KERNEL, hdev->numa_node);
+	if (!hdev->ctrl_info)
+		return -ENOMEM;
+
+	ret = hiraid_create_prp_pools(hdev);
+	if (ret)
+		goto free_ctrl_info;
+	nqueue = min(num_possible_cpus(), max_hwq_num) + 1;
+	hdev->queues = kcalloc_node(nqueue, sizeof(struct hiraid_queue),
+				    GFP_KERNEL, hdev->numa_node);
+	if (!hdev->queues) {
+		ret = -ENOMEM;
+		goto destroy_dma_pools;
+	}
+
+	ret = hiraid_create_admin_cmds(hdev);
+	if (ret)
+		goto free_queues;
+
+	dev_info(hdev->dev, "total queues num[%d]\n", nqueue);
+
+	return 0;
+
+free_queues:
+	kfree(hdev->queues);
+destroy_dma_pools:
+	hiraid_free_prp_pools(hdev);
+free_ctrl_info:
+	kfree(hdev->ctrl_info);
+
+	return ret;
+}
+
+static void hiraid_free_resources(struct hiraid_dev *hdev)
+{
+	hiraid_free_admin_cmds(hdev);
+	kfree(hdev->queues);
+	hiraid_free_prp_pools(hdev);
+	kfree(hdev->ctrl_info);
+}
+
+static void hiraid_bsg_buf_unmap(struct hiraid_dev *hdev, struct bsg_job *job)
+{
+	struct request *rq = blk_mq_rq_from_pdu(job);
+	struct hiraid_mapmange *mapbuf = job->dd_data;
+	enum dma_data_direction dma_dir = rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+	if (mapbuf->sge_cnt)
+		dma_unmap_sg(hdev->dev, mapbuf->sgl, mapbuf->sge_cnt, dma_dir);
+
+	hiraid_free_mapbuf(hdev, mapbuf);
+}
+
+static int hiraid_bsg_buf_map(struct hiraid_dev *hdev, struct bsg_job *job,
+				struct hiraid_admin_command *cmd)
+{
+	struct hiraid_bsg_request *bsg_req = job->request;
+	struct request *rq = blk_mq_rq_from_pdu(job);
+	struct hiraid_mapmange *mapbuf = job->dd_data;
+	enum dma_data_direction dma_dir = rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+	int ret = 0;
+
+	/* No data to DMA, it may be scsi no-rw command */
+	mapbuf->sge_cnt = job->request_payload.sg_cnt;
+	mapbuf->sgl = job->request_payload.sg_list;
+	mapbuf->len = job->request_payload.payload_len;
+	mapbuf->page_cnt = -1;
+	if (unlikely(mapbuf->sge_cnt == 0))
+		goto out;
+
+	ret = dma_map_sg_attrs(hdev->dev, mapbuf->sgl, mapbuf->sge_cnt, dma_dir, DMA_ATTR_NO_WARN);
+	if (!ret) {
+		/* dma_map_sg_attrs() returns 0 on mapping failure; report an error, not success */
+		ret = -EIO;
+		goto out;
+	}
+
+	mapbuf->use_sgl = !hiraid_is_prp(hdev, mapbuf->sgl, mapbuf->sge_cnt);
+
+	if (mapbuf->use_sgl && (bsg_req->msgcode == HIRAID_BSG_IOPTHRU) &&
+		hdev->ctrl_info->pt_use_sgl) {
+		ret = hiraid_build_passthru_sgl(hdev, cmd, mapbuf);
+	} else {
+		mapbuf->use_sgl = false;
+
+		ret = hiraid_build_passthru_prp(hdev, mapbuf);
+		cmd->common.dptr.prp1 = cpu_to_le64(sg_dma_address(mapbuf->sgl));
+		cmd->common.dptr.prp2 = cpu_to_le64(mapbuf->first_dma);
+	}
+
+	if (ret)
+		goto unmap;
+
+	return 0;
+
+unmap:
+	dma_unmap_sg(hdev->dev, mapbuf->sgl, mapbuf->sge_cnt, dma_dir);
+out:
+	return ret;
+}
+
+static int hiraid_get_control_info(struct hiraid_dev *hdev, struct hiraid_ctrl_info *ctrl_info)
+{
+	struct hiraid_admin_command admin_cmd;
+	u8 *data_ptr = NULL;
+	dma_addr_t buffer_phy = 0;
+	int ret;
+
+	data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &buffer_phy, GFP_KERNEL);
+	if (!data_ptr)
+		return -ENOMEM;
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.get_info.opcode = HIRAID_ADMIN_GET_INFO;
+	admin_cmd.get_info.type = HIRAID_GET_CTRL_INFO;
+	admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy);
+
+	ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+	if (!ret)
+		memcpy(ctrl_info, data_ptr, sizeof(struct hiraid_ctrl_info));
+
+	dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, buffer_phy);
+
+	return ret;
+}
+
+static int hiraid_init_control_info(struct hiraid_dev *hdev)
+{
+	int ret;
+
+	hdev->ctrl_info->nd = cpu_to_le32(240);
+	hdev->ctrl_info->mdts = 8;
+	hdev->ctrl_info->max_cmds = cpu_to_le16(4096);
+	hdev->ctrl_info->max_num_sge = cpu_to_le16(128);
+	hdev->ctrl_info->max_channel = cpu_to_le16(4);
+	hdev->ctrl_info->max_tgt_id = cpu_to_le32(3239);
+	hdev->ctrl_info->max_lun = cpu_to_le16(2);
+
+	ret = hiraid_get_control_info(hdev, hdev->ctrl_info);
+	if (ret)
+		dev_err(hdev->dev, "get controller info failed[%d]\n", ret);
+
+	dev_info(hdev->dev, "device_num = %d\n", hdev->ctrl_info->nd);
+	dev_info(hdev->dev, "max_cmd = %d\n", hdev->ctrl_info->max_cmds);
+	dev_info(hdev->dev, "max_channel = %d\n", hdev->ctrl_info->max_channel);
+	dev_info(hdev->dev, "max_tgt_id = %d\n", hdev->ctrl_info->max_tgt_id);
+	dev_info(hdev->dev, "max_lun = %d\n", hdev->ctrl_info->max_lun);
+	dev_info(hdev->dev, "max_num_sge = %d\n", hdev->ctrl_info->max_num_sge);
+	dev_info(hdev->dev, "lun_num_boot = %d\n", hdev->ctrl_info->lun_num_boot);
+	dev_info(hdev->dev, "max_data_transfer_size = %d\n", hdev->ctrl_info->mdts);
+	dev_info(hdev->dev, "abort_cmd_limit = %d\n", hdev->ctrl_info->acl);
+	dev_info(hdev->dev, "asyn_event_num = %d\n", hdev->ctrl_info->asynevent);
+	dev_info(hdev->dev, "card_type = %d\n", hdev->ctrl_info->card_type);
+	dev_info(hdev->dev, "pt_use_sgl = %d\n", hdev->ctrl_info->pt_use_sgl);
+	dev_info(hdev->dev, "rtd3e = %d\n", hdev->ctrl_info->rtd3e);
+	dev_info(hdev->dev, "serial_num = %s\n", hdev->ctrl_info->sn);
+	dev_info(hdev->dev, "fw_verion = %s\n", hdev->ctrl_info->fw_version);
+
+	if (!hdev->ctrl_info->asynevent)
+		hdev->ctrl_info->asynevent = 1;
+	if (hdev->ctrl_info->asynevent > HIRAID_ASYN_COMMANDS)
+		hdev->ctrl_info->asynevent = HIRAID_ASYN_COMMANDS;
+
+	hdev->scsi_qd = work_mode ?
+		le16_to_cpu(hdev->ctrl_info->max_cmds) : (hdev->ioq_depth - HIRAID_PTHRU_CMDS_PERQ);
+
+	return 0;
+}
+
+static int hiraid_user_send_admcmd(struct hiraid_dev *hdev, struct bsg_job *job)
+{
+	struct hiraid_bsg_request *bsg_req = job->request;
+	struct hiraid_passthru_common_cmd *ptcmd = &(bsg_req->admcmd);
+	struct hiraid_admin_command admin_cmd;
+	u32 timeout = msecs_to_jiffies(ptcmd->timeout_ms);
+	u32 result[2] = {0};
+	int status;
+
+	if (hdev->state >= DEV_RESETTING) {
+		dev_err(hdev->dev, "err, host state[%d] is not right\n",
+			hdev->state);
+		return -EBUSY;
+	}
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.common.opcode = ptcmd->opcode;
+	admin_cmd.common.flags = ptcmd->flags;
+	admin_cmd.common.hdid = cpu_to_le32(ptcmd->nsid);
+	admin_cmd.common.cdw2[0] = cpu_to_le32(ptcmd->cdw2);
+	admin_cmd.common.cdw2[1] = cpu_to_le32(ptcmd->cdw3);
+	admin_cmd.common.cdw10 = cpu_to_le32(ptcmd->cdw10);
+	admin_cmd.common.cdw11 = cpu_to_le32(ptcmd->cdw11);
+	admin_cmd.common.cdw12 = cpu_to_le32(ptcmd->cdw12);
+	admin_cmd.common.cdw13 = cpu_to_le32(ptcmd->cdw13);
+	admin_cmd.common.cdw14 = cpu_to_le32(ptcmd->cdw14);
+	admin_cmd.common.cdw15 = cpu_to_le32(ptcmd->cdw15);
+
+	status = hiraid_bsg_buf_map(hdev, job, &admin_cmd);
+	if (status) {
+		dev_err(hdev->dev, "err, map data failed\n");
+		return status;
+	}
+
+	status = hiraid_put_admin_sync_request(hdev, &admin_cmd, &result[0], &result[1], timeout);
+	if (status >= 0) {
+		job->reply_len = sizeof(result);
+		memcpy(job->reply, result, sizeof(result));
+	}
+	if (status)
+		dev_info(hdev->dev, "opcode[0x%x] subopcode[0x%x] status[0x%x] result0[0x%x];"
+			"result1[0x%x]\n", ptcmd->opcode, ptcmd->info_0.subopcode, status,
+			result[0], result[1]);
+
+	hiraid_bsg_buf_unmap(hdev, job);
+
+	return status;
+}
+
+static int hiraid_alloc_io_ptcmds(struct hiraid_dev *hdev)
+{
+	u32 i;
+	u32 ptnum = HIRAID_TOTAL_PTCMDS(hdev->online_queues - 1);
+
+	INIT_LIST_HEAD(&hdev->io_pt_list);
+	spin_lock_init(&hdev->io_pt_lock);
+
+	hdev->io_ptcmds = kcalloc_node(ptnum, sizeof(struct hiraid_cmd),
+					GFP_KERNEL, hdev->numa_node);
+
+	if (!hdev->io_ptcmds) {
+		dev_err(hdev->dev, "alloc io pthrunum failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < ptnum; i++) {
+		hdev->io_ptcmds[i].qid = i / HIRAID_PTHRU_CMDS_PERQ + 1;
+		hdev->io_ptcmds[i].cid = i % HIRAID_PTHRU_CMDS_PERQ + hdev->scsi_qd;
+		list_add_tail(&(hdev->io_ptcmds[i].list), &hdev->io_pt_list);
+	}
+
+	dev_info(hdev->dev, "alloc io pthru cmd success, pthrunum[%d]\n", ptnum);
+
+	return 0;
+}
+
+static void hiraid_free_io_ptcmds(struct hiraid_dev *hdev)
+{
+	kfree(hdev->io_ptcmds);
+	hdev->io_ptcmds = NULL;
+
+	INIT_LIST_HEAD(&hdev->io_pt_list);
+}
+
+static int hiraid_put_io_sync_request(struct hiraid_dev *hdev, struct hiraid_scsi_io_cmd *io_cmd,
+					u32 *result, u32 *reslen, u32 timeout)
+{
+	int ret;
+	dma_addr_t buffer_phy;
+	struct hiraid_queue *ioq;
+	void *sense_addr = NULL;
+	struct hiraid_cmd *pt_cmd = hiraid_get_cmd(hdev, HIRAID_CMD_PTHRU);
+
+	if (!pt_cmd) {
+		dev_err(hdev->dev, "err, get ioq cmd failed\n");
+		return -EFAULT;
+	}
+
+	timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+	init_completion(&pt_cmd->cmd_done);
+
+	ioq = &hdev->queues[pt_cmd->qid];
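+	/*
+	 * Each passthrough command owns one SCSI_SENSE_BUFFERSIZE slot: in
+	 * single hw queue mode the slot lives in the global sense buffer at
+	 * ((qid - 1) * HIRAID_PTHRU_CMDS_PERQ + cid) * SCSI_SENSE_BUFFERSIZE,
+	 * otherwise in the per-queue sense buffer indexed by cid.
+	 */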
+	if (work_mode) {
+		ret = ((pt_cmd->qid - 1) * HIRAID_PTHRU_CMDS_PERQ + pt_cmd->cid) *
+			SCSI_SENSE_BUFFERSIZE;
+		sense_addr = hdev->sense_buffer_virt + ret;
+		buffer_phy = hdev->sense_buffer_phy + ret;
+	} else {
+		ret = pt_cmd->cid * SCSI_SENSE_BUFFERSIZE;
+		sense_addr = ioq->sense_buffer_virt + ret;
+		buffer_phy = ioq->sense_buffer_phy + ret;
+	}
+
+	io_cmd->common.sense_addr = cpu_to_le64(buffer_phy);
+	io_cmd->common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+	io_cmd->common.cmd_id = cpu_to_le16(pt_cmd->cid);
+
+	hiraid_submit_cmd(ioq, io_cmd);
+
+	if (!wait_for_completion_timeout(&pt_cmd->cmd_done, timeout)) {
+		dev_err(hdev->dev, "cid[%d] qid[%d] timeout, opcode[0x%x] subopcode[0x%x]\n",
+			pt_cmd->cid, pt_cmd->qid, io_cmd->common.opcode,
+			(le32_to_cpu(io_cmd->common.cdw3[0]) & 0xffff));
+
+		hiraid_admin_timeout(hdev, pt_cmd);
+
+		hiraid_put_cmd(hdev, pt_cmd, HIRAID_CMD_PTHRU);
+		return -ETIME;
+	}
+
+	if (result && reslen) {
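+		/* assumption: masked status 0x101 means the firmware returned valid sense data */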
+		if ((pt_cmd->status & 0x17f) == 0x101) {
+			memcpy(result, sense_addr, SCSI_SENSE_BUFFERSIZE);
+			*reslen = SCSI_SENSE_BUFFERSIZE;
+		}
+	}
+
+	hiraid_put_cmd(hdev, pt_cmd, HIRAID_CMD_PTHRU);
+
+	return pt_cmd->status;
+}
+
+static int hiraid_user_send_ptcmd(struct hiraid_dev *hdev, struct bsg_job *job)
+{
+	struct hiraid_bsg_request *bsg_req = (struct hiraid_bsg_request *)(job->request);
+	struct hiraid_passthru_io_cmd *cmd = &(bsg_req->pthrucmd);
+	struct hiraid_scsi_io_cmd pthru_cmd;
+	int status = 0;
+	u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+	/* data length was limited to 4K before SGL support; with SGL it is 1M */
+	u32 io_pt_data_len = hdev->ctrl_info->pt_use_sgl ?
+		IOQ_PT_SGL_DATA_LEN : IOQ_PT_DATA_LEN;
+
+	if (cmd->data_len > io_pt_data_len) {
+		dev_err(hdev->dev, "data len bigger than %d\n", io_pt_data_len);
+		return -EFAULT;
+	}
+
+	if (hdev->state != DEV_LIVE) {
+		dev_err(hdev->dev, "err, host state[%d] is not live\n", hdev->state);
+		return -EBUSY;
+	}
+
+	memset(&pthru_cmd, 0, sizeof(pthru_cmd));
+	pthru_cmd.common.opcode = cmd->opcode;
+	pthru_cmd.common.flags = cmd->flags;
+	pthru_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+	pthru_cmd.common.sense_len = cpu_to_le16(cmd->info_0.res_sense_len);
+	pthru_cmd.common.cdb_len = cmd->info_0.cdb_len;
+	pthru_cmd.common.rsvd2 = cmd->info_0.rsvd0;
+	pthru_cmd.common.cdw3[0] = cpu_to_le32(cmd->cdw3);
+	pthru_cmd.common.cdw3[1] = cpu_to_le32(cmd->cdw4);
+	pthru_cmd.common.cdw3[2] = cpu_to_le32(cmd->cdw5);
+
+	pthru_cmd.common.cdw10[0] = cpu_to_le32(cmd->cdw10);
+	pthru_cmd.common.cdw10[1] = cpu_to_le32(cmd->cdw11);
+	pthru_cmd.common.cdw10[2] = cpu_to_le32(cmd->cdw12);
+	pthru_cmd.common.cdw10[3] = cpu_to_le32(cmd->cdw13);
+	pthru_cmd.common.cdw10[4] = cpu_to_le32(cmd->cdw14);
+	pthru_cmd.common.cdw10[5] = cpu_to_le32(cmd->data_len);
+
+	memcpy(pthru_cmd.common.cdb, &cmd->cdw16, cmd->info_0.cdb_len);
+
+	pthru_cmd.common.cdw26[0] = cpu_to_le32(cmd->cdw26[0]);
+	pthru_cmd.common.cdw26[1] = cpu_to_le32(cmd->cdw26[1]);
+	pthru_cmd.common.cdw26[2] = cpu_to_le32(cmd->cdw26[2]);
+	pthru_cmd.common.cdw26[3] = cpu_to_le32(cmd->cdw26[3]);
+
+	status = hiraid_bsg_buf_map(hdev, job, (struct hiraid_admin_command *)&pthru_cmd);
+	if (status) {
+		dev_err(hdev->dev, "err, map data failed\n");
+		return status;
+	}
+
+	status = hiraid_put_io_sync_request(hdev, &pthru_cmd, job->reply, &job->reply_len, timeout);
+
+	if (status)
+		dev_info(hdev->dev, "opcode[0x%x] subopcode[0x%x] status[0x%x] replylen[%d]\n",
+			 cmd->opcode, cmd->info_1.subopcode, status, job->reply_len);
+
+	hiraid_bsg_buf_unmap(hdev, job);
+
+	return status;
+}
+
+static bool hiraid_check_scmd_finished(struct scsi_cmnd *scmd)
+{
+	struct hiraid_dev *hdev = shost_priv(scmd->device->host);
+	struct hiraid_mapmange *mapbuf = scsi_cmd_priv(scmd);
+	struct hiraid_queue *hiraidq;
+
+	hiraidq = mapbuf->hiraidq;
+	if (!hiraidq)
+		return false;
+	if (READ_ONCE(mapbuf->state) == CMD_COMPLETE || hiraid_poll_cq(hiraidq, mapbuf->cid)) {
+		dev_warn(hdev->dev, "cid[%d] qid[%d] has been completed\n",
+			 mapbuf->cid, hiraidq->qid);
+		return true;
+	}
+	return false;
+}
+
+static enum scsi_timeout_action hiraid_timed_out(struct scsi_cmnd *scmd)
+{
+	struct hiraid_mapmange *mapbuf = scsi_cmd_priv(scmd);
+	unsigned int timeout = scmd->device->request_queue->rq_timeout;
+
+	if (hiraid_check_scmd_finished(scmd))
+		goto out;
+
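+	/* claim the timed-out command; if the completion path won the race, keep waiting */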
+	if (time_after(jiffies, scmd->jiffies_at_alloc + timeout)) {
+		if (cmpxchg(&mapbuf->state, CMD_FLIGHT, CMD_TIMEOUT) == CMD_FLIGHT)
+			return SCSI_EH_DONE;
+	}
+out:
+	return SCSI_EH_RESET_TIMER;
+}
+
+/* send abort command via the admin queue temporarily */
+static int hiraid_send_abort_cmd(struct hiraid_dev *hdev, u32 hdid, u16 qid, u16 cid)
+{
+	struct hiraid_admin_command admin_cmd;
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.abort.opcode = HIRAID_ADMIN_ABORT_CMD;
+	admin_cmd.abort.hdid = cpu_to_le32(hdid);
+	admin_cmd.abort.sqid = cpu_to_le16(qid);
+	admin_cmd.abort.cid = cpu_to_le16(cid);
+
+	return hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+/* send reset command via the admin queue temporarily */
+static int hiraid_send_reset_cmd(struct hiraid_dev *hdev, u8 type, u32 hdid)
+{
+	struct hiraid_admin_command admin_cmd;
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.reset.opcode = HIRAID_ADMIN_RESET;
+	admin_cmd.reset.hdid = cpu_to_le32(hdid);
+	admin_cmd.reset.type = type;
+
+	return hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
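+/*
+ * Validate and apply a host state transition. Allowed transitions:
+ * NEW/RESETTING -> LIVE, LIVE -> RESETTING, NEW/LIVE/RESETTING -> DEAD,
+ * and any state except DELETING -> DELETING.
+ */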
+static bool hiraid_dev_state_trans(struct hiraid_dev *hdev, enum hiraid_dev_state new_state)
+{
+	unsigned long flags;
+	enum hiraid_dev_state old_state;
+	bool change = false;
+
+	spin_lock_irqsave(&hdev->state_lock, flags);
+
+	old_state = hdev->state;
+	switch (new_state) {
+	case DEV_LIVE:
+		switch (old_state) {
+		case DEV_NEW:
+		case DEV_RESETTING:
+			change = true;
+			break;
+		default:
+			break;
+		}
+		break;
+	case DEV_RESETTING:
+		switch (old_state) {
+		case DEV_LIVE:
+			change = true;
+			break;
+		default:
+			break;
+		}
+		break;
+	case DEV_DELETING:
+		if (old_state != DEV_DELETING)
+			change = true;
+		break;
+	case DEV_DEAD:
+		switch (old_state) {
+		case DEV_NEW:
+		case DEV_LIVE:
+		case DEV_RESETTING:
+			change = true;
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+	if (change)
+		hdev->state = new_state;
+	spin_unlock_irqrestore(&hdev->state_lock, flags);
+
+	dev_info(hdev->dev, "oldstate[%d]->newstate[%d], change[%d]\n",
+		old_state, new_state, change);
+
+	return change;
+}
+
+static void hiraid_drain_pending_ios(struct hiraid_dev *hdev);
+
+static void hiraid_flush_running_cmds(struct hiraid_dev *hdev)
+{
+	int i, j;
+
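+	/*
+	 * Fail back all pending scsi requests, then complete any in-flight
+	 * admin and passthrough commands with a dummy 0xFFFF status so their
+	 * waiters are released.
+	 */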
+	scsi_block_requests(hdev->shost);
+	hiraid_drain_pending_ios(hdev);
+	scsi_unblock_requests(hdev->shost);
+
+	j = HIRAID_AQ_BLK_MQ_DEPTH;
+	for (i = 0; i < j; i++) {
+		if (READ_ONCE(hdev->adm_cmds[i].state) == CMD_FLIGHT) {
+			dev_info(hdev->dev, "flush admin, cid[%d]\n", i);
+			hdev->adm_cmds[i].status = 0xFFFF;
+			WRITE_ONCE(hdev->adm_cmds[i].state, CMD_COMPLETE);
+			complete(&(hdev->adm_cmds[i].cmd_done));
+		}
+	}
+
+	j = HIRAID_TOTAL_PTCMDS(hdev->online_queues - 1);
+	for (i = 0; i < j; i++) {
+		if (READ_ONCE(hdev->io_ptcmds[i].state) == CMD_FLIGHT) {
+			hdev->io_ptcmds[i].status = 0xFFFF;
+			WRITE_ONCE(hdev->io_ptcmds[i].state, CMD_COMPLETE);
+			complete(&(hdev->io_ptcmds[i].cmd_done));
+		}
+	}
+}
+
+static int hiraid_dev_disable(struct hiraid_dev *hdev, bool shutdown)
+{
+	int ret = -ENODEV;
+	struct hiraid_queue *adminq = &hdev->queues[0];
+	u16 start, end;
+
+	if (pci_device_is_present(hdev->pdev)) {
+		if (shutdown)
+			hiraid_shutdown_control(hdev);
+		else
+			ret = hiraid_disable_control(hdev);
+	}
+
+	if (hdev->queue_count == 0) {
+		dev_err(hdev->dev, "warn: queue has been delete\n");
+		return ret;
+	}
+
+	spin_lock_irq(&adminq->cq_lock);
+	hiraid_process_cq(adminq, &start, &end, -1);
+	spin_unlock_irq(&adminq->cq_lock);
+	hiraid_complete_cqes(adminq, start, end);
+
+	hiraid_pci_disable(hdev);
+
+	hiraid_flush_running_cmds(hdev);
+
+	return ret;
+}
+
+static void hiraid_reset_work(struct work_struct *work)
+{
+	int ret = 0;
+	struct hiraid_dev *hdev = container_of(work, struct hiraid_dev, reset_work);
+
+	if (hdev->state != DEV_RESETTING) {
+		dev_err(hdev->dev, "err, host is not reset state\n");
+		return;
+	}
+
+	dev_info(hdev->dev, "enter host reset\n");
+
+	if (hdev->ctrl_config & HIRAID_CC_ENABLE) {
+		dev_info(hdev->dev, "start dev_disable\n");
+		ret = hiraid_dev_disable(hdev, false);
+	}
+
+	if (ret)
+		goto out;
+
+	ret = hiraid_pci_enable(hdev);
+	if (ret)
+		goto out;
+
+	ret = hiraid_setup_admin_queue(hdev);
+	if (ret)
+		goto pci_disable;
+
+	ret = hiraid_setup_io_queues(hdev);
+	if (ret || hdev->online_queues != hdev->last_qcnt)
+		goto pci_disable;
+
+	hiraid_dev_state_trans(hdev, DEV_LIVE);
+
+	hiraid_init_async_event(hdev);
+
+	hiraid_queue_scan(hdev);
+
+	return;
+
+pci_disable:
+	hiraid_pci_disable(hdev);
+out:
+	hiraid_dev_state_trans(hdev, DEV_DEAD);
+	dev_err(hdev->dev, "err, host reset failed\n");
+}
+
+static int hiraid_reset_work_sync(struct hiraid_dev *hdev)
+{
+	if (!hiraid_dev_state_trans(hdev, DEV_RESETTING)) {
+		dev_info(hdev->dev, "can't change to reset state\n");
+		return -EBUSY;
+	}
+
+	if (!queue_work(work_queue, &hdev->reset_work)) {
+		dev_err(hdev->dev, "err, host is already in reset state\n");
+		return -EBUSY;
+	}
+
+	flush_work(&hdev->reset_work);
+	if (hdev->state != DEV_LIVE)
+		return -ENODEV;
+
+	return 0;
+}
+
+static int hiraid_wait_io_completion(struct hiraid_mapmange *mapbuf)
+{
+	u16 times = 0;
+
+	do {
+		if (READ_ONCE(mapbuf->state) == CMD_TMO_COMPLETE)
+			break;
+		msleep(500);
+		times++;
+	} while (times <= HIRAID_WAIT_ABNL_CMD_TIMEOUT);
+
+	/* wait command completion timeout after abort/reset success */
+	if (times > HIRAID_WAIT_ABNL_CMD_TIMEOUT)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static bool hiraid_tgt_rst_pending_io_count(struct request *rq, void *data)
+{
+	unsigned int id = *(unsigned int *)data;
+	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+	struct hiraid_mapmange *mapbuf;
+	struct hiraid_sdev_hostdata *hostdata;
+
+	if (scmd) {
+		mapbuf = scsi_cmd_priv(scmd);
+		if ((mapbuf->state == CMD_FLIGHT) || (mapbuf->state == CMD_TIMEOUT)) {
+			if ((scmd->device) && (scmd->device->id == id)) {
+				hostdata = scmd->device->hostdata;
+				hostdata->pend_count++;
+			}
+		}
+	}
+	return true;
+}
+
+static bool hiraid_clean_pending_io(struct request *rq, void *data)
+{
+	struct hiraid_dev *hdev = data;
+	struct scsi_cmnd *scmd;
+	struct hiraid_mapmange *mapbuf;
+
+	if (unlikely(!rq || !blk_mq_request_started(rq)))
+		return true;
+
+	scmd = blk_mq_rq_to_pdu(rq);
+	mapbuf = scsi_cmd_priv(scmd);
+
+	if ((cmpxchg(&mapbuf->state, CMD_FLIGHT, CMD_COMPLETE) != CMD_FLIGHT) &&
+	    (cmpxchg(&mapbuf->state, CMD_TIMEOUT, CMD_COMPLETE) != CMD_TIMEOUT))
+		return true;
+
+	set_host_byte(scmd, DID_NO_CONNECT);
+	if (mapbuf->sge_cnt)
+		scsi_dma_unmap(scmd);
+	hiraid_free_mapbuf(hdev, mapbuf);
+	dev_warn_ratelimited(hdev->dev, "back unfinished CQE, cid[%d] qid[%d]\n",
+		 mapbuf->cid, mapbuf->hiraidq->qid);
+	scsi_done(scmd);
+
+	return true;
+}
+
+static void hiraid_drain_pending_ios(struct hiraid_dev *hdev)
+{
+	blk_mq_tagset_busy_iter(&hdev->shost->tag_set, hiraid_clean_pending_io, (void *)(hdev));
+}
+
+static int wait_tgt_reset_io_done(struct scsi_cmnd *scmd)
+{
+	u16 timeout = 0;
+	struct hiraid_sdev_hostdata *hostdata;
+	struct hiraid_dev *hdev = shost_priv(scmd->device->host);
+
+	hostdata = scmd->device->hostdata;
+
+	do {
+		hostdata->pend_count = 0;
+		blk_mq_tagset_busy_iter(&hdev->shost->tag_set, hiraid_tgt_rst_pending_io_count,
+			(void *)(&scmd->device->id));
+
+		if (!hostdata->pend_count)
+			return 0;
+
+		msleep(500);
+		timeout++;
+	} while (timeout <= HIRAID_WAIT_RST_IO_TIMEOUT);
+
+	return -ETIMEDOUT;
+}
+
+static int hiraid_abort(struct scsi_cmnd *scmd)
+{
+	struct hiraid_dev *hdev = shost_priv(scmd->device->host);
+	struct hiraid_mapmange *mapbuf = scsi_cmd_priv(scmd);
+	struct hiraid_sdev_hostdata *hostdata;
+	u16 hwq, cid;
+	int ret;
+
+	scsi_print_command(scmd);
+
+	if (hdev->state != DEV_LIVE || !hiraid_wait_io_completion(mapbuf) ||
+	    hiraid_check_scmd_finished(scmd))
+		return SUCCESS;
+
+	hostdata = scmd->device->hostdata;
+	cid = mapbuf->cid;
+	hwq = mapbuf->hiraidq->qid;
+
+	dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, send abort\n", cid, hwq);
+	ret = hiraid_send_abort_cmd(hdev, hostdata->hdid, hwq, cid);
+	if (ret != -ETIME) {
+		ret = hiraid_wait_io_completion(mapbuf);
+		if (ret) {
+			dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed, not found\n", cid, hwq);
+			return FAILED;
+		}
+		dev_warn(hdev->dev, "cid[%d] qid[%d] abort succ\n", cid, hwq);
+		return SUCCESS;
+	}
+	dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed, timeout\n", cid, hwq);
+	return FAILED;
+}
+
+static int hiraid_scsi_reset(struct scsi_cmnd *scmd, enum hiraid_rst_type rst)
+{
+	struct hiraid_dev *hdev = shost_priv(scmd->device->host);
+	struct hiraid_sdev_hostdata *hostdata;
+	int ret;
+
+	if (hdev->state != DEV_LIVE)
+		return SUCCESS;
+
+	hostdata = scmd->device->hostdata;
+
+	dev_warn(hdev->dev, "sdev[%d:%d] send %s reset\n", scmd->device->channel, scmd->device->id,
+		 rst ? "bus" : "target");
+	ret = hiraid_send_reset_cmd(hdev, rst, hostdata->hdid);
+	if ((ret == 0) || (ret == FW_EH_DEV_NONE && rst == HIRAID_RESET_TARGET)) {
+		if (rst == HIRAID_RESET_TARGET) {
+			ret = wait_tgt_reset_io_done(scmd);
+			if (ret) {
+				dev_warn(hdev->dev, "sdev[%d:%d] target has %d peding cmd, target reset failed\n",
+					scmd->device->channel, scmd->device->id,
+					hostdata->pend_count);
+				return FAILED;
+			}
+		}
+		dev_warn(hdev->dev, "sdev[%d:%d] %s reset success\n",
+			scmd->device->channel, scmd->device->id, rst ? "bus" : "target");
+		return SUCCESS;
+	}
+
+	dev_warn(hdev->dev, "sdev[%d:%d] %s reset failed\n",
+		scmd->device->channel, scmd->device->id, rst ? "bus" : "target");
+	return FAILED;
+}
+
+static int hiraid_target_reset(struct scsi_cmnd *scmd)
+{
+	return hiraid_scsi_reset(scmd, HIRAID_RESET_TARGET);
+}
+
+static int hiraid_bus_reset(struct scsi_cmnd *scmd)
+{
+	return hiraid_scsi_reset(scmd, HIRAID_RESET_BUS);
+}
+
+static int hiraid_host_reset(struct scsi_cmnd *scmd)
+{
+	struct hiraid_dev *hdev = shost_priv(scmd->device->host);
+
+	if (hdev->state != DEV_LIVE)
+		return SUCCESS;
+
+	dev_warn(hdev->dev, "sdev[%d:%d] send host reset\n",
+		scmd->device->channel, scmd->device->id);
+	if (hiraid_reset_work_sync(hdev) == -EBUSY)
+		flush_work(&hdev->reset_work);
+
+	if (hdev->state != DEV_LIVE) {
+		dev_warn(hdev->dev, "sdev[%d:%d] host reset failed\n",
+			scmd->device->channel, scmd->device->id);
+		return FAILED;
+	}
+
+	dev_warn(hdev->dev, "sdev[%d:%d] host reset success\n",
+		scmd->device->channel, scmd->device->id);
+
+	return SUCCESS;
+}
+
+static pci_ers_result_t hiraid_pci_error_detected(struct pci_dev *pdev,
+						  pci_channel_state_t state)
+{
+	struct hiraid_dev *hdev = pci_get_drvdata(pdev);
+
+	dev_info(hdev->dev, "pci error detected, state[%d]\n", state);
+
+	switch (state) {
+	case pci_channel_io_normal:
+		dev_warn(hdev->dev, "channel is normal, do nothing\n");
+
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	case pci_channel_io_frozen:
+		dev_warn(hdev->dev, "channel io frozen, need reset controller\n");
+
+		scsi_block_requests(hdev->shost);
+
+		hiraid_dev_state_trans(hdev, DEV_RESETTING);
+
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		dev_warn(hdev->dev, "channel io failure, disconnect\n");
+
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t hiraid_pci_slot_reset(struct pci_dev *pdev)
+{
+	struct hiraid_dev *hdev = pci_get_drvdata(pdev);
+
+	dev_info(hdev->dev, "restart after slot reset\n");
+
+	pci_restore_state(pdev);
+
+	if (!queue_work(work_queue, &hdev->reset_work)) {
+		dev_err(hdev->dev, "err, the device is resetting state\n");
+		return PCI_ERS_RESULT_NONE;
+	}
+
+	flush_work(&hdev->reset_work);
+
+	scsi_unblock_requests(hdev->shost);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void hiraid_reset_pci_finish(struct pci_dev *pdev)
+{
+	struct hiraid_dev *hdev = pci_get_drvdata(pdev);
+
+	dev_info(hdev->dev, "enter hiraid reset finish\n");
+}
+
+static ssize_t csts_pp_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct hiraid_dev *hdev = shost_priv(shost);
+	int ret = -1;
+
+	if (pci_device_is_present(hdev->pdev)) {
+		ret = (readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_PP_MASK);
+		ret >>= HIRAID_CSTS_PP_SHIFT;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_shst_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct hiraid_dev *hdev = shost_priv(shost);
+	int ret = -1;
+
+	if (pci_device_is_present(hdev->pdev)) {
+		ret = (readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_SHST_MASK);
+		ret >>= HIRAID_CSTS_SHST_SHIFT;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_cfs_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct hiraid_dev *hdev = shost_priv(shost);
+	int ret = -1;
+
+	if (pci_device_is_present(hdev->pdev)) {
+		ret = (readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_CFS_MASK);
+		ret >>= HIRAID_CSTS_CFS_SHIFT;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_rdy_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct hiraid_dev *hdev = shost_priv(shost);
+	int ret = -1;
+
+	if (pci_device_is_present(hdev->pdev))
+		ret = (readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_RDY);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t fw_version_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct hiraid_dev *hdev = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", hdev->ctrl_info->fw_version);
+}
+
+static ssize_t hdd_dispatch_store(struct device *cdev, struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int val = 0;
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct hiraid_dev *hdev = shost_priv(shost);
+
+	if (kstrtoint(buf, 0, &val) != 0)
+		return -EINVAL;
+	if (val < DISPATCH_BY_CPU || val > DISPATCH_BY_DISK)
+		return -EINVAL;
+	hdev->hdd_dispatch = val;
+
+	return count;
+}
+
+static ssize_t hdd_dispatch_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct hiraid_dev *hdev = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", hdev->hdd_dispatch);
+}
+
+static DEVICE_ATTR_RO(csts_pp);
+static DEVICE_ATTR_RO(csts_shst);
+static DEVICE_ATTR_RO(csts_cfs);
+static DEVICE_ATTR_RO(csts_rdy);
+static DEVICE_ATTR_RO(fw_version);
+static DEVICE_ATTR_RW(hdd_dispatch);
+
+static struct attribute *hiraid_host_attrs[] = {
+	&dev_attr_csts_rdy.attr,
+	&dev_attr_csts_pp.attr,
+	&dev_attr_csts_cfs.attr,
+	&dev_attr_fw_version.attr,
+	&dev_attr_csts_shst.attr,
+	&dev_attr_hdd_dispatch.attr,
+	NULL,
+};
+
+static const struct attribute_group hiraid_host_attrs_group = {
+	.attrs = hiraid_host_attrs
+};
+
+const struct attribute_group *hiraid_host_attrs_groups[] = {
+	&hiraid_host_attrs_group,
+	NULL
+};
+
+static int hiraid_get_vd_info(struct hiraid_dev *hdev, struct hiraid_vd_info *vd_info, u16 vid)
+{
+	struct hiraid_admin_command admin_cmd;
+	u8 *data_ptr = NULL;
+	dma_addr_t buffer_phy = 0;
+	int ret;
+
+	if (hdev->state >= DEV_RESETTING) {
+		dev_err(hdev->dev, "err, host state[%d] is not right\n", hdev->state);
+		return -EBUSY;
+	}
+
+	data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &buffer_phy, GFP_KERNEL);
+	if (!data_ptr)
+		return -ENOMEM;
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+	admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_VDINFO);
+	admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+	admin_cmd.usr_cmd.info_1.param_len = cpu_to_le16(VDINFO_PARAM_LEN);
+	admin_cmd.usr_cmd.cdw10 = cpu_to_le32(vid);
+	admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy);
+
+	ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, USRCMD_TIMEOUT);
+	if (!ret)
+		memcpy(vd_info, data_ptr, sizeof(struct hiraid_vd_info));
+
+	dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, buffer_phy);
+
+	return ret;
+}
+
+static int hiraid_get_bgtask(struct hiraid_dev *hdev, struct hiraid_bgtask *bgtask)
+{
+	struct hiraid_admin_command admin_cmd;
+	u8 *data_ptr = NULL;
+	dma_addr_t buffer_phy = 0;
+	int ret;
+
+	if (hdev->state >= DEV_RESETTING) {
+		dev_err(hdev->dev, "err, host state[%d] is not right\n", hdev->state);
+		return -EBUSY;
+	}
+
+	data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &buffer_phy, GFP_KERNEL);
+	if (!data_ptr)
+		return -ENOMEM;
+
+	memset(&admin_cmd, 0, sizeof(admin_cmd));
+	admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+	admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_BGTASK);
+	admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+	admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy);
+
+	ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, USRCMD_TIMEOUT);
+	if (!ret)
+		memcpy(bgtask, data_ptr, sizeof(struct hiraid_bgtask));
+
+	dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, buffer_phy);
+
+	return ret;
+}
+
+static ssize_t raid_level_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev;
+	struct hiraid_dev *hdev;
+	struct hiraid_vd_info *vd_info;
+	struct hiraid_sdev_hostdata *hostdata;
+	int ret;
+
+	sdev = to_scsi_device(dev);
+	hdev = shost_priv(sdev->host);
+	hostdata = sdev->hostdata;
+
+	vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+	if (!vd_info || !HIRAID_DEV_INFO_ATTR_VD(hostdata->attr)) {
+		kfree(vd_info);
+		return snprintf(buf, PAGE_SIZE, "NA\n");
+	}
+
+	ret = hiraid_get_vd_info(hdev, vd_info, sdev->id);
+	if (ret)
+		vd_info->rg_level = ARRAY_SIZE(raid_levels) - 1;
+
+	ret = (vd_info->rg_level < ARRAY_SIZE(raid_levels)) ?
+	       vd_info->rg_level : (ARRAY_SIZE(raid_levels) - 1);
+
+	kfree(vd_info);
+
+	return snprintf(buf, PAGE_SIZE, "RAID-%s\n", raid_levels[ret]);
+}
+
+static ssize_t raid_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev;
+	struct hiraid_dev *hdev;
+	struct hiraid_vd_info *vd_info;
+	struct hiraid_sdev_hostdata *hostdata;
+	int ret;
+
+	sdev = to_scsi_device(dev);
+	hdev = shost_priv(sdev->host);
+	hostdata = sdev->hostdata;
+
+	vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+	if (!vd_info || !HIRAID_DEV_INFO_ATTR_VD(hostdata->attr)) {
+		kfree(vd_info);
+		return snprintf(buf, PAGE_SIZE, "NA\n");
+	}
+
+	ret = hiraid_get_vd_info(hdev, vd_info, sdev->id);
+	if (ret) {
+		vd_info->vd_status = 0;
+		vd_info->rg_id = 0xff;
+	}
+
+	ret = (vd_info->vd_status < ARRAY_SIZE(raid_states)) ? vd_info->vd_status : 0;
+
+	kfree(vd_info);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", raid_states[ret]);
+}
+
+static ssize_t raid_resync_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev;
+	struct hiraid_dev *hdev;
+	struct hiraid_vd_info *vd_info;
+	struct hiraid_bgtask *bgtask;
+	struct hiraid_sdev_hostdata *hostdata;
+	u8 rg_id, i, progress = 0;
+	int ret;
+
+	sdev = to_scsi_device(dev);
+	hdev = shost_priv(sdev->host);
+	hostdata = sdev->hostdata;
+
+	vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+	if (!vd_info || !HIRAID_DEV_INFO_ATTR_VD(hostdata->attr)) {
+		kfree(vd_info);
+		return snprintf(buf, PAGE_SIZE, "NA\n");
+	}
+
+	ret = hiraid_get_vd_info(hdev, vd_info, sdev->id);
+	if (ret)
+		goto out;
+
+	rg_id = vd_info->rg_id;
+
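+	/* reuse the vd_info allocation, assuming struct hiraid_bgtask fits in it */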
+	bgtask = (struct hiraid_bgtask *)vd_info;
+	ret = hiraid_get_bgtask(hdev, bgtask);
+	if (ret)
+		goto out;
+	for (i = 0; i < bgtask->task_num; i++) {
+		if ((bgtask->bgtask[i].type == BGTASK_TYPE_REBUILD) &&
+		    (le16_to_cpu(bgtask->bgtask[i].vd_id) == rg_id))
+			progress = bgtask->bgtask[i].progress;
+	}
+
+out:
+	kfree(vd_info);
+	return snprintf(buf, PAGE_SIZE, "%d\n", progress);
+}
+
+static ssize_t dispatch_hwq_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct hiraid_sdev_hostdata *hostdata;
+
+	hostdata = to_scsi_device(dev)->hostdata;
+	return snprintf(buf, PAGE_SIZE, "%d\n", hostdata->hwq);
+}
+
+static ssize_t dispatch_hwq_store(struct device *dev, struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	int val;
+	struct hiraid_dev *hdev;
+	struct scsi_device *sdev;
+	struct hiraid_sdev_hostdata *hostdata;
+
+	sdev = to_scsi_device(dev);
+	hdev = shost_priv(sdev->host);
+	hostdata = sdev->hostdata;
+
+	if (kstrtoint(buf, 0, &val) != 0)
+		return -EINVAL;
+	if (val <= 0 || val >= hdev->online_queues)
+		return -EINVAL;
+	if (!hiraid_disk_is_hdd(hostdata->attr))
+		return -EINVAL;
+
+	hostdata->hwq = val;
+	return count;
+}
+
+static DEVICE_ATTR_RO(raid_level);
+static DEVICE_ATTR_RO(raid_state);
+static DEVICE_ATTR_RO(raid_resync);
+static DEVICE_ATTR_RW(dispatch_hwq);
+
+static struct attribute *hiraid_dev_attrs[] = {
+	&dev_attr_raid_state.attr,
+	&dev_attr_raid_level.attr,
+	&dev_attr_raid_resync.attr,
+	&dev_attr_dispatch_hwq.attr,
+	NULL,
+};
+
+static const struct attribute_group hiraid_dev_group = {
+	.attrs = hiraid_dev_attrs
+};
+
+const struct attribute_group *hiraid_dev_groups[] = {
+	&hiraid_dev_group,
+	NULL
+};
+
+static struct pci_error_handlers hiraid_err_handler = {
+	.error_detected = hiraid_pci_error_detected,
+	.slot_reset = hiraid_pci_slot_reset,
+	.reset_done = hiraid_reset_pci_finish,
+};
+
+static int hiraid_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+	int ret;
+	struct hiraid_dev *hdev = shost_priv(shost);
+
+	dev_info(hdev->dev, "start sysfs host reset cmd\n");
+	ret = hiraid_reset_work_sync(hdev);
+	dev_info(hdev->dev, "stop sysfs host reset cmd[%d]\n", ret);
+
+	return ret;
+}
+
+static int hiraid_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+	struct hiraid_dev *hdev = shost_priv(shost);
+
+	hiraid_scan_work(&hdev->scan_work);
+
+	return 1;
+}
+
+static const struct scsi_host_template hiraid_driver_template = {
+	.module			= THIS_MODULE,
+	.name			= "hiraid",
+	.proc_name		= "hiraid",
+	.queuecommand		= hiraid_queue_command,
+	.slave_alloc		= hiraid_slave_alloc,
+	.slave_destroy		= hiraid_slave_destroy,
+	.slave_configure	= hiraid_slave_configure,
+	.scan_finished		= hiraid_scan_finished,
+	.eh_timed_out		= hiraid_timed_out,
+	.eh_abort_handler	= hiraid_abort,
+	.eh_target_reset_handler	= hiraid_target_reset,
+	.eh_bus_reset_handler		= hiraid_bus_reset,
+	.eh_host_reset_handler		= hiraid_host_reset,
+	.change_queue_depth		= scsi_change_queue_depth,
+	.this_id			= -1,
+	.shost_groups			= hiraid_host_attrs_groups,
+	.sdev_groups			= hiraid_dev_groups,
+	.host_reset			= hiraid_sysfs_host_reset,
+	.cmd_size			= sizeof(struct hiraid_mapmange) + HIRAID_ALLOC_SIZE_MAX,
+};
+
+static void hiraid_shutdown(struct pci_dev *pdev)
+{
+	struct hiraid_dev *hdev = pci_get_drvdata(pdev);
+
+	hiraid_delete_io_queues(hdev);
+	hiraid_disable_admin_queue(hdev, true);
+}
+
+static bool hiraid_bsg_is_valid(struct bsg_job *job)
+{
+	u64 timeout = 0;
+	struct request *rq = blk_mq_rq_from_pdu(job);
+	struct hiraid_bsg_request *bsg_req = job->request;
+	struct hiraid_dev *hdev = shost_priv(dev_to_shost(job->dev));
+
+	if (bsg_req == NULL || job->request_len != sizeof(struct hiraid_bsg_request))
+		return false;
+
+	switch (bsg_req->msgcode) {
+	case HIRAID_BSG_ADMIN:
+		timeout = msecs_to_jiffies(bsg_req->admcmd.timeout_ms);
+		break;
+	case HIRAID_BSG_IOPTHRU:
+		timeout = msecs_to_jiffies(bsg_req->pthrucmd.timeout_ms);
+		break;
+	default:
+		dev_info(hdev->dev, "bsg unsupport msgcode[%d]\n", bsg_req->msgcode);
+		return false;
+	}
+
+	if ((timeout + CTL_RST_TIME) > rq->timeout) {
+		dev_err(hdev->dev, "bsg invalid time\n");
+		return false;
+	}
+
+	return true;
+}
+
+/* bsg dispatch user command */
+static int hiraid_bsg_dispatch(struct bsg_job *job)
+{
+	struct Scsi_Host *shost = dev_to_shost(job->dev);
+	struct hiraid_dev *hdev = shost_priv(shost);
+	struct request *rq = blk_mq_rq_from_pdu(job);
+	struct hiraid_bsg_request *bsg_req = job->request;
+	int ret = -ENOMSG;
+
+	job->reply_len = 0;
+
+	if (!hiraid_bsg_is_valid(job)) {
+		bsg_job_done(job, ret, 0);
+		return 0;
+	}
+
+	dev_log_dbg(hdev->dev, "bsg msgcode[%d] msglen[%d] timeout[%d];"
+		"reqnsge[%d], reqlen[%d]\n",
+		bsg_req->msgcode, job->request_len, rq->timeout,
+		job->request_payload.sg_cnt, job->request_payload.payload_len);
+
+	switch (bsg_req->msgcode) {
+	case HIRAID_BSG_ADMIN:
+		ret = hiraid_user_send_admcmd(hdev, job);
+		break;
+	case HIRAID_BSG_IOPTHRU:
+		ret = hiraid_user_send_ptcmd(hdev, job);
+		break;
+	default:
+		break;
+	}
+
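+	/* assumption: user tools expect a positive status mirrored into the low two result bytes */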
+	if (ret > 0)
+		ret = ret | (ret << 8);
+
+	bsg_job_done(job, ret, 0);
+	return 0;
+}
+
+static inline void hiraid_unregist_bsg(struct hiraid_dev *hdev)
+{
+	bsg_remove_queue(hdev->bsg_queue);
+}
+
+static int hiraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct hiraid_dev *hdev;
+	struct Scsi_Host *shost;
+	int node, ret;
+	char bsg_name[15];
+
+	shost = scsi_host_alloc(&hiraid_driver_template, sizeof(*hdev));
+	if (!shost) {
+		dev_err(&pdev->dev, "failed to allocate scsi host\n");
+		return -ENOMEM;
+	}
+	hdev = shost_priv(shost);
+	hdev->pdev = pdev;
+	hdev->dev = get_device(&pdev->dev);
+
+	node = dev_to_node(hdev->dev);
+	if (node == NUMA_NO_NODE) {
+		node = first_memory_node;
+		set_dev_node(hdev->dev, node);
+	}
+	hdev->numa_node = node;
+	hdev->shost = shost;
+	hdev->instance = shost->host_no;
+	pci_set_drvdata(pdev, hdev);
+
+	ret = hiraid_dev_map(hdev);
+	if (ret)
+		goto put_dev;
+
+	init_rwsem(&hdev->dev_rwsem);
+	INIT_WORK(&hdev->scan_work, hiraid_scan_work);
+	INIT_WORK(&hdev->timesyn_work, hiraid_timesyn_work);
+	INIT_WORK(&hdev->reset_work, hiraid_reset_work);
+	INIT_WORK(&hdev->fwact_work, hiraid_fwactive_work);
+	spin_lock_init(&hdev->state_lock);
+
+	ret = hiraid_alloc_resources(hdev);
+	if (ret)
+		goto dev_unmap;
+
+	ret = hiraid_pci_enable(hdev);
+	if (ret)
+		goto resources_free;
+
+	ret = hiraid_setup_admin_queue(hdev);
+	if (ret)
+		goto pci_disable;
+
+	ret = hiraid_init_control_info(hdev);
+	if (ret)
+		goto disable_admin_q;
+
+	ret = hiraid_setup_io_queues(hdev);
+	if (ret)
+		goto disable_admin_q;
+
+	hiraid_shost_init(hdev);
+
+	ret = scsi_add_host(hdev->shost, hdev->dev);
+	if (ret) {
+		dev_err(hdev->dev, "add shost to system failed, ret[%d]\n", ret);
+		goto remove_io_queues;
+	}
+
+	snprintf(bsg_name, sizeof(bsg_name), "hiraid%d", shost->host_no);
+	hdev->bsg_queue = bsg_setup_queue(&shost->shost_gendev, bsg_name, hiraid_bsg_dispatch,
+					  NULL, hiraid_get_max_cmd_size(hdev));
+	if (IS_ERR(hdev->bsg_queue)) {
+		dev_err(hdev->dev, "err, setup bsg failed\n");
+		hdev->bsg_queue = NULL;
+		goto remove_io_queues;
+	}
+
+	if (hdev->online_queues == HIRAID_ADMIN_QUEUE_NUM) {
+		dev_warn(hdev->dev, "warn: only admin queue can be used\n");
+		return 0;
+	}
+
+	hdev->state = DEV_LIVE;
+
+	hiraid_init_async_event(hdev);
+
+	ret = hiraid_dev_list_init(hdev);
+	if (ret)
+		goto unregist_bsg;
+
+	ret = hiraid_configure_timestamp(hdev);
+	if (ret)
+		dev_warn(hdev->dev, "time synchronization failed\n");
+
+	ret = hiraid_alloc_io_ptcmds(hdev);
+	if (ret)
+		goto unregist_bsg;
+
+	scsi_scan_host(hdev->shost);
+
+	return 0;
+
+unregist_bsg:
+	hiraid_unregist_bsg(hdev);
+remove_io_queues:
+	hiraid_delete_io_queues(hdev);
+disable_admin_q:
+	hiraid_free_sense_buffer(hdev);
+	hiraid_disable_admin_queue(hdev, false);
+pci_disable:
+	hiraid_free_all_queues(hdev);
+	hiraid_pci_disable(hdev);
+resources_free:
+	hiraid_free_resources(hdev);
+dev_unmap:
+	hiraid_dev_unmap(hdev);
+put_dev:
+	put_device(hdev->dev);
+	scsi_host_put(shost);
+
+	return -ENODEV;
+}
+
+static void hiraid_remove(struct pci_dev *pdev)
+{
+	struct hiraid_dev *hdev = pci_get_drvdata(pdev);
+	struct Scsi_Host *shost = hdev->shost;
+
+	dev_info(hdev->dev, "enter hiraid remove\n");
+
+	hiraid_dev_state_trans(hdev, DEV_DELETING);
+	flush_work(&hdev->reset_work);
+
+	if (!pci_device_is_present(pdev))
+		hiraid_flush_running_cmds(hdev);
+
+	hiraid_unregist_bsg(hdev);
+	scsi_remove_host(shost);
+	hiraid_free_io_ptcmds(hdev);
+	kfree(hdev->dev_info);
+	hiraid_delete_io_queues(hdev);
+	hiraid_free_sense_buffer(hdev);
+	hiraid_disable_admin_queue(hdev, false);
+	hiraid_free_all_queues(hdev);
+	hiraid_pci_disable(hdev);
+	hiraid_free_resources(hdev);
+	hiraid_dev_unmap(hdev);
+	put_device(hdev->dev);
+	scsi_host_put(shost);
+
+	dev_info(hdev->dev, "exit hiraid remove\n");
+}
+
+static const struct pci_device_id hiraid_hw_card_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_LOGIC, HIRAID_SERVER_DEVICE_HBA_DID) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_LOGIC, HIRAID_SERVER_DEVICE_HBAS_DID) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_LOGIC, HIRAID_SERVER_DEVICE_RAID_DID) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_LOGIC, HIRAID_SERVER_DEVICE_RAIDS_DID) },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, hiraid_hw_card_ids);
+
+static struct pci_driver hiraid_driver = {
+	.name		= "hiraid",
+	.id_table	= hiraid_hw_card_ids,
+	.probe		= hiraid_probe,
+	.remove		= hiraid_remove,
+	.shutdown	= hiraid_shutdown,
+	.err_handler	= &hiraid_err_handler,
+};
+
+static int __init hiraid_init(void)
+{
+	int ret;
+
+	work_queue = alloc_workqueue("hiraid-wq", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+	if (!work_queue)
+		return -ENOMEM;
+
+	hiraid_class = class_create("hiraid");
+	if (IS_ERR(hiraid_class)) {
+		ret = PTR_ERR(hiraid_class);
+		goto destroy_wq;
+	}
+
+	ret = pci_register_driver(&hiraid_driver);
+	if (ret < 0)
+		goto destroy_class;
+
+	return 0;
+
+destroy_class:
+	class_destroy(hiraid_class);
+destroy_wq:
+	destroy_workqueue(work_queue);
+
+	return ret;
+}
+
+static void __exit hiraid_exit(void)
+{
+	pci_unregister_driver(&hiraid_driver);
+	class_destroy(hiraid_class);
+	destroy_workqueue(work_queue);
+}
+
+MODULE_AUTHOR("Huawei Technologies CO., Ltd");
+MODULE_DESCRIPTION("Huawei RAID driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(HIRAID_DRV_VERSION);
+module_init(hiraid_init);
+module_exit(hiraid_exit);
-- 
2.22.0.windows.1