This patchset fixes several bugs in drivers and core kernel code, and backports several CVE fixes from mainline.
Eric Dumazet (1): net: silence KCSAN warnings around sk_add_backlog() calls
Ganapathi Bhat (1): mwifiex: fix possible heap overflow in mwifiex_process_country_ie()
Gao Chuan (1): hisi_sas: fix accessing interrupts that do not belong to the driver
Hongbo Yao (1): Revert "vfio: relieve mmap_sem reader cacheline bouncing by holding it longer"
Junxin Chen (2):
  net: hns3: fix bug when config of HNS3 is y
  net: hns3: fix bugs found by codereview
Liu Yanshi (1): pcie: hisi pcie: add safety checks
Mike Christie (1): nbd: fix shutdown and recv work deadlock v2
Shaozhengchao (1): net/hinic: fix txq index in the tx resources error path
Wei Li (1): sched/debug: Reset watchdog on all CPUs while processing sysrq-t
Xiongfeng Wang (1): PCI: fix the wrong class type for HiSilicon NP 5896 device
YueHaibing (1): dccp: Fix memleak in __feat_register_sp
Zhang Wei (1): acc: fixup security problem and modify reset process
fengsheng (2):
  drivers: localbus: add missing parameter validity checks
  drivers: sysctl: add missing parameter validity checks
wanglin (2):
  RDMA/hns: Optimize roce code by review advice
  RDMA/hns: modify roce dfx code by review advice
yangerkun (1): ext4: stop IO for page without buffer_head
Zhang Wei (1): sec2: fix freeing c_key without clearing it
zhangwei (1): hpre: fixup security check review problem
 drivers/block/nbd.c | 6 +-
 drivers/crypto/hisilicon/hpre/hpre_crypto.c | 22 +-
 drivers/crypto/hisilicon/hpre/hpre_main.c | 157 ++++++-----
 drivers/crypto/hisilicon/qm.c | 205 +++++++++++++--
 drivers/crypto/hisilicon/qm.h | 23 +-
 drivers/crypto/hisilicon/rde/rde.h | 25 +-
 drivers/crypto/hisilicon/rde/rde_api.c | 244 +++++++++---------
 drivers/crypto/hisilicon/rde/rde_api.h | 2 +-
 drivers/crypto/hisilicon/rde/rde_data.c | 28 +-
 drivers/crypto/hisilicon/rde/rde_main.c | 183 +++++++------
 drivers/crypto/hisilicon/sec/sec_algs.c | 13 +-
 drivers/crypto/hisilicon/sec2/sec.h | 10 +-
 drivers/crypto/hisilicon/sec2/sec_crypto.c | 43 +--
 drivers/crypto/hisilicon/sec2/sec_crypto.h | 10 +-
 drivers/crypto/hisilicon/sec2/sec_main.c | 244 ++++++++++--------
 drivers/crypto/hisilicon/sec2/sec_usr_if.h | 10 +-
 drivers/crypto/hisilicon/sgl.c | 6 +-
 drivers/crypto/hisilicon/zip/zip_crypto.c | 4 +-
 drivers/crypto/hisilicon/zip/zip_main.c | 190 +++++++++----
 .../infiniband/hw/hns/hns_roce_hw_sysfs_v2.c | 6 +-
 .../hw/hns/roce-customer/rdfx_entry.c | 24 +-
 .../hw/hns/roce-customer/rdfx_hw_v2.c | 58 +----
 .../hw/hns/roce-customer/rdfx_main.c | 2 +
 .../hw/hns/roce-customer/rdfx_sysfs.c | 96 +------
 drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 +-
 .../hns3/hns-customer/hns3_enet_it.c | 2 +-
 .../hns3/hns-customer/hns3pf/hclge_ext.c | 12 +-
 .../hisilicon/hns3/hns3_cae/hns3_cae_cmd.c | 2 +-
 .../hisilicon/hns3/hns3_cae/hns3_cae_fd.c | 2 +-
 .../hisilicon/hns3/hns3_cae/hns3_cae_init.c | 13 +-
 .../hisilicon/hns3/hns3_cae/hns3_cae_pkt.c | 39 ++-
 .../hisilicon/hns3/hns3_cae/hns3_cae_pkt.h | 3 +
 .../hisilicon/hns3/hns3_cae/hns3_cae_qinfo.c | 2 +-
 .../hns3/hns3_cae/hns3_cae_version.c | 4 +-
 .../hns3/hns3_cae/hns3_cae_version.h | 2 +-
 .../net/ethernet/hisilicon/hns3/hns3_enet.h | 2 +-
 .../hisilicon/hns3/hns3pf/hclge_main.h | 2 +-
 .../hisilicon/hns3/hns3vf/hclgevf_main.h | 2 +-
 drivers/net/ethernet/huawei/hinic/hinic_tx.c | 2 +-
 .../net/wireless/marvell/mwifiex/sta_ioctl.c | 13 +-
 .../hisi-pcie-customer/hisi_pcie_cae.c | 108 +++++++-
 drivers/pci/quirks.c | 2 +-
 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 2 +-
 drivers/soc/hisilicon/lbc/hs_lbc_pltfm.c | 12 +-
 drivers/soc/hisilicon/sysctl/sysctl_drv.c | 4 +-
 drivers/soc/hisilicon/sysctl/sysctl_pmbus.c | 47 +++-
 drivers/vfio/vfio_iommu_type1.c | 28 +-
 fs/ext4/inode.c | 26 ++
 kernel/sched/debug.c | 11 +-
 net/core/sock.c | 2 +-
 net/dccp/feat.c | 7 +-
 net/ipv4/tcp_ipv4.c | 2 +-
 net/llc/llc_conn.c | 2 +-
 net/sctp/input.c | 4 +-
 net/tipc/socket.c | 6 +-
 net/x25/x25_dev.c | 2 +-
 56 files changed, 1152 insertions(+), 828 deletions(-)
From: yangerkun yangerkun@huawei.com
hulk inclusion category: bugfix bugzilla: 27600 CVE: NA ---------------------------
dio_bio_complete() sets a page dirty without checking whether the page still has valid buffer_heads attached. This can trigger problems when ext4 later tries to write that page back. For ext4, fix it by skipping writeback for pages that have no buffer_heads.
[1] https://lwn.net/Articles/774411/ : "DMA and get_user_pages()" [2] https://lwn.net/Articles/753027/ : "The Trouble with get_user_pages()" [3] https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=...
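In short, both ext4 writeback entry points now bail out early on such pages. A condensed sketch of the guard added by the patch below (the full version also clears the page's dirty/towrite tags via a new cancel_page_dirty_status() helper):

	if (WARN_ON(!page_has_buffers(page))) {
		cancel_page_dirty_status(page);
		unlock_page(page);
		return 0;	/* or "continue;" in mpage_prepare_extent_to_map() */
	}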
Signed-off-by: yangerkun yangerkun@huawei.com Reviewed-by: zhangyi (F) yi.zhang@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- fs/ext4/inode.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 5abbb915f4e3..be8e89720735 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2075,6 +2075,20 @@ static int __ext4_journalled_writepage(struct page *page, return ret; }
+static void cancel_page_dirty_status(struct page *page) +{ + struct address_space *mapping = page_mapping(page); + unsigned long flags; + + cancel_dirty_page(page); + xa_lock_irqsave(&mapping->i_pages, flags); + radix_tree_tag_clear(&mapping->i_pages, page_index(page), + PAGECACHE_TAG_DIRTY); + radix_tree_tag_clear(&mapping->i_pages, page_index(page), + PAGECACHE_TAG_TOWRITE); + xa_unlock_irqrestore(&mapping->i_pages, flags); +} + /* * Note that we don't need to start a transaction unless we're journaling data * because we should have holes filled from ext4_page_mkwrite(). We even don't @@ -2133,6 +2147,12 @@ static int ext4_writepage(struct page *page, return -EIO; }
+ if (WARN_ON(!page_has_buffers(page))) { + cancel_page_dirty_status(page); + unlock_page(page); + return 0; + } + trace_ext4_writepage(page); size = i_size_read(inode); if (page->index == size >> PAGE_SHIFT) @@ -2686,6 +2706,12 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) continue; }
+ if (WARN_ON(!page_has_buffers(page))) { + cancel_page_dirty_status(page); + unlock_page(page); + continue; + } + wait_on_page_writeback(page); BUG_ON(PageWriteback(page));
From: Eric Dumazet edumazet@google.com
mainline inclusion from mainline-5.4-rc4 commit 8265792bf887 category: bugfix bugzilla: 24071 CVE: NA
-------------------------------------------------
sk_add_backlog() callers usually read sk->sk_rcvbuf without owning the socket lock. This means sk_rcvbuf value can be changed by other cpus, and KCSAN complains.
Add READ_ONCE() annotations to document the lockless nature of these reads.
Note that writes over sk_rcvbuf should also use WRITE_ONCE(), but this will be done in separate patches to ease stable backports (if we decide this is relevant for stable trees).
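For context, READ_ONCE()/WRITE_ONCE() force a single, non-torn access and tell both the compiler and KCSAN that the racy access is intentional. A minimal illustrative pairing (not the patched code; the writer-side conversion is the follow-up mentioned above):

	/* writer, typically under the socket lock */
	WRITE_ONCE(sk->sk_rcvbuf, val);

	/* lockless reader on another CPU, e.g. the backlog path */
	u32 rcvbuf = READ_ONCE(sk->sk_rcvbuf);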
BUG: KCSAN: data-race in tcp_add_backlog / tcp_recvmsg
write to 0xffff88812ab369f8 of 8 bytes by interrupt on cpu 1: __sk_add_backlog include/net/sock.h:902 [inline] sk_add_backlog include/net/sock.h:933 [inline] tcp_add_backlog+0x45a/0xcc0 net/ipv4/tcp_ipv4.c:1737 tcp_v4_rcv+0x1aba/0x1bf0 net/ipv4/tcp_ipv4.c:1925 ip_protocol_deliver_rcu+0x51/0x470 net/ipv4/ip_input.c:204 ip_local_deliver_finish+0x110/0x140 net/ipv4/ip_input.c:231 NF_HOOK include/linux/netfilter.h:305 [inline] NF_HOOK include/linux/netfilter.h:299 [inline] ip_local_deliver+0x133/0x210 net/ipv4/ip_input.c:252 dst_input include/net/dst.h:442 [inline] ip_rcv_finish+0x121/0x160 net/ipv4/ip_input.c:413 NF_HOOK include/linux/netfilter.h:305 [inline] NF_HOOK include/linux/netfilter.h:299 [inline] ip_rcv+0x18f/0x1a0 net/ipv4/ip_input.c:523 __netif_receive_skb_one_core+0xa7/0xe0 net/core/dev.c:5004 __netif_receive_skb+0x37/0xf0 net/core/dev.c:5118 netif_receive_skb_internal+0x59/0x190 net/core/dev.c:5208 napi_skb_finish net/core/dev.c:5671 [inline] napi_gro_receive+0x28f/0x330 net/core/dev.c:5704 receive_buf+0x284/0x30b0 drivers/net/virtio_net.c:1061 virtnet_receive drivers/net/virtio_net.c:1323 [inline] virtnet_poll+0x436/0x7d0 drivers/net/virtio_net.c:1428 napi_poll net/core/dev.c:6352 [inline] net_rx_action+0x3ae/0xa50 net/core/dev.c:6418
read to 0xffff88812ab369f8 of 8 bytes by task 7271 on cpu 0: tcp_recvmsg+0x470/0x1a30 net/ipv4/tcp.c:2047 inet_recvmsg+0xbb/0x250 net/ipv4/af_inet.c:838 sock_recvmsg_nosec net/socket.c:871 [inline] sock_recvmsg net/socket.c:889 [inline] sock_recvmsg+0x92/0xb0 net/socket.c:885 sock_read_iter+0x15f/0x1e0 net/socket.c:967 call_read_iter include/linux/fs.h:1864 [inline] new_sync_read+0x389/0x4f0 fs/read_write.c:414 __vfs_read+0xb1/0xc0 fs/read_write.c:427 vfs_read fs/read_write.c:461 [inline] vfs_read+0x143/0x2c0 fs/read_write.c:446 ksys_read+0xd5/0x1b0 fs/read_write.c:587 __do_sys_read fs/read_write.c:597 [inline] __se_sys_read fs/read_write.c:595 [inline] __x64_sys_read+0x4c/0x60 fs/read_write.c:595 do_syscall_64+0xcf/0x2f0 arch/x86/entry/common.c:296 entry_SYSCALL_64_after_hwframe+0x44/0xa9
Reported by Kernel Concurrency Sanitizer on: CPU: 0 PID: 7271 Comm: syz-fuzzer Not tainted 5.3.0+ #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Signed-off-by: Eric Dumazet edumazet@google.com Reported-by: syzbot syzkaller@googlegroups.com Signed-off-by: Jakub Kicinski jakub.kicinski@netronome.com Signed-off-by: Huang Guobin huangguobin4@huawei.com Reviewed-by: Wenan Mao maowenan@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- net/core/sock.c | 2 +- net/ipv4/tcp_ipv4.c | 2 +- net/llc/llc_conn.c | 2 +- net/sctp/input.c | 4 ++-- net/tipc/socket.c | 6 +++--- net/x25/x25_dev.c | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c index 935d9a8f12c2..f0b465789041 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -473,7 +473,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, rc = sk_backlog_rcv(sk, skb);
mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); - } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { + } else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) { bh_unlock_sock(sk); atomic_inc(&sk->sk_drops); goto discard_and_relse; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index c5d38eec7296..0830a3f75f9b 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1618,7 +1618,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) { - u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf; + u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf); struct skb_shared_info *shinfo; const struct tcphdr *th; struct tcphdr *thtail; diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index ed2aca12460c..f114e12635ba 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c @@ -832,7 +832,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb) else { dprintk("%s: adding to backlog...\n", __func__); llc_set_backlog_type(skb, LLC_PACKET); - if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) + if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) goto drop_unlock; } out: diff --git a/net/sctp/input.c b/net/sctp/input.c index bfe29158afcc..81bb91118cee 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -334,7 +334,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) bh_lock_sock(sk);
if (sock_owned_by_user(sk)) { - if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) + if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) sctp_chunk_free(chunk); else backloged = 1; @@ -370,7 +370,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) struct sctp_ep_common *rcvr = chunk->rcvr; int ret;
- ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf); + ret = sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)); if (!ret) { /* Hold the assoc/ep while hanging on the backlog queue. * This way, we know structures we need will not disappear diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 5841d62ff580..b206a5dfd8dc 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2097,13 +2097,13 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb) struct tipc_msg *hdr = buf_msg(skb);
if (unlikely(msg_in_group(hdr))) - return sk->sk_rcvbuf; + return READ_ONCE(sk->sk_rcvbuf);
if (unlikely(!msg_connected(hdr))) - return sk->sk_rcvbuf << msg_importance(hdr); + return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL)) - return sk->sk_rcvbuf; + return READ_ONCE(sk->sk_rcvbuf);
return FLOWCTL_MSG_LIM; } diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index 39231237e1c3..1763e19817fc 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c @@ -60,7 +60,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) if (!sock_owned_by_user(sk)) { queued = x25_process_rx_frame(sk, skb); } else { - queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf); + queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)); } bh_unlock_sock(sk); sock_put(sk);
From: Liu Yanshi liuyanshi@huawei.com
driver inclusion category: feature bugzilla: NA CVE: NA
Add checks in pcie_reg_mmap() for the requested mmap size and chip_id, and add support for mmapping the PCIe NVMe and virtio BAR regions.
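For illustration only, a userspace caller would encode the region type and chip id into the mmap offset: the low 4 bits select the chip and bits 4-7 select the region type (0 = APB, 1 = NVMe BAR, 2 = virtio BAR), matching the masks added below. This sketch assumes the misc device node shows up as /dev/pcie_reg_dev and simply mirrors the driver's size constants; none of the names here are part of a documented UAPI:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define MMAP_TYPE_NVME	1
	#define NVME_BAR_SIZE	0x200000UL

	int main(void)
	{
		unsigned int chip_id = 0;
		unsigned long pgoff = (MMAP_TYPE_NVME << 4) | chip_id;
		int fd = open("/dev/pcie_reg_dev", O_RDWR);
		void *p;

		if (fd < 0)
			return 1;
		/* the driver only sees offset >> PAGE_SHIFT, so pass a page-aligned offset */
		p = mmap(NULL, NVME_BAR_SIZE, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, pgoff * (unsigned long)getpagesize());
		if (p == MAP_FAILED)
			perror("mmap");
		else
			munmap(p, NVME_BAR_SIZE);
		close(fd);
		return 0;
	}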
Signed-off-by: Liu Yanshi liuyanshi@huawei.com Reviewed-by: kang fenglong kangfenglong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- .../hisi-pcie-customer/hisi_pcie_cae.c | 108 ++++++++++++++++-- 1 file changed, 99 insertions(+), 9 deletions(-)
diff --git a/drivers/pci/controller/hisi-pcie-customer/hisi_pcie_cae.c b/drivers/pci/controller/hisi-pcie-customer/hisi_pcie_cae.c index 66113e816f7b..8d0c8012cf85 100644 --- a/drivers/pci/controller/hisi-pcie-customer/hisi_pcie_cae.c +++ b/drivers/pci/controller/hisi-pcie-customer/hisi_pcie_cae.c @@ -5,12 +5,40 @@ #include <linux/miscdevice.h> #include <linux/device.h> #include <linux/module.h> +#include <linux/io.h>
-#define CHIP_OFFSET 0x200000000000UL +#define CHIP_OFFSET 0x200000000000UL #define APB_SUBCTRL_BASE 0x148070000UL +#define NVME_BAR_BASE 0x148800000UL +#define VIRTIO_BAR_BASE 0x148a00000UL +#define CHIP_MMAP_MASK 0xf +#define TYPE_MMAP_MASK 0xf0 +#define SYSCTRL_SC_ECO_RSV1 0x9401ff04 +#define PCIE_REG_SIZE 0X390000UL +#define NVME_BAR_SIZE 0x200000UL +#define VIRTIO_BAR_SIZE 0x200000UL +#define MAX_CHIP_NUM 4 +#define CHIP_INFO_REG_SIZE 4 +#define TYPE_SHIFT 4 +#define BIT_SHIFT_8 8
#define DEVICE_NAME "pcie_reg_dev"
+enum chip_type_t { + CHIP1620 = 0x13, + CHIP1620s = 0x12, + CHIP1601 = 0x10, + CHIPNONE = 0x0, +}; + +enum { + MMAP_TYPE_APB, + MMAP_TYPE_NVME, + MMAP_TYPE_VIRTIO +}; + +static int current_chip_nums; + static const struct vm_operations_struct mmap_pcie_mem_ops = { #ifdef CONFIG_HAVE_IOREMAP_PROT .access = generic_access_phys @@ -20,31 +48,94 @@ static const struct vm_operations_struct mmap_pcie_mem_ops = { static int pcie_reg_mmap(struct file *filep, struct vm_area_struct *vma) { u64 size = vma->vm_end - vma->vm_start; - u32 chip_id = (u32)vma->vm_pgoff; + u32 chip_id = (u32)vma->vm_pgoff & CHIP_MMAP_MASK; + u32 type = ((u32)vma->vm_pgoff & TYPE_MMAP_MASK) >> TYPE_SHIFT; u64 phy_addr;
- pr_info("[PCIe Base] tools map chipid:%d\n", chip_id); - phy_addr = APB_SUBCTRL_BASE + CHIP_OFFSET * chip_id; + if (chip_id >= current_chip_nums) { + pr_info("[PCIe Base] input chip_id %u is invalid\n", chip_id); + return -EINVAL; + } + /* It's illegal to wrap around the end of the physical address space. */ + switch (type) { + case MMAP_TYPE_APB: + phy_addr = APB_SUBCTRL_BASE + CHIP_OFFSET * chip_id; + if (size > PCIE_REG_SIZE) { + pr_info("[PCIe Base] mmap_type_apb map size is invalid\n"); + return -EINVAL; + } + break; + case MMAP_TYPE_NVME: + phy_addr = NVME_BAR_BASE + CHIP_OFFSET * chip_id; + if (size > NVME_BAR_SIZE) { + pr_info("[PCIe Base] mmap_type_nvme map size is invalid\n"); + return -EINVAL; + } + break; + case MMAP_TYPE_VIRTIO: + phy_addr = VIRTIO_BAR_BASE + CHIP_OFFSET * chip_id; + if (size > VIRTIO_BAR_SIZE) { + pr_info("[PCIe Base] mmap_type_virtio map size is invalid\n"); + return -EINVAL; + } + break; + default: + pr_info("[PCIe Base] input addr type %u is invalid\n", type); + return -EINVAL; + } vma->vm_pgoff = phy_addr >> PAGE_SHIFT; - vma->vm_page_prot = pgprot_device(vma->vm_page_prot); - vma->vm_ops = &mmap_pcie_mem_ops; - /* Remap-pfn-range will mark the range VM_IO */ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size, vma->vm_page_prot)) { + pr_info("[PCIe Base] map pcie reg zone failed\n"); return -EAGAIN; } + return 0; }
+int pcie_get_chipnums(u32 cpu_info) +{ + int i; + int chip_count = 0; + u32 chip_i_info; + + for (i = 0; i < MAX_CHIP_NUM; i++) { + chip_i_info = ((cpu_info & (0xFF << (BIT_SHIFT_8 * i))) >> + (BIT_SHIFT_8 * i)); + if ((chip_i_info == CHIP1620) || + (chip_i_info == CHIP1620s) || + (chip_i_info == CHIP1601)) { + chip_count++; + } + } + + return chip_count; +} + static int pcie_open(struct inode *inode, struct file *f) { + void __iomem *addr_base; + u32 val; + + addr_base = ioremap_nocache(SYSCTRL_SC_ECO_RSV1, CHIP_INFO_REG_SIZE); + if (!addr_base) { + pr_info("[PCIe Base] map chip_info_reg zone failed\n"); + return -EPERM; + } + + val = readl(addr_base); + current_chip_nums = pcie_get_chipnums(val); + + iounmap(addr_base); + addr_base = NULL; + return 0; }
@@ -82,5 +173,4 @@ module_exit(misc_dev_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Huawei Technology Company"); MODULE_DESCRIPTION("PCIe DFX TOOL"); -MODULE_VERSION("V1.0"); - +MODULE_VERSION("V1.1");
From: Zhang Wei zhangwei375@huawei.com
driver inclusion category: bugfix bugzilla: NA CVE: NA
Feature or Bugfix: Bugfix. Clear the cipher key buffer with memzero_explicit() before freeing it, so that key material is not left behind in the freed DMA buffer; memzero_explicit() is used rather than plain memset() so the compiler cannot optimize the clear away.
Signed-off-by: Zhang Wei zhangwei375@huawei.com Reviewed-by: liujunxian liujunxian3@huawei.com Reviewed-by: wangyuan wangyuan46@huawei.com Reviewed-by: hucheng hucheng.hu@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/crypto/hisilicon/sec2/sec_crypto.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 4467dd277985..4164c05f2d18 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -529,6 +529,7 @@ static void hisi_sec_cipher_ctx_exit(struct crypto_skcipher *tfm) hrtimer_cancel(&ctx->timer);
if (c_ctx->c_key) { + memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE); dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, c_ctx->c_key, c_ctx->c_key_dma); c_ctx->c_key = NULL;
From: Ganapathi Bhat gbhat@marvell.com
mainline inclusion from mainline-v5.5-rc3 commit 3d94a4a8373bf5f45cf5f939e88b8354dbf2311b category: bugfix bugzilla: 13690 CVE: CVE-2019-14895
-------------------------------------------------
mwifiex_process_country_ie() parses elements of the bss descriptor in a beacon packet. When processing the WLAN_EID_COUNTRY element, there is no upper-bound check on country_ie_len before calling memcpy. The destination buffer domain_info->triplet is an array of length MWIFIEX_MAX_TRIPLET_802_11D (83). A remote attacker can set up a fake AP with the same SSID as a real AP and send a malicious beacon packet with an overlong WLAN_EID_COUNTRY element (country_ie_len > 83), and can force the STA to connect to the fake AP on a different channel. When the victim STA connects to the fake AP, the heap buffer overflow is triggered. Fix this by checking the length and, if it is invalid, not connecting to the AP.
This fix addresses CVE-2019-14895.
Reported-by: huangwen huangwenabc@gmail.com Signed-off-by: Ganapathi Bhat gbhat@marvell.com Signed-off-by: Kalle Valo kvalo@codeaurora.org Signed-off-by: Yang Yingliang yangyingliang@huawei.com Reviewed-by: yangerkun yangerkun@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 74e50566db1f..6dd835f1efc2 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -229,6 +229,14 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv, "11D: skip setting domain info in FW\n"); return 0; } + + if (country_ie_len > + (IEEE80211_COUNTRY_STRING_LEN + MWIFIEX_MAX_TRIPLET_802_11D)) { + mwifiex_dbg(priv->adapter, ERROR, + "11D: country_ie_len overflow!, deauth AP\n"); + return -EINVAL; + } + memcpy(priv->adapter->country_code, &country_ie[2], 2);
domain_info->country_code[0] = country_ie[2]; @@ -272,8 +280,9 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, priv->scan_block = false;
if (bss) { - if (adapter->region_code == 0x00) - mwifiex_process_country_ie(priv, bss); + if (adapter->region_code == 0x00 && + mwifiex_process_country_ie(priv, bss)) + return -EINVAL;
/* Allocate and fill new bss descriptor */ bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor),
From: Mike Christie mchristi@redhat.com
mainline inclusion from mainline-v5.5-rc3 commit 1c05839aa973cfae8c3db964a21f9c0eef8fcc21 category: bugfix bugzilla: 24798 CVE: NA
------------------------------------------------- This fixes a regression added with:
commit e9e006f5fcf2bab59149cb38a48a4817c1b538b4 Author: Mike Christie mchristi@redhat.com Date: Sun Aug 4 14:10:06 2019 -0500
nbd: fix max number of supported devs
where we can deadlock during device shutdown. The problem occurs if the recv_work's nbd_config_put() runs after nbd_start_device_ioctl() has returned and the userspace app has dropped its reference by closing the device and running nbd_release(). The recv_work nbd_config_put() call then drops the refcount to zero and tries to destroy the config, which tries to do destroy_workqueue() from within the recv work.
This patch just has nbd_start_device_ioctl do a flush_workqueue when it wakes so we know after the ioctl returns running works have exited. This also fixes a possible race where we could try to reuse the device while old recv_works are still running.
Cc: stable@vger.kernel.org Fixes: e9e006f5fcf2 ("nbd: fix max number of supported devs") Signed-off-by: Mike Christie mchristi@redhat.com Signed-off-by: Jens Axboe axboe@kernel.dk Signed-off-by: Sun Ke sunke32@huawei.com Reviewed-by: Hou Tao houtao1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/block/nbd.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 996b1ef5f076..b9d321bdaa8a 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1247,10 +1247,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b mutex_unlock(&nbd->config_lock); ret = wait_event_interruptible(config->recv_wq, atomic_read(&config->recv_threads) == 0); - if (ret) { + if (ret) sock_shutdown(nbd); - flush_workqueue(nbd->recv_workq); - } + flush_workqueue(nbd->recv_workq); + mutex_lock(&nbd->config_lock); nbd_bdev_reset(bdev); /* user requested, ignore socket errors */
From: Junxin Chen chenjunxin1@huawei.com
driver inclusion category: bugfix bugzilla: NA CVE: NA
----------------------------------
THIS_MODULE is a pointer that is only initialized when the driver is loaded by insmod or modprobe; when HNS3 is built into the kernel (config set to y) it is NULL, so dereferencing it causes a NULL pointer access.
This patch removes the THIS_MODULE usages to avoid that, and changes the driver version to 1.9.29.0.
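For reference, THIS_MODULE only points at a real struct module when the code is built as a module; for built-in code it is a null pointer. Roughly (see include/linux/export.h):

	#ifdef MODULE
	extern struct module __this_module;
	#define THIS_MODULE	(&__this_module)
	#else
	#define THIS_MODULE	((struct module *)0)	/* built-in: NULL */
	#endif

so expressions such as THIS_MODULE->version dereference NULL when HNS3 is built in, which is why the patch switches to the compile-time version macros.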
Signed-off-by: Junxin Chen chenjunxin1@huawei.com Reviewed-by: Zhong Zhaohui zhongzhaohui@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 +- .../net/ethernet/hisilicon/hns3/hns-customer/hns3_enet_it.c | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_init.c | 2 +- .../net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.c | 4 ++-- .../net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 739a0266e8fd..bcd8c183d2b4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -30,7 +30,7 @@ #include <linux/pci.h> #include <linux/types.h>
-#define HNAE3_MOD_VERSION "1.0" +#define HNAE3_MOD_VERSION "1.9.29.0"
#define HNAE3_MIN_VECTOR_NUM 2 /* one for msi-x, another for IO */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns-customer/hns3_enet_it.c b/drivers/net/ethernet/hisilicon/hns3/hns-customer/hns3_enet_it.c index b98729512ded..a6a724da0548 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns-customer/hns3_enet_it.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns-customer/hns3_enet_it.c @@ -51,7 +51,7 @@ static int __init hns3_init_module_it(void) pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
- strncpy(hns3_driver_version, THIS_MODULE->version, + strncpy(hns3_driver_version, HNS3_MOD_VERSION, strlen(hns3_driver_version));
client.type = HNAE3_CLIENT_KNIC; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_init.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_init.c index 0c5db2241ce4..cb61e673cba3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_init.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_init.c @@ -563,5 +563,5 @@ static void __exit hns3_cae_exit(void) module_init(hns3_cae_init); module_exit(hns3_cae_exit); MODULE_DESCRIPTION("HNS3 CAE Driver"); -MODULE_VERSION(HNAE_DRIVER_VERSION); +MODULE_VERSION(HNS3_CAE_MOD_VERSION); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.c index b55eaf2011af..53e1354808fb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.c @@ -79,10 +79,10 @@ int hns3_cae_get_driver_ver(struct hns3_nic_priv *nic_dev, void *buf_in, u32 in_size, void *buf_out, u32 out_size) { - if (!buf_out || out_size < strlen(THIS_MODULE->version)) + if (!buf_out || out_size < strlen(HNS3_CAE_MOD_VERSION)) return -ENOMEM;
- strncpy(buf_out, THIS_MODULE->version, strlen(THIS_MODULE->version)); + strncpy(buf_out, HNS3_CAE_MOD_VERSION, strlen(HNS3_CAE_MOD_VERSION));
return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h index 3e0711d0494f..e2e96edcb820 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h @@ -4,7 +4,7 @@ #ifndef __HNS3_CAE_VERSION_H__ #define __HNS3_CAE_VERSION_H__
-#define HNAE_DRIVER_VERSION "1.9.25.2" +#define HNS3_CAE_MOD_VERSION "1.9.29.0"
#define CMT_ID_LEN 8 #define RESV_LEN 3 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index e9737ad84892..d09599a877af 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -8,7 +8,7 @@
#include "hnae3.h"
-#define HNS3_MOD_VERSION "1.9.26.0" +#define HNS3_MOD_VERSION "1.9.29.0"
extern char hns3_driver_version[];
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index f6a8919bd398..0b5c13241335 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -12,7 +12,7 @@ #include "hclge_cmd.h" #include "hnae3.h"
-#define HCLGE_MOD_VERSION "1.9.26.0" +#define HCLGE_MOD_VERSION "1.9.29.0" #define HCLGE_DRIVER_NAME "hclge"
#define HCLGE_MAX_PF_NUM 8 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 72dec4359bcf..1e963a35a76e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -10,7 +10,7 @@ #include "hclgevf_cmd.h" #include "hnae3.h"
-#define HCLGEVF_MOD_VERSION "1.0" +#define HCLGEVF_MOD_VERSION "1.9.29.0" #define HCLGEVF_DRIVER_NAME "hclgevf"
#define HCLGEVF_MAX_VLAN_ID 4095
From: Shaozhengchao shaozhengchao@huawei.com
driver inclusion category: bugfix bugzilla: 4472 CVE: NA
-----------------------------------------------------------------------
Fix the error path of hinic_setup_all_tx_resources(): when unwinding after a failure, free the tx_info of each already-initialized txq by indexing with the loop variable i instead of q_id.
Signed-off-by: Shaozhengchao shaozhengchao@huawei.com Reviewed-by: Luoshaokai luoshaokai@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/huawei/hinic/hinic_tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index 9b584be590a5..c1ea1e6f7265 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -1123,7 +1123,7 @@ int hinic_setup_all_tx_resources(struct net_device *netdev)
init_txq_err: for (i = 0; i < q_id; i++) { - txq = &nic_dev->txqs[q_id]; + txq = &nic_dev->txqs[i]; kfree(txq->tx_info); }
From: Gao Chuan gaochuan4@huawei.com
driver inclusion category: bugfix bugzilla: NA CVE: NA
hisi_sas tries to allocate 16 affinity interrupts. When there are more than 16 CPU cores it gets everything it asked for, but with fewer cores (for example 8) only 8 affinity interrupts can be allocated, and hisi_sas records that number in "nvecs". When resetting the hisi_sas host, the driver tried to operate on all of its own interrupts, but it used "queue_count" instead of "nvecs" to index them. Since "queue_count" is always 16, the driver ended up operating on interrupts that belong to someone else.
Feature or Bugfix:Bugfix
Signed-off-by: Gao Chuan gaochuan4@huawei.com Reviewed-by: zhouyupeng zhouyupeng1@huawei.com Reviewed-by: luojian luojian5@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 23bf4750f358..acf2fc667ac0 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -2691,7 +2691,7 @@ static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba) synchronize_irq(pci_irq_vector(pdev, PCI_IRQ_PHY)); synchronize_irq(pci_irq_vector(pdev, PCI_IRQ_CHANNEL)); synchronize_irq(pci_irq_vector(pdev, PCI_IRQ_AXI_FATAL)); - for (i = 0; i < hisi_hba->queue_count; i++) { + for (i = 0; i < hisi_hba->nvecs; i++) { hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1); synchronize_irq(pci_irq_vector(pdev, i + PCI_IRQ_CQ_BASE)); }
From: Wei Li liwei391@huawei.com
hulk inclusion category: bugfix bugzilla: NA CVE: NA
-------------------------------------------------
Lengthy sysrq-t output can take a long time on a slow serial console when there are many processes and CPUs.
So reset the NMI watchdog to avoid spurious lockup messages, and also reset the softlockup watchdogs on all other CPUs, since another CPU might be blocked waiting for us to process an IPI or stop_machine.
Signed-off-by: Wei Li liwei391@huawei.com Reviewed-by: Hanjun Guo guohanjun@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- kernel/sched/debug.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index f664a7c2ea3e..c542d84dafce 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -775,9 +775,16 @@ void sysrq_sched_debug_show(void) int cpu;
sched_debug_header(NULL); - for_each_online_cpu(cpu) + for_each_online_cpu(cpu) { + /* + * Need to reset softlockup watchdogs on all CPUs, because + * another CPU might be blocked waiting for us to process + * an IPI or stop_machine. + */ + touch_nmi_watchdog(); + touch_all_softlockup_watchdogs(); print_cpu(NULL, cpu); - + } }
static void print_cpu_tidy(struct seq_file *m, int cpu)
From: fengsheng fengsheng5@huawei.com
driver inclusion category: Bugfix bugzilla: NA CVE: NA
1. Fix lbc_read8() and lbc_read8_nolock(): they did not check whether the output pointer "value" is NULL before dereferencing it.
Signed-off-by: fengsheng fengsheng5@huawei.com Reviewed-by: zhangmu zhangmu1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/soc/hisilicon/lbc/hs_lbc_pltfm.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/drivers/soc/hisilicon/lbc/hs_lbc_pltfm.c b/drivers/soc/hisilicon/lbc/hs_lbc_pltfm.c index 47f4c9fc5ade..2b3543bb5686 100644 --- a/drivers/soc/hisilicon/lbc/hs_lbc_pltfm.c +++ b/drivers/soc/hisilicon/lbc/hs_lbc_pltfm.c @@ -29,7 +29,7 @@
#include "hs_lbc_pltfm.h"
-#define LBC_DRIVER_VERSION "1.8.15.0" +#define LBC_DRIVER_VERSION "1.9.30.0"
struct hisi_lbc_dev g_lbc_dev = {0};
@@ -213,6 +213,11 @@ int lbc_read8(unsigned int index, unsigned int offset, unsigned char *value) return -EINVAL; }
+ if (!value) { + pr_err("value is null\n"); + return -EINVAL; + } + *value = (unsigned char)lbc_read(index, offset, LBC_RWDATA_WIDTH_8);
return 0; @@ -227,6 +232,11 @@ int lbc_read8_nolock(unsigned int index, unsigned int offset, unsigned char *val return -EINVAL; }
+ if (!value) { + pr_err("value is null\n"); + return -EINVAL; + } + *value = (unsigned char)lbc_read_unlock(index, offset, LBC_RWDATA_WIDTH_8); return 0; }
From: fengsheng fengsheng5@huawei.com
driver inclusion category: Bugfix bugzilla: NA CVE: NA
1. Add a validity check for the "value" parameter of sysctl_cpu_voltage_adjust(). 2. Add validity checks for the pmbus "slave_addr" parameter (slave addresses are 7-bit, hence the new SLAVE_ADDR_MAX bound of 1 << 7).
Signed-off-by: fengsheng fengsheng5@huawei.com Reviewed-by: zhangmu zhangmu1@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/soc/hisilicon/sysctl/sysctl_drv.c | 4 +- drivers/soc/hisilicon/sysctl/sysctl_pmbus.c | 47 +++++++++++++++------ 2 files changed, 37 insertions(+), 14 deletions(-)
diff --git a/drivers/soc/hisilicon/sysctl/sysctl_drv.c b/drivers/soc/hisilicon/sysctl/sysctl_drv.c index 75d75228ce1f..3a45b0e4307c 100644 --- a/drivers/soc/hisilicon/sysctl/sysctl_drv.c +++ b/drivers/soc/hisilicon/sysctl/sysctl_drv.c @@ -48,7 +48,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DEBUG
-#define SYSCTL_DRIVER_VERSION "1.8.15.2" +#define SYSCTL_DRIVER_VERSION "1.9.31.0"
unsigned int g_sysctrl_debug;
@@ -816,7 +816,7 @@ EXPORT_SYMBOL(hip_sysctrl_remove); module_init(his_sysctrl_init); module_exit(his_sysctrl_exit);
-MODULE_DESCRIPTION("sysctrl for hisillicon platform"); +MODULE_DESCRIPTION("sysctrl for hisilicon platform"); MODULE_VERSION(SYSCTL_DRIVER_VERSION); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:hip-sysctl"); diff --git a/drivers/soc/hisilicon/sysctl/sysctl_pmbus.c b/drivers/soc/hisilicon/sysctl/sysctl_pmbus.c index 3c6541b37f3d..2c13343b4016 100644 --- a/drivers/soc/hisilicon/sysctl/sysctl_pmbus.c +++ b/drivers/soc/hisilicon/sysctl/sysctl_pmbus.c @@ -19,6 +19,9 @@ #include "sysctl_drv.h" #include "sysctl_pmbus.h"
+#define SLAVE_ADDR_MAX (1 << 7) +#define CPU_VOL_MIN 500 + static void __iomem *g_sysctl_pmbus_base[CHIP_ID_NUM_MAX];
static void his_sysctrl_reg_rd(const void __iomem *addr, u32 reg, unsigned int *val) @@ -217,8 +220,9 @@ int sysctl_pmbus_cfg(u8 chip_id, u8 addr, u8 page, u32 slave_addr) { void __iomem *base = NULL;
- if (chip_id >= CHIP_ID_NUM_MAX) { - pr_err("[sysctl pmbus]read chip_id range[0x0-0x3]is err!\n"); + if ((chip_id >= CHIP_ID_NUM_MAX) || (slave_addr >= SLAVE_ADDR_MAX)) { + pr_err("[sysctl pmbus] cfg param err,chipid=0x%x,slave_addr=0x%x\n", + chip_id, slave_addr); return SYSCTL_ERR_PARAM; }
@@ -239,8 +243,11 @@ int sysctl_pmbus_write(u8 chip_id, u8 addr, u32 slave_addr, u32 data_len, u32 bu u32 temp_data = addr; void __iomem *base = NULL;
- if ((chip_id >= CHIP_ID_NUM_MAX) || (data_len > 0x4)) { - pr_err("[sysctl pmbus]write chip_id range[0x0-0x3] or data_len range[0x0-0x4] is err!\n"); + if ((chip_id >= CHIP_ID_NUM_MAX) || + (data_len > DATA_NUM_MAX) || + (slave_addr >= SLAVE_ADDR_MAX)) { + pr_err("[sysctl pmbus] write param err,chipid=0x%x,data_len=0x%x,slave_addr=0x%x!\n", + chip_id, data_len, slave_addr); return SYSCTL_ERR_PARAM; }
@@ -371,8 +378,10 @@ int sysctl_pmbus_read(u8 chip_id, u8 addr, u32 slave_addr, u32 data_len, u32 *bu
if ((chip_id >= CHIP_ID_NUM_MAX) || (data_len > DATA_NUM_MAX) || - (data_len == 0x0)) { - pr_err("[sysctl pmbus]read chip_id range[0x0-0x3] or data_len range[0x1-0x4] is err!\n"); + (data_len == 0x0) || + (slave_addr >= SLAVE_ADDR_MAX)) { + pr_err("[sysctl pmbus]read param err,chipid=0x%x,data_len=0x%x,slave_addr=0x%x!\n", + chip_id, data_len, slave_addr); return SYSCTL_ERR_PARAM; }
@@ -407,8 +416,9 @@ int sysctl_cpu_voltage_password_cfg(u8 chip_id, u32 slave_addr) { void __iomem *base = NULL;
- if (chip_id >= CHIP_ID_NUM_MAX) { - pr_err("[sysctl pmbus]read chip_id range[0x0-0x3]is err!\n"); + if ((chip_id >= CHIP_ID_NUM_MAX) || (slave_addr >= SLAVE_ADDR_MAX)) { + pr_err("[sysctl pmbus] voltage_password_cfg param err,chipid=0x%x,slave_addr=0x%x!\n", + chip_id, slave_addr); return SYSCTL_ERR_PARAM; }
@@ -422,7 +432,7 @@ int sysctl_cpu_voltage_password_cfg(u8 chip_id, u32 slave_addr) return 0; }
-static int hi_vrd_info_check_params(u8 chip_id, u8 page, u32 data_len) +static int hi_vrd_info_check_params(u8 chip_id, u8 page, u32 data_len, u32 slave_addr) { if (chip_id >= CHIP_ID_NUM_MAX) { pr_err("[sysctl pmbus] read chip_id range[0x0-0x3]is err!\n"); @@ -439,6 +449,11 @@ static int hi_vrd_info_check_params(u8 chip_id, u8 page, u32 data_len) return SYSCTL_ERR_PARAM; }
+ if (slave_addr >= SLAVE_ADDR_MAX) { + pr_err("[sysctl pmbus] vrd_info slave_addr=0x%x err!\n", slave_addr); + return SYSCTL_ERR_PARAM; + } + return SYSCTL_ERR_OK; }
@@ -447,7 +462,7 @@ int hi_vrd_info_get(u8 chip_id, u8 addr, u8 page, u32 slave_addr, u32 data_len, u32 retry_time = 0x10; u32 ret;
- ret = hi_vrd_info_check_params(chip_id, page, data_len); + ret = hi_vrd_info_check_params(chip_id, page, data_len, slave_addr); if (ret != SYSCTL_ERR_OK) return ret;
@@ -509,6 +524,11 @@ int sysctl_cpu_voltage_read(u8 chip_id, u8 loop, u32 slave_addr) return SYSCTL_ERR_PARAM; }
+ if (slave_addr >= SLAVE_ADDR_MAX) { + pr_err("[sysctl pmbus] cpu_voltage_read slave_addr=0x%x err!\n", slave_addr); + return SYSCTL_ERR_PARAM; + } + /* read voltage mode */ ret = hi_vrd_info_get(chip_id, 0x20, loop, slave_addr, 0x1, (u32 *)&vout_mode); if (ret) @@ -555,8 +575,11 @@ int sysctl_cpu_voltage_adjust (u8 chip_id, u8 loop, u32 slave_addr, u32 value) pmbus_vout_mode vout_mode; void __iomem *base = NULL;
- if (chip_id >= CHIP_ID_NUM_MAX) { - pr_err("[sysctl pmbus]read chip_id range[0x0-0x3]is err!\n"); + if ((chip_id >= CHIP_ID_NUM_MAX) || + (slave_addr >= SLAVE_ADDR_MAX) || + (value < CPU_VOL_MIN)) { + pr_err("[sysctl pmbus]cpu_voltage_adjust param err,chipid=0x%x,slave_addr=0x%x,value=0x%x!\n", + chip_id, slave_addr, value); return SYSCTL_ERR_PARAM; }
From: Zhang Wei zhangwei375@huawei.com
driver inclusion category: bugfix bugzilla: NA CVE: NA
1. Add ECC multi-bit error handling to the reset process. 2. Fix some coding-style issues found in code review. 3. Fix problems found in the security review. 4. Add qm_wait_task_complete() before the fail callback.
Signed-off-by: Zhang Wei zhangwei375@huawei.com Reviewed-by: zhouguangwei zhouguangwei5@huawei.com Reviewed-by: hucheng hucheng.hu@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 15 +- drivers/crypto/hisilicon/hpre/hpre_main.c | 157 +++++++------ drivers/crypto/hisilicon/qm.c | 205 ++++++++++++++-- drivers/crypto/hisilicon/qm.h | 23 +- drivers/crypto/hisilicon/rde/rde.h | 25 +- drivers/crypto/hisilicon/rde/rde_api.c | 244 ++++++++++---------- drivers/crypto/hisilicon/rde/rde_api.h | 2 +- drivers/crypto/hisilicon/rde/rde_data.c | 28 +-- drivers/crypto/hisilicon/rde/rde_main.c | 183 ++++++++------- drivers/crypto/hisilicon/sec/sec_algs.c | 13 +- drivers/crypto/hisilicon/sec2/sec.h | 10 +- drivers/crypto/hisilicon/sec2/sec_crypto.c | 42 ++-- drivers/crypto/hisilicon/sec2/sec_crypto.h | 10 +- drivers/crypto/hisilicon/sec2/sec_main.c | 244 +++++++++++--------- drivers/crypto/hisilicon/sec2/sec_usr_if.h | 10 +- drivers/crypto/hisilicon/sgl.c | 6 +- drivers/crypto/hisilicon/zip/zip_crypto.c | 4 +- drivers/crypto/hisilicon/zip/zip_main.c | 190 +++++++++------ 18 files changed, 840 insertions(+), 571 deletions(-)
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index e58bedeb006f..5ca82af73ed9 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -677,7 +677,7 @@ static int hpre_rsa_enc(struct akcipher_request *req) if (ret) return ret;
- msg->dw0 |= HPRE_ALG_NC_NCRT; + msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT); msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey);
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0); @@ -807,10 +807,8 @@ static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
hpre_rsa_drop_leading_zeros(&ptr, &vlen);
- if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) { - ctx->rsa.pubkey = NULL; + if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) return -EINVAL; - }
memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);
@@ -927,7 +925,7 @@ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) }
if (ctx->rsa.prikey) { - memset(ctx->rsa.prikey, 0, ctx->key_sz); + memset(ctx->rsa.prikey, 0, ctx->key_sz << 1); dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey, ctx->rsa.dma_prikey); ctx->rsa.prikey = NULL; @@ -1039,6 +1037,7 @@ static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm) static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm) { struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); + int ret;
ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0); if (IS_ERR(ctx->rsa.soft_tfm)) { @@ -1046,7 +1045,11 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm) return PTR_ERR(ctx->rsa.soft_tfm); }
- return hpre_ctx_init(ctx); + ret = hpre_ctx_init(ctx); + if (ret) + crypto_free_akcipher(ctx->rsa.soft_tfm); + + return ret; }
static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 9824d212e0e4..78c32047bd85 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -49,9 +49,9 @@ #define HPRE_CORE_IS_SCHD_OFFSET 0x90
#define HPRE_RAS_CE_ENB 0x301410 -#define HPRE_HAC_RAS_CE_ENABLE 0x3f +#define HPRE_HAC_RAS_CE_ENABLE 0x1 #define HPRE_RAS_NFE_ENB 0x301414 -#define HPRE_HAC_RAS_NFE_ENABLE 0x3fffc0 +#define HPRE_HAC_RAS_NFE_ENABLE 0x3ffffe #define HPRE_RAS_FE_ENB 0x301418 #define HPRE_HAC_RAS_FE_ENABLE 0
@@ -87,6 +87,7 @@ #define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044 #define AM_OOO_SHUTDOWN_ENABLE BIT(0) #define AM_OOO_SHUTDOWN_DISABLE 0xFFFFFFFE +#define HPRE_WR_MSI_PORT 0xFFFB
#define HPRE_HW_ERROR_IRQ_ENABLE 1 #define HPRE_HW_ERROR_IRQ_DISABLE 0 @@ -94,6 +95,11 @@ #define HPRE_CORE_ECC_2BIT_ERR BIT(1) #define HPRE_OOO_ECC_2BIT_ERR BIT(5)
+#define HPRE_QM_BME_FLR BIT(7) +#define HPRE_QM_PM_FLR BIT(11) +#define HPRE_QM_SRIOV_FLR BIT(12) + +#define HPRE_USLEEP 10
/* function index: * 1 for hpre bypass mode, @@ -355,10 +361,6 @@ static int hpre_set_user_domain_and_cache(struct hpre *hpre) writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE)); writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG));
- /* disable FLR triggered by BME(bus master enable) */ - writel(PEH_AXUSER_CFG, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG)); - writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE)); - /* HPRE need more time, we close this interrupt */ val = readl_relaxed(HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK)); val |= BIT(HPRE_TIMEOUT_ABNML_BIT); @@ -399,6 +401,13 @@ static int hpre_set_user_domain_and_cache(struct hpre *hpre) if (ret) dev_err(dev, "acpi_evaluate_dsm err.\n");
+ /* disable FLR triggered by BME(bus master enable) */ + val = readl(hpre->qm.io_base + QM_PEH_AXUSER_CFG); + val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR); + val |= HPRE_QM_PM_FLR; + writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG)); + writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE)); + return ret; }
@@ -442,6 +451,9 @@ static void hpre_hw_error_enable(struct hpre *hpre) struct hisi_qm *qm = &hpre->qm; u32 val;
+ /* clear HPRE hw error source if having */ + writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT); + /* enable hpre hw error interrupts */ writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK); writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB); @@ -476,7 +488,6 @@ static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val) u32 num_vfs = hpre->num_vfs; u32 vfq_num, tmp;
- if (val > num_vfs) return -EINVAL;
@@ -700,7 +711,9 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug) int i, ret;
for (i = 0; i < HPRE_CLUSTERS_NUM; i++) { - sprintf(buf, "cluster%d", i); + ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i); + if (ret < 0) + return -EINVAL;
tmp_d = debugfs_create_dir(buf, debug->debug_root); if (!tmp_d) @@ -836,11 +849,46 @@ static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
static void hpre_hw_err_init(struct hpre *hpre) { - hisi_qm_hw_error_init(&hpre->qm, QM_BASE_CE, QM_BASE_NFE, - 0, QM_DB_RANDOM_INVALID); + hisi_qm_hw_error_init(&hpre->qm, QM_BASE_CE, + QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT, + 0, QM_DB_RANDOM_INVALID); hpre_hw_error_enable(hpre); }
+static void hpre_open_master_ooo(struct hisi_qm *qm) +{ + u32 val; + + val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); + writel(val & AM_OOO_SHUTDOWN_DISABLE, + HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB)); + writel(val | AM_OOO_SHUTDOWN_ENABLE, + HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB)); +} + +static u32 hpre_get_hw_err_status(struct hisi_qm *qm) +{ + return readl(qm->io_base + HPRE_HAC_INT_STATUS); +} + +static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) +{ + writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT); +} + +static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts) +{ + const struct hpre_hw_error *err = hpre_hw_errors; + struct device *dev = &qm->pdev->dev; + + while (err->msg) { + if (err->int_msk & err_sts) + dev_warn(dev, "%s [error status=0x%x] found\n", + err->msg, err->int_msk); + err++; + } +} + static int hpre_pf_probe_init(struct hpre *hpre) { struct hisi_qm *qm = &hpre->qm; @@ -850,6 +898,13 @@ static int hpre_pf_probe_init(struct hpre *hpre) return -EINVAL;
qm->ctrl_q_num = HPRE_QUEUE_NUM_V2; + qm->err_ini.qm_wr_port = HPRE_WR_MSI_PORT; + qm->err_ini.ecc_2bits_mask = (HPRE_CORE_ECC_2BIT_ERR | + HPRE_OOO_ECC_2BIT_ERR); + qm->err_ini.open_axi_master_ooo = hpre_open_master_ooo; + qm->err_ini.get_dev_hw_err_status = hpre_get_hw_err_status; + qm->err_ini.clear_dev_hw_err_status = hpre_clear_hw_err_status; + qm->err_ini.log_dev_hw_err = hpre_log_hw_error;
ret = hpre_set_user_domain_and_cache(hpre); if (ret) @@ -1058,8 +1113,12 @@ static int hpre_sriov_configure(struct pci_dev *pdev, int num_vfs)
static void hpre_remove_wait_delay(struct hpre *hpre) { - while (hisi_qm_frozen(&hpre->qm)) - ; + struct hisi_qm *qm = &hpre->qm; + + while (hisi_qm_frozen(&hpre->qm) || + ((qm->fun_type == QM_HW_PF) && + hpre_try_frozen_vfs(hpre->qm.pdev))) + usleep_range(HPRE_USLEEP, HPRE_USLEEP); udelay(HPRE_WAIT_DELAY); }
@@ -1067,20 +1126,15 @@ static void hpre_remove(struct pci_dev *pdev) { struct hpre *hpre = pci_get_drvdata(pdev); struct hisi_qm *qm = &hpre->qm; - int ret;
if (uacce_mode != UACCE_MODE_NOUACCE) hpre_remove_wait_delay(hpre);
hpre_algs_unregister(); hpre_remove_from_list(hpre); - if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0) { - ret = hpre_sriov_disable(pdev); - if (ret) { - pci_err(pdev, "Disable SRIOV fail!\n"); - return; - } - } + if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0) + hpre_sriov_disable(pdev); + if (qm->fun_type == QM_HW_PF) { hpre_cnt_regs_clear(qm); qm->debug.curr_qm_qp_num = 0; @@ -1095,50 +1149,11 @@ static void hpre_remove(struct pci_dev *pdev) hisi_qm_uninit(qm); }
-static void hpre_log_hw_error(struct hpre *hpre, u32 err_sts) -{ - const struct hpre_hw_error *err = hpre_hw_errors; - struct device *dev = &hpre->qm.pdev->dev; - - while (err->msg) { - if (err->int_msk & err_sts) - dev_warn(dev, "%s [error status=0x%x] found\n", - err->msg, err->int_msk); - err++; - } -} - -static pci_ers_result_t hpre_hw_error_handle(struct hpre *hpre) -{ - u32 err_sts; - - /* read err sts */ - err_sts = readl(hpre->qm.io_base + HPRE_HAC_INT_STATUS); - if (err_sts) { - hpre_log_hw_error(hpre, err_sts); - - /* clear error interrupts */ - writel(err_sts, hpre->qm.io_base + HPRE_HAC_SOURCE_INT); - return PCI_ERS_RESULT_NEED_RESET; - } - - return PCI_ERS_RESULT_RECOVERED; -} - -static pci_ers_result_t hpre_process_hw_error(struct pci_dev *pdev) +static void hpre_shutdown(struct pci_dev *pdev) { struct hpre *hpre = pci_get_drvdata(pdev); - pci_ers_result_t qm_ret, hpre_ret; - - /* log qm error */ - qm_ret = hisi_qm_hw_error_handle(&hpre->qm);
- /* log hpre error */ - hpre_ret = hpre_hw_error_handle(hpre); - - return (qm_ret == PCI_ERS_RESULT_NEED_RESET || - hpre_ret == PCI_ERS_RESULT_NEED_RESET) ? - PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED; + hisi_qm_stop(&hpre->qm, QM_NORMAL); }
static pci_ers_result_t hpre_error_detected(struct pci_dev *pdev, @@ -1151,7 +1166,7 @@ static pci_ers_result_t hpre_error_detected(struct pci_dev *pdev, if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT;
- return hpre_process_hw_error(pdev); + return hisi_qm_process_dev_error(pdev); }
static int hpre_vf_reset_prepare(struct pci_dev *pdev, @@ -1240,6 +1255,7 @@ static int hpre_soft_reset(struct hpre *hpre) { struct hisi_qm *qm = &hpre->qm; struct device *dev = &qm->pdev->dev; + unsigned long long value = 0; int ret; u32 val;
@@ -1259,6 +1275,9 @@ static int hpre_soft_reset(struct hpre *hpre) return ret; }
+ /* Set qm ecc if dev ecc happened to hold on ooo */ + hisi_qm_set_ecc(qm); + /* OOO register set and check */ writel(MASTER_GLOBAL_CTRL_SHUTDOWN, hpre->qm.io_base + HPRE_MASTER_GLOBAL_CTRL); @@ -1282,7 +1301,6 @@ static int hpre_soft_reset(struct hpre *hpre)
/* The reset related sub-control registers are not in PCI BAR */ if (ACPI_HANDLE(dev)) { - unsigned long long value = 0; acpi_status s;
s = acpi_evaluate_integer(ACPI_HANDLE(dev), "HRST", @@ -1358,7 +1376,8 @@ static int hpre_controller_reset_done(struct hpre *hpre) ret = hpre_set_user_domain_and_cache(hpre); if (ret) return ret; - hpre_hw_err_init(hpre); + + hisi_qm_restart_prepare(qm);
ret = hisi_qm_restart(qm); if (ret) { @@ -1375,6 +1394,9 @@ static int hpre_controller_reset_done(struct hpre *hpre) return -EPERM; }
+ hisi_qm_restart_done(qm); + hpre_hw_err_init(hpre); + return 0; }
@@ -1438,17 +1460,15 @@ static void hpre_set_hw_error(struct hpre *hisi_hpre, bool enable)
if (enable) { hisi_qm_hw_error_init(qm, QM_BASE_CE, - QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT, - 0, QM_DB_RANDOM_INVALID); + QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT, + 0, QM_DB_RANDOM_INVALID); hpre_hw_error_enable(hpre); } else { hisi_qm_hw_error_uninit(qm); hpre_hw_error_disable(hpre); } - }
- static int hpre_get_hw_error_status(struct hpre *hpre) { u32 err_sts; @@ -1589,6 +1609,7 @@ static struct pci_driver hpre_pci_driver = { .remove = hpre_remove, .sriov_configure = hpre_sriov_configure, .err_handler = &hpre_err_handler, + .shutdown = hpre_shutdown, };
static void hpre_register_debugfs(void) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 0def6ba571a1..9018c214455e 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -113,6 +113,7 @@ #define QM_ABNORMAL_INT_MASK 0x100004 #define QM_HW_ERROR_IRQ_DISABLE GENMASK(12, 0) #define QM_ABNORMAL_INT_STATUS 0x100008 +#define QM_ABNORMAL_INT_SET 0x10000c #define QM_ABNORMAL_INF00 0x100010 #define QM_FIFO_OVERFLOW_TYPE 0xc0 #define QM_FIFO_OVERFLOW_VF 0x3f @@ -163,6 +164,13 @@
#define WAIT_PERIOD 20 #define MAX_WAIT_COUNTS 1000 +#define MAX_WAIT_TASK_COUNTS 10 + +#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT +#define AM_CFG_PORT_WR_EN 0x30001C +#define AM_CFG_PORT_WR_EN_VALUE 0xFFFF +#define AM_ROB_ECC_INT_STS 0x300104 +#define ROB_ECC_ERR_MULTPL BIT(1)
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \ @@ -524,7 +532,6 @@ static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm) atomic_dec(&qp->qp_status.used); } } - } }
@@ -1134,13 +1141,13 @@ static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm) /* read err sts */ tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); error_status = qm->error_mask & tmp; - if (error_status) { - qm_log_hw_error(qm, error_status); - - /* clear err sts */ - writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); + if (error_status & QM_ECC_MBIT) + qm->err_ini.is_qm_ecc_mbit = 1; + else + qm->err_ini.is_qm_ecc_mbit = 0;
+ qm_log_hw_error(qm, error_status); return PCI_ERS_RESULT_NEED_RESET; }
@@ -1460,10 +1467,27 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) } EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
+/* Callback function should be called whether task completed or not. */ +static void qp_stop_fail_cb(struct hisi_qp *qp) +{ + struct hisi_qm *qm = qp->qm; + int cur_head, cur_tail; + int j, cnt, pos; + + cur_tail = qp->qp_status.sq_tail; + cnt = atomic_read(&qp->qp_status.used); + cur_head = (cur_tail + QM_Q_DEPTH - cnt) % QM_Q_DEPTH; + + for (j = 0; j < cnt; j++) { + pos = (j + cur_head) % QM_Q_DEPTH; + qp->req_cb(qp, qp->sqe + qm->sqe_size * pos); + atomic_dec(&qp->qp_status.used); + } +} + static int hisi_qm_stop_qp_nolock(struct hisi_qp *qp) { struct device *dev = &qp->qm->pdev->dev; - int i = 0;
/* it is stopped */ if (atomic_read(&qp->qp_status.flags) == QP_STOP) @@ -1473,14 +1497,11 @@ static int hisi_qm_stop_qp_nolock(struct hisi_qp *qp)
atomic_set(&qp->qp_status.flags, QP_STOP);
- while (atomic_read(&qp->qp_status.used)) { - i++; - msleep(WAIT_PERIOD); - if (i == MAX_WAIT_COUNTS) { - dev_err(dev, "Cannot drain out data for stopping, system may hang up!!!\n"); - break; - } - } + /* waiting for increase used count in hisi_qp_send */ + udelay(WAIT_PERIOD); + + if (atomic_read(&qp->qp_status.used)) + qp_stop_fail_cb(qp);
dev_dbg(dev, "stop queue %u!", qp->qp_id);
@@ -1528,8 +1549,9 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg) void *sqe = qm_get_avail_sqe(qp);
if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || - atomic_read(&qp->qm->status.flags) == QM_STOP)) { - dev_info(&qp->qm->pdev->dev, "QM resetting...\n"); + atomic_read(&qp->qm->status.flags) == QM_STOP) || + qp->is_resetting == true) { + dev_info_ratelimited(&qp->qm->pdev->dev, "QM resetting...\n"); return -EAGAIN; }
@@ -1897,7 +1919,6 @@ static int qm_register_uacce(struct hisi_qm *qm) if (qm->use_sva) { uacce->flags = UACCE_DEV_SVA | UACCE_DEV_DRVMAP_DUS; } else { - uacce->flags = UACCE_DEV_NOIOMMU | UACCE_DEV_DRVMAP_DUS; if (qm->ver == QM_HW_V1) @@ -1912,7 +1933,6 @@ static int qm_register_uacce(struct hisi_qm *qm) for (i = 0; i < UACCE_QFRT_MAX; i++) uacce->qf_pg_start[i] = UACCE_QFR_NA;
- return uacce_register(uacce); }
@@ -2512,6 +2532,43 @@ static int qm_stop_started_qp(struct hisi_qm *qm) return 0; }
+static void qm_set_resetting_flag(struct hisi_qm *qm) +{ + struct hisi_qp *qp; + int i; + + for (i = 0; i < qm->qp_num; i++) { + qp = qm->qp_array[i]; + if (qp && atomic_read(&qp->qp_status.flags) == QP_START) + qp->is_resetting = true; + } +} + +static void qm_wait_task_complete(struct hisi_qm *qm) +{ + struct hisi_qp *qp; + int tmcnt = 0; + int last_num; + int task_num; + int i; + + task_num = 0; + do { + last_num = task_num; + task_num = 0; + msleep(WAIT_PERIOD); + for (i = 0; i < qm->qp_num; i++) { + qp = qm->qp_array[i]; + if (qp) + task_num += atomic_read(&qp->qp_status.used); + } + if (task_num && last_num == task_num) + tmcnt++; + else + tmcnt = 0; + } while (task_num && tmcnt < MAX_WAIT_TASK_COUNTS); +} + /** * hisi_qm_stop() - Stop a qm. * @qm: The qm which will be stopped. @@ -2535,6 +2592,12 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r) goto err_unlock; }
+ if (qm->status.stop_reason == QM_SOFT_RESET || + qm->status.stop_reason == QM_FLR) { + qm_set_resetting_flag(qm); + qm_wait_task_complete(qm); + } + if (qm->status.stop_reason == QM_SOFT_RESET || qm->status.stop_reason == QM_FLR) { ret = qm_stop_started_qp(qm); @@ -2560,6 +2623,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
hisi_qm_clear_queues(qm); atomic_set(&qm->status.flags, QM_STOP); + err_unlock: up_write(&qm->qps_lock); return ret; @@ -2752,6 +2816,46 @@ int hisi_qm_get_hw_error_status(struct hisi_qm *qm) } EXPORT_SYMBOL_GPL(hisi_qm_get_hw_error_status);
+static pci_ers_result_t hisi_qm_dev_err_handle(struct hisi_qm *qm) +{ + u32 err_sts; + + if (!qm->err_ini.get_dev_hw_err_status || + !qm->err_ini.log_dev_hw_err) + return PCI_ERS_RESULT_RECOVERED; + + /* read err sts */ + err_sts = qm->err_ini.get_dev_hw_err_status(qm); + if (err_sts) { + if (err_sts & qm->err_ini.ecc_2bits_mask) + qm->err_ini.is_dev_ecc_mbit = 1; + else + qm->err_ini.is_dev_ecc_mbit = 0; + + qm->err_ini.log_dev_hw_err(qm, err_sts); + return PCI_ERS_RESULT_NEED_RESET; + } + + return PCI_ERS_RESULT_RECOVERED; +} + +pci_ers_result_t hisi_qm_process_dev_error(struct pci_dev *pdev) +{ + struct hisi_qm *qm = pci_get_drvdata(pdev); + pci_ers_result_t qm_ret, dev_ret; + + /* log qm error */ + qm_ret = hisi_qm_hw_error_handle(qm); + + /* log device error */ + dev_ret = hisi_qm_dev_err_handle(qm); + + return (qm_ret == PCI_ERS_RESULT_NEED_RESET || + dev_ret == PCI_ERS_RESULT_NEED_RESET) ? + PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED; +} +EXPORT_SYMBOL_GPL(hisi_qm_process_dev_error); + int hisi_qm_reg_test(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -2822,8 +2926,8 @@ int hisi_qm_set_vf_mse(struct hisi_qm *qm, bool set)
for (i = 0; i < MAX_WAIT_COUNTS; i++) { pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); - if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >> - PEH_SRIOV_CTRL_VF_MSE_SHIFT) + if (set == ((sriov_ctrl & PCI_SRIOV_CTRL_MSE) >> + PEH_SRIOV_CTRL_VF_MSE_SHIFT)) return 0;
udelay(1); @@ -2843,6 +2947,8 @@ int hisi_qm_set_msi(struct hisi_qm *qm, bool set) } else { pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, PEH_MSI_DISABLE); + if (qm->err_ini.is_qm_ecc_mbit || qm->err_ini.is_dev_ecc_mbit) + return 0;
mdelay(1); if (readl(qm->io_base + QM_PEH_DFX_INFO0)) @@ -2853,6 +2959,63 @@ int hisi_qm_set_msi(struct hisi_qm *qm, bool set) } EXPORT_SYMBOL_GPL(hisi_qm_set_msi);
+void hisi_qm_set_ecc(struct hisi_qm *qm) +{ + u32 nfe_enb; + + if ((!qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.is_dev_ecc_mbit) || + (qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.inject_dev_hw_err) || + (qm->err_ini.is_dev_ecc_mbit && qm->err_ini.inject_dev_hw_err)) + return; + + if (qm->err_ini.inject_dev_hw_err) + qm->err_ini.inject_dev_hw_err(qm); + else { + nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); + writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, + qm->io_base + QM_RAS_NFE_ENABLE); + writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); + qm->err_ini.is_qm_ecc_mbit = 1; + } +} +EXPORT_SYMBOL_GPL(hisi_qm_set_ecc); + +void hisi_qm_restart_prepare(struct hisi_qm *qm) +{ + if (!qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.is_dev_ecc_mbit) + return; + + /* close AM wr msi port */ + writel(qm->err_ini.qm_wr_port, qm->io_base + AM_CFG_PORT_WR_EN); + + /* clear dev ecc 2bit error source */ + if (qm->err_ini.clear_dev_hw_err_status) { + qm->err_ini.clear_dev_hw_err_status(qm, + qm->err_ini.ecc_2bits_mask); + } + + /* clear QM ecc mbit error source */ + writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE); + + /* clear AM Reorder Buffer ecc mbit source */ + writel(ROB_ECC_ERR_MULTPL, qm->io_base + AM_ROB_ECC_INT_STS); + + if (qm->err_ini.open_axi_master_ooo) + qm->err_ini.open_axi_master_ooo(qm); +} +EXPORT_SYMBOL_GPL(hisi_qm_restart_prepare); + +void hisi_qm_restart_done(struct hisi_qm *qm) +{ + if (!qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.is_dev_ecc_mbit) + return; + + writel(AM_CFG_PORT_WR_EN_VALUE, qm->io_base + AM_CFG_PORT_WR_EN); + qm->err_ini.is_qm_ecc_mbit = 0; + qm->err_ini.is_dev_ecc_mbit = 0; +} +EXPORT_SYMBOL_GPL(hisi_qm_restart_done); + MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Zhou Wang wangzhou1@hisilicon.com"); MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver"); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index b13aa364866d..e360796fe30d 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -52,7 +52,7 @@ #define AXI_M_CFG_ENABLE 0xffffffff #define QM_PEH_AXUSER_CFG 0x1000cc #define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0 -#define PEH_AXUSER_CFG 0x401001 +#define PEH_AXUSER_CFG 0x400801 #define PEH_AXUSER_CFG_ENABLE 0xffffffff
#define QM_DFX_MB_CNT_VF 0x104010 @@ -235,6 +235,21 @@ struct hisi_qm_status { int stop_reason; };
+struct hisi_qm; + +struct hisi_qm_err_ini { + u32 qm_wr_port; + u32 is_qm_ecc_mbit; + u32 is_dev_ecc_mbit; + u32 ecc_2bits_mask; + void (*open_axi_master_ooo)(struct hisi_qm *qm); + u32 (*get_dev_hw_err_status)(struct hisi_qm *qm); + void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts); + void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts); + /* for modules that cannot hold master ooo through the qm, such as zip */ + void (*inject_dev_hw_err)(struct hisi_qm *qm); +}; + struct hisi_qm { enum qm_hw_ver ver; enum qm_fun_type fun_type; @@ -257,7 +272,7 @@ struct hisi_qm { dma_addr_t aeqe_dma;
struct hisi_qm_status status; - + struct hisi_qm_err_ini err_ini; struct rw_semaphore qps_lock; unsigned long *qp_bitmap; struct hisi_qp **qp_array; @@ -355,10 +370,14 @@ void hisi_qm_clear_queues(struct hisi_qm *qm); enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev); int hisi_qm_restart(struct hisi_qm *qm); int hisi_qm_get_hw_error_status(struct hisi_qm *qm); +pci_ers_result_t hisi_qm_process_dev_error(struct pci_dev *pdev); int hisi_qm_reg_test(struct hisi_qm *qm); int hisi_qm_set_pf_mse(struct hisi_qm *qm, bool set); int hisi_qm_set_vf_mse(struct hisi_qm *qm, bool set); int hisi_qm_set_msi(struct hisi_qm *qm, bool set); +void hisi_qm_set_ecc(struct hisi_qm *qm); +void hisi_qm_restart_prepare(struct hisi_qm *qm); +void hisi_qm_restart_done(struct hisi_qm *qm);
struct hisi_acc_sgl_pool; struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, diff --git a/drivers/crypto/hisilicon/rde/rde.h b/drivers/crypto/hisilicon/rde/rde.h index c9ee329bc1e3..aa7887ad2d58 100644 --- a/drivers/crypto/hisilicon/rde/rde.h +++ b/drivers/crypto/hisilicon/rde/rde.h @@ -70,7 +70,7 @@ struct hisi_rde { #define RDE_DONE_SHIFT 7 #define RDE_PER_SRC_COEF_SIZE 32 #define RDE_PER_SRC_COEF_TIMES 4 -#define RDE_TASK_TMOUT_MS 10000 +#define RDE_TASK_TMOUT_MS 3000
#define RDE_GN_WITH_MODE(column, mode, parity) \ @@ -286,11 +286,12 @@ static inline void rde_bd_dump(struct hisi_rde_sqe *bd) { int i;
- pr_info("====== BD info start======\n"); + pr_info_ratelimited("====== BD info start======\n"); for (i = 0; i < sizeof(struct hisi_rde_sqe) / sizeof(u64); i++) - pr_info("sqe-word[%d]: 0x%llx.\n", i, *((u64 *)bd + i)); + pr_info_ratelimited("sqe-word[%d]: 0x%llx.\n", + i, *((u64 *)bd + i));
- pr_info("====== BD info end======\n"); + pr_info_ratelimited("====== BD info end======\n"); }
static inline void rde_table_dump(const struct hisi_rde_msg *req) @@ -299,26 +300,26 @@ static inline void rde_table_dump(const struct hisi_rde_msg *req)
for (i = 0; i < SRC_ADDR_TABLE_NUM; i++) { if (req->src_addr->content[i]) - pr_info("Table0 info[%d] is 0x%llx.\n", - i, req->src_addr->content[i]); + pr_info_ratelimited("Table0 info[%d] is 0x%llx.\n", + i, req->src_addr->content[i]); }
for (i = 0; i < SRC_DIF_TABLE_NUM; i++) { if (req->src_tag_addr->content[i]) - pr_info("Table1 info[%d] is 0x%llx.\n", - i, req->src_tag_addr->content[i]); + pr_info_ratelimited("Table1 info[%d] is 0x%llx.\n", + i, req->src_tag_addr->content[i]); }
for (i = 0; i < DST_ADDR_TABLE_NUM; i++) { if (req->dst_addr->content[i]) - pr_info("Table2 info[%d] is 0x%llx.\n", - i, req->dst_addr->content[i]); + pr_info_ratelimited("Table2 info[%d] is 0x%llx.\n", + i, req->dst_addr->content[i]); }
for (i = 0; i < DST_DIF_TABLE_NUM; i++) { if (req->dst_tag_addr->content[i]) - pr_info("Table3 info[%d] is 0x%llx.\n", - i, req->dst_tag_addr->content[i]); + pr_info_ratelimited("Table3 info[%d] is 0x%llx.\n", + i, req->dst_tag_addr->content[i]); } }
diff --git a/drivers/crypto/hisilicon/rde/rde_api.c b/drivers/crypto/hisilicon/rde/rde_api.c index 6fceb008d21b..1be468a40976 100644 --- a/drivers/crypto/hisilicon/rde/rde_api.c +++ b/drivers/crypto/hisilicon/rde/rde_api.c @@ -63,7 +63,7 @@ static u32 rde_matrix_len(u8 alg_type, u8 cm_len) break; case MPCC: len = (RDE_PER_SRC_COEF_SIZE * - RDE_PER_SRC_COEF_TIMES * cm_len); + RDE_PER_SRC_COEF_TIMES * cm_len); break; default: pr_err("[%s] Err alg type.\n", __func__); @@ -74,9 +74,8 @@ static u32 rde_matrix_len(u8 alg_type, u8 cm_len) }
static int rde_sgl_src_scatterlist_release(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, - struct hisi_rde_msg *req, - u32 num) + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, u32 num) { u32 i; int ret; @@ -84,8 +83,8 @@ static int rde_sgl_src_scatterlist_release(struct pci_dev *pdev, for (i = 0; i < num; i++) { if (req->src_record[i]) { ret = acc_sgl_phys_to_virt(pdev, - (void *)req->src_record[i], - rde_ctx->smmu_state); + (void *)req->src_record[i], + rde_ctx->smmu_state); if (ret) { dev_err(&pdev->dev, "[%s] Src[%d] fail.\n", __func__, i); @@ -108,8 +107,8 @@ static int rde_sgl_dst_scatterlist_release(struct pci_dev *pdev, for (i = 0; i < num; i++) { if (req->dst_record[i]) { ret = acc_sgl_phys_to_virt(pdev, - (void *)req->dst_record[i], - rde_ctx->smmu_state); + (void *)req->dst_record[i], + rde_ctx->smmu_state); if (ret) { dev_err(&pdev->dev, "[%s] Dst[%d] fail.\n", __func__, i); @@ -122,9 +121,8 @@ static int rde_sgl_dst_scatterlist_release(struct pci_dev *pdev, }
static void rde_pbuf_src_addr_unmap(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, - struct hisi_rde_msg *req, - u32 num) + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, u32 num) { u32 i; u32 gn_cnt; @@ -137,16 +135,16 @@ static void rde_pbuf_src_addr_unmap(struct pci_dev *pdev, gn_cnt = RDE_GN_CNT(i) + i; if (req->src_addr->content[gn_cnt]) { acc_phys_to_virt(pdev, - (dma_addr_t)req->src_addr->content[gn_cnt], - (size_t)udata->data_len, rde_ctx->smmu_state); + req->src_addr->content[gn_cnt], + (size_t)udata->data_len, + rde_ctx->smmu_state); } } }
static void rde_pbuf_dst_addr_unmap(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, - struct hisi_rde_msg *req, - u32 num) + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, u32 num) { u32 i; u32 gn_cnt; @@ -159,15 +157,15 @@ static void rde_pbuf_dst_addr_unmap(struct pci_dev *pdev, gn_cnt = RDE_GN_CNT(i) + i; if (req->dst_addr->content[gn_cnt]) { acc_phys_to_virt(pdev, - (dma_addr_t)req->dst_addr->content[gn_cnt], - (size_t)udata->data_len, rde_ctx->smmu_state); + req->dst_addr->content[gn_cnt], + (size_t)udata->data_len, + rde_ctx->smmu_state); } } }
-static void rde_cm_addr_unmap(struct pci_dev *pdev, - struct hisi_rde_sqe *bd, u8 alg_type, - struct hisi_rde_ctx *rde_ctx) +static void rde_cm_addr_unmap(struct pci_dev *pdev, struct hisi_rde_sqe *bd, + u8 alg_type, struct hisi_rde_ctx *rde_ctx) { u32 matrix_len;
@@ -177,13 +175,12 @@ static void rde_cm_addr_unmap(struct pci_dev *pdev, matrix_len = rde_matrix_len(alg_type, (u8)bd->cm_len); if (bd->coef_matrix_addr && matrix_len) acc_phys_to_virt(pdev, (dma_addr_t)bd->coef_matrix_addr, - (size_t)matrix_len, rde_ctx->smmu_state); + (size_t)matrix_len, rde_ctx->smmu_state); }
static void rde_bd_addr_release(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, - struct hisi_rde_msg *req, - u8 buf_mode) + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, u8 buf_mode) { int ret = 0; struct raid_ec_ctrl *udata = req->udata; @@ -195,29 +192,36 @@ static void rde_bd_addr_release(struct pci_dev *pdev, rde_pbuf_dst_addr_unmap(pdev, rde_ctx, req, dst_num); } else if (buf_mode == SGL) { ret = rde_sgl_src_scatterlist_release(pdev, - rde_ctx, req, src_num); + rde_ctx, req, src_num); if (ret) dev_err(&pdev->dev, "[%s] Src release fail.\n", __func__);
ret = rde_sgl_dst_scatterlist_release(pdev, - rde_ctx, req, dst_num); + rde_ctx, req, dst_num); if (ret) dev_err(&pdev->dev, "[%s] Dst release fail.\n", __func__); } }
-static int rde_cm_len_check(struct device *dev, u8 alg_type, u8 cm_len) +static int rde_cm_len_check(struct device *dev, struct raid_ec_ctrl *req, + u8 alg_type) { + if (unlikely(req->src_num > RDE_MAX_SRC_PLATE_NUM || + req->dst_num > RDE_MAX_DST_PLATE_NUM)) { + dev_err(dev, "Error!Invalid disk num.\n"); + return -EINVAL; + } + if (alg_type == MPCC) { - if (unlikely(cm_len > RDE_MPCC_MAX_SRC_NUM)) { + if (unlikely(req->cm_len > RDE_MPCC_MAX_SRC_NUM)) { dev_err(dev, "Error!mpcc cmlen should smaller than 17.\n"); return -EINVAL; } } else if (alg_type == PQ_FLEXEC) { - if (unlikely(cm_len > RDE_FLEXEC_MAX_SRC_NUM)) { + if (unlikely(req->cm_len > RDE_FLEXEC_MAX_SRC_NUM)) { dev_err(dev, "Error!flexec cmlen should smaller than 32.\n"); return -EINVAL; @@ -230,9 +234,8 @@ static int rde_cm_len_check(struct device *dev, u8 alg_type, u8 cm_len) return 0; }
-static int rde_io_para_check(struct acc_ctx *ctx, - struct raid_ec_ctrl *req, - u8 op_type, u8 alg_type) +static int rde_io_para_check(struct acc_ctx *ctx, struct raid_ec_ctrl *req, + u8 op_type, u8 alg_type) { struct hisi_rde_ctx *rde_ctx;
@@ -256,7 +259,7 @@ static int rde_io_para_check(struct acc_ctx *ctx, }
if (unlikely(!req->input_block || !req->data_len)) { - dev_err(rde_ctx->dev, "Error!invalid input_block.\n"); + dev_err(rde_ctx->dev, "Error!invalid input block.\n"); return -EINVAL; }
@@ -265,7 +268,7 @@ static int rde_io_para_check(struct acc_ctx *ctx, return -EINVAL; }
- return rde_cm_len_check(rde_ctx->dev, alg_type, req->cm_len); + return rde_cm_len_check(rde_ctx->dev, req, alg_type); }
static void src_dif_package(struct hisi_rde_msg *req) @@ -320,9 +323,10 @@ static void dst_dif_package(struct hisi_rde_msg *req) } }
-static int rde_disk_sgl_addr_translation(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, struct sgl_hw *sgl_addr, - u64 *content, u64 *record) +static int rde_disk_sgl_addr_tran(struct pci_dev *pdev, + struct hisi_rde_ctx *rde_ctx, + struct sgl_hw *sgl_addr, + u64 *content, u64 *record) { int ret; void *sg_head = NULL; @@ -330,7 +334,7 @@ static int rde_disk_sgl_addr_translation(struct pci_dev *pdev, switch (rde_ctx->addr_type) { case VA_FROM_NORMAL_DMA_ZONE: ret = acc_sgl_virt_to_phys(pdev, sgl_addr, &sg_head, - rde_ctx->smmu_state); + rde_ctx->smmu_state); if (unlikely(ret)) return ret; break; @@ -349,9 +353,8 @@ static int rde_disk_sgl_addr_translation(struct pci_dev *pdev, }
static int sgl_src_addr_package(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, - struct hisi_rde_msg *req, - u8 mode) + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, u8 mode) { int ret, r_ret; u32 i; @@ -369,21 +372,22 @@ static int sgl_src_addr_package(struct pci_dev *pdev,
memset(&req->src_record[0], 0, num * sizeof(u64)); for (i = 0; i < num; i++) { - gn = RDE_GN_WITH_MODE(rde_sgl_src->column, - mode, rde_sgl_src->parity); - sgl_data = (rde_sgl_src->buf_offset << - SGL_DATA_OFFSET_SHIFT) | (u32)gn; + gn = RDE_GN_WITH_MODE(rde_sgl_src->column, mode, + rde_sgl_src->parity); + sgl_data = (rde_sgl_src->buf_offset << SGL_DATA_OFFSET_SHIFT) | + (u32)gn; gn_cnt = RDE_GN_CNT(i) + i; gn_flag = RDE_GN_FLAG(i); cur_cnt = gn_cnt - gn_flag; req->src_addr->content[cur_cnt] |= ((u64)sgl_data << RDE_GN_SHIFT(gn_flag)); - ret = rde_disk_sgl_addr_translation(pdev, rde_ctx, - rde_sgl_src->ctrl, &req->src_addr->content[gn_cnt], - &req->src_record[i]); + ret = rde_disk_sgl_addr_tran(pdev, rde_ctx, + rde_sgl_src->ctrl, + &req->src_addr->content[gn_cnt], + &req->src_record[i]); if (ret) { - r_ret = rde_sgl_src_scatterlist_release(pdev, - rde_ctx, req, i); + r_ret = rde_sgl_src_scatterlist_release(pdev, rde_ctx, + req, i); if (r_ret) return r_ret; return ret; @@ -396,8 +400,8 @@ static int sgl_src_addr_package(struct pci_dev *pdev, }
static int sgl_dst_addr_package(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, - struct hisi_rde_msg *req) + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req) { int ret, r_ret; u32 i; @@ -417,19 +421,20 @@ static int sgl_dst_addr_package(struct pci_dev *pdev, memset(&req->dst_record[0], 0, num * sizeof(u64)); for (i = 0; i < num; i++) { gn = (u8)(rde_sgl_dst->column); - sgl_data = (rde_sgl_dst->buf_offset << - SGL_DATA_OFFSET_SHIFT) | (u32)gn; + sgl_data = (rde_sgl_dst->buf_offset << SGL_DATA_OFFSET_SHIFT) | + (u32)gn; gn_cnt = RDE_GN_CNT(i) + i; gn_flag = RDE_GN_FLAG(i); cur_cnt = gn_cnt - gn_flag; req->dst_addr->content[cur_cnt] |= ((u64)sgl_data << - RDE_GN_SHIFT(gn_flag)); - ret = rde_disk_sgl_addr_translation(pdev, rde_ctx, - rde_sgl_dst->ctrl, &req->dst_addr->content[gn_cnt], - &req->dst_record[i]); + RDE_GN_SHIFT(gn_flag)); + ret = rde_disk_sgl_addr_tran(pdev, rde_ctx, + rde_sgl_dst->ctrl, + &req->dst_addr->content[gn_cnt], + &req->dst_record[i]); if (ret) { - r_ret = rde_sgl_dst_scatterlist_release(pdev, - rde_ctx, req, i); + r_ret = rde_sgl_dst_scatterlist_release(pdev, rde_ctx, + req, i); if (r_ret) return r_ret; return ret; @@ -441,15 +446,16 @@ static int sgl_dst_addr_package(struct pci_dev *pdev, return 0; }
-static int rde_disk_pbuf_addr_translation(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, u64 *content, char *addr, u32 data_len) +static int rde_disk_pbuf_addr_tran(struct pci_dev *pdev, + struct hisi_rde_ctx *rde_ctx, + u64 *content, char *addr, u32 data_len) { dma_addr_t pa = 0;
switch (rde_ctx->addr_type) { case VA_FROM_NORMAL_DMA_ZONE: pa = acc_virt_to_phys(pdev, addr, (size_t)data_len, - rde_ctx->smmu_state); + rde_ctx->smmu_state); break; case VA_FROM_HIGHMEM_ZONE: pa = acc_pfn_to_phys(addr); @@ -473,9 +479,8 @@ static int rde_disk_pbuf_addr_translation(struct pci_dev *pdev, }
static int pbuf_src_addr_package(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, - struct hisi_rde_msg *req, - u8 mode) + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, u8 mode) { u32 i; int ret; @@ -487,16 +492,17 @@ static int pbuf_src_addr_package(struct pci_dev *pdev, struct rde_pbuf *rde_pbuf_src = (struct rde_pbuf *)(ctrl->src_data);
for (i = 0; i < num; i++) { - gn = RDE_GN_WITH_MODE(rde_pbuf_src->column, - mode, rde_pbuf_src->parity); + gn = RDE_GN_WITH_MODE(rde_pbuf_src->column, mode, + rde_pbuf_src->parity); gn_cnt = RDE_GN_CNT(i) + i; gn_flag = RDE_GN_FLAG(i); cur_cnt = gn_cnt - gn_flag; req->src_addr->content[cur_cnt] |= ((u64)gn << - RDE_GN_SHIFT(gn_flag)); - ret = rde_disk_pbuf_addr_translation(pdev, rde_ctx, - &req->src_addr->content[gn_cnt], - rde_pbuf_src->pbuf, data_len_nbytes); + RDE_GN_SHIFT(gn_flag)); + ret = rde_disk_pbuf_addr_tran(pdev, rde_ctx, + &req->src_addr->content[gn_cnt], + rde_pbuf_src->pbuf, + data_len_nbytes); if (ret) { rde_pbuf_src_addr_unmap(pdev, rde_ctx, req, i); return ret; @@ -509,8 +515,8 @@ static int pbuf_src_addr_package(struct pci_dev *pdev, }
static int pbuf_dst_addr_package(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, - struct hisi_rde_msg *req) + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req) { u32 i; int ret; @@ -527,9 +533,9 @@ static int pbuf_dst_addr_package(struct pci_dev *pdev, gf_flag = RDE_GN_FLAG(i); cur_cnt = gf_cnt - gf_flag; req->dst_addr->content[cur_cnt] |= ((u64)gf_coef << - RDE_GN_SHIFT(gf_flag)); - ret = rde_disk_pbuf_addr_translation(pdev, rde_ctx, - &req->dst_addr->content[gf_cnt], + RDE_GN_SHIFT(gf_flag)); + ret = rde_disk_pbuf_addr_tran(pdev, rde_ctx, + &req->dst_addr->content[gf_cnt], rde_pbuf_dst->pbuf, data_len_nbytes); if (ret) { rde_pbuf_dst_addr_unmap(pdev, rde_ctx, req, i); @@ -543,11 +549,12 @@ static int pbuf_dst_addr_package(struct pci_dev *pdev, }
static int hisi_rde_fill_addr_tlb(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, - struct hisi_rde_msg *req, - struct rde_type *type) + struct hisi_rde_ctx *rde_ctx, + struct hisi_rde_msg *req, + struct rde_type *type) { int ret, r_ret; + u32 num = req->udata->src_num;
if (type->buf_mode == PBUF) { ret = pbuf_src_addr_package(pdev, rde_ctx, req, type->alg_mode); @@ -558,8 +565,7 @@ static int hisi_rde_fill_addr_tlb(struct pci_dev *pdev, ret = pbuf_dst_addr_package(pdev, rde_ctx, req); if (ret) { dev_err(&pdev->dev, "Pbuf dst addr package fail.\n"); - rde_pbuf_src_addr_unmap(pdev, rde_ctx, req, - req->udata->src_num); + rde_pbuf_src_addr_unmap(pdev, rde_ctx, req, num); return ret; } } else if (type->buf_mode == SGL) { @@ -572,7 +578,7 @@ static int hisi_rde_fill_addr_tlb(struct pci_dev *pdev, if (ret) { dev_err(&pdev->dev, "Sgl dst addr package fail.\n"); r_ret = rde_sgl_src_scatterlist_release(pdev, rde_ctx, - req, req->udata->src_num); + req, num); if (r_ret) return r_ret; return ret; @@ -586,8 +592,9 @@ static int hisi_rde_fill_addr_tlb(struct pci_dev *pdev, }
static int rde_cm_addr_translation(struct pci_dev *pdev, - struct hisi_rde_ctx *rde_ctx, struct raid_ec_ctrl *ctrl, - struct hisi_rde_sqe *bd, u8 alg_type) + struct hisi_rde_ctx *rde_ctx, + struct raid_ec_ctrl *ctrl, + struct hisi_rde_sqe *bd, u8 alg_type) { u32 matrix_len = 0; dma_addr_t pa = 0; @@ -595,7 +602,7 @@ static int rde_cm_addr_translation(struct pci_dev *pdev, if (rde_ctx->addr_type != PA_PASS_THROUGH) { matrix_len = rde_matrix_len(alg_type, ctrl->cm_len); pa = acc_virt_to_phys(pdev, ctrl->coe_matrix, - (size_t)matrix_len, rde_ctx->smmu_state); + (size_t)matrix_len, rde_ctx->smmu_state); if (unlikely(!pa)) { dev_err(rde_ctx->dev, "[%s] Coe_matrix virt to phys fail.\n", @@ -610,7 +617,7 @@ static int rde_cm_addr_translation(struct pci_dev *pdev, }
int hisi_rde_fill_sqe(struct hisi_rde_ctx *rde_ctx, struct hisi_rde_msg *req, - struct rde_type *type) + struct rde_type *type) { int ret; struct raid_ec_ctrl *ctrl = req->udata; @@ -622,7 +629,7 @@ int hisi_rde_fill_sqe(struct hisi_rde_ctx *rde_ctx, struct hisi_rde_msg *req, bd->op_tag = q_id * rde_ctx->session_num + req->req_id; bd->alg_blk_size = ctrl->alg_blk_size; bd->cm_type = (type->alg_mode == - ACC_OPT_RCT) ? CM_DECODE : CM_ENCODE; + ACC_OPT_RCT) ? CM_DECODE : CM_ENCODE; bd->cm_le = ctrl->cm_load; bd->abort = NO_ABORT; bd->src_nblks = ctrl->src_num; @@ -634,18 +641,18 @@ int hisi_rde_fill_sqe(struct hisi_rde_ctx *rde_ctx, struct hisi_rde_msg *req, ctrl->dst_dif.ctrl.verify.grd_verify_type; } bd->op_type = type->alg_mode | type->mem_mode | - type->buf_mode | type->alg_type; + type->buf_mode | type->alg_type; bd->block_size = ctrl->block_size; bd->page_pad_type = ctrl->dst_dif.ctrl.gen.page_layout_pad_type; - bd->dif_type = (ctrl->dst_dif.ctrl.gen.grd_gen_type) ? - RDE_DIF : NO_RDE_DIF; + bd->dif_type = ((ctrl->dst_dif.ctrl.gen.grd_gen_type) ? + RDE_DIF : NO_RDE_DIF); bd->crciv_sel = CRCIV1; bd->crciv_en = CRCIV; bd->cm_len = ctrl->cm_len; bd->transfer_size = ctrl->input_block - 1;
- ret = rde_cm_addr_translation(pdev, rde_ctx, ctrl, - bd, type->alg_type); + ret = rde_cm_addr_translation(pdev, rde_ctx, ctrl, bd, + type->alg_type); if (ret) return ret; bd->src_addr = req->src_dma_addr; @@ -708,15 +715,18 @@ static int rde_task_error_log(struct pci_dev *pdev, u8 err_sts) while (err->msg) { if (err_sts == err->status) { dev_err_ratelimited(&pdev->dev, - "[%s] [Error status=0x%x] found.\n", - err->msg, err->status); + "[%s][Error status=0x%x] found.\n", + err->msg, err->status); break; }
err++; }
- if (err_sts < RDE_CRC_CHK_ERR || err_sts > RDE_DISK16_VERIFY) + /* err_sts of 0 means a fatal engine error */ + if (err_sts == RDE_STATUS_NULL) + return -EAGAIN; + else if (err_sts < RDE_CRC_CHK_ERR || err_sts > RDE_DISK16_VERIFY) return ACC_INVALID_PARAM; else if (err_sts >= RDE_CRC_CHK_ERR && err_sts <= RDE_REF_CHK_ERR) return ACC_RDE_DIF_ERR; @@ -740,11 +750,8 @@ static void rde_cb(struct hisi_qp *qp, void *resp) req = &rde_ctx->req_list[req_id]; ctrl = req->udata; err_status = wb_sqe->status & RDE_STATUS_MSK; - if (wb_sqe->status != RDE_TASK_DONE_STATUS) { + if (wb_sqe->status != RDE_TASK_DONE_STATUS) req->result = rde_task_error_log(pdev, err_status); - rde_bd_dump(wb_sqe); - rde_table_dump(req); - }
if (ctx->cb) { if (rde_ctx->addr_type != PA_PASS_THROUGH) { @@ -760,7 +767,7 @@ static void rde_cb(struct hisi_qp *qp, void *resp) }
int hisi_rde_io_proc(struct acc_ctx *ctx, struct raid_ec_ctrl *ctrl, - u8 op_type, u8 alg_type, bool sync) + u8 op_type, u8 alg_type, bool sync) { int ret, id; struct hisi_rde_ctx *rde_ctx; @@ -809,7 +816,8 @@ int hisi_rde_io_proc(struct acc_ctx *ctx, struct raid_ec_ctrl *ctrl, return ret;
if (wait_for_completion_timeout(&req->completion, - msecs_to_jiffies(RDE_TASK_TMOUT_MS)) == 0) { + msecs_to_jiffies(RDE_TASK_TMOUT_MS)) + == 0) { dev_err_ratelimited(rde_ctx->dev, "Sync mode task timeout.\n"); ret = -ETIME; goto addr_unmap; @@ -864,32 +872,32 @@ static void hisi_rde_release_qp(struct hisi_rde_ctx *rde_ctx) static int hisi_rde_tbl_init(struct device *dev, struct hisi_rde_msg *req) { req->src_addr = dma_alloc_coherent(dev, - (size_t)sizeof(struct rde_src_tbl), - &req->src_dma_addr, GFP_KERNEL); + (size_t)sizeof(struct rde_src_tbl), + &req->src_dma_addr, GFP_KERNEL); if (!req->src_addr) { dev_err(dev, "[%s] Alloc rde_src_tlb failed.\n", __func__); return -ENOMEM; }
req->dst_addr = dma_alloc_coherent(dev, - (size_t)sizeof(struct rde_dst_tbl), - &req->dst_dma_addr, GFP_KERNEL); + (size_t)sizeof(struct rde_dst_tbl), + &req->dst_dma_addr, GFP_KERNEL); if (!req->dst_addr) { dev_err(dev, "[%s] Alloc rde_dst_tlb failed.\n", __func__); return -ENOMEM; }
req->src_tag_addr = dma_alloc_coherent(dev, - (size_t)sizeof(struct rde_src_tag_tbl), - &req->src_tag_dma_addr, GFP_KERNEL); + (size_t)sizeof(struct rde_src_tag_tbl), + &req->src_tag_dma_addr, GFP_KERNEL); if (!req->src_tag_addr) { dev_err(dev, "[%s] Alloc rde_src_tag_tlb failed.\n", __func__); return -ENOMEM; }
req->dst_tag_addr = dma_alloc_coherent(dev, - (size_t)sizeof(struct rde_dst_tag_tbl), - &req->dst_tag_dma_addr, GFP_KERNEL); + (size_t)sizeof(struct rde_dst_tag_tbl), + &req->dst_tag_dma_addr, GFP_KERNEL); if (!req->dst_tag_addr) { dev_err(dev, "[%s] Alloc rde_dst_tag_tlb failed.\n", __func__); return -ENOMEM; @@ -907,25 +915,25 @@ static void hisi_rde_tbl_deinit(struct device *dev, struct hisi_rde_msg *req)
if (req->src_addr) { dma_free_coherent(dev, (size_t)sizeof(struct rde_src_tbl), - req->src_addr, req->src_dma_addr); + req->src_addr, req->src_dma_addr); req->src_addr = NULL; }
if (req->dst_addr) { dma_free_coherent(dev, (size_t)sizeof(struct rde_dst_tbl), - req->dst_addr, req->dst_dma_addr); + req->dst_addr, req->dst_dma_addr); req->dst_addr = NULL; }
if (req->src_tag_addr) { dma_free_coherent(dev, (size_t)sizeof(struct rde_src_tag_tbl), - req->src_tag_addr, req->src_tag_dma_addr); + req->src_tag_addr, req->src_tag_dma_addr); req->src_tag_addr = NULL; }
if (req->dst_tag_addr) { dma_free_coherent(dev, (size_t)sizeof(struct rde_dst_tag_tbl), - req->dst_tag_addr, req->dst_tag_dma_addr); + req->dst_tag_addr, req->dst_tag_dma_addr); req->dst_tag_addr = NULL; } } @@ -953,13 +961,13 @@ static int hisi_rde_ctx_init(struct hisi_rde_ctx *rde_ctx, int qlen) int ret;
spin_lock_init(&rde_ctx->req_lock); - rde_ctx->req_bitmap = kcalloc(BITS_TO_LONGS(qlen), - sizeof(long), GFP_KERNEL); + rde_ctx->req_bitmap = kcalloc(BITS_TO_LONGS(qlen), sizeof(long), + GFP_KERNEL); if (!rde_ctx->req_bitmap) return -ENOMEM;
rde_ctx->req_list = kcalloc(qlen, sizeof(struct hisi_rde_msg), - GFP_KERNEL); + GFP_KERNEL); if (!rde_ctx->req_list) { kfree(rde_ctx->req_bitmap); rde_ctx->req_bitmap = NULL; diff --git a/drivers/crypto/hisilicon/rde/rde_api.h b/drivers/crypto/hisilicon/rde/rde_api.h index 4e19386d7f99..0f9021b4c1aa 100644 --- a/drivers/crypto/hisilicon/rde/rde_api.h +++ b/drivers/crypto/hisilicon/rde/rde_api.h @@ -308,7 +308,7 @@ struct acc_dif { * @input_block: number of sector * @data_len: data len of per disk, block_size (with dif)* input_block * @buf_type: denoted by ACC_BUF_TYPE_E - * @src_dif����dif information of source disks + * @src_dif������dif information of source disks * @dst_dif: dif information of dest disks * @cm_load: coe_matrix reload control, 0: do not load, 1: load * @cm_len: length of loaded coe_matrix, equal to src_num diff --git a/drivers/crypto/hisilicon/rde/rde_data.c b/drivers/crypto/hisilicon/rde/rde_data.c index 0bc5173231e7..c25d3f36b9bf 100644 --- a/drivers/crypto/hisilicon/rde/rde_data.c +++ b/drivers/crypto/hisilicon/rde/rde_data.c @@ -90,7 +90,8 @@ static void acc_sgl_to_scatterlist(struct pci_dev *pdev, struct sgl_hw *data, entry->buf); i++) { sg_set_buf(sglist, (void *)entry->buf, entry->len); pa = acc_virt_to_phys(pdev, sg_virt(sglist), - (size_t)sglist->length, smmu_state); + (size_t)sglist->length, + smmu_state); sg_dma_address(sglist) = pa; sglist++; entry->buf = (char *)pa; @@ -99,11 +100,12 @@ static void acc_sgl_to_scatterlist(struct pci_dev *pdev, struct sgl_hw *data, if (cur_sgl->next) { next_sgl = cur_sgl->next; sg_set_buf(sglist, (void *)next_sgl, - (u32)(sizeof(struct sgl_hw) + - sizeof(struct sgl_entry_hw) * - (next_sgl->entry_sum_in_sgl))); + (u32)(sizeof(struct sgl_hw) + + sizeof(struct sgl_entry_hw) * + (next_sgl->entry_sum_in_sgl))); pa = acc_virt_to_phys(pdev, sg_virt(sglist), - (size_t)sglist->length, smmu_state); + (size_t)sglist->length, + smmu_state); sg_dma_address(sglist) = pa; sglist++; cur_sgl->next = (struct sgl_hw *)pa; @@ -126,7 +128,7 @@ int acc_sgl_virt_to_phys(struct pci_dev *pdev, struct sgl_hw *data, }
if (unlikely(!data->entry_sum_in_sgl) || - data->entry_sum_in_sgl > data->entry_num_in_sgl) { + data->entry_sum_in_sgl > data->entry_num_in_sgl) { pr_err("[%s] Para sge num is wrong.\n", __func__); return -EINVAL; } @@ -141,9 +143,9 @@ int acc_sgl_virt_to_phys(struct pci_dev *pdev, struct sgl_hw *data, *sglist_head = sglist; sg_init_table(sglist, addr_cnt); sg_set_buf(sglist, (void *)data, (u32)(sizeof(struct sgl_hw) + - sizeof(struct sgl_entry_hw) * (data->entry_sum_in_sgl))); + sizeof(struct sgl_entry_hw) * (data->entry_sum_in_sgl))); sg_dma_address(sglist) = acc_virt_to_phys(pdev, sg_virt(sglist), - (size_t)sglist->length, smmu_state); + (size_t)sglist->length, smmu_state); sglist++; acc_sgl_to_scatterlist(pdev, data, sglist, smmu_state);
@@ -170,7 +172,7 @@ int acc_sgl_phys_to_virt(struct pci_dev *pdev, void *sglist_head, sg = sglist; cur_sgl = (struct sgl_hw *)sg_virt(sg); acc_phys_to_virt(pdev, sg_dma_address(sg), - (size_t)sg->length, smmu_state); + (size_t)sg->length, smmu_state); while (cur_sgl) { entry = cur_sgl->entries; for (i = 0; (i < cur_sgl->entry_sum_in_sgl && @@ -178,12 +180,12 @@ int acc_sgl_phys_to_virt(struct pci_dev *pdev, void *sglist_head, sg = sg_next(sg); if (unlikely(!sg)) { pr_err("[%s][%d]Scatterlist happens to be NULL.\n", - __func__, __LINE__); + __func__, __LINE__); goto FAIL; } entry->buf = (char *)sg_virt(sg); acc_phys_to_virt(pdev, sg_dma_address(sg), - (size_t)sg->length, smmu_state); + (size_t)sg->length, smmu_state); entry++; }
@@ -191,12 +193,12 @@ int acc_sgl_phys_to_virt(struct pci_dev *pdev, void *sglist_head, sg = sg_next(sg); if (unlikely(!sg)) { pr_err("[%s][%d]Scatterlist happens to be NULL.\n", - __func__, __LINE__); + __func__, __LINE__); goto FAIL; } next_sgl = (struct sgl_hw *)sg_virt(sg); acc_phys_to_virt(pdev, sg_dma_address(sg), - (size_t)sg->length, smmu_state); + (size_t)sg->length, smmu_state); cur_sgl->next = next_sgl; } else { next_sgl = NULL; diff --git a/drivers/crypto/hisilicon/rde/rde_main.c b/drivers/crypto/hisilicon/rde/rde_main.c index 6e7d02913743..c3385f0ec981 100644 --- a/drivers/crypto/hisilicon/rde/rde_main.c +++ b/drivers/crypto/hisilicon/rde/rde_main.c @@ -34,8 +34,6 @@ #define HRDE_RD_TMOUT_US 1000 #define FORMAT_DECIMAL 10 #define HRDE_RST_TMOUT_MS 400 -#define HRDE_OOO_DFX_NUM 9 -#define HRDE_DFX_NUM 14 #define HRDE_ENABLE 1 #define HRDE_DISABLE 0 #define HRDE_PCI_COMMAND_INVALID 0xFFFFFFFF @@ -48,6 +46,7 @@ #define HRDE_INT_ENABLE 0x0 #define HRDE_INT_DISABLE 0x3ffff #define HRDE_INT_SOURCE 0x31030c +#define HRDE_INT_SOURCE_CLEAR GENMASK(17, 0) #define HRDE_INT_STATUS 0x310318 #define HRDE_DFX_CTRL_0 0x310240 #define HRDE_ECC_ERR 0x310234 @@ -68,6 +67,7 @@ #define CHN_CFG 0x5010101 #define HRDE_AXI_SHUTDOWN_EN BIT(26) #define HRDE_AXI_SHUTDOWN_DIS 0xFBFFFFFF +#define HRDE_WR_MSI_PORT 0xFFFE #define HRDE_AWUSER_BD_1 0x310104 #define HRDE_ARUSER_BD_1 0x310114 #define HRDE_ARUSER_SGL_1 0x310124 @@ -79,14 +79,15 @@ #define HRDE_ECC_2BIT_ERR BIT(1) #define HRDE_ECC_1BIT_SHIFT 16 #define HRDE_ECC_2BIT_CNT_MSK GENMASK(15, 0) -#define HRDE_STATE_INT_ERR GENMASK(10, 2) +#define HRDE_STATE_INT_ERR GENMASK(11, 2) +#define HRDE_AM_CURR_PORT_STS 0x300100 +#define HRDE_MASTER_TRANS_RET 0x300150 #define HRDE_FSM_MAX_CNT 0x310280 #define HRDE_QM_IDEL_STATUS 0x1040e4 #define HRDE_QM_PEH_DFX_INFO0 0x1000fc #define PEH_MSI_MASK_SHIFT 0x90 #define HRDE_MASTER_GLOBAL_CTRL 0x300000 #define MASTER_GLOBAL_CTRL_SHUTDOWN 0x1 -#define HRDE_MASTER_TRANS_RETURN 0x300150 #define MASTER_TRANS_RETURN_RW 0x3 #define CACHE_CTL 0x1833 #define HRDE_DBGFS_VAL_MAX_LEN 20 @@ -403,13 +404,13 @@ static int current_bd_write(struct ctrl_debug_file *file, u32 val) struct hisi_qm *qm = file_to_qm(file); u32 tmp = 0;
- if (val >= (HRDE_SQE_SIZE / sizeof(u32))) { + if (val >= (HRDE_SQE_SIZE / sizeof(u32))) { pr_err("Width index should be smaller than 16.\n"); return -EINVAL; }
tmp = HRDE_PROBE_DATA_EN | HRDE_PROBE_EN | - (val << HRDE_STRB_CS_SHIFT); + (val << HRDE_STRB_CS_SHIFT); writel(tmp, qm->io_base + HRDE_PROBE_ADDR);
return 0; @@ -505,8 +506,12 @@ static int hisi_rde_chn_debug_init(struct hisi_rde_ctrl *ctrl) struct debugfs_regset32 *regset, *regset_ooo; struct dentry *tmp_d, *tmp; char buf[HRDE_DBGFS_VAL_MAX_LEN]; + int ret; + + ret = snprintf(buf, HRDE_DBGFS_VAL_MAX_LEN, "rde_dfx"); + if (ret < 0) + return -ENOENT;
- snprintf(buf, HRDE_DBGFS_VAL_MAX_LEN, "rde_dfx"); tmp_d = debugfs_create_dir(buf, ctrl->debug_root); if (!tmp_d) return -ENOENT; @@ -517,8 +522,7 @@ static int hisi_rde_chn_debug_init(struct hisi_rde_ctrl *ctrl) regset->regs = hrde_dfx_regs; regset->nregs = ARRAY_SIZE(hrde_dfx_regs); regset->base = qm->io_base; - tmp = debugfs_create_regset32("chn_regs", - 0444, tmp_d, regset); + tmp = debugfs_create_regset32("chn_regs", 0444, tmp_d, regset); if (!tmp) return -ENOENT;
@@ -528,8 +532,7 @@ static int hisi_rde_chn_debug_init(struct hisi_rde_ctrl *ctrl) regset_ooo->regs = hrde_ooo_dfx_regs; regset_ooo->nregs = ARRAY_SIZE(hrde_ooo_dfx_regs); regset_ooo->base = qm->io_base; - tmp = debugfs_create_regset32("ooo_regs", - 0444, tmp_d, regset_ooo); + tmp = debugfs_create_regset32("ooo_regs", 0444, tmp_d, regset_ooo); if (!tmp) return -ENOENT;
@@ -602,7 +605,6 @@ static void hisi_rde_engine_init(struct hisi_rde *hisi_rde) readl(hisi_rde->qm.io_base + HRDE_OP_ERR_CNT); readl(hisi_rde->qm.io_base + HRDE_OP_ABORT_CNT); writel(WRITE_CLEAR_VAL, hisi_rde->qm.io_base + HRDE_FIFO_STAT_0); - writel(WRITE_CLEAR_VAL, hisi_rde->qm.io_base + HRDE_INT_SOURCE); writel(WRITE_CLEAR_VAL, hisi_rde->qm.io_base + HRDE_DFX_STAT_7); writel(WRITE_CLEAR_VAL, hisi_rde->qm.io_base + HRDE_DFX_STAT_8);
@@ -653,17 +655,19 @@ static void hisi_rde_hw_error_set_state(struct hisi_rde *hisi_rde, bool state)
val = readl(hisi_rde->qm.io_base + HRDE_CFG); if (state) { + writel(HRDE_INT_SOURCE_CLEAR, + hisi_rde->qm.io_base + HRDE_INT_SOURCE); writel(HRDE_RAS_ENABLE, hisi_rde->qm.io_base + HRDE_RAS_INT_MSK); /* bd prefetch should bd masked to prevent misreport */ writel((HRDE_INT_ENABLE | BIT(8)), hisi_rde->qm.io_base + HRDE_INT_MSK); - /* when m-bit error occur, master ooo will close */ + /* close master ooo when a multi-bit ECC error happens */ val = val | HRDE_AXI_SHUTDOWN_EN; } else { writel(ras_msk, hisi_rde->qm.io_base + HRDE_RAS_INT_MSK); writel(HRDE_INT_DISABLE, hisi_rde->qm.io_base + HRDE_INT_MSK); - /* when m-bit error occur, master ooo will not close */ + /* keep master ooo open when a multi-bit ECC error happens */ val = val & HRDE_AXI_SHUTDOWN_DIS; }
@@ -674,13 +678,64 @@ static void hisi_rde_set_hw_error(struct hisi_rde *hisi_rde, bool state) { if (state) hisi_qm_hw_error_init(&hisi_rde->qm, QM_BASE_CE, - QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT, 0, 0); + QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT, + 0, 0); else hisi_qm_hw_error_uninit(&hisi_rde->qm);
hisi_rde_hw_error_set_state(hisi_rde, state); }
+static void hisi_rde_open_master_ooo(struct hisi_qm *qm) +{ + u32 val; + + val = readl(qm->io_base + HRDE_CFG); + writel(val & HRDE_AXI_SHUTDOWN_DIS, qm->io_base + HRDE_CFG); + writel(val | HRDE_AXI_SHUTDOWN_EN, qm->io_base + HRDE_CFG); +} + +static u32 hisi_rde_get_hw_err_status(struct hisi_qm *qm) +{ + return readl(qm->io_base + HRDE_INT_STATUS); +} + +static void hisi_rde_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) +{ + writel(err_sts, qm->io_base + HRDE_INT_SOURCE); +} + +static void hisi_rde_hw_error_log(struct hisi_qm *qm, u32 err_sts) +{ + const struct hisi_rde_hw_error *err = rde_hw_error; + struct device *dev = &qm->pdev->dev; + u32 err_val; + + while (err->msg) { + if (err->int_msk & err_sts) + dev_err_ratelimited(dev, + "[%s] [Error status=0x%x] found.\n", + err->msg, err->int_msk); + err++; + } + + if (HRDE_ECC_2BIT_ERR & err_sts) { + err_val = (readl(qm->io_base + HRDE_ERR_CNT) & + HRDE_ECC_2BIT_CNT_MSK); + dev_err_ratelimited(dev, + "Rde ecc 2bit sram num=0x%x.\n", err_val); + } + + if (HRDE_STATE_INT_ERR & err_sts) { + err_val = readl(qm->io_base + HRDE_AM_CURR_PORT_STS); + dev_err_ratelimited(dev, + "Rde ooo cur port sts=0x%x.\n", err_val); + err_val = readl(qm->io_base + HRDE_MASTER_TRANS_RET); + dev_err_ratelimited(dev, + "Rde ooo outstanding sts=0x%x.\n", err_val); + } +} + static int hisi_rde_pf_probe_init(struct hisi_rde *hisi_rde) { struct hisi_qm *qm = &hisi_rde->qm; @@ -706,8 +761,15 @@ static int hisi_rde_pf_probe_init(struct hisi_rde *hisi_rde) return -EINVAL; }
+ qm->err_ini.qm_wr_port = HRDE_WR_MSI_PORT; + qm->err_ini.ecc_2bits_mask = HRDE_ECC_2BIT_ERR; + qm->err_ini.open_axi_master_ooo = hisi_rde_open_master_ooo; + qm->err_ini.get_dev_hw_err_status = hisi_rde_get_hw_err_status; + qm->err_ini.clear_dev_hw_err_status = hisi_rde_clear_hw_err_status; + qm->err_ini.log_dev_hw_err = hisi_rde_hw_error_log; hisi_rde_set_user_domain_and_cache(hisi_rde); hisi_rde_set_hw_error(hisi_rde, true); + qm->err_ini.open_axi_master_ooo(qm);
return 0; } @@ -841,81 +903,11 @@ static void hisi_rde_remove(struct pci_dev *pdev) hisi_qm_uninit(qm); }
-static void hisi_rde_hw_error_log(struct hisi_rde *hisi_rde, u32 err_sts) -{ - const struct hisi_rde_hw_error *err = rde_hw_error; - struct device *dev = &hisi_rde->qm.pdev->dev; - u32 i, err_val; - - while (err->msg) { - if (err->int_msk & err_sts) - dev_err_ratelimited(dev, - "[%s] [Error status=0x%x] found.\n", - err->msg, err->int_msk); - err++; - } - - if (HRDE_ECC_2BIT_ERR & err_sts) { - err_val = (readl(hisi_rde->qm.io_base + HRDE_ERR_CNT) - & HRDE_ECC_2BIT_CNT_MSK); - dev_err_ratelimited(dev, - "Rde ecc 2bit sram num=0x%x.\n", err_val); - } - - if (HRDE_STATE_INT_ERR & err_sts) { - for (i = 0; i < HRDE_DFX_NUM; i++) { - dev_err_ratelimited(dev, "%s=0x%x\n", - hrde_dfx_regs[i].name, - readl(hisi_rde->qm.io_base + - hrde_dfx_regs[i].offset)); - } - for (i = 0; i < HRDE_OOO_DFX_NUM; i++) { - dev_err_ratelimited(dev, "%s=0x%x\n", - hrde_ooo_dfx_regs[i].name, - readl(hisi_rde->qm.io_base + - hrde_ooo_dfx_regs[i].offset)); - } - } -} - -static pci_ers_result_t hisi_rde_hw_error_handle(struct hisi_rde *hisi_rde) -{ - u32 err_sts; - - /* read err sts */ - err_sts = readl(hisi_rde->qm.io_base + HRDE_INT_STATUS); - if (err_sts) { - hisi_rde_hw_error_log(hisi_rde, err_sts); - - /* clear error interrupts */ - writel(err_sts, hisi_rde->qm.io_base + HRDE_INT_SOURCE); - return PCI_ERS_RESULT_NEED_RESET; - } - - return PCI_ERS_RESULT_RECOVERED; -} - -static pci_ers_result_t hisi_rde_hw_error_process(struct pci_dev *pdev) +static void hisi_rde_shutdown(struct pci_dev *pdev) { struct hisi_rde *hisi_rde = pci_get_drvdata(pdev); - struct device *dev = &pdev->dev; - pci_ers_result_t qm_ret, rde_ret, ret;
- if (!hisi_rde) { - dev_err(dev, "Can't recover rde-error at dev init.\n"); - return PCI_ERS_RESULT_NONE; - } - - /* log qm error */ - qm_ret = hisi_qm_hw_error_handle(&hisi_rde->qm); - - /* log rde error */ - rde_ret = hisi_rde_hw_error_handle(hisi_rde); - ret = (qm_ret == PCI_ERS_RESULT_NEED_RESET || - rde_ret == PCI_ERS_RESULT_NEED_RESET) ? - PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED; - - return ret; + hisi_qm_stop(&hisi_rde->qm, QM_NORMAL); }
static int hisi_rde_reset_prepare_rdy(struct hisi_rde *hisi_rde) @@ -982,13 +974,16 @@ static int hisi_rde_soft_reset(struct hisi_rde *hisi_rde) return ret; }
+ /* Set qm ecc if dev ecc happened to hold on ooo */ + hisi_qm_set_ecc(qm); + /* OOO register set and check */ writel(MASTER_GLOBAL_CTRL_SHUTDOWN, hisi_rde->qm.io_base + HRDE_MASTER_GLOBAL_CTRL);
/* If bus lock, reset chip */ ret = readl_relaxed_poll_timeout(hisi_rde->qm.io_base + - HRDE_MASTER_TRANS_RETURN, val, + HRDE_MASTER_TRANS_RET, val, (val == MASTER_TRANS_RETURN_RW), HRDE_RD_INTVRL_US, HRDE_RD_TMOUT_US); if (ret) { @@ -1008,7 +1003,7 @@ static int hisi_rde_soft_reset(struct hisi_rde *hisi_rde) acpi_status s;
s = acpi_evaluate_integer(ACPI_HANDLE(dev), "RRST", - NULL, &value); + NULL, &value); if (ACPI_FAILURE(s)) { dev_err(dev, "No controller reset method.\n"); return -EIO; @@ -1047,7 +1042,7 @@ static int hisi_rde_controller_reset_done(struct hisi_rde *hisi_rde) }
hisi_rde_set_user_domain_and_cache(hisi_rde); - hisi_rde_set_hw_error(hisi_rde, true); + hisi_qm_restart_prepare(qm);
ret = hisi_qm_restart(qm); if (ret) { @@ -1055,6 +1050,9 @@ static int hisi_rde_controller_reset_done(struct hisi_rde *hisi_rde) return -EPERM; }
+ hisi_qm_restart_done(qm); + hisi_rde_set_hw_error(hisi_rde, true); + return 0; }
@@ -1096,7 +1094,7 @@ static void hisi_rde_ras_proc(struct work_struct *work) if (!pdev) return;
- ret = hisi_rde_hw_error_process(pdev); + ret = hisi_qm_process_dev_error(pdev); if (ret == PCI_ERS_RESULT_NEED_RESET) if (hisi_rde_controller_reset(hisi_rde)) dev_err(&pdev->dev, "Hisi_rde reset fail.\n"); @@ -1129,7 +1127,7 @@ static int hisi_rde_get_hw_error_status(struct hisi_rde *hisi_rde) u32 err_sts;
err_sts = readl(hisi_rde->qm.io_base + HRDE_INT_STATUS) & - HRDE_ECC_2BIT_ERR; + HRDE_ECC_2BIT_ERR; if (err_sts) return err_sts;
@@ -1221,6 +1219,7 @@ static struct pci_driver hisi_rde_pci_driver = { .probe = hisi_rde_probe, .remove = hisi_rde_remove, .err_handler = &hisi_rde_err_handler, + .shutdown = hisi_rde_shutdown, };
static void hisi_rde_register_debugfs(void) diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index db2983c51f1e..cdc4f9a171d9 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -215,18 +215,17 @@ static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl, dma_addr_t psec_sgl, struct sec_dev_info *info) { struct sec_hw_sgl *sgl_current, *sgl_next; - dma_addr_t sgl_next_dma;
+ if (!hw_sgl) + return; sgl_current = hw_sgl; - while (sgl_current) { + while (sgl_current->next) { sgl_next = sgl_current->next; - sgl_next_dma = sgl_current->next_sgl; - - dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl); - + dma_pool_free(info->hw_sgl_pool, sgl_current, + sgl_current->next_sgl); sgl_current = sgl_next; - psec_sgl = sgl_next_dma; } + dma_pool_free(info->hw_sgl_pool, hw_sgl, psec_sgl); }
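Editor's note: the rewritten sec_free_hw_sgl() above adds an early return for a NULL hw_sgl and always captures sgl_current->next before the current element is handed back to the DMA pool, so the walk never dereferences freed memory. A user-space sketch of that walk-then-free invariant on an ordinary linked list follows; the struct and helper names are illustrative, not the driver's types.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for a chained descriptor: each node knows its successor. */
struct node {
	struct node *next;
	int payload;
};

static struct node *build_chain(int len)
{
	struct node *head = NULL, **tail = &head;

	for (int i = 0; i < len; i++) {
		struct node *n = calloc(1, sizeof(*n));

		if (!n)
			break;          /* a partial chain is still safely freeable */
		n->payload = i;
		*tail = n;
		tail = &n->next;
	}
	return head;
}

/* Free the chain: read 'next' BEFORE freeing the current node, and
 * tolerate a NULL head, mirroring the early return added in the patch. */
static void free_chain(struct node *head)
{
	struct node *cur = head, *next;

	if (!cur)
		return;
	while (cur) {
		next = cur->next;   /* capture the successor first */
		free(cur);          /* only then release the current node */
		cur = next;
	}
}

int main(void)
{
	struct node *chain = build_chain(4);

	free_chain(chain);
	free_chain(NULL);           /* NULL input is a no-op, not a crash */
	printf("chain released\n");
	return 0;
}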
static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm, diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 208b32b70e64..0e164524d169 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -1,13 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0+ */ -/* - * Copyright (c) 2018-2019 HiSilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - */ +/* Copyright (c) 2018-2019 HiSilicon Limited. */
#ifndef HISI_SEC_H #define HISI_SEC_H diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 4164c05f2d18..3a362cebb4b9 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -1,5 +1,5 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (c) 2019 HiSilicon Limited. */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <linux/crypto.h> #include <linux/hrtimer.h> @@ -19,7 +19,7 @@ #define SEC_ASYNC
#define SEC_INVLD_REQ_ID (-1) -#define SEC_PRIORITY (4001) +#define SEC_PRIORITY 4001 #define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE) #define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE) #define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE) @@ -196,7 +196,7 @@ struct hisi_sec_ctx { bool is_fusion; };
-#define DES_WEAK_KEY_NUM (4) +#define DES_WEAK_KEY_NUM 4 u64 des_weak_key[DES_WEAK_KEY_NUM] = {0x0101010101010101, 0xFEFEFEFEFEFEFEFE, 0xE0E0E0E0F1F1F1F1, 0x1F1F1F1F0E0E0E0E};
@@ -325,7 +325,7 @@ static enum hrtimer_restart hrtimer_handler(struct hrtimer *timer) }
static int hisi_sec_create_qp_ctx(struct hisi_qm *qm, struct hisi_sec_ctx *ctx, - int qp_ctx_id, int alg_type, int req_type) + int qp_ctx_id, int alg_type, int req_type) { struct hisi_qp *qp; struct hisi_sec_qp_ctx *qp_ctx; @@ -352,7 +352,7 @@ static int hisi_sec_create_qp_ctx(struct hisi_qm *qm, struct hisi_sec_ctx *ctx, atomic_set(&qp_ctx->req_cnt, 0);
qp_ctx->req_bitmap = kcalloc(BITS_TO_LONGS(QM_Q_DEPTH), sizeof(long), - GFP_ATOMIC); + GFP_ATOMIC); if (!qp_ctx->req_bitmap) { ret = -ENOMEM; goto err_qm_release_qp; @@ -487,8 +487,10 @@ static int hisi_sec_cipher_ctx_init(struct crypto_skcipher *tfm) ctx->enc_q_num = ctx->q_num / 2; ctx->qp_ctx = kcalloc(ctx->q_num, sizeof(struct hisi_sec_qp_ctx), GFP_KERNEL); - if (!ctx->qp_ctx) + if (!ctx->qp_ctx) { + dev_err(ctx->dev, "failed to alloc qp_ctx\n"); return -ENOMEM; + }

hisi_sec_get_fusion_param(ctx, sec);
@@ -617,10 +619,11 @@ static int hisi_sec_cipher_ctx_init_multi_iv(struct crypto_skcipher *tfm) static void hisi_sec_req_cb(struct hisi_qp *qp, void *resp) { struct hisi_sec_sqe *sec_sqe = (struct hisi_sec_sqe *)resp; - u32 req_id; struct hisi_sec_qp_ctx *qp_ctx = qp->qp_ctx; + struct device *dev = &qp->qm->pdev->dev; struct hisi_sec_req *req; struct hisi_sec_dfx *dfx; + u32 req_id;
if (sec_sqe->type == 1) { req_id = sec_sqe->type1.tag; @@ -629,8 +632,10 @@ static void hisi_sec_req_cb(struct hisi_qp *qp, void *resp) req->err_type = sec_sqe->type1.error_type; if (req->err_type || sec_sqe->type1.done != 0x1 || sec_sqe->type1.flag != 0x2) { - pr_err("err_type[%d] done[%d] flag[%d]\n", - req->err_type, sec_sqe->type1.done, + dev_err_ratelimited(dev, + "err_type[%d] done[%d] flag[%d]\n", + req->err_type, + sec_sqe->type1.done, sec_sqe->type1.flag); } } else if (sec_sqe->type == 2) { @@ -640,12 +645,14 @@ static void hisi_sec_req_cb(struct hisi_qp *qp, void *resp) req->err_type = sec_sqe->type2.error_type; if (req->err_type || sec_sqe->type2.done != 0x1 || sec_sqe->type2.flag != 0x2) { - pr_err("err_type[%d] done[%d] flag[%d]\n", - req->err_type, sec_sqe->type2.done, + dev_err_ratelimited(dev, + "err_type[%d] done[%d] flag[%d]\n", + req->err_type, + sec_sqe->type2.done, sec_sqe->type2.flag); } } else { - pr_err("err bd type [%d]\n", sec_sqe->type); + dev_err_ratelimited(dev, "err bd type [%d]\n", sec_sqe->type); return; }
@@ -1153,7 +1160,8 @@ static int hisi_sec_bd_send_asyn(struct hisi_sec_ctx *ctx,
mutex_lock(&qp_ctx->req_lock); ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); - __sync_add_and_fetch(&ctx->sec->sec_dfx.send_cnt, 1); + if (ret == 0) + ctx->sec->sec_dfx.send_cnt++; mutex_unlock(&qp_ctx->req_lock);
return hisi_sec_get_async_ret(ret, req_cnt, ctx->req_fake_limit); @@ -1363,7 +1371,7 @@ static int sec_io_proc(struct hisi_sec_ctx *ctx, struct hisi_sec_req *in_req) req = sec_request_alloc(ctx, in_req, &fusion_send, &fake_busy);
if (!req) { - dev_err(ctx->dev, "sec_request_alloc failed\n"); + dev_err_ratelimited(ctx->dev, "sec_request_alloc failed\n"); return -ENOMEM; }
@@ -1372,14 +1380,14 @@ static int sec_io_proc(struct hisi_sec_ctx *ctx, struct hisi_sec_req *in_req)
ret = sec_request_transfer(ctx, req); if (ret) { - dev_err(ctx->dev, "sec_transfer failed! ret[%d]\n", ret); + dev_err_ratelimited(ctx->dev, "sec_transfer ret[%d]\n", ret); goto err_free_req; }
ret = sec_request_send(ctx, req); __sync_add_and_fetch(&ctx->sec->sec_dfx.send_by_full, 1); if (ret != -EBUSY && ret != -EINPROGRESS) { - dev_err(ctx->dev, "sec_send failed ret[%d]\n", ret); + dev_err_ratelimited(ctx->dev, "sec_send ret[%d]\n", ret); goto err_unmap_req; }
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h index d05856eaea23..bffbeba1aca9 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.h +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h @@ -1,13 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0+ */ -/* - * Copyright (c) 2018-2019 HiSilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - */ +/* Copyright (c) 2018-2019 HiSilicon Limited. */
#ifndef HISI_SEC_CRYPTO_H #define HISI_SEC_CRYPTO_H diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 22595c1dbfbc..50bf5951fd87 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -6,7 +6,6 @@ * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. - * */
#include <linux/acpi.h> @@ -42,23 +41,20 @@ #define SEC_CORE_INT_STATUS 0x301008 #define SEC_CORE_INT_STATUS_M_ECC BIT(2) #define SEC_CORE_ECC_INFO 0x301C14 -#define SEC_ECC_NUM_SHIFT 16 -#define SEC_ECC_ADDR_SHIFT 0 -#define SEC_ECC_NUM(err_val) ((err_val >> SEC_ECC_NUM_SHIFT) & 0xFF) -#define SEC_ECC_ADDR(err_val) (err_val >> SEC_ECC_ADDR_SHIFT) +#define SEC_ECC_NUM(err_val) (((err_val) >> 16) & 0xFFFF) +#define SEC_ECC_ADDR(err_val) ((err_val) & 0xFFFF)
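Editor's note: the reworked SEC_ECC_NUM()/SEC_ECC_ADDR() definitions above also add the parentheses around the macro argument that the old macros were missing. A tiny stand-alone illustration of why that matters when the argument is an expression; the macro names and values here are made up for the demo.

#include <stdio.h>

/* Unparenthesised macro argument: operator precedence can bite. */
#define ECC_NUM_BAD(err_val)  (err_val >> 16 & 0xFFFF)
/* Fully parenthesised, as in the patch's SEC_ECC_NUM()/SEC_ECC_ADDR(). */
#define ECC_NUM_GOOD(err_val) (((err_val) >> 16) & 0xFFFF)

int main(void)
{
	unsigned int hi = 0x00120000, lo = 0x00000034;

	/* With an OR expression as the argument, the bad macro expands to
	 * (hi | lo >> 16 & 0xFFFF): only 'lo' is shifted and masked, and
	 * 'hi' is ORed back in whole, giving 0x120000 instead of 0x12. */
	printf("bad:  0x%x\n", ECC_NUM_BAD(hi | lo));
	printf("good: 0x%x\n", ECC_NUM_GOOD(hi | lo));
	return 0;
}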
#define SEC_CORE_INT_DISABLE 0x0 #define SEC_CORE_INT_ENABLE 0x1ff #define SEC_HW_ERROR_IRQ_ENABLE 1 #define SEC_HW_ERROR_IRQ_DISABLE 0
-#define SEC_SM4_CTR_ENABLE_REG 0x301380 -#define SEC_SM4_CTR_ENABLE_MSK 0xEFFFFFFF -#define SEC_SM4_CTR_DISABLE_MSK 0xFFFFFFFF - -#define SEC_XTS_MIV_ENABLE_REG 0x301384 -#define SEC_XTS_MIV_ENABLE_MSK 0x7FFFFFFF -#define SEC_XTS_MIV_DISABLE_MSK 0xFFFFFFFF +#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF +#define SEC_BD_ERR_CHK_EN1 0x7FFFF7FD +#define SEC_BD_ERR_CHK_EN3 0xFFFFBFFF +#define SEC_BD_ERR_CHK_EN_REG0 0x0380 +#define SEC_BD_ERR_CHK_EN_REG1 0x0384 +#define SEC_BD_ERR_CHK_EN_REG3 0x038c
#define SEC_SQE_SIZE 128 #define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH) @@ -86,10 +82,10 @@ #define SEC_TRNG_EN_SHIFT 8 #define SEC_AXI_SHUTDOWN_ENABLE BIT(12) #define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF +#define SEC_WR_MSI_PORT 0xFFFE
#define SEC_INTERFACE_USER_CTRL0_REG 0x0220 #define SEC_INTERFACE_USER_CTRL1_REG 0x0224 -#define SEC_BD_ERR_CHK_EN_REG(n) (0x0380 + (n) * 0x04)
#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15)) #define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7)) @@ -107,8 +103,10 @@ #define SEC_PCI_COMMAND_INVALID 0xFFFFFFFF
#define FORMAT_DECIMAL 10 +#define FROZEN_RANGE_MIN 10 +#define FROZEN_RANGE_MAX 20
-static const char hisi_sec_name[] = "hisi_sec"; +static const char sec_name[] = "hisi_sec2"; static struct dentry *sec_debugfs_root; static u32 pf_q_num = SEC_PF_DEF_Q_NUM; static struct workqueue_struct *sec_wq; @@ -404,10 +402,6 @@ static int uacce_mode = UACCE_MODE_NOUACCE; module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444); MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 1, 2");
-static int enable_sm4_ctr; -module_param(enable_sm4_ctr, int, 0444); -MODULE_PARM_DESC(enable_sm4_ctr, "Enable ctr(sm4) algorithm 0(default), 1"); - static int ctx_q_num = CTX_Q_NUM_DEF; module_param_cb(ctx_q_num, &ctx_q_num_ops, &ctx_q_num, 0444); MODULE_PARM_DESC(ctx_q_num, "Number of queue in ctx (2, 4, 6, ..., 1024)"); @@ -502,8 +496,11 @@ static int sec_engine_init(struct hisi_sec *hisi_sec) reg |= SEC_USER1_SMMU_NORMAL; writel(reg, base + SEC_INTERFACE_USER_CTRL1_REG);
- writel(0xfffff7fd, base + SEC_BD_ERR_CHK_EN_REG(1)); - writel(0xffffbfff, base + SEC_BD_ERR_CHK_EN_REG(3)); + /* Enable sm4 extra mode, as ctr/ecb */ + writel(SEC_BD_ERR_CHK_EN0, base + SEC_BD_ERR_CHK_EN_REG0); + /* Enable sm4 xts mode multiple iv */ + writel(SEC_BD_ERR_CHK_EN1, base + SEC_BD_ERR_CHK_EN_REG1); + writel(SEC_BD_ERR_CHK_EN3, base + SEC_BD_ERR_CHK_EN_REG3);
/* enable clock gate control */ reg = readl_relaxed(base + SEC_CONTROL_REG); @@ -515,25 +512,9 @@ static int sec_engine_init(struct hisi_sec *hisi_sec) reg |= sec_get_endian(hisi_sec); writel(reg, base + SEC_CONTROL_REG);
- if (enable_sm4_ctr) - writel(SEC_SM4_CTR_ENABLE_MSK, - qm->io_base + SEC_SM4_CTR_ENABLE_REG); - - writel(SEC_XTS_MIV_ENABLE_MSK, - qm->io_base + SEC_XTS_MIV_ENABLE_REG); - return 0; }
-static void hisi_sec_disable_sm4_ctr(struct hisi_sec *hisi_sec) -{ - struct hisi_qm *qm = &hisi_sec->qm; - - if (enable_sm4_ctr) - writel(SEC_SM4_CTR_DISABLE_MSK, - qm->io_base + SEC_SM4_CTR_ENABLE_REG); -} - static void hisi_sec_set_user_domain_and_cache(struct hisi_sec *hisi_sec) { struct hisi_qm *qm = &hisi_sec->qm; @@ -548,7 +529,11 @@ static void hisi_sec_set_user_domain_and_cache(struct hisi_sec *hisi_sec) /* qm cache */ writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG); writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE); - writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE); + + /* disable FLR triggered by BME(bus master enable) */ + writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG); + writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + + QM_PEH_AXUSER_CFG_ENABLE);
/* enable sqc,cqc writeback */ writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE | @@ -590,8 +575,8 @@ static void hisi_sec_hw_error_set_state(struct hisi_sec *hisi_sec, bool state) val = readl(base + SEC_CONTROL_REG); if (state) { /* clear SEC hw error source if having */ - writel(SEC_CORE_INT_DISABLE, qm->io_base + - SEC_CORE_INT_SOURCE); + writel(SEC_CORE_INT_ENABLE, + hisi_sec->qm.io_base + SEC_CORE_INT_SOURCE);
/* enable SEC hw error interrupts */ writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK); @@ -782,8 +767,11 @@ static int hisi_sec_core_debug_init(struct hisi_sec_ctrl *ctrl) struct debugfs_regset32 *regset; struct dentry *tmp_d, *tmp; char buf[SEC_DBGFS_VAL_MAX_LEN]; + int ret;
- snprintf(buf, SEC_DBGFS_VAL_MAX_LEN, "hisi_sec_dfx"); + ret = snprintf(buf, SEC_DBGFS_VAL_MAX_LEN, "sec_dfx"); + if (ret < 0) + return -ENOENT;
tmp_d = debugfs_create_dir(buf, ctrl->debug_root); if (!tmp_d) @@ -921,6 +909,49 @@ static void hisi_sec_hw_error_init(struct hisi_sec *hisi_sec) hisi_sec_hw_error_set_state(hisi_sec, true); }
+static void hisi_sec_open_master_ooo(struct hisi_qm *qm) +{ + u32 val; + void *base = qm->io_base + SEC_ENGINE_PF_CFG_OFF + + SEC_ACC_COMMON_REG_OFF; + + val = readl(qm->io_base + SEC_CONTROL_REG); + writel(val & SEC_AXI_SHUTDOWN_DISABLE, base + SEC_CONTROL_REG); + writel(val | SEC_AXI_SHUTDOWN_ENABLE, base + SEC_CONTROL_REG); +} + +static u32 hisi_sec_get_hw_err_status(struct hisi_qm *qm) +{ + return readl(qm->io_base + SEC_CORE_INT_STATUS); +} + +static void hisi_sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) +{ + writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE); +} + +static void hisi_sec_log_hw_error(struct hisi_qm *qm, u32 err_sts) +{ + const struct hisi_sec_hw_error *err = sec_hw_error; + struct device *dev = &qm->pdev->dev; + u32 err_val; + + while (err->msg) { + if (err->int_msk & err_sts) + dev_err(dev, "%s [error status=0x%x] found\n", + err->msg, err->int_msk); + err++; + } + + if (SEC_CORE_INT_STATUS_M_ECC & err_sts) { + err_val = readl(qm->io_base + SEC_CORE_ECC_INFO); + dev_err(dev, "hisi-sec multi ecc sram num=0x%x\n", + SEC_ECC_NUM(err_val)); + dev_err(dev, "hisi-sec multi ecc sram addr=0x%x\n", + SEC_ECC_ADDR(err_val)); + } +} + static int hisi_sec_pf_probe_init(struct hisi_sec *hisi_sec) { struct hisi_qm *qm = &hisi_sec->qm; @@ -946,8 +977,15 @@ static int hisi_sec_pf_probe_init(struct hisi_sec *hisi_sec) return -EINVAL; }
+ qm->err_ini.qm_wr_port = SEC_WR_MSI_PORT; + qm->err_ini.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC; + qm->err_ini.open_axi_master_ooo = hisi_sec_open_master_ooo; + qm->err_ini.get_dev_hw_err_status = hisi_sec_get_hw_err_status; + qm->err_ini.clear_dev_hw_err_status = hisi_sec_clear_hw_err_status; + qm->err_ini.log_dev_hw_err = hisi_sec_log_hw_error; hisi_sec_set_user_domain_and_cache(hisi_sec); hisi_sec_hw_error_init(hisi_sec); + qm->err_ini.open_axi_master_ooo(qm); hisi_sec_debug_regs_clear(hisi_sec);
return 0; @@ -965,7 +1003,7 @@ static int hisi_sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->ver = rev_id;
qm->sqe_size = SEC_SQE_SIZE; - qm->dev_name = hisi_sec_name; + qm->dev_name = sec_name; qm->fun_type = (pdev->device == SEC_PCI_DEVICE_ID_PF) ? QM_HW_PF : QM_HW_VF; qm->algs = "sec\ncipher\ndigest\n"; @@ -1159,6 +1197,31 @@ static int hisi_sec_sriov_enable(struct pci_dev *pdev, int max_vfs) #endif }
+static int hisi_sec_try_frozen_vfs(struct pci_dev *pdev) +{ + struct hisi_sec *sec, *vf_sec; + struct pci_dev *dev; + int ret = 0; + + /* Try to frozen all the VFs as disable SRIOV */ + mutex_lock(&hisi_sec_list_lock); + list_for_each_entry(sec, &hisi_sec_list, list) { + dev = sec->qm.pdev; + if (dev == pdev) + continue; + if (pci_physfn(dev) == pdev) { + vf_sec = pci_get_drvdata(dev); + ret = hisi_qm_frozen(&vf_sec->qm); + if (ret) + goto frozen_fail; + } + } + +frozen_fail: + mutex_unlock(&hisi_sec_list_lock); + return ret; +} + static int hisi_sec_sriov_disable(struct pci_dev *pdev) { struct hisi_sec *hisi_sec = pci_get_drvdata(pdev); @@ -1169,9 +1232,13 @@ static int hisi_sec_sriov_disable(struct pci_dev *pdev) return -EPERM; }
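Editor's note: hisi_sec_try_frozen_vfs() added above refuses to tear down SR-IOV while any VF belonging to this PF still has work outstanding. A small C sketch of that "scan siblings, skip self, bail out on the first busy child" pattern follows; the device list, fields and busy check are illustrative stand-ins, not the driver's data structures.

#include <stdbool.h>
#include <stdio.h>

#define MAX_DEVS 8

/* Toy device record: 'parent_id' points at the PF a VF hangs off. */
struct toy_dev {
	int id;
	int parent_id;      /* -1 for a PF */
	int outstanding;    /* queued requests */
};

/* Return true if every VF of the given PF is idle (safe to disable SR-IOV). */
static bool vfs_idle(const struct toy_dev *devs, int ndev, int pf_id)
{
	for (int i = 0; i < ndev; i++) {
		if (devs[i].id == pf_id)
			continue;                 /* skip the PF itself */
		if (devs[i].parent_id != pf_id)
			continue;                 /* not one of our VFs */
		if (devs[i].outstanding)
			return false;             /* busy VF: refuse to tear down */
	}
	return true;
}

int main(void)
{
	struct toy_dev devs[MAX_DEVS] = {
		{ .id = 0, .parent_id = -1, .outstanding = 0 }, /* PF */
		{ .id = 1, .parent_id = 0,  .outstanding = 0 }, /* idle VF */
		{ .id = 2, .parent_id = 0,  .outstanding = 3 }, /* busy VF */
	};

	printf("disable SR-IOV now? %s\n",
	       vfs_idle(devs, 3, 0) ? "yes" : "no (VF busy, try again later)");
	return 0;
}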
+ if (hisi_sec_try_frozen_vfs(pdev)) { + dev_err(&pdev->dev, "try frozen VFs failed!\n"); + return -EBUSY; + } + /* remove in hisi_sec_pci_driver will be called to free VF resources */ pci_disable_sriov(pdev); - return hisi_sec_clear_vft_config(hisi_sec); }
@@ -1185,8 +1252,12 @@ static int hisi_sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
static void hisi_sec_remove_wait_delay(struct hisi_sec *hisi_sec) { - while (hisi_qm_frozen(&hisi_sec->qm)) - ; + struct hisi_qm *qm = &hisi_sec->qm; + + while (hisi_qm_frozen(qm) || ((qm->fun_type == QM_HW_PF) && + hisi_sec_try_frozen_vfs(qm->pdev))) + usleep_range(FROZEN_RANGE_MIN, FROZEN_RANGE_MAX); + udelay(SEC_WAIT_DELAY); }
@@ -1204,80 +1275,18 @@ static void hisi_sec_remove(struct pci_dev *pdev) hisi_sec_debugfs_exit(hisi_sec); (void)hisi_qm_stop(qm, QM_NORMAL);
- if (qm->fun_type == QM_HW_PF) { + if (qm->fun_type == QM_HW_PF) hisi_sec_hw_error_set_state(hisi_sec, false); - hisi_sec_disable_sm4_ctr(hisi_sec); - }
hisi_qm_uninit(qm); hisi_sec_remove_from_list(hisi_sec); }
-static void hisi_sec_log_hw_error(struct hisi_sec *hisi_sec, u32 err_sts) -{ - const struct hisi_sec_hw_error *err = sec_hw_error; - struct device *dev = &hisi_sec->qm.pdev->dev; - u32 err_val; - - while (err->msg) { - if (err->int_msk & err_sts) { - dev_err(dev, "%s [error status=0x%x] found\n", - err->msg, err->int_msk); - - if (SEC_CORE_INT_STATUS_M_ECC & err_sts) { - err_val = readl(hisi_sec->qm.io_base + - SEC_CORE_ECC_INFO); - dev_err(dev, - "hisi-sec multi ecc sram num=0x%x\n", - SEC_ECC_NUM(err_val)); - dev_err(dev, - "hisi-sec multi ecc sram addr=0x%x\n", - SEC_ECC_ADDR(err_val)); - } - } - err++; - } -} - -static pci_ers_result_t hisi_sec_hw_error_handle(struct hisi_sec *hisi_sec) -{ - u32 err_sts; - - /* read err sts */ - err_sts = readl(hisi_sec->qm.io_base + SEC_CORE_INT_STATUS); - - if (err_sts) { - hisi_sec_log_hw_error(hisi_sec, err_sts); - /* clear error interrupts */ - writel(err_sts, hisi_sec->qm.io_base + SEC_CORE_INT_SOURCE); - - return PCI_ERS_RESULT_NEED_RESET; - } - - return PCI_ERS_RESULT_RECOVERED; -} - -static pci_ers_result_t hisi_sec_process_hw_error(struct pci_dev *pdev) +static void hisi_sec_shutdown(struct pci_dev *pdev) { struct hisi_sec *hisi_sec = pci_get_drvdata(pdev); - struct device *dev = &pdev->dev; - pci_ers_result_t qm_ret, sec_ret; - - if (!hisi_sec) { - dev_err(dev, - "Can't recover error occurred during device init\n"); - return PCI_ERS_RESULT_NONE; - }
- /* log qm error */ - qm_ret = hisi_qm_hw_error_handle(&hisi_sec->qm); - - /* log sec error */ - sec_ret = hisi_sec_hw_error_handle(hisi_sec); - - return (qm_ret == PCI_ERS_RESULT_NEED_RESET || - sec_ret == PCI_ERS_RESULT_NEED_RESET) ? - PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED; + hisi_qm_stop(&hisi_sec->qm, QM_NORMAL); }
static pci_ers_result_t hisi_sec_error_detected(struct pci_dev *pdev, @@ -1290,7 +1299,7 @@ static pci_ers_result_t hisi_sec_error_detected(struct pci_dev *pdev, if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT;
- return hisi_sec_process_hw_error(pdev); + return hisi_qm_process_dev_error(pdev); }
static int hisi_sec_reset_prepare_ready(struct hisi_sec *hisi_sec) @@ -1399,6 +1408,9 @@ static int hisi_sec_soft_reset(struct hisi_sec *hisi_sec) return ret; }
+ /* Set qm ecc if dev ecc happened to hold on ooo */ + hisi_qm_set_ecc(qm); + /* OOO register set and check */ writel(SEC_MASTER_GLOBAL_CTRL_SHUTDOWN, hisi_sec->qm.io_base + SEC_MASTER_GLOBAL_CTRL); @@ -1492,7 +1504,7 @@ static int hisi_sec_controller_reset_done(struct hisi_sec *hisi_sec) }
hisi_sec_set_user_domain_and_cache(hisi_sec); - hisi_sec_hw_error_init(hisi_sec); + hisi_qm_restart_prepare(qm);
ret = hisi_qm_restart(qm); if (ret) { @@ -1514,6 +1526,9 @@ static int hisi_sec_controller_reset_done(struct hisi_sec *hisi_sec) return -EPERM; }
+ hisi_qm_restart_done(qm); + hisi_sec_hw_error_init(hisi_sec); + return 0; }
@@ -1713,12 +1728,13 @@ static const struct pci_error_handlers hisi_sec_err_handler = { };
static struct pci_driver hisi_sec_pci_driver = { - .name = "hisi_sec", + .name = "hisi_sec2", .id_table = hisi_sec_dev_ids, .probe = hisi_sec_probe, .remove = hisi_sec_remove, .sriov_configure = hisi_sec_sriov_configure, .err_handler = &hisi_sec_err_handler, + .shutdown = hisi_sec_shutdown, };
static void hisi_sec_register_debugfs(void) @@ -1726,7 +1742,7 @@ static void hisi_sec_register_debugfs(void) if (!debugfs_initialized()) return;
- sec_debugfs_root = debugfs_create_dir("hisi_sec", NULL); + sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL); if (IS_ERR_OR_NULL(sec_debugfs_root)) sec_debugfs_root = NULL; } @@ -1740,7 +1756,7 @@ static int __init hisi_sec_init(void) { int ret;
- sec_wq = alloc_workqueue("hisi_sec", WQ_HIGHPRI | WQ_CPU_INTENSIVE | + sec_wq = alloc_workqueue("hisi_sec2", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
if (!sec_wq) { diff --git a/drivers/crypto/hisilicon/sec2/sec_usr_if.h b/drivers/crypto/hisilicon/sec2/sec_usr_if.h index 8147d3016c8a..7c76e19f271a 100644 --- a/drivers/crypto/hisilicon/sec2/sec_usr_if.h +++ b/drivers/crypto/hisilicon/sec2/sec_usr_if.h @@ -1,13 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0+ */ -/* - * Copyright (c) 2018-2019 HiSilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - */ +/* Copyright (c) 2018-2019 HiSilicon Limited. */
#ifndef HISI_SEC_USR_IF_H #define HISI_SEC_USR_IF_H diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c index 85c2bc36d826..523dab7cb4c1 100644 --- a/drivers/crypto/hisilicon/sgl.c +++ b/drivers/crypto/hisilicon/sgl.c @@ -56,7 +56,11 @@ struct hisi_acc_sgl_pool { struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, u32 count, u32 sge_nr) { - u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl; + u32 sgl_size; + u32 block_size; + u32 sgl_num_per_block; + u32 block_num; + u32 remain_sgl; struct hisi_acc_sgl_pool *pool; struct mem_block *block; u32 i, j; diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 2b8dba73f5f0..eec79ae92185 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -5,6 +5,9 @@ #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #include "zip.h" +#ifndef CONFIG_SG_SPLIT +#include <../lib/sg_split.c> +#endif
/* hisi_zip_sqe dw3 */ #define HZIP_BD_STATUS_M GENMASK(7, 0) @@ -368,7 +371,6 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) int err = 0;
status = sqe->dw3 & HZIP_BD_STATUS_M; - if (status != 0 && status != HZIP_NC_ERR) { dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n", (qp->alg_type == 0) ? "" : "de", qp->qp_id, status, diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 510e5874e122..5b4a5c110107 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -19,6 +19,9 @@ #define HZIP_QUEUE_NUM_V1 4096 #define HZIP_QUEUE_NUM_V2 1024
+#define PCI_DEVICE_ID_ZIP_PF 0xa250 +#define PCI_DEVICE_ID_ZIP_VF 0xa251 + #define HZIP_CLOCK_GATE_CTRL 0x301004 #define COMP0_ENABLE BIT(0) #define COMP1_ENABLE BIT(1) @@ -64,6 +67,7 @@
#define HZIP_CORE_INT_SOURCE 0x3010A0 #define HZIP_CORE_INT_MASK 0x3010A4 +#define HZIP_CORE_INT_SET 0x3010A8 #define HZIP_HW_ERROR_IRQ_ENABLE 1 #define HZIP_HW_ERROR_IRQ_DISABLE 0 #define HZIP_CORE_INT_STATUS 0x3010AC @@ -71,6 +75,7 @@ #define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148 #define HZIP_CORE_INT_RAS_CE_ENB 0x301160 #define HZIP_CORE_INT_RAS_NFE_ENB 0x301164 +#define HZIP_RAS_NFE_MBIT_DISABLE ~HZIP_CORE_INT_STATUS_M_ECC #define HZIP_CORE_INT_RAS_FE_ENB 0x301168 #define HZIP_CORE_INT_RAS_NFE_ENABLE 0x7FE #define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16 @@ -90,6 +95,7 @@ #define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C #define HZIP_AXI_SHUTDOWN_ENABLE BIT(14) #define HZIP_AXI_SHUTDOWN_DISABLE 0xFFFFBFFF +#define HZIP_WR_MSI_PORT 0xF7FF
#define HZIP_ENABLE 1 #define HZIP_DISABLE 0 @@ -101,6 +107,9 @@ #define HZIP_RESET_WAIT_TIMEOUT 400 #define HZIP_PCI_COMMAND_INVALID 0xFFFFFFFF
+#define FROZEN_RANGE_MIN 10 +#define FROZEN_RANGE_MAX 20 + static const char hisi_zip_name[] = "hisi_zip"; static struct dentry *hzip_debugfs_root; static LIST_HEAD(hisi_zip_list); @@ -638,13 +647,16 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl) struct debugfs_regset32 *regset; struct dentry *tmp_d, *tmp; char buf[HZIP_BUF_SIZE]; - int i; + int i, ret;
for (i = 0; i < HZIP_CORE_NUM; i++) { if (i < HZIP_COMP_CORE_NUM) - sprintf(buf, "comp_core%d", i); + ret = snprintf(buf, HZIP_BUF_SIZE, "comp_core%d", i); else - sprintf(buf, "decomp_core%d", i - HZIP_COMP_CORE_NUM); + ret = snprintf(buf, HZIP_BUF_SIZE, + "decomp_core%d", i - HZIP_COMP_CORE_NUM); + if (ret < 0) + return -EINVAL;
tmp_d = debugfs_create_dir(buf, ctrl->debug_root); if (!tmp_d) @@ -736,6 +748,54 @@ static void hisi_zip_hw_error_init(struct hisi_zip *hisi_zip) hisi_zip_hw_error_set_state(hisi_zip, true); }
+static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm) +{ + return readl(qm->io_base + HZIP_CORE_INT_STATUS); +} + +static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) +{ + writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE); +} + +static void hisi_zip_set_ecc(struct hisi_qm *qm) +{ + u32 nfe_enb; + + nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); + writel(nfe_enb & HZIP_RAS_NFE_MBIT_DISABLE, + qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); + writel(HZIP_CORE_INT_STATUS_M_ECC, qm->io_base + HZIP_CORE_INT_SET); + qm->err_ini.is_dev_ecc_mbit = 1; +} + +static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts) +{ + const struct hisi_zip_hw_error *err = zip_hw_error; + struct device *dev = &qm->pdev->dev; + u32 err_val; + + while (err->msg) { + if (err->int_msk & err_sts) { + dev_err(dev, "%s [error status=0x%x] found\n", + err->msg, err->int_msk); + + if (err->int_msk & HZIP_CORE_INT_STATUS_M_ECC) { + err_val = readl(qm->io_base + + HZIP_CORE_SRAM_ECC_ERR_INFO); + dev_err(dev, "hisi-zip multi ecc sram num=0x%x\n", + ((err_val >> + HZIP_SRAM_ECC_ERR_NUM_SHIFT) & + 0xFF)); + dev_err(dev, "hisi-zip multi ecc sram addr=0x%x\n", + (err_val >> + HZIP_SRAM_ECC_ERR_ADDR_SHIFT)); + } + } + err++; + } +} + static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) { struct hisi_qm *qm = &hisi_zip->qm; @@ -761,6 +821,13 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) return -EINVAL; }
+ qm->err_ini.qm_wr_port = HZIP_WR_MSI_PORT; + qm->err_ini.ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC; + qm->err_ini.get_dev_hw_err_status = hisi_zip_get_hw_err_status; + qm->err_ini.clear_dev_hw_err_status = hisi_zip_clear_hw_err_status; + qm->err_ini.log_dev_hw_err = hisi_zip_log_hw_error; + qm->err_ini.inject_dev_hw_err = hisi_zip_set_ecc; + hisi_zip_set_user_domain_and_cache(hisi_zip); hisi_zip_hw_error_init(hisi_zip); hisi_zip_debug_regs_clear(hisi_zip); @@ -796,7 +863,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) qm->dev_name = hisi_zip_name; qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF : QM_HW_VF; - qm->algs = "zlib\ngzip\n"; + qm->algs = "zlib\ngzip\nxts(sm4)\nxts(aes)\n";
switch (uacce_mode) { case UACCE_MODE_NOUACCE: @@ -939,7 +1006,6 @@ static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs) int pre_existing_vfs, num_vfs, ret;
pre_existing_vfs = pci_num_vf(pdev); - if (pre_existing_vfs) { dev_err(&pdev->dev, "Can't enable VF. Please disable pre-enabled VFs!\n"); @@ -969,6 +1035,31 @@ static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs) #endif }
+static int hisi_zip_try_frozen_vfs(struct pci_dev *pdev) +{ + struct hisi_zip *zip, *vf_zip; + struct pci_dev *dev; + int ret = 0; + + /* Try to frozen all the VFs as disable SRIOV */ + mutex_lock(&hisi_zip_list_lock); + list_for_each_entry(zip, &hisi_zip_list, list) { + dev = zip->qm.pdev; + if (dev == pdev) + continue; + if (pci_physfn(dev) == pdev) { + vf_zip = pci_get_drvdata(dev); + ret = hisi_qm_frozen(&vf_zip->qm); + if (ret) + goto frozen_fail; + } + } + +frozen_fail: + mutex_unlock(&hisi_zip_list_lock); + return ret; +} + static int hisi_zip_sriov_disable(struct pci_dev *pdev) { struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); @@ -979,6 +1070,11 @@ static int hisi_zip_sriov_disable(struct pci_dev *pdev) return -EPERM; }
+ if (hisi_zip_try_frozen_vfs(pdev)) { + dev_err(&pdev->dev, "try frozen VFs failed!\n"); + return -EBUSY; + } + /* remove in hisi_zip_pci_driver will be called to free VF resources */ pci_disable_sriov(pdev);
@@ -995,8 +1091,12 @@ static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs)
static void hisi_zip_remove_wait_delay(struct hisi_zip *hisi_zip) { - while (hisi_qm_frozen(&hisi_zip->qm)) - ; + struct hisi_qm *qm = &hisi_zip->qm; + + while (hisi_qm_frozen(qm) || ((qm->fun_type == QM_HW_PF) && + hisi_zip_try_frozen_vfs(qm->pdev))) + usleep_range(FROZEN_RANGE_MIN, FROZEN_RANGE_MAX); + udelay(ZIP_WAIT_DELAY); }
@@ -1009,7 +1109,7 @@ static void hisi_zip_remove(struct pci_dev *pdev) hisi_zip_remove_wait_delay(hisi_zip);
if (qm->fun_type == QM_HW_PF && hisi_zip->ctrl->num_vfs != 0) - hisi_zip_sriov_disable(pdev); + (void)hisi_zip_sriov_disable(pdev);
#ifndef CONFIG_IOMMU_SVA if (uacce_mode != UACCE_MODE_UACCE) @@ -1028,70 +1128,11 @@ static void hisi_zip_remove(struct pci_dev *pdev) hisi_zip_remove_from_list(hisi_zip); }
-static void hisi_zip_log_hw_error(struct hisi_zip *hisi_zip, u32 err_sts) -{ - const struct hisi_zip_hw_error *err = zip_hw_error; - struct device *dev = &hisi_zip->qm.pdev->dev; - u32 err_val; - - while (err->msg) { - if (err->int_msk & err_sts) { - dev_err(dev, "%s [error status=0x%x] found\n", - err->msg, err->int_msk); - - if (err->int_msk & HZIP_CORE_INT_STATUS_M_ECC) { - err_val = readl(hisi_zip->qm.io_base + - HZIP_CORE_SRAM_ECC_ERR_INFO); - dev_err(dev, "hisi-zip multi ecc sram num=0x%x\n", - ((err_val >> - HZIP_SRAM_ECC_ERR_NUM_SHIFT) & - 0xFF)); - dev_err(dev, "hisi-zip multi ecc sram addr=0x%x\n", - (err_val >> - HZIP_SRAM_ECC_ERR_ADDR_SHIFT)); - } - } - err++; - } -} - -static pci_ers_result_t hisi_zip_hw_error_handle(struct hisi_zip *hisi_zip) -{ - u32 err_sts; - - /* read err sts */ - err_sts = readl(hisi_zip->qm.io_base + HZIP_CORE_INT_STATUS); - - if (err_sts) { - hisi_zip_log_hw_error(hisi_zip, err_sts); - /* clear error interrupts */ - writel(err_sts, hisi_zip->qm.io_base + HZIP_CORE_INT_SOURCE); - - return PCI_ERS_RESULT_NEED_RESET; - } - - return PCI_ERS_RESULT_RECOVERED; -} - -static pci_ers_result_t hisi_zip_process_hw_error(struct pci_dev *pdev) +static void hisi_zip_shutdown(struct pci_dev *pdev) { struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); - struct device *dev = &pdev->dev; - pci_ers_result_t qm_ret, zip_ret; - - if (!hisi_zip) { - dev_err(dev, - "Can't recover ZIP-error occurred during device init\n"); - return PCI_ERS_RESULT_NONE; - }
- qm_ret = hisi_qm_hw_error_handle(&hisi_zip->qm); - - zip_ret = hisi_zip_hw_error_handle(hisi_zip); - - return (qm_ret == PCI_ERS_RESULT_NEED_RESET || - zip_ret == PCI_ERS_RESULT_NEED_RESET) ? - PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED; + hisi_qm_stop(&hisi_zip->qm, QM_NORMAL); }
static pci_ers_result_t hisi_zip_error_detected(struct pci_dev *pdev, @@ -1104,7 +1145,7 @@ static pci_ers_result_t hisi_zip_error_detected(struct pci_dev *pdev, if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT;
- return hisi_zip_process_hw_error(pdev); + return hisi_qm_process_dev_error(pdev); }
static int hisi_zip_reset_prepare_ready(struct hisi_zip *hisi_zip) @@ -1213,6 +1254,9 @@ static int hisi_zip_soft_reset(struct hisi_zip *hisi_zip) return ret; }
+ /* Set qm ecc if dev ecc happened to hold on ooo */ + hisi_qm_set_ecc(qm); + /* OOO register set and check */ writel(HZIP_MASTER_GLOBAL_CTRL_SHUTDOWN, hisi_zip->qm.io_base + HZIP_MASTER_GLOBAL_CTRL); @@ -1309,7 +1353,7 @@ static int hisi_zip_controller_reset_done(struct hisi_zip *hisi_zip) }
hisi_zip_set_user_domain_and_cache(hisi_zip); - hisi_zip_hw_error_init(hisi_zip); + hisi_qm_restart_prepare(qm);
ret = hisi_qm_restart(qm); if (ret) { @@ -1331,6 +1375,9 @@ static int hisi_zip_controller_reset_done(struct hisi_zip *hisi_zip) return -EPERM; }
+ hisi_qm_restart_done(qm); + hisi_zip_hw_error_init(hisi_zip); + return 0; }
@@ -1531,6 +1578,7 @@ static struct pci_driver hisi_zip_pci_driver = { .remove = hisi_zip_remove, .sriov_configure = hisi_zip_sriov_configure, .err_handler = &hisi_zip_err_handler, + .shutdown = hisi_zip_shutdown, };
static void hisi_zip_register_debugfs(void)
From: Xiongfeng Wang wangxiongfeng2@huawei.com
hulk inclusion category: bugfix bugzilla: 13666 CVE: NA
------------------------------
The patch referenced in the Fixes tag below set the class type to 'PCI_BASE_CLASS_NETWORK'. But 'PCI_BASE_CLASS_NETWORK' is only the upper 8 bits of a network device's class code. It should be set to 'PCI_CLASS_NETWORK_ETHERNET' instead. This patch fixes it.
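For reference, a minimal sketch of the class-code layout involved (constants as defined in include/linux/pci_ids.h; only the quirk change below is the actual fix):

	/* dev->class holds the 24-bit class code: base << 16 | subclass << 8 | prog-if */
	dev->class = PCI_BASE_CLASS_NETWORK << 8;     /* 0x000200: base class 0x00 -- wrong */
	dev->class = PCI_CLASS_NETWORK_ETHERNET << 8; /* 0x020000: Ethernet controller */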
Fixes: ba8bc9c15d20 ("PCI: Add quirk for hisilicon NP devices 5896") Signed-off-by: Xiongfeng Wang wangxiongfeng2@huawei.com Reviewed-by: Hanjun Guo guohanjun@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/pci/quirks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 83e3f2c5c7e6..e415fe90fc94 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -5179,7 +5179,7 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
static void pci_quirk_hisi_fixup_class(struct pci_dev *dev) { - dev->class = PCI_BASE_CLASS_NETWORK << 8; + dev->class = PCI_CLASS_NETWORK_ETHERNET << 8; pci_info(dev, "force hisi class type to network\n"); } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_HUAWEI, PCIE_DEVICE_ID_HISI_5896,
From: wanglin wanglin137@huawei.com
driver inclusion category: cleanup bugzilla: NA CVE: NA
----------------------------------
This patch cleans up the code according to review advice.
Reviewed-by: taojihua taojihua4@huawei.com Reviewed-by: huchunzhi huchunzhi@huawei.com Signed-off-by: wanglin wanglin137@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- .../infiniband/hw/hns/hns_roce_hw_sysfs_v2.c | 6 ++--- .../hw/hns/roce-customer/rdfx_entry.c | 24 +++++++++---------- .../hw/hns/roce-customer/rdfx_hw_v2.c | 9 +++---- 3 files changed, 18 insertions(+), 21 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_sysfs_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_sysfs_v2.c index 3779be213c22..dfbdfb2192f1 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_sysfs_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_sysfs_v2.c @@ -83,7 +83,7 @@ int hns_roce_v2_query_mpt_stat(struct hns_roce_dev *hr_dev, goto err_cmd;
mpt_ctx = kzalloc(sizeof(*mpt_ctx), GFP_KERNEL); - if (!mpt_ctx) { + if (ZERO_OR_NULL_PTR(mpt_ctx)) { ret = -ENOMEM; goto err_cmd; } @@ -268,7 +268,7 @@ int hns_roce_v2_query_aeqc_stat(struct hns_roce_dev *hr_dev, return PTR_ERR(mailbox);
eq_context = kzalloc(sizeof(*eq_context), GFP_KERNEL); - if (!eq_context) { + if (ZERO_OR_NULL_PTR(eq_context)) { ret = -ENOMEM; goto err_context; } @@ -436,7 +436,7 @@ int hns_roce_v2_query_ceqc_stat(struct hns_roce_dev *hr_dev, return PTR_ERR(mailbox);
eq_context = kzalloc(sizeof(*eq_context), GFP_KERNEL); - if (!eq_context) { + if (ZERO_OR_NULL_PTR(eq_context)) { ret = -ENOMEM; goto err_context; } diff --git a/drivers/infiniband/hw/hns/roce-customer/rdfx_entry.c b/drivers/infiniband/hw/hns/roce-customer/rdfx_entry.c index e717fb1019e8..a2453b31ac91 100644 --- a/drivers/infiniband/hw/hns/roce-customer/rdfx_entry.c +++ b/drivers/infiniband/hw/hns/roce-customer/rdfx_entry.c @@ -17,14 +17,14 @@ void rdfx_cp_rq_wqe_buf(struct hns_roce_dev *hr_dev, #endif { struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; - struct rdfx_qp_info *rdfx_qp; - struct rdfx_rq_info *rq; - void *dfx_qp_buf; + struct rdfx_qp_info *rdfx_qp = NULL; + struct rdfx_rq_info *rq = NULL; + void *dfx_qp_buf = NULL;
spin_lock(&(rdfx->qp.qp_lock));
rdfx_qp = rdfx_find_rdfx_qp(rdfx, hr_qp->qpn); - if (!rdfx_qp) { + if (ZERO_OR_NULL_PTR(rdfx_qp)) { dev_err(hr_dev->dev, "find qp 0x%lx failed while cp sq wqe buf\n", hr_qp->qpn); spin_unlock(&(rdfx->qp.qp_lock)); @@ -109,16 +109,16 @@ void rdfx_cp_sq_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, #endif { struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; - struct rdfx_qp_info *rdfx_qp; - struct rdfx_sq_info *sq; - void *dfx_qp_buf; - void *dfx_hns_wqe_sge; + struct rdfx_qp_info *rdfx_qp = NULL; + struct rdfx_sq_info *sq = NULL; + void *dfx_hns_wqe_sge = NULL; + void *dfx_qp_buf = NULL; int atomic_en = 0;
spin_lock(&(rdfx->qp.qp_lock));
rdfx_qp = rdfx_find_rdfx_qp(rdfx, qp->qpn); - if (!rdfx_qp) { + if (ZERO_OR_NULL_PTR(rdfx_qp)) { dev_err(hr_dev->dev, "find qp 0x%lx failed while cp sq wqe buf\n", qp->qpn); spin_unlock(&(rdfx->qp.qp_lock)); @@ -153,14 +153,14 @@ void rdfx_cp_cqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, void *cqe) { struct rdfx_info *rdfx = (struct rdfx_info *)hr_dev->dfx_priv; - struct rdfx_cq_info *rdfx_cq; - void *dfx_cq_buf; + struct rdfx_cq_info *rdfx_cq = NULL; + void *dfx_cq_buf = NULL; u32 ci;
spin_lock(&rdfx->cq.cq_lock);
rdfx_cq = rdfx_find_rdfx_cq(rdfx, hr_cq->cqn); - if (!rdfx_cq) { + if (ZERO_OR_NULL_PTR(rdfx_cq)) { dev_err(hr_dev->dev, "find cq 0x%lx failed while cp cqe buf\n", hr_cq->cqn); spin_unlock(&rdfx->cq.cq_lock); diff --git a/drivers/infiniband/hw/hns/roce-customer/rdfx_hw_v2.c b/drivers/infiniband/hw/hns/roce-customer/rdfx_hw_v2.c index 32994b0d78aa..c0ab3d2ba636 100644 --- a/drivers/infiniband/hw/hns/roce-customer/rdfx_hw_v2.c +++ b/drivers/infiniband/hw/hns/roce-customer/rdfx_hw_v2.c @@ -504,9 +504,8 @@ void rdfx_v2_qpc_store_print(u32 qpn, u64 bt0_ba, u64 bt1_ba, struct hns_roce_v2_qp_context *qp_context) { int i; - int *qpc; + int *qpc = (int *)qp_context;
- qpc = (int *)qp_context; pr_info("************** QPC INFO ***************\n"); pr_info("QPC(0x%x) BT0: 0x%llx\n", qpn, bt0_ba); pr_info("QPC(0x%x) BT1: 0x%llx\n", qpn, bt1_ba); @@ -711,9 +710,8 @@ void rdfx_v2_srqc_store_print(u32 srqn, u64 bt0_ba, u64 bt1_ba, struct hns_roce_srq_context *srq_context) { int i; - int *srqc; + int *srqc = (int *)srq_context;
- srqc = (int *)srq_context; pr_info("************** SRQC INFO ***************\n"); pr_info("SRQC(0x%x) BT0: 0x%llx\n", srqn, bt0_ba); pr_info("SRQC(0x%x) BT1: 0x%llx\n", srqn, bt1_ba); @@ -815,9 +813,8 @@ void rdfx_v2_mpt_store_print(int key, u64 bt0_ba, u64 bt1_ba, struct hns_roce_v2_mpt_entry *mpt_ctx) { int i; - int *mpt; + int *mpt = (int *)mpt_ctx;
- mpt = (int *)mpt_ctx; pr_info("************** MPT INFO ***************\n"); pr_info("MPT(0x%x) BT0: 0x%llx\n", key, bt0_ba); pr_info("MPT(0x%x) BT1: 0x%llx\n", key, bt1_ba);
From: Junxin Chen chenjunxin1@huawei.com
driver inclusion category: bugfix bugzilla: NA CVE: NA
---------------------------------
This patch fixes bugs found during code review.
Signed-off-by: Junxin Chen chenjunxin1@huawei.com Reviewed-by: Zhong Zhaohui zhongzhaohui@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 +- .../hns3/hns-customer/hns3pf/hclge_ext.c | 12 +++--- .../hisilicon/hns3/hns3_cae/hns3_cae_cmd.c | 2 +- .../hisilicon/hns3/hns3_cae/hns3_cae_fd.c | 2 +- .../hisilicon/hns3/hns3_cae/hns3_cae_init.c | 11 +++++- .../hisilicon/hns3/hns3_cae/hns3_cae_pkt.c | 39 +++++++++++++++---- .../hisilicon/hns3/hns3_cae/hns3_cae_pkt.h | 3 ++ .../hisilicon/hns3/hns3_cae/hns3_cae_qinfo.c | 2 +- .../hns3/hns3_cae/hns3_cae_version.h | 2 +- .../net/ethernet/hisilicon/hns3/hns3_enet.h | 2 +- .../hisilicon/hns3/hns3pf/hclge_main.h | 2 +- .../hisilicon/hns3/hns3vf/hclgevf_main.h | 2 +- 12 files changed, 58 insertions(+), 23 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index bcd8c183d2b4..e15384886ce5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -30,7 +30,7 @@ #include <linux/pci.h> #include <linux/types.h>
-#define HNAE3_MOD_VERSION "1.9.29.0" +#define HNAE3_MOD_VERSION "1.9.30.0"
#define HNAE3_MIN_VECTOR_NUM 2 /* one for msi-x, another for IO */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns-customer/hns3pf/hclge_ext.c b/drivers/net/ethernet/hisilicon/hns3/hns-customer/hns3pf/hclge_ext.c index 14741512f24f..0863771013e4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns-customer/hns3pf/hclge_ext.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns-customer/hns3pf/hclge_ext.c @@ -283,7 +283,7 @@ static int hclge_get_led_signal(struct hnae3_handle *handle, int opcode, ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, - "hclge_get_sgpio_tx_reg failed %d\n", ret); + "hclge get led signal failed %d\n", ret); return ret; }
@@ -349,7 +349,7 @@ static int hclge_get_net_lane_status(struct hnae3_handle *handle, int opcode, hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DISABLE_NET_LANE, true); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { - dev_err(&hdev->pdev->dev, "disable net lane failed %d\n", ret); + dev_err(&hdev->pdev->dev, "get lane status failed %d\n", ret); return ret; } *status = desc.data[0]; @@ -521,12 +521,12 @@ static int hclge_check_phy_opt_param(struct hclge_dev *hdev, enum hclge_phy_op_code opt_type) { if (!phydev) { - dev_err(&hdev->pdev->dev, "this net dev has no phy.\n"); + dev_err(&hdev->pdev->dev, "this net dev has no phy."); return -EINVAL; }
if (!mdio_bus) { - dev_err(&hdev->pdev->dev, "this net dev has no mdio bus.\n"); + dev_err(&hdev->pdev->dev, "this net dev has no mdio bus."); return -EINVAL; }
@@ -550,7 +550,7 @@ static int hclge_mdio_bus_opt(struct hclge_phy_para *para, op_ret = mdio_bus->read(mdio_bus, phyid, para->reg_addr); if (op_ret < 0) { dev_err(&hdev->pdev->dev, - "read phy %d page %d reg %d failed.\n", + "read phy %u page %d reg %d failed.\n", phyid, para->page, para->reg_addr); } else { para->data = (u16)op_ret; @@ -561,7 +561,7 @@ static int hclge_mdio_bus_opt(struct hclge_phy_para *para, para->data); if (op_ret < 0) { dev_err(&hdev->pdev->dev, - "write phy %d page %d reg %d failed.\n", + "write phy %u page %d reg %d failed.\n", phyid, para->page, para->reg_addr); } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_cmd.c index 7a51d22c8797..4c93facb1b22 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_cmd.c @@ -157,7 +157,7 @@ void hns3_cae_cmd_setup_basic_desc(struct hclge_desc *desc,
/** * hns3_cae_cmd_send - send command to command queue - * @h: pointer to the hnae3_handle + * @hdev: pointer to the hclge_dev * @desc: prefilled descriptor for describing the command * @num : the number of descriptors to be sent * diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_fd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_fd.c index 0cfef8cc4c00..516dc2e41904 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_fd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_fd.c @@ -156,11 +156,11 @@ static int hns3_cae_send_tcam_op_cmd(struct hclge_dev *hdev, u8 *buf_in, { #define HNS3_CAE_FD_TCAM_BD_NUM 3 struct fd_param *param = (struct fd_param *)buf_in; + struct hclge_desc desc[HNS3_CAE_FD_TCAM_BD_NUM]; struct hclge_fd_tcam_config_1_cmd *req1; struct hclge_fd_tcam_config_2_cmd *req2; struct hclge_fd_tcam_config_3_cmd *req3; struct hclge_fd_tcam_data *tcam_data; - struct hclge_desc desc[3]; struct hclge_desc *pdesc; bool check; u8 *buf; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_init.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_init.c index cb61e673cba3..cc18001641eb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_init.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_init.c @@ -420,12 +420,21 @@ static ssize_t hns3_cae_k_write(struct file *pfile, const char __user *ubuf,
static int hns3_cae_k_mmap(struct file *filp, struct vm_area_struct *vma) { + phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; + size_t size = vma->vm_end - vma->vm_start; int ret;
vma->vm_flags |= VM_IO; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + if (offset + (phys_addr_t)size - 1 < offset) + return -EINVAL; + + if (offset >> PAGE_SHIFT != vma->vm_pgoff) + return -EINVAL; + ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, - vma->vm_end - vma->vm_start, vma->vm_page_prot); + size, vma->vm_page_prot); if (ret) return -EIO;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_pkt.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_pkt.c index 457147034a82..dffc58ee4967 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_pkt.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_pkt.c @@ -239,12 +239,17 @@ static struct sk_buff *__hns_assemble_skb(struct net_device *ndev, { const struct ethhdr *ethhead = (const struct ethhdr *)data; const char *head_data = (const char *)data; + struct sk_buff *skb = NULL; struct page *p = NULL; - struct sk_buff *skb; void *buff = NULL; int proc_length; int bnum = 0;
+ if (mss <= 0) { + pr_err("mss(=%d) of packet is invalid\n", mss); + return NULL; + } + /* allocate test skb */ skb = alloc_skb(256, GFP_KERNEL); if (!skb) @@ -409,8 +414,15 @@ void __fill_the_pkt_head(struct net_device *netdev, u8 *payload, 58); break; case HNS3_CAE_PKT_TYPE_IPV4_TRACEROUTE_OPTION: + if (in_info->pkt_len < IPV4_TRACEROUTE_PKT_LEN) { + dev_err(&netdev->dev, + "pkt_len(=%d) of ipv4 trace route pkt must big than %d.\n", + in_info->pkt_len, IPV4_TRACEROUTE_PKT_LEN); + return; + } + hns3_cae_pkt_type_deal(payload, in_info, ifa_list, pkt_payload, - 66); + IPV4_TRACEROUTE_PKT_LEN); break; case HNS3_CAE_PKT_TYPE_IPV6: memcpy(payload, in_info->dst_mac, ETH_ALEN); @@ -420,11 +432,17 @@ void __fill_the_pkt_head(struct net_device *netdev, u8 *payload, 54); break; case HNS3_CAE_PKT_TYPE_IPV6_EXTENSION_ROUTING: + if (in_info->pkt_len < IPV6_EXTENSION_PKT_LEN) { + dev_err(&netdev->dev, + "pkt_len(=%d) of ipv6 extension pkt must big than %d.\n", + in_info->pkt_len, IPV6_EXTENSION_PKT_LEN); + return; + } memcpy(payload, in_info->dst_mac, ETH_ALEN); memcpy(payload + 22, in_info->pkt_inet6_addr, 16);
hns3_cae_pkt_type_deal(payload, in_info, ifa_list, pkt_payload, - 114); + IPV6_EXTENSION_PKT_LEN); break; case HNS3_CAE_PKT_TYPE_SCTP4: memcpy(payload, in_info->dst_mac, ETH_ALEN); @@ -492,8 +510,9 @@ int __hns3_cae_send_pkt(struct hns3_nic_priv *net_priv, struct net_device *netdev = net_priv->netdev;
handle = net_priv->ae_handle; - if (queue_id > handle->kinfo.num_tqps || - queue_id + in_info->multi_queue - 1 > handle->kinfo.num_tqps) { + if (queue_id >= handle->kinfo.num_tqps || + queue_id + in_info->multi_queue - 1 >= handle->kinfo.num_tqps || + queue_id < 0) { pr_err("%s,%d:queue(%d) or multi_queue(%d) is invalid\n", __func__, __LINE__, in_info->queue_id, in_info->multi_queue); @@ -501,6 +520,10 @@ int __hns3_cae_send_pkt(struct hns3_nic_priv *net_priv, }
pkt_len = in_info->pkt_len; + if (pkt_len < MIN_PKT_LEN) { + dev_err(&netdev->dev, "pkt_len(=%d) is invalid\n", pkt_len); + return -EINVAL; + }
payload = kzalloc(pkt_len, GFP_KERNEL); if (ZERO_OR_NULL_PTR(payload)) @@ -643,8 +666,8 @@ int hns3_cae_create_new_thread(int tid,
memcpy(kthread_table[tid]->in_info, in_info, sizeof(*in_info));
- name[13] = tid / 10 + '0'; - name[14] = tid % 10 + '0'; + name[strlen(name) - 2] = tid / 10 + '0'; + name[strlen(name) - 1] = tid % 10 + '0'; kthread_table[tid]->task = kthread_run(__hns3_cae_new_task, kthread_table[tid], "%s", name); if (IS_ERR(kthread_table[tid]->task)) { @@ -697,7 +720,7 @@ int hns3_cae_send_pkt(struct hns3_nic_priv *net_priv,
handle = net_priv->ae_handle; queue_id = in_info->queue_id; - if (queue_id > handle->kinfo.num_tqps) { + if (queue_id >= handle->kinfo.num_tqps || queue_id < 0) { pr_err("%s,%d:queue(%d) is invalid\n", __func__, __LINE__, in_info->queue_id); return -EINVAL; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_pkt.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_pkt.h index 8371a0f3abc0..4d3eff3a19f3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_pkt.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_pkt.h @@ -8,6 +8,9 @@
#define IP_ADDR_LEN 4 #define IP6_ADDR_LEN 10 +#define MIN_PKT_LEN 64 +#define IPV4_TRACEROUTE_PKT_LEN 66 +#define IPV6_EXTENSION_PKT_LEN 114
enum PKT_TYPE { HNS3_CAE_PKT_TYPE_ARP = 0, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qinfo.c b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qinfo.c index 48cd60e5acb7..ffbd51dc73ab 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qinfo.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_qinfo.c @@ -127,7 +127,7 @@ int hns3_cae_qinfo_cfg(struct hns3_nic_priv *net_priv, out_info = (struct qinfo_param *)buf_out; ring_id = *((int *)buf_in);
- if (ring_id > tqps_num || ring_id < 0) { + if (ring_id >= tqps_num || ring_id < 0) { pr_err("please input valid qid\n"); return -1; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h index e2e96edcb820..b1e04e9b9589 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h @@ -4,7 +4,7 @@ #ifndef __HNS3_CAE_VERSION_H__ #define __HNS3_CAE_VERSION_H__
-#define HNS3_CAE_MOD_VERSION "1.9.29.0" +#define HNS3_CAE_MOD_VERSION "1.9.30.0"
#define CMT_ID_LEN 8 #define RESV_LEN 3 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index d09599a877af..152571d85429 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -8,7 +8,7 @@
#include "hnae3.h"
-#define HNS3_MOD_VERSION "1.9.29.0" +#define HNS3_MOD_VERSION "1.9.30.0"
extern char hns3_driver_version[];
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 0b5c13241335..04b15f4ac310 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -12,7 +12,7 @@ #include "hclge_cmd.h" #include "hnae3.h"
-#define HCLGE_MOD_VERSION "1.9.29.0" +#define HCLGE_MOD_VERSION "1.9.30.0" #define HCLGE_DRIVER_NAME "hclge"
#define HCLGE_MAX_PF_NUM 8 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 1e963a35a76e..59865406e063 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -10,7 +10,7 @@ #include "hclgevf_cmd.h" #include "hnae3.h"
-#define HCLGEVF_MOD_VERSION "1.9.29.0" +#define HCLGEVF_MOD_VERSION "1.9.30.0" #define HCLGEVF_DRIVER_NAME "hclgevf"
#define HCLGEVF_MAX_VLAN_ID 4095
From: zhangwei zhangwei375@huawei.com
driver inclusion category: bugfix bugzilla: NA CVE: NA
Feature or Bugfix: Bugfix

Use memzero_explicit() instead of memset() when clearing the DH and RSA key buffers before they are freed, so that the zeroing of this sensitive data cannot be optimized away.
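A minimal illustration of why the helper matters (a sketch only, not the driver code): a plain memset() on a buffer the compiler considers dead may be removed as a dead store, while memzero_explicit() contains a barrier that forces the clear to be performed:

	memset(key, 0, len);         /* can be elided as a dead store before release */
	memzero_explicit(key, len);  /* always performed; intended for sensitive data */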
Signed-off-by: zhangwei zhangwei375@huawei.com Reviewed-by: lingmingqiang lingmingqiang@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 5ca82af73ed9..a43f82ead0a5 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -559,13 +559,13 @@ static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) hisi_qm_stop_qp(ctx->qp);
if (ctx->dh.g) { - memset(ctx->dh.g, 0, sz); + memzero_explicit(ctx->dh.g, sz); dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g); ctx->dh.g = NULL; }
if (ctx->dh.xa_p) { - memset(ctx->dh.xa_p, 0, sz); + memzero_explicit(ctx->dh.xa_p, sz); dma_free_coherent(dev, sz << 1, ctx->dh.xa_p, ctx->dh.dma_xa_p); ctx->dh.xa_p = NULL; @@ -918,14 +918,15 @@ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) }
if (ctx->rsa.crt_prikey) { - memset(ctx->rsa.crt_prikey, 0, half_key_sz * HPRE_CRT_PRMS); + memzero_explicit(ctx->rsa.crt_prikey, + half_key_sz * HPRE_CRT_PRMS); dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey); ctx->rsa.crt_prikey = NULL; }
if (ctx->rsa.prikey) { - memset(ctx->rsa.prikey, 0, ctx->key_sz << 1); + memzero_explicit(ctx->rsa.prikey, ctx->key_sz << 1); dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey, ctx->rsa.dma_prikey); ctx->rsa.prikey = NULL;
From: Hongbo Yao yaohongbo@huawei.com
hulk inclusion category: bugfix bugzilla: 27970 CVE: NA
-------------------------------------------------
This reverts commit 038c2f6d971260670b0844208f4e3424b1d55c39. When doing hotplug in a vfio-enabled KVM guest, holding the mmap_sem read lock for longer causes hung tasks.
[ 1091.426125] Call trace: [ 1091.428572] __switch_to+0xe4/0x148 [ 1091.432054] __schedule+0x31c/0x9c0 [ 1091.435528] schedule+0x2c/0x88 [ 1091.438660] rwsem_down_write_failed+0x138/0x2e8 [ 1091.443261] down_write+0x58/0x70 [ 1091.446569] vaddr_get_pfn+0x54/0x280 [vfio_iommu_type1] [ 1091.451863] vfio_pin_pages_remote+0x88/0x3c0 [vfio_iommu_type1] [ 1091.457848] vfio_pin_map_dma+0xc0/0x300 [vfio_iommu_type1] [ 1091.463401] vfio_iommu_type1_ioctl+0xa5c/0xcc4 [vfio_iommu_type1] [ 1091.469563] vfio_fops_unl_ioctl+0x74/0x2e0 [vfio] [ 1091.474338] do_vfs_ioctl+0xc4/0x8c0 [ 1091.477904] ksys_ioctl+0x8c/0xa0 [ 1091.481210] __arm64_sys_ioctl+0x28/0x38 [ 1091.485121] el0_svc_common+0x78/0x130 [ 1091.488860] el0_svc_handler+0x38/0x78 [ 1091.492598] el0_svc+0x8/0xc
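The ordering at play is the usual rw_semaphore one (a generic sketch, not the vfio code paths themselves): a pending writer must wait for the current readers, and new readers then queue behind that writer, so stretching a read-side section stalls both sides:

	down_read(&mm->mmap_sem);                 /* task A: long-running section, e.g. page pinning */
	/* task B: */ down_write(&mm->mmap_sem); /* sleeps until task A calls up_read() */
	/* task C: */ down_read(&mm->mmap_sem);  /* also sleeps, queued behind the waiting writer */
	up_read(&mm->mmap_sem);                   /* task A releases; B, then C, can proceed */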
Signed-off-by: Hongbo Yao yaohongbo@huawei.com Reviewed-By: Xie XiuQi xiexiuqi@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/vfio/vfio_iommu_type1.c | 28 +++++++++------------------- 1 file changed, 9 insertions(+), 19 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 414bb2e8c7bd..306778dcbb9e 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -347,7 +347,7 @@ static int put_pfn(unsigned long pfn, int prot) }
static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, - int prot, unsigned long *pfn, bool handle_mmap_sem) + int prot, unsigned long *pfn) { struct page *page[1]; struct vm_area_struct *vma; @@ -358,8 +358,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, if (prot & IOMMU_WRITE) flags |= FOLL_WRITE;
- if (handle_mmap_sem) - down_read(&mm->mmap_sem); + down_read(&mm->mmap_sem); if (mm == current->mm) { ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas); } else { @@ -377,16 +376,14 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, put_page(page[0]); } } - if (handle_mmap_sem) - up_read(&mm->mmap_sem); + up_read(&mm->mmap_sem);
if (ret == 1) { *pfn = page_to_pfn(page[0]); return 0; }
- if (handle_mmap_sem) - down_read(&mm->mmap_sem); + down_read(&mm->mmap_sem);
vma = find_vma_intersection(mm, vaddr, vaddr + 1);
@@ -396,8 +393,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, ret = 0; }
- if (handle_mmap_sem) - up_read(&mm->mmap_sem); + up_read(&mm->mmap_sem); return ret; }
@@ -419,12 +415,9 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, if (!mm) return -ENODEV;
- down_read(&mm->mmap_sem); - ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base, false); - if (ret) { - up_read(&mm->mmap_sem); + ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base); + if (ret) return ret; - }
pinned++; rsvd = is_invalid_reserved_pfn(*pfn_base); @@ -439,7 +432,6 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, put_pfn(*pfn_base, dma->prot); pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, limit << PAGE_SHIFT); - up_read(&mm->mmap_sem); return -ENOMEM; } lock_acct++; @@ -451,7 +443,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, /* Lock all the consecutive pages from pfn_base */ for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage; pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) { - ret = vaddr_get_pfn(mm, vaddr, dma->prot, &pfn, false); + ret = vaddr_get_pfn(mm, vaddr, dma->prot, &pfn); if (ret) break;
@@ -468,7 +460,6 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, limit << PAGE_SHIFT); ret = -ENOMEM; - up_read(&mm->mmap_sem); goto unpin_out; } lock_acct++; @@ -476,7 +467,6 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, }
out: - up_read(&mm->mmap_sem); ret = vfio_lock_acct(dma, lock_acct, false);
unpin_out: @@ -523,7 +513,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, if (!mm) return -ENODEV;
- ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base, true); + ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base); if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { ret = vfio_lock_acct(dma, 1, true); if (ret) {
From: YueHaibing yuehaibing@huawei.com
mainline inclusion from mainline-v5.1-rc4 commit 1d3ff0950e2b40dc861b1739029649d03f591820 category: bugfix bugzilla: 13690 CVE: CVE-2019-20096
-------------------------------------------------
If dccp_feat_push_change() fails, we forget to free the memory allocated by kmemdup() in dccp_feat_clone_sp_val().
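The ownership rule the fix enforces, as a simplified sketch (push_change() here is a stand-in for dccp_feat_push_change(), not the exact dccp code): whoever clones the value owns it until the push succeeds, so the error path must free it:

	u8 *vec = kmemdup(sp_val, sp_len, GFP_KERNEL);   /* cloned service-preference list */
	if (!vec)
		return -ENOMEM;
	if (push_change(fn, feat, is_local, mandatory, vec)) {
		kfree(vec);   /* push failed: the clone was never attached, free it here */
		return -ENOMEM;
	}
	return 0;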
Reported-by: Hulk Robot hulkci@huawei.com Fixes: e8ef967a54f4 ("dccp: Registration routines for changing feature values") Reviewed-by: Mukesh Ojha mojha@codeaurora.org Signed-off-by: YueHaibing yuehaibing@huawei.com Signed-off-by: David S. Miller davem@davemloft.net Signed-off-by: Yang Yingliang yangyingliang@huawei.com Reviewed-by: Wenan Mao maowenan@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- net/dccp/feat.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/net/dccp/feat.c b/net/dccp/feat.c index f227f002c73d..db87d9f58019 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c @@ -738,7 +738,12 @@ static int __feat_register_sp(struct list_head *fn, u8 feat, u8 is_local, if (dccp_feat_clone_sp_val(&fval, sp_val, sp_len)) return -ENOMEM;
- return dccp_feat_push_change(fn, feat, is_local, mandatory, &fval); + if (dccp_feat_push_change(fn, feat, is_local, mandatory, &fval)) { + kfree(fval.sp.vec); + return -ENOMEM; + } + + return 0; }
/**
From: wanglin wanglin137@huawei.com
driver inclusion category: bugfix bugzilla: NA CVE: NA
----------------------------------
This patch modifies the RoCE DFX code according to review advice.
Reviewed-by: taojihua taojihua4@huawei.com Reviewed-by: huchunzhi huchunzhi@huawei.com Signed-off-by: wanglin wanglin137@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- .../hw/hns/roce-customer/rdfx_hw_v2.c | 49 ---------- .../hw/hns/roce-customer/rdfx_main.c | 2 + .../hw/hns/roce-customer/rdfx_sysfs.c | 96 +------------------ 3 files changed, 4 insertions(+), 143 deletions(-)
diff --git a/drivers/infiniband/hw/hns/roce-customer/rdfx_hw_v2.c b/drivers/infiniband/hw/hns/roce-customer/rdfx_hw_v2.c index c0ab3d2ba636..aed4a1337edb 100644 --- a/drivers/infiniband/hw/hns/roce-customer/rdfx_hw_v2.c +++ b/drivers/infiniband/hw/hns/roce-customer/rdfx_hw_v2.c @@ -50,51 +50,6 @@ static void *rdfx_v2_get_dfx(struct ib_device *ib_dev) return hr_dev->dfx_priv; }
-static int rdfx_rdreg(const char *p_buf, struct rdfx_info *rdfx) -{ - struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)rdfx->priv; - unsigned long long a_val[SYSFS_MAX_PARA] = {0}; - char *buf = (char *)p_buf; - u32 offset; - int ret; - u32 param_num; - - ret = check_input(buf, a_val, 2, 1, ¶m_num); - if (ret) - return ret; - - offset = a_val[0]; - - pr_info("Reg(0x%08x): 0x%08x\n", - offset, roce_read(hr_dev, offset)); - - return 0; -} - -static int rdfx_wrreg(const char *p_buf, struct rdfx_info *rdfx) -{ - struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)rdfx->priv; - unsigned long long a_val[SYSFS_MAX_PARA] = {0}; - char *buf = (char *)p_buf; - u32 offset; - u32 value; - int ret; - u32 param_num; - - ret = check_input(buf, a_val, 3, 2, ¶m_num); - if (ret) - return ret; - - offset = a_val[0]; - value = a_val[1]; - - pr_info("Write reg: offset - 0x%08x, value - 0x%08x\n", - offset, value); - roce_write(hr_dev, offset, value); - - return 0; -} - static int rdfx_v2_ooo_show(struct rdfx_info *rdfx) { pr_info("************** OOO INFO ***************\n"); @@ -922,8 +877,6 @@ rdfx_hw_file_attr_def(aeqc, NULL, rdfx_v2_aeqc_store); rdfx_hw_file_attr_def(qpc, NULL, rdfx_v2_qpc_store); rdfx_hw_file_attr_def(cqc, NULL, rdfx_v2_cqc_store); rdfx_hw_file_attr_def(mpt, NULL, rdfx_v2_mpt_store); -rdfx_hw_file_attr_def(rdreg, NULL, rdfx_rdreg); -rdfx_hw_file_attr_def(wrreg, NULL, rdfx_wrreg); rdfx_hw_file_attr_def(srqc, NULL, rdfx_v2_srqc_store);
static struct attribute *rdfx_hw_v2_attrs_list[] = { @@ -936,8 +889,6 @@ static struct attribute *rdfx_hw_v2_attrs_list[] = { HW_ATTRS_LIST_MEMBER(qpc), HW_ATTRS_LIST_MEMBER(cqc), HW_ATTRS_LIST_MEMBER(mpt), - HW_ATTRS_LIST_MEMBER(rdreg), - HW_ATTRS_LIST_MEMBER(wrreg), HW_ATTRS_LIST_MEMBER(srqc), NULL }; diff --git a/drivers/infiniband/hw/hns/roce-customer/rdfx_main.c b/drivers/infiniband/hw/hns/roce-customer/rdfx_main.c index 445ac25e9920..a28bc0fb0bf0 100644 --- a/drivers/infiniband/hw/hns/roce-customer/rdfx_main.c +++ b/drivers/infiniband/hw/hns/roce-customer/rdfx_main.c @@ -76,6 +76,8 @@ int parg_getopt(char *input, char *optstring, char *parg) p++; cnt++; } + if (cnt >= DEF_OPT_STR_LEN) + return -EINVAL; *p = '\0'; p -= cnt; strcpy(parg, p); diff --git a/drivers/infiniband/hw/hns/roce-customer/rdfx_sysfs.c b/drivers/infiniband/hw/hns/roce-customer/rdfx_sysfs.c index 2a6a51a8c2fb..768e4f2c2e21 100644 --- a/drivers/infiniband/hw/hns/roce-customer/rdfx_sysfs.c +++ b/drivers/infiniband/hw/hns/roce-customer/rdfx_sysfs.c @@ -127,14 +127,6 @@ static int show_qp_detail(struct rdfx_qp_info *rdfx_qp) atomic_read(&rdfx_qp->sq.db_cnt), atomic_read(&rdfx_qp->sq.inline_cnt)); pr_info("\n"); - pr_info("SQ_head SQ_head_addr\n"); - pr_info(" 0x%x 0x%llx\n", - atomic_read(&rdfx_qp->sq.head), rdfx_qp->sq.head_addr); - pr_info("\n"); - pr_info("SQ_tail SQ_tail_addr\n"); - pr_info("0x%x 0x%llx\n", - atomic_read(&rdfx_qp->sq.tail), rdfx_qp->sq.tail_addr); - pr_info("\n"); pr_info("***************** RQ INFO *****************\n"); pr_info("rq_wqe_cnt bd_cnt inline_cnt\n"); pr_info(" 0x%x 0x%x 0x%x\n", @@ -142,14 +134,6 @@ static int show_qp_detail(struct rdfx_qp_info *rdfx_qp) atomic_read(&rdfx_qp->rq.db_cnt), atomic_read(&rdfx_qp->rq.inline_cnt)); pr_info("\n"); - pr_info("RQ_head RQ_head_addr\n"); - pr_info(" 0x%x 0x%llx\n", - atomic_read(&rdfx_qp->rq.head), rdfx_qp->rq.head_addr); - pr_info("\n"); - pr_info("RQ_tail RQ_tail_addr\n"); - pr_info(" 0x%x 0x%llx\n", - atomic_read(&rdfx_qp->rq.head), rdfx_qp->rq.tail_addr); - pr_info("\n"); pr_info("***************** QP ATTR *****************\n"); pr_info("state : 0x%x\n", atomic_read(&rdfx_qp->attr.state)); @@ -215,7 +199,7 @@ static int show_cqe(struct rdfx_cq_info *rdfx_cq, int cqe_index) u32 *cqe;
rdfx_cq->cqe_size = CQE_SIZE; - cqe_index = cqe_index & (rdfx_cq->cq_depth); + cqe_index = cqe_index & (rdfx_cq->cq_depth - 1); cqe = rdfx_buf_offset(rdfx_cq->buf, (cqe_index * rdfx_cq->cqe_size)); if (cqe) pr_info("%08x %08x %08x %08x %08x %08x %08x %08x\n", @@ -227,37 +211,6 @@ static int show_cqe(struct rdfx_cq_info *rdfx_cq, int cqe_index) return 0; }
-#ifdef CONFIG_INFINIBAND_HNS_DFX_ENHANCE -static int rdfx_enhance_qp_delete(u32 qpn, struct rdfx_info *rdfx) -{ - struct rdfx_qp_info *rdfx_qp; - struct list_head *pos; - struct list_head *q; - unsigned long flags; - u32 is_existed = 0; - - spin_lock_irqsave(&rdfx->qp.qp_lock, flags); - list_for_each_safe(pos, q, &(rdfx->qp.list)) { - rdfx_qp = list_entry(pos, struct rdfx_qp_info, list); - if (qpn == rdfx_qp->qpn) { - is_existed = 1; - list_del(pos); - break; - } - } - if (!is_existed) { - spin_unlock_irqrestore(&rdfx->qp.qp_lock, flags); - pr_err("QPN %u is not in dfx list!\n", qpn); - return -EINVAL; - } - spin_unlock_irqrestore(&rdfx->qp.qp_lock, flags); - kref_put(&(rdfx_qp->cnt), qp_release); - pr_info("delete qpn:0x%lx\n", rdfx_qp->qpn); - - return 0; -} -#endif - static inline int rdfx_convert_str(char *str, u32 *val) { long long convert_val; @@ -329,13 +282,7 @@ static int rdfx_qp_store(const char *p_buf) show_valid_qpn(&(rdfx->qp.list)); return 0; } -#ifdef CONFIG_INFINIBAND_HNS_DFX_ENHANCE - if (!parg_getopt(buf, "r:", str)) { - if (rdfx_convert_str(str, &qpn)) - return -EINVAL; - return rdfx_enhance_qp_delete(qpn, rdfx); - } -#endif + parg_getopt(buf, "v:", str); if (rdfx_convert_str(str, &qpn)) return -EINVAL; @@ -450,37 +397,6 @@ static void show_valid_cqn(struct list_head *head) pr_info("\n"); }
-#ifdef CONFIG_INFINIBAND_HNS_DFX_ENHANCE -static int rdfx_enhance_cq_delete(u32 cqn, struct rdfx_info *rdfx) -{ - struct rdfx_cq_info *rdfx_cq = NULL; - struct list_head *pos; - struct list_head *q; - unsigned long flags; - u32 is_existed = 0; - - spin_lock_irqsave(&rdfx->cq.cq_lock, flags); - list_for_each_safe(pos, q, &(rdfx->cq.list)) { - rdfx_cq = list_entry(pos, struct rdfx_cq_info, list); - if (cqn == rdfx_cq->cqn) { - is_existed = 1; - list_del(pos); - break; - } - } - if (!is_existed) { - pr_err("CQN %u is not in dfx list!\n", cqn); - spin_unlock_irqrestore(&rdfx->cq.cq_lock, flags); - return -EINVAL; - } - spin_unlock_irqrestore(&rdfx->cq.cq_lock, flags); - kref_put(&(rdfx_cq->cnt), cq_release); - pr_err("delete cqn:0x%lx\n", rdfx_cq->cqn); - - return 0; -} -#endif - static inline int rdfx_show_cq_detail(u32 cqn, struct rdfx_info *rdfx) { struct rdfx_cq_info *rdfx_cq = NULL; @@ -524,14 +440,6 @@ static int rdfx_cq_store(const char *p_buf) return 0; }
-#ifdef CONFIG_INFINIBAND_HNS_DFX_ENHANCE - if (!parg_getopt(buf, "r:", str)) { - if (rdfx_convert_str(str, &cqn)) - return -EINVAL; - - return rdfx_enhance_cq_delete(cqn, rdfx); - } -#endif parg_getopt(buf, "v:", str); if (rdfx_convert_str(str, &cqn)) return -EINVAL;