Linuxarm mailing list archive
linuxarm@openeuler.org
[PATCH v2 for-next] RDMA/hns: Add caps flag for UD inline of userspace
by Weihang Li, 22 Jan '21
HIP09 supports UD inline up to a size of 1024 bytes. The caps flag is
obtained from firmware and passed back to userspace when creating a QP.

Signed-off-by: Weihang Li <liweihang(a)huawei.com>
---
Changes since v1:
- Avoid overwriting some fields in set_default_caps().
- Link: https://patchwork.kernel.org/project/linux-rdma/patch/1609810615-50515-1-gi…

 drivers/infiniband/hw/hns/hns_roce_device.h |  1 +
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c  | 18 +++++++++++-------
 drivers/infiniband/hw/hns/hns_roce_hw_v2.h  |  1 +
 drivers/infiniband/hw/hns/hns_roce_qp.c     |  3 +++
 include/uapi/rdma/hns-abi.h                 |  1 +
 5 files changed, 17 insertions(+), 7 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 55d5386..87716da 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -214,6 +214,7 @@ enum {
 	HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
 	HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
 	HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
+	HNS_ROCE_CAP_FLAG_UD_SQ_INL = BIT(13),
 	HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14),
 	HNS_ROCE_CAP_FLAG_STASH = BIT(17),
 };
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 833e1f2..1454cd9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1800,7 +1800,6 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
 	caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
 	caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
 	caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
-	caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
 	caps->num_uars = HNS_ROCE_V2_UAR_NUM;
 	caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
 	caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
@@ -1817,7 +1816,6 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
 	caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
 	caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
 	caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
-	caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
 	caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
 	caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
 	caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
@@ -1825,7 +1823,6 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
 	caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
 	caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
 	caps->idx_entry_sz = HNS_ROCE_V2_IDX_ENTRY_SZ;
-	caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
 	caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
 	caps->reserved_lkey = 0;
 	caps->reserved_pds = 0;
@@ -1871,11 +1868,8 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
 		       HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
 
 	caps->pkey_table_len[0] = 1;
-	caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
 	caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
 	caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
-	caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
-	caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
 	caps->local_ca_ack_delay = 0;
 	caps->max_mtu = IB_MTU_4096;
 
@@ -1897,7 +1891,6 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
 	caps->cqc_timer_buf_pg_sz = 0;
 	caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
 
-	caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
 	caps->sccc_ba_pg_sz = 0;
 	caps->sccc_buf_pg_sz = 0;
 	caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
@@ -1916,6 +1909,16 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
 		caps->gmv_buf_pg_sz = 0;
 		caps->gid_table_len[0] = caps->gmv_bt_num *
 					(HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);
+		caps->flags |= HNS_ROCE_CAP_FLAG_UD_SQ_INL;
+		caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INL_EXT;
+	} else {
+		caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
+		caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
+		caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
+		caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
+		caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
+		caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
+		caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
 	}
 }
 
@@ -5084,6 +5087,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 	qp_attr->cur_qp_state = qp_attr->qp_state;
 	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
 	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
+	qp_attr->cap.max_inline_data = hr_qp->max_inline_data;
 
 	if (!ibqp->uobject) {
 		qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index bdaccf8..ab685a4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -61,6 +61,7 @@
 #define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64
 #define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
 #define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
+#define HNS_ROCE_V2_MAX_SQ_INL_EXT 0x400
 #define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
 #define HNS_ROCE_V2_UAR_NUM 256
 #define HNS_ROCE_V2_PHY_UAR_NUM 1
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index d8e2fe5..b19fcbd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -1020,6 +1020,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 	}
 
 	if (udata) {
+		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_UD_SQ_INL)
+			resp.cap_flags |= HNS_ROCE_QP_CAP_UD_SQ_INL;
+
 		ret = ib_copy_to_udata(udata, &resp,
 				       min(udata->outlen, sizeof(resp)));
 		if (ret) {
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index 90b739d..79dba94 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -77,6 +77,7 @@ enum hns_roce_qp_cap_flags {
 	HNS_ROCE_QP_CAP_RQ_RECORD_DB = 1 << 0,
 	HNS_ROCE_QP_CAP_SQ_RECORD_DB = 1 << 1,
 	HNS_ROCE_QP_CAP_OWNER_DB = 1 << 2,
+	HNS_ROCE_QP_CAP_UD_SQ_INL = 1 << 3,
 };
 
 struct hns_roce_ib_create_qp_resp {
-- 
2.8.1
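As a quick illustration of how the new bit is meant to be consumed on the userspace side, here is a hedged sketch (the helper and its call site are invented; only the flag value comes from the uAPI hunk above): after create-QP returns, a provider can gate UD inline sends on the reported cap_flags.

#include <stdbool.h>
#include <stdint.h>

/* Mirrors HNS_ROCE_QP_CAP_UD_SQ_INL = 1 << 3 from the uAPI hunk above. */
#define HNS_ROCE_QP_CAP_UD_SQ_INL (1U << 3)

/* Hypothetical provider-side check: UD inline is usable only when the
 * kernel reported the capability, and HIP09 caps it at 1024 bytes. */
static bool ud_inline_allowed(uint32_t resp_cap_flags, uint32_t msg_len)
{
    return (resp_cap_flags & HNS_ROCE_QP_CAP_UD_SQ_INL) && msg_len <= 1024;
}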
[PATCH] perf metricgroup: Fix for metrics containing duration_time
by John Garry, 22 Jan '21
Metrics containing duration_time cause a segfault:

  $ ./perf stat -v -M L1D_Cache_Fill_BW sleep 1
  Using CPUID GenuineIntel-6-3D-4
  metric expr 64 * l1d.replacement / 1000000000 / duration_time for L1D_Cache_Fill_BW
  found event duration_time
  found event l1d.replacement
  adding {l1d.replacement}:W,duration_time
  l1d.replacement -> cpu/umask=0x1,(null)=0x1e8483,event=0x51/
  Segmentation fault

In commit c2337d67199a ("perf metricgroup: Fix metrics using aliases
covering multiple PMUs"), the logic in find_evsel_group() when iterating
events was changed to select not only events in the same group, but also
events for aliased PMUs. Whether events were for aliased PMUs was checked
by comparing the event PMU name. This is not safe for the duration_time
event, which has no associated PMU (and no PMU name), so fix by also
checking whether the event PMU name is set.

Fixes: c2337d67199a ("perf metricgroup: Fix metrics using aliases covering multiple PMUs")
Reported-by: Joakim Zhang <qiangqing.zhang(a)nxp.com>
Signed-off-by: John Garry <john.garry(a)huawei.com>

diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 2e60ee170abc..e6d3452031e5 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -162,6 +162,14 @@ static bool contains_event(struct evsel **metric_events, int num_events,
 	return false;
 }
 
+static bool evsel_same_pmu(struct evsel *ev1, struct evsel *ev2)
+{
+	if (!ev1->pmu_name || !ev2->pmu_name)
+		return false;
+
+	return !strcmp(ev1->pmu_name, ev2->pmu_name);
+}
+
 /**
  * Find a group of events in perf_evlist that correspond to those from a parsed
  * metric expression. Note, as find_evsel_group is called in the same order as
@@ -280,8 +288,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
 	 */
 	if (!has_constraint &&
 	    ev->leader != metric_events[i]->leader &&
-	    !strcmp(ev->leader->pmu_name,
-		    metric_events[i]->leader->pmu_name))
+	    evsel_same_pmu(ev->leader, metric_events[i]->leader))
 		break;
 
 	if (!strcmp(metric_events[i]->name, ev->name)) {
 		set_bit(ev->idx, evlist_used);
-- 
2.26.2
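The crash mode and the fix are easy to reproduce outside perf. A self-contained sketch (struct and values invented for the demo; not perf code): strcmp() on a NULL pmu_name, as for duration_time, segfaults in practice, while the guarded helper treats a missing name as "not the same PMU".

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct evsel_stub { const char *pmu_name; };

/* Same shape as evsel_same_pmu() above: NULL names never match. */
static bool same_pmu(const struct evsel_stub *a, const struct evsel_stub *b)
{
    if (!a->pmu_name || !b->pmu_name)
        return false;
    return !strcmp(a->pmu_name, b->pmu_name);
}

int main(void)
{
    struct evsel_stub cpu = { "cpu" }, duration = { NULL };

    /* strcmp(cpu.pmu_name, duration.pmu_name) would be the old crash. */
    printf("%d\n", same_pmu(&cpu, &duration)); /* prints 0, no crash */
    return 0;
}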
[PATCH for-next 0/3] RDMA/hns: Refactor codes about memory
by Weihang Li, 22 Jan '21
This series refactors memory-related code, including MR, MTR (Memory
Translate Region) and MPT (Memory Protection Table).

Lang Cheng (2):
  RDMA/hns: Optimize the MR registration process
  RDMA/hns: Use new interface to set MPT related fields

Xi Wang (1):
  RDMA/hns: Refactor the MTR creation flow

 drivers/infiniband/hw/hns/hns_roce_common.h |  22 ++
 drivers/infiniband/hw/hns/hns_roce_cq.c     |   1 -
 drivers/infiniband/hw/hns/hns_roce_device.h |   4 +-
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c  |  77 +++---
 drivers/infiniband/hw/hns/hns_roce_hw_v2.h  |  39 +++
 drivers/infiniband/hw/hns/hns_roce_mr.c     | 404 +++++++++++-----------------
 drivers/infiniband/hw/hns/hns_roce_qp.c     |   1 -
 drivers/infiniband/hw/hns/hns_roce_srq.c    |   2 -
 8 files changed, 258 insertions(+), 292 deletions(-)

-- 
2.8.1
[PATCH] mm/zswap: fix variable 'entry' is uninitialized when used
by Tian Tao, 21 Jan '21
The entry has not been initialized when it is used, so allocate PAGE_SIZE.

Signed-off-by: Tian Tao <tiantao6(a)hisilicon.com>
Reported-by: kernel test robot <lkp(a)intel.com>
---
 mm/zswap.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/zswap.c b/mm/zswap.c
index 6e0bb61..4b7b6ad 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -944,7 +944,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
 
 	if (!zpool_can_sleep_mapped(pool)) {
-		tmp = kmalloc(entry->length, GFP_ATOMIC);
+		tmp = kmalloc(PAGE_SIZE, GFP_ATOMIC);
 		if (!tmp)
 			return -ENOMEM;
 	}
-- 
2.7.4
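Why PAGE_SIZE is the right bound here: the buffer is allocated before the entry has been looked up, so its size cannot depend on entry->length, and one page is the worst case for the data it will hold. A trivial userspace rendering of that ordering constraint (names invented for illustration):

#include <stdlib.h>

#define PAGE_SIZE 4096 /* illustration only; the kernel macro is per-arch */

/* The entry is not known yet at this point, so size the scratch buffer
 * by the upper bound instead of a field that may be uninitialized. */
static char *alloc_scratch_before_lookup(void)
{
    return malloc(PAGE_SIZE);
}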
[PATCH net-next] net: hns3: debugfs add dump tm info of nodes, priority and qset
by Huazhong Tan, 21 Jan '21
From: Guangbin Huang <huangguangbin2(a)huawei.com>

To add more ways to dump TM info, add three debugfs commands that dump
the TM info of nodes, priority and qset, and create a new debugfs file,
"tm", used only for dumping TM info. Unlike previous debugfs commands,
dumping a piece of TM info now takes two commands: the first writes the
parameters to tm, and the second reads the info from tm. For example, to
dump the TM info of priority 0, enter the following two commands:

1. echo dump priority 0 > tm
2. cat tm

The new tm file is added because we accepted Jakub Kicinski's opinion
(see https://lkml.org/lkml/2020/9/29/2101), and in order to avoid
generating too many files, write ops are implemented to let the user
input parameters. However, if two or more users write parameters to tm
concurrently, the parameters of the latest command overwrite the
previous ones. This concurrency problem can confuse users, but for now
there is no good way to fix it.

Signed-off-by: Guangbin Huang <huangguangbin2(a)huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong(a)huawei.com>
---
 drivers/net/ethernet/hisilicon/hns3/hnae3.h        |   9 +
 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 118 ++++++++++
 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h    |   6 +
 .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h |   1 +
 .../ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 251 +++++++++++++++++++++
 .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c    |   1 +
 .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h    |   2 +
 .../net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h  |  23 ++
 8 files changed, 411 insertions(+)

diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index a7daf6d..39b8ac7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -247,6 +247,10 @@ struct hnae3_vector_info {
 	int vector;
 };
 
+enum hnae3_dbg_module_type {
+	HNAE3_DBG_MODULE_TYPE_TM,
+};
+
 #define HNAE3_RING_TYPE_B 0
 #define HNAE3_RING_TYPE_TX 0
 #define HNAE3_RING_TYPE_RX 1
@@ -465,6 +469,8 @@ struct hnae3_ae_dev {
  *   Delete clsflower rule
  * cls_flower_active
  *   Check if any cls flower rule exist
+ * dbg_read_cmd
+ *   Execute debugfs read command.
  */
 struct hnae3_ae_ops {
 	int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -620,6 +626,8 @@ struct hnae3_ae_ops {
 	int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
 			      u16 flow_id, struct flow_keys *fkeys);
 	int (*dbg_run_cmd)(struct hnae3_handle *handle, const char *cmd_buf);
+	int (*dbg_read_cmd)(struct hnae3_handle *handle, const char *cmd_buf,
+			    char *buf, int len);
 	pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
 	bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
 	bool (*ae_dev_resetting)(struct hnae3_handle *handle);
@@ -757,6 +765,7 @@ struct hnae3_handle {
 	u8 netdev_flags;
 
 	struct dentry *hnae3_dbgfs;
+	int dbgfs_type;
 
 	/* Network interface message level enabled bits */
 	u32 msg_enable;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 9d4e9c0..e2b6924 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -12,6 +12,10 @@
 
 static struct dentry *hns3_dbgfs_root;
 
+#define HNS3_HELP_INFO "help"
+
+#define HNS3_DBG_MODULE_NAME_TM "tm"
+
 static int hns3_dbg_queue_info(struct hnae3_handle *h,
 			       const char *cmd_buf)
 {
@@ -338,6 +342,23 @@ static void hns3_dbg_help(struct hnae3_handle *h)
 	dev_info(&h->pdev->dev, "%s", printf_buf);
 }
 
+static void hns3_dbg_tm_help(struct hnae3_handle *h, char *buf, int len)
+{
+	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+	int pos;
+
+	pos = scnprintf(buf, len, "available commands:\n");
+
+	if (!hns3_is_phys_func(h->pdev))
+		return;
+
+	if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2)
+		pos += scnprintf(buf + pos, len - pos, "dump nodes\n");
+
+	pos += scnprintf(buf + pos, len - pos, "dump priority <pri id>\n");
+	pos += scnprintf(buf + pos, len - pos, "dump qset <qset id>\n");
+}
+
 static void hns3_dbg_dev_caps(struct hnae3_handle *h)
 {
 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
@@ -484,6 +505,93 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
 	return count;
 }
 
+static ssize_t hns3_dbg_tm_read(struct file *filp, char __user *buffer,
+				size_t count, loff_t *ppos)
+{
+	struct hnae3_handle *handle = filp->private_data;
+	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+	struct hns3_nic_priv *priv = handle->priv;
+	char *cmd_buf, *read_buf;
+	ssize_t size = 0;
+	int ret = 0;
+
+	if (strncmp(filp->f_path.dentry->d_iname, HNS3_DBG_MODULE_NAME_TM,
+		    strlen(HNS3_DBG_MODULE_NAME_TM)) != 0)
+		return -EINVAL;
+
+	if (!priv->dbg_in_msg.tm)
+		return -EINVAL;
+
+	read_buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL);
+	if (!read_buf)
+		return -ENOMEM;
+
+	cmd_buf = priv->dbg_in_msg.tm;
+	handle->dbgfs_type = HNAE3_DBG_MODULE_TYPE_TM;
+
+	if (strncmp(cmd_buf, HNS3_HELP_INFO, strlen(HNS3_HELP_INFO)) == 0)
+		hns3_dbg_tm_help(handle, read_buf, HNS3_DBG_READ_LEN);
+	else if (ops->dbg_read_cmd)
+		ret = ops->dbg_read_cmd(handle, cmd_buf, read_buf,
+					HNS3_DBG_READ_LEN);
+
+	if (ret) {
+		dev_info(priv->dev, "unknown command\n");
+		goto out;
+	}
+
+	size = simple_read_from_buffer(buffer, count, ppos, read_buf,
+				       strlen(read_buf));
+
+out:
+	kfree(read_buf);
+	return size;
+}
+
+static ssize_t hns3_dbg_tm_write(struct file *filp, const char __user *buffer,
+				 size_t count, loff_t *ppos)
+{
+	struct hnae3_handle *handle = filp->private_data;
+	struct hns3_nic_priv *priv = handle->priv;
+	char *cmd_buf, *cmd_buf_tmp;
+	int uncopied_bytes;
+
+	if (*ppos != 0)
+		return 0;
+
+	/* Judge if the instance is being reset. */
+	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+	    test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+		return 0;
+
+	if (count > HNS3_DBG_WRITE_LEN)
+		return -ENOSPC;
+
+	kfree(priv->dbg_in_msg.tm);
+	priv->dbg_in_msg.tm = NULL;
+
+	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+	if (!cmd_buf)
+		return count;
+
+	uncopied_bytes = copy_from_user(cmd_buf, buffer, count);
+	if (uncopied_bytes) {
+		kfree(cmd_buf);
+		return -EFAULT;
+	}
+
+	cmd_buf[count] = '\0';
+
+	cmd_buf_tmp = strchr(cmd_buf, '\n');
+	if (cmd_buf_tmp) {
+		*cmd_buf_tmp = '\0';
+		count = cmd_buf_tmp - cmd_buf + 1;
+	}
+
+	priv->dbg_in_msg.tm = cmd_buf;
+
+	return count;
+}
+
 static const struct file_operations hns3_dbg_cmd_fops = {
 	.owner = THIS_MODULE,
 	.open  = simple_open,
@@ -491,6 +599,13 @@ static const struct file_operations hns3_dbg_cmd_fops = {
 	.write = hns3_dbg_cmd_write,
 };
 
+static const struct file_operations hns3_dbg_tm_fops = {
+	.owner = THIS_MODULE,
+	.open  = simple_open,
+	.read  = hns3_dbg_tm_read,
+	.write = hns3_dbg_tm_write,
+};
+
 void hns3_dbg_init(struct hnae3_handle *handle)
 {
 	const char *name = pci_name(handle->pdev);
@@ -499,6 +614,9 @@ void hns3_dbg_init(struct hnae3_handle *handle)
 
 	debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
 			    &hns3_dbg_cmd_fops);
+
+	debugfs_create_file(HNS3_DBG_MODULE_NAME_TM, 0600, handle->hnae3_dbgfs,
+			    handle, &hns3_dbg_tm_fops);
 }
 
 void hns3_dbg_uninit(struct hnae3_handle *handle)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 0a7b606..76dd30d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -490,6 +490,10 @@ struct hns3_enet_tqp_vector {
 	unsigned long last_jiffies;
 } ____cacheline_internodealigned_in_smp;
 
+struct hns3_dbg_input_msg {
+	char *tm;
+};
+
 struct hns3_nic_priv {
 	struct hnae3_handle *ae_handle;
 	struct net_device *netdev;
@@ -510,6 +514,8 @@ struct hns3_nic_priv {
 
 	struct hns3_enet_coalesce tx_coal;
 	struct hns3_enet_coalesce rx_coal;
+
+	struct hns3_dbg_input_msg dbg_in_msg;
 };
 
 union l3_hdr_info {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index edfadb5..f861bdb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -160,6 +160,7 @@ enum hclge_opcode_type {
 	HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
 	HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
 	HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
+	HCLGE_OPC_TM_NODES = 0x0816,
 	HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
 	HCLGE_OPC_QSET_DFX_STS = 0x0844,
 	HCLGE_OPC_PRI_DFX_STS = 0x0845,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 8f6dea5..1f13a5b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -800,6 +800,224 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
 		cmd, ret);
 }
 
+static void hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
+{
+	struct hclge_tm_nodes_cmd *nodes;
+	struct hclge_desc desc;
+	int pos = 0;
+	int ret;
+
+	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
+		dev_err(&hdev->pdev->dev, "unsupported command!\n");
+		return;
+	}
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to dump tm nodes, ret = %d\n", ret);
+		return;
+	}
+
+	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
+
+	pos += scnprintf(buf + pos, len - pos, "PG base_id: %u\n",
+			 nodes->pg_base_id);
+	pos += scnprintf(buf + pos, len - pos, "PG number: %u\n",
+			 nodes->pg_num);
+	pos += scnprintf(buf + pos, len - pos, "PRI base_id: %u\n",
+			 nodes->pri_base_id);
+	pos += scnprintf(buf + pos, len - pos, "PRI number: %u\n",
+			 nodes->pri_num);
+	pos += scnprintf(buf + pos, len - pos, "QSET base_id: %u\n",
+			 le16_to_cpu(nodes->qset_base_id));
+	pos += scnprintf(buf + pos, len - pos, "QSET number: %u\n",
+			 le16_to_cpu(nodes->qset_num));
+	pos += scnprintf(buf + pos, len - pos, "QUEUE base_id: %u\n",
+			 le16_to_cpu(nodes->queue_base_id));
+	pos += scnprintf(buf + pos, len - pos, "QUEUE number: %u\n",
+			 le16_to_cpu(nodes->queue_num));
+}
+
+static int hclge_dbg_dump_tm_pri_sch(struct hclge_dev *hdev, u8 pri_id,
+				     char *buf, int len)
+{
+	struct hclge_priority_weight_cmd *priority_weight;
+	struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
+	enum hclge_opcode_type cmd;
+	struct hclge_desc desc;
+	int pos = 0;
+	int ret;
+
+	cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
+	pri_sch_mode->pri_id = pri_id;
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_pri_sch_cmd_send;
+
+	pos += scnprintf(buf + pos, len - pos, "PRI schedule mode: %s\n",
+			 (pri_sch_mode->sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK) ?
+			 "dwrr" : "sp");
+
+	cmd = HCLGE_OPC_TM_PRI_WEIGHT;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
+	priority_weight->pri_id = pri_id;
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_pri_sch_cmd_send;
+
+	pos += scnprintf(buf + pos, len - pos, "PRI dwrr: %u\n",
+			 priority_weight->dwrr);
+
+	return pos;
+
+err_tm_pri_sch_cmd_send:
+	dev_err(&hdev->pdev->dev,
+		"failed to dump tm priority(0x%x), ret = %d\n", cmd, ret);
+
+	return pos;
+}
+
+static void hclge_dbg_dump_tm_pri_shaping(struct hclge_dev *hdev, u8 pri_id,
+					  char *buf, int len)
+{
+	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
+	u8 ir_u, ir_b, ir_s, bs_b, bs_s;
+	enum hclge_opcode_type cmd;
+	struct hclge_desc desc;
+	u32 shapping_para;
+	int pos = 0;
+	int ret;
+
+	cmd = HCLGE_OPC_TM_PRI_C_SHAPPING;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+	shap_cfg_cmd->pri_id = pri_id;
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_pri_shaping_cmd_send;
+
+	shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
+	ir_b = hclge_tm_get_field(shapping_para, IR_B);
+	ir_u = hclge_tm_get_field(shapping_para, IR_U);
+	ir_s = hclge_tm_get_field(shapping_para, IR_S);
+	bs_b = hclge_tm_get_field(shapping_para, BS_B);
+	bs_s = hclge_tm_get_field(shapping_para, BS_S);
+	pos += scnprintf(buf + pos, len - pos,
+			 "PRI_C ir_b:%u ir_u:%u ir_s:%u bs_b:%u bs_s:%u\n",
+			 ir_b, ir_u, ir_s, bs_b, bs_s);
+	pos += scnprintf(buf + pos, len - pos, "PRI_C flag: %#x\n",
+			 shap_cfg_cmd->flag);
+	pos += scnprintf(buf + pos, len - pos, "PRI_C pri_rate: %u(Mbps)\n",
+			 le32_to_cpu(shap_cfg_cmd->pri_rate));
+
+	cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+	shap_cfg_cmd->pri_id = pri_id;
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_pri_shaping_cmd_send;
+
+	shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
+	ir_b = hclge_tm_get_field(shapping_para, IR_B);
+	ir_u = hclge_tm_get_field(shapping_para, IR_U);
+	ir_s = hclge_tm_get_field(shapping_para, IR_S);
+	bs_b = hclge_tm_get_field(shapping_para, BS_B);
+	bs_s = hclge_tm_get_field(shapping_para, BS_S);
+	pos += scnprintf(buf + pos, len - pos,
+			 "PRI_P ir_b:%u ir_u:%u ir_s:%u bs_b:%u bs_s:%u\n",
+			 ir_b, ir_u, ir_s, bs_b, bs_s);
+	pos += scnprintf(buf + pos, len - pos, "PRI_P flag: %#x\n",
+			 shap_cfg_cmd->flag);
+	pos += scnprintf(buf + pos, len - pos, "PRI_P pri_rate: %u(Mbps)\n",
+			 le32_to_cpu(shap_cfg_cmd->pri_rate));
+
+	return;
+
+err_tm_pri_shaping_cmd_send:
+	dev_err(&hdev->pdev->dev,
+		"failed to dump tm priority(0x%x), ret = %d\n", cmd, ret);
+}
+
+static void hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, const char *cmd_buf,
+				  char *buf, int len)
+{
+	int ret, pos;
+	u8 pri_id;
+
+	ret = kstrtou8(cmd_buf, 0, &pri_id);
+	pri_id = (ret != 0) ? 0 : pri_id;
+
+	pos = scnprintf(buf, len, "priority id: %u\n", pri_id);
+
+	pos += hclge_dbg_dump_tm_pri_sch(hdev, pri_id, buf + pos, len - pos);
+	hclge_dbg_dump_tm_pri_shaping(hdev, pri_id, buf + pos, len - pos);
+}
+
+static void hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, const char *cmd_buf,
+				   char *buf, int len)
+{
+	struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
+	struct hclge_qs_weight_cmd *qs_weight;
+	struct hclge_qs_to_pri_link_cmd *map;
+	enum hclge_opcode_type cmd;
+	struct hclge_desc desc;
+	int ret, pos;
+	u16 qset_id;
+
+	ret = kstrtou16(cmd_buf, 0, &qset_id);
+	qset_id = (ret != 0) ? 0 : qset_id;
+
+	pos = scnprintf(buf, len, "qset id: %u\n", qset_id);
+
+	cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
+	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	map->qs_id = cpu_to_le16(qset_id);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_qset_cmd_send;
+
+	pos += scnprintf(buf + pos, len - pos, "QS map pri id: %u\n",
+			 map->priority);
+	pos += scnprintf(buf + pos, len - pos, "QS map link_vld: %u\n",
+			 map->link_vld);
+
+	cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
+	qs_sch_mode->qs_id = cpu_to_le16(qset_id);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_qset_cmd_send;
+
+	pos += scnprintf(buf + pos, len - pos, "QS schedule mode: %s\n",
+			 (qs_sch_mode->sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK) ?
+			 "dwrr" : "sp");
+
+	cmd = HCLGE_OPC_TM_QS_WEIGHT;
+	hclge_cmd_setup_basic_desc(&desc, cmd, true);
+	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
+	qs_weight->qs_id = cpu_to_le16(qset_id);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		goto err_tm_qset_cmd_send;
+
+	pos += scnprintf(buf + pos, len - pos, "QS dwrr: %u\n",
+			 qs_weight->dwrr);
+
+	return;
+
+err_tm_qset_cmd_send:
+	dev_err(&hdev->pdev->dev, "failed to dump tm qset(0x%x), ret = %d\n",
+		cmd, ret);
+}
+
 static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
 {
 	struct hclge_cfg_pause_param_cmd *pause_param;
@@ -1591,3 +1809,36 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
 
 	return 0;
 }
+
+static int hclge_dbg_read_cmd_tm(struct hnae3_handle *handle,
+				 const char *cmd_buf, char *buf, int len)
+{
+#define DUMP_TM_NODE	"dump nodes"
+#define DUMP_TM_PRI	"dump priority"
+#define DUMP_TM_QSET	"dump qset"
+
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	if (strncmp(cmd_buf, DUMP_TM_NODE, strlen(DUMP_TM_NODE)) == 0)
+		hclge_dbg_dump_tm_nodes(hdev, buf, len);
+	else if (strncmp(cmd_buf, DUMP_TM_PRI, strlen(DUMP_TM_PRI)) == 0)
+		hclge_dbg_dump_tm_pri(hdev, &cmd_buf[sizeof(DUMP_TM_PRI)],
+				      buf, len);
+	else if (strncmp(cmd_buf, DUMP_TM_QSET, strlen(DUMP_TM_QSET)) == 0)
+		hclge_dbg_dump_tm_qset(hdev, &cmd_buf[sizeof(DUMP_TM_QSET)],
+				       buf, len);
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+int hclge_dbg_read_cmd(struct hnae3_handle *handle, const char *cmd_buf,
+		       char *buf, int len)
+{
+	if (handle->dbgfs_type == HNAE3_DBG_MODULE_TYPE_TM)
+		return hclge_dbg_read_cmd_tm(handle, cmd_buf, buf, len);
+
+	return -EINVAL;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index c242883..16ccb1a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -11850,6 +11850,7 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.enable_fd = hclge_enable_fd,
 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
 	.dbg_run_cmd = hclge_dbg_run_cmd,
+	.dbg_read_cmd = hclge_dbg_read_cmd,
 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
 	.ae_dev_resetting = hclge_ae_dev_resetting,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index ca46bc9..32e5f82 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -1006,6 +1006,8 @@ int hclge_vport_start(struct hclge_vport *vport);
 void hclge_vport_stop(struct hclge_vport *vport);
 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
 int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
+int hclge_dbg_read_cmd(struct hnae3_handle *handle, const char *cmd_buf,
+		       char *buf, int len);
 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
 int hclge_notify_client(struct hclge_dev *hdev,
 			enum hnae3_reset_notify_type type);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 5498d73..4fd7e4f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -65,6 +65,18 @@ struct hclge_priority_weight_cmd {
 	u8 dwrr;
 };
 
+struct hclge_pri_sch_mode_cfg_cmd {
+	u8 pri_id;
+	u8 rev[3];
+	u8 sch_mode;
+};
+
+struct hclge_qs_sch_mode_cfg_cmd {
+	__le16 qs_id;
+	u8 rev[2];
+	u8 sch_mode;
+};
+
 struct hclge_qs_weight_cmd {
 	__le16 qs_id;
 	u8 dwrr;
@@ -173,6 +185,17 @@ struct hclge_shaper_ir_para {
 	u8 ir_s; /* IR_S parameter of IR shaper */
 };
 
+struct hclge_tm_nodes_cmd {
+	u8 pg_base_id;
+	u8 pri_base_id;
+	__le16 qset_base_id;
+	__le16 queue_base_id;
+	u8 pg_num;
+	u8 pri_num;
+	__le16 qset_num;
+	__le16 queue_num;
+};
+
 #define hclge_tm_set_field(dest, string, val) \
 	hnae3_set_field((dest), \
 			(HCLGE_TM_SHAP_##string##_MSK), \
-- 
2.7.4
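The tm file above follows the common two-step debugfs pattern: a write stores the command string, a later read renders the result. A condensed, self-contained sketch of that shape (every name below is invented; error paths trimmed):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static char *stored_cmd; /* last command written; each writer replaces it */

static ssize_t demo_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	char *cmd = memdup_user_nul(buf, count);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	kfree(stored_cmd); /* step 1: remember the parameters */
	stored_cmd = cmd;
	return count;
}

static ssize_t demo_read(struct file *filp, char __user *buf,
			 size_t count, loff_t *ppos)
{
	char out[64];
	int len;

	if (!stored_cmd)
		return -EINVAL;

	/* step 2: render the result for the stored command */
	len = scnprintf(out, sizeof(out), "result for: %s\n", stored_cmd);
	return simple_read_from_buffer(buf, count, ppos, out, len);
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,
	.read  = demo_read,
	.write = demo_write,
};

As in the patch, nothing serializes two writers, so a second echo silently replaces the first command before either cat happens.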
[PATCH v2 0/8] rasdaemon: add support for memory_failure events,
by Shiju Jose, 20 Jan '21
Add support for the memory_failure trace event in rasdaemon, and add
improvements and support for vendor-specific errors in
util/ras-mc-ctl.in.

Changes: v1 -> v2
1. Address Mauro's feedback on the patch fixing the exception in
   ras-mc-ctl.in.
2. Rebase and group the previous v1 rasdaemon patches posted.

Shiju Jose (8):
  rasdaemon: add support for memory_failure events
  rasdaemon: ras-mc-ctl: Modify ARM processor error summary log
  rasdaemon: ras-mc-ctl: Add memory failure events
  rasdaemon: ras-mc-ctl: Fix for exception when an event is not enabled
  rasdaemon: ras-mc-ctl: Add support for the vendor-specific errors
  rasdaemon: ras-mc-ctl: Add support for HiSilicon Kunpeng920 errors
  rasdaemon: ras-mc-ctl: Add support for HiSilicon Kunpeng9xx common errors
  rasdaemon: Modify configure.ac for HiSilicon Kunpeng errors

 .travis.yml                  |   2 +-
 Makefile.am                  |   7 +-
 configure.ac                 |  20 +-
 ras-events.c                 |  15 +
 ras-events.h                 |   1 +
 ras-memory-failure-handler.c | 179 +++++++++
 ras-memory-failure-handler.h |  25 ++
 ras-record.c                 |  70 ++++
 ras-record.h                 |  13 +
 ras-report.c                 |  68 ++++
 ras-report.h                 |   2 +
 util/ras-mc-ctl.in           | 737 +++++++++++++++++++++++++----------
 12 files changed, 927 insertions(+), 212 deletions(-)
 create mode 100644 ras-memory-failure-handler.c
 create mode 100644 ras-memory-failure-handler.h

-- 
2.17.1
[PATCH for-rc] RDMA/hns: Use mutex instead of spinlock for ida allocation
by Weihang Li, 20 Jan '21
From: Yangyang Li <liyangyang20(a)huawei.com>

GFP_KERNEL may cause ida_alloc_range() to sleep, but the spinlock
covering this function is not allowed to sleep, so the spinlock needs
to be changed to a mutex.

As there is a certain chance of memory allocation failure, GFP_ATOMIC
is not suitable for QP allocation scenarios.

Fixes: 71586dd20010 ("RDMA/hns: Create QP with selected QPN for bank load balance")
Signed-off-by: Yangyang Li <liyangyang20(a)huawei.com>
Signed-off-by: Weihang Li <liweihang(a)huawei.com>
---
 drivers/infiniband/hw/hns/hns_roce_device.h |  2 +-
 drivers/infiniband/hw/hns/hns_roce_qp.c     | 11 ++++++-----
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 55d5386..ad82532 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -532,7 +532,7 @@ struct hns_roce_qp_table {
 	struct hns_roce_hem_table sccc_table;
 	struct mutex scc_mutex;
 	struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
-	spinlock_t bank_lock;
+	struct mutex bank_mutex;
 };
 
 struct hns_roce_cq_table {
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index d8e2fe5..1116371 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -209,7 +209,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 		hr_qp->doorbell_qpn = 1;
 	} else {
-		spin_lock(&qp_table->bank_lock);
+		mutex_lock(&qp_table->bank_mutex);
 		bankid = get_least_load_bankid_for_qp(qp_table->bank);
 
 		ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
@@ -217,12 +217,12 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 		if (ret) {
 			ibdev_err(&hr_dev->ib_dev,
 				  "failed to alloc QPN, ret = %d\n", ret);
-			spin_unlock(&qp_table->bank_lock);
+			mutex_unlock(&qp_table->bank_mutex);
 			return ret;
 		}
 
 		qp_table->bank[bankid].inuse++;
-		spin_unlock(&qp_table->bank_lock);
+		mutex_unlock(&qp_table->bank_mutex);
 
 		hr_qp->doorbell_qpn = (u32)num;
 	}
@@ -408,9 +408,9 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 
 	ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);
 
-	spin_lock(&hr_dev->qp_table.bank_lock);
+	mutex_lock(&hr_dev->qp_table.bank_mutex);
 	hr_dev->qp_table.bank[bankid].inuse--;
-	spin_unlock(&hr_dev->qp_table.bank_lock);
+	mutex_unlock(&hr_dev->qp_table.bank_mutex);
 }
 
 static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
@@ -1371,6 +1371,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
 	unsigned int i;
 
 	mutex_init(&qp_table->scc_mutex);
+	mutex_init(&qp_table->bank_mutex);
 	xa_init(&hr_dev->qp_table_xa);
 
 	reserved_from_bot = hr_dev->caps.reserved_qps;
-- 
2.8.1
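The rule being applied: anything that may sleep (here, a GFP_KERNEL allocation inside ida_alloc_range()) must run under a mutex, not a spinlock. A condensed sketch of the fixed shape (names invented; not the driver code):

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(bank_mutex);
static DEFINE_IDA(bank_ida);

static int alloc_id_sleeping(unsigned int min, unsigned int max)
{
	int id;

	mutex_lock(&bank_mutex); /* mutex: sleeping inside is allowed */
	id = ida_alloc_range(&bank_ida, min, max, GFP_KERNEL);
	mutex_unlock(&bank_mutex);

	return id; /* >= 0 on success, -ENOMEM/-ENOSPC on failure */
}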
Re: [RFC PATCH 1/2] EDAC/ghes: Add EDAC device for the CPU caches
by Shiju Jose, 19 Jan '21
Hi Boris,

Thanks for the feedback. Apologies for the delay.

>-----Original Message-----
>From: Borislav Petkov [mailto:bp@alien8.de]
>Sent: 31 December 2020 16:44
>To: Shiju Jose <shiju.jose(a)huawei.com>
>Cc: linux-edac(a)vger.kernel.org; linux-acpi(a)vger.kernel.org;
>linux-kernel(a)vger.kernel.org; james.morse(a)arm.com;
>mchehab+huawei(a)kernel.org; tony.luck(a)intel.com; rjw(a)rjwysocki.net;
>lenb(a)kernel.org; rrichter(a)marvell.com; Linuxarm <linuxarm(a)huawei.com>;
>xuwei (O) <xuwei5(a)huawei.com>; Jonathan Cameron
><jonathan.cameron(a)huawei.com>; John Garry <john.garry(a)huawei.com>;
>tanxiaofei <tanxiaofei(a)huawei.com>; Shameerali Kolothum Thodi
><shameerali.kolothum.thodi(a)huawei.com>; Salil Mehta
><salil.mehta(a)huawei.com>
>Subject: Re: [RFC PATCH 1/2] EDAC/ghes: Add EDAC device for the CPU caches
>
>On Tue, Dec 08, 2020 at 05:29:58PM +0000, Shiju Jose wrote:
>> The corrected error count on the CPU caches required reporting to the
>> user-space for the predictive failure analysis. For this purpose, add
>> an EDAC device and device blocks for the CPU caches found.
>> The cache's corrected error count would be stored in the
>> /sys/devices/system/edac/cpu/cpu*/cache*/ce_count.
>
>This still doesn't begin to explain why the kernel needs this. I had
>already asked whether errors in CPU caches are something which happen
>often enough so that software should count them but nothing came. So
>pls justify first why this wants to be added to the kernel.

L2 cache corrected errors are detected occasionally on a few of our
ARM64 hardware boards. Though it is rare, the possibility of CPU cache
errors occurring frequently can't be ruled out. Detecting failures
early, by monitoring cache corrected errors for frequent occurrences
and taking preventive action, could prevent more serious hardware
faults.

On Intel architectures, cache corrected errors are reported and the
affected cores are offlined in an architecture-specific way:
http://www.mcelog.org/cache.html

However, for firmware-first error reporting, specifically on ARM64
architectures, there is no provision for reporting the cache corrected
error count to user-space and taking preventive action such as
offlining the affected cores.

>
>> diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
>> index 7a47680d6f07..c73eeab27ac9 100644
>> --- a/drivers/edac/Kconfig
>> +++ b/drivers/edac/Kconfig
>> @@ -74,6 +74,16 @@ config EDAC_GHES
>>
>>  	  In doubt, say 'Y'.
>>
>> +config EDAC_GHES_CPU_ERROR
>> +	bool "EDAC device for reporting firmware-first BIOS detected CPU error count"
>
>Why a separate Kconfig item?

CONFIG_EDAC_GHES_CPU_CACHE_ERROR is added to make this feature optional,
only for the platforms which need it and support it.

>
>> +	depends on EDAC_GHES
>> +	help
>> +	  EDAC device for the firmware-first BIOS detected CPU error count
>> +	  reported
>
>Well this is not what it is doing - you're talking about cache errors.
>"CPU errors" can be a lot more than just cache errors.

Sure. I will change.

>
>> +static void ghes_edac_create_cpu_device(struct device *dev)
>> +{
>> +	int cpu;
>> +	struct cpu_cacheinfo *this_cpu_ci;
>> +
>> +	/*
>> +	 * Find the maximum number of caches present in the CPU heirarchy
>> +	 * among the online CPUs.
>> +	 */
>> +	for_each_online_cpu(cpu) {
>> +		this_cpu_ci = get_cpu_cacheinfo(cpu);
>> +		if (!this_cpu_ci)
>> +			continue;
>> +		if (max_number_of_caches < this_cpu_ci->num_leaves)
>> +			max_number_of_caches = this_cpu_ci->num_leaves;
>
>So this is counting the number of cache levels on the system? So you
>want to count the errors per cache levels?

Yes. This was the suggestion from James, and also to offline the
affected cores for a shared cache.

>
>> +	}
>> +	if (!max_number_of_caches)
>> +		return;
>> +
>> +	/*
>> +	 * EDAC device interface only supports creating the CPU cache
>> +	 * hierarchy for all the CPUs together. Thus need to allocate
>> +	 * cpu_edac_block_list for the max_number_of_caches among all the
>> +	 * CPUs irrespective of the number of caches per CPU might vary.
>> +	 */
>
>So this is lumping all the caches together into a single list? What for?
>To untangle to the proper ones when the error gets reported?
>
>Have you heard of percpu variables?

Yes. Changed the list to the percpu variable.

>
>> @@ -624,6 +787,10 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
>>  	ghes_pvt = pvt;
>>  	spin_unlock_irqrestore(&ghes_lock, flags);
>>
>> +#if defined(CONFIG_EDAC_GHES_CPU_ERROR)
>> +	ghes_edac_create_cpu_device(dev);
>> +#endif
>> +
>
>Init stuff belongs into ghes_scan_system().

Did you mean calling ghes_edac_create_cpu_device() in
ghes_scan_system()?

>...
>
>Ok, I've seen enough. "required reporting to the user-space for the
>predictive failure analysis." is not even trying to explain *why*
>you're doing this, what *actual* problem it is addressing and why
>should the kernel get it.
>
>And without a proper problem definition of what you're trying to solve,
>this is not going anywhere.
>
>--
>Regards/Gruss,
>    Boris.

Thanks,
Shiju
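For reference, the per-CPU direction mentioned in "Changed the list to the percpu variable" could look roughly like this (a hedged sketch, not the posted patch; the leaf bound and all names are assumptions):

#include <linux/percpu.h>

#define MAX_CACHE_LEAVES 8 /* assumed upper bound for the sketch */

struct cpu_cache_err {
	unsigned int ce_count[MAX_CACHE_LEAVES];
};

static DEFINE_PER_CPU(struct cpu_cache_err, cache_err_counts);

/* Count one corrected error against a given CPU's cache leaf. */
static void count_cache_ce(int cpu, int leaf)
{
	if (leaf < MAX_CACHE_LEAVES)
		per_cpu(cache_err_counts, cpu).ce_count[leaf]++;
}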
[PATCH mm/zswap 0/2] Fix the compatibility of zsmalloc and zswap
by Tian Tao, 19 Jan '21
Patch #1 adds a flag to zpool, which zswap then uses to determine
whether zpool drivers such as zbud/z3fold/zsmalloc can sleep in atomic
context. Patch #2 sets the flag sleep_mapped to true to indicate that
zbud/z3fold can sleep in atomic context; zsmalloc doesn't support
sleeping in atomic context, so the flag is not set to true for it.

Tian Tao (2):
  mm/zswap: add the flag can_sleep_mapped
  mm: set the sleep_mapped to true for zbud and z3fold

 include/linux/zpool.h |  3 +++
 mm/z3fold.c           |  1 +
 mm/zbud.c             |  1 +
 mm/zpool.c            | 13 +++++++++++++
 mm/zswap.c            | 50 +++++++++++++++++++++++++++++++++++++++++++++-----
 5 files changed, 63 insertions(+), 5 deletions(-)

-- 
2.7.4
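The shape of the fallback this flag enables in zswap, reduced to a userspace sketch (all names invented; see the actual patches for the real code): when the backend cannot sleep while a handle is mapped, the data is copied to a bounce buffer first, and only the copy is used by code that may sleep.

#include <stdbool.h>
#include <string.h>

#define PAGE_SIZE 4096

struct zpool_stub {
	bool sleep_mapped; /* true: zbud/z3fold; false: zsmalloc */
};

static const char *map_for_sleeping_work(struct zpool_stub *pool,
					 const char *mapped_src, size_t len,
					 char *bounce /* PAGE_SIZE bytes */)
{
	if (pool->sleep_mapped)
		return mapped_src; /* backend tolerates sleeping while mapped */

	memcpy(bounce, mapped_src, len); /* copy while still atomic... */
	return bounce; /* ...so the caller can unmap and then sleep safely */
}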
[PATCH v2 for-next] RDMA/hns: Create CQ with selected CQN for bank load balance
by Weihang Li, 19 Jan '21
From: Yangyang Li <liyangyang20(a)huawei.com>

In order to improve performance by balancing the load between different
banks of cache, the CQC cache is designed to choose one of 4 banks
according to the lower 2 bits of the CQN. The hns driver needs to count
the number of CQs on each bank and then assign the CQ being created to
the bank with the minimum load first.

Signed-off-by: Yangyang Li <liyangyang20(a)huawei.com>
Signed-off-by: Weihang Li <liweihang(a)huawei.com>
---
Changes since v1:
- Change GFP_ATOMIC to GFP_KERNEL as there is a chance of memory
  allocation failure, and change the spin lock to a mutex because
  ida_alloc_range() may sleep.
- Link: https://patchwork.kernel.org/project/linux-rdma/patch/1609742115-47270-1-gi…

 drivers/infiniband/hw/hns/hns_roce_cq.c     | 115 +++++++++++++++++++++++-----
 drivers/infiniband/hw/hns/hns_roce_device.h |  10 ++-
 drivers/infiniband/hw/hns/hns_roce_main.c   |   8 +-
 3 files changed, 105 insertions(+), 28 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 8533fc2..ffb7f7e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -38,11 +38,74 @@
 #include "hns_roce_hem.h"
 #include "hns_roce_common.h"
 
+static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
+{
+	u32 least_load = bank[0].inuse;
+	u8 bankid = 0;
+	u32 bankcnt;
+	u8 i;
+
+	for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) {
+		bankcnt = bank[i].inuse;
+		if (bankcnt < least_load) {
+			least_load = bankcnt;
+			bankid = i;
+		}
+	}
+
+	return bankid;
+}
+
+static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+{
+	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+	struct hns_roce_bank *bank;
+	u8 bankid;
+	int id;
+
+	mutex_lock(&cq_table->bank_mutex);
+	bankid = get_least_load_bankid_for_cq(cq_table->bank);
+	bank = &cq_table->bank[bankid];
+
+	id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
+	if (id < 0) {
+		mutex_unlock(&cq_table->bank_mutex);
+		return id;
+	}
+
+	/* the lower 2 bits is bankid */
+	hr_cq->cqn = (id << CQ_BANKID_SHIFT) | bankid;
+	bank->inuse++;
+	mutex_unlock(&cq_table->bank_mutex);
+
+	return 0;
+}
+
+static inline u8 get_cq_bankid(unsigned long cqn)
+{
+	/* The lower 2 bits of CQN are used to hash to different banks */
+	return (u8)(cqn & GENMASK(1, 0));
+}
+
+static void free_cqn(struct hns_roce_dev *hr_dev, unsigned long cqn)
+{
+	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+	struct hns_roce_bank *bank;
+
+	bank = &cq_table->bank[get_cq_bankid(cqn)];
+
+	ida_free(&bank->ida, cqn >> CQ_BANKID_SHIFT);
+
+	mutex_lock(&cq_table->bank_mutex);
+	bank->inuse--;
+	mutex_unlock(&cq_table->bank_mutex);
+}
+
 static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
+	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_cmd_mailbox *mailbox;
-	struct hns_roce_cq_table *cq_table;
 	u64 mtts[MTT_MIN_COUNT] = { 0 };
 	dma_addr_t dma_handle;
 	int ret;
@@ -54,13 +117,6 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 		return -EINVAL;
 	}
 
-	cq_table = &hr_dev->cq_table;
-	ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
-	if (ret) {
-		ibdev_err(ibdev, "failed to alloc CQ bitmap, ret = %d.\n", ret);
-		return ret;
-	}
-
 	/* Get CQC memory HEM(Hardware Entry Memory) table */
 	ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
 	if (ret) {
@@ -110,7 +166,6 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
 
 err_out:
-	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
 	return ret;
 }
 
@@ -138,7 +193,6 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 	wait_for_completion(&hr_cq->free);
 
 	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
-	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
 }
 
 static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
@@ -298,11 +352,17 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
 		goto err_cq_buf;
 	}
 
+	ret = alloc_cqn(hr_dev, hr_cq);
+	if (ret) {
+		ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret);
+		goto err_cq_db;
+	}
+
 	ret = alloc_cqc(hr_dev, hr_cq);
 	if (ret) {
 		ibdev_err(ibdev,
 			  "failed to alloc CQ context, ret = %d.\n", ret);
-		goto err_cq_db;
+		goto err_cqn;
 	}
 
 	/*
@@ -326,6 +386,8 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
 
 err_cqc:
 	free_cqc(hr_dev, hr_cq);
+err_cqn:
+	free_cqn(hr_dev, hr_cq->cqn);
 err_cq_db:
 	free_cq_db(hr_dev, hr_cq, udata);
 err_cq_buf:
@@ -341,9 +403,11 @@ int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 	if (hr_dev->hw->destroy_cq)
 		hr_dev->hw->destroy_cq(ib_cq, udata);
 
-	free_cq_buf(hr_dev, hr_cq);
-	free_cq_db(hr_dev, hr_cq, udata);
 	free_cqc(hr_dev, hr_cq);
+	free_cqn(hr_dev, hr_cq->cqn);
+	free_cq_db(hr_dev, hr_cq, udata);
+	free_cq_buf(hr_dev, hr_cq);
+
 	return 0;
 }
 
@@ -402,18 +466,33 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
 	complete(&hr_cq->free);
 }
 
-int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
+void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
 {
 	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+	unsigned int reserved_from_bot;
+	unsigned int i;
 
+	mutex_init(&cq_table->bank_mutex);
 	xa_init(&cq_table->array);
 
-	return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs,
-				    hr_dev->caps.num_cqs - 1,
-				    hr_dev->caps.reserved_cqs, 0);
+	reserved_from_bot = hr_dev->caps.reserved_cqs;
+
+	for (i = 0; i < reserved_from_bot; i++) {
+		cq_table->bank[get_cq_bankid(i)].inuse++;
+		cq_table->bank[get_cq_bankid(i)].min++;
+	}
+
+	for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++) {
+		ida_init(&cq_table->bank[i].ida);
+		cq_table->bank[i].max = hr_dev->caps.num_cqs /
+					HNS_ROCE_CQ_BANK_NUM - 1;
+	}
 }
 
 void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
 {
-	hns_roce_bitmap_cleanup(&hr_dev->cq_table.bitmap);
+	int i;
+
+	for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++)
+		ida_destroy(&hr_dev->cq_table.bank[i].ida);
 }
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 55d5386..c46b330 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -119,6 +119,9 @@
 #define SRQ_DB_REG 0x230
 
 #define HNS_ROCE_QP_BANK_NUM 8
+#define HNS_ROCE_CQ_BANK_NUM 4
+
+#define CQ_BANKID_SHIFT 2
 
 /* The chip implementation of the consumer index is calculated
  * according to twice the actual EQ depth
@@ -536,9 +539,10 @@ struct hns_roce_qp_table {
 };
 
 struct hns_roce_cq_table {
-	struct hns_roce_bitmap bitmap;
 	struct xarray array;
 	struct hns_roce_hem_table table;
+	struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
+	struct mutex bank_mutex;
 };
 
 struct hns_roce_srq_table {
@@ -779,7 +783,7 @@ struct hns_roce_caps {
 	u32 max_cqes;
 	u32 min_cqes;
 	u32 min_wqes;
-	int reserved_cqs;
+	u32 reserved_cqs;
 	int reserved_srqs;
 	int num_aeq_vectors;
 	int num_comp_vectors;
@@ -1164,7 +1168,7 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 
 int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
-int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
+void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index d9179ba..2b78b1f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -748,11 +748,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 		goto err_pd_table_free;
 	}
 
-	ret = hns_roce_init_cq_table(hr_dev);
-	if (ret) {
-		dev_err(dev, "Failed to init completion queue table.\n");
-		goto err_mr_table_free;
-	}
+	hns_roce_init_cq_table(hr_dev);
 
 	ret = hns_roce_init_qp_table(hr_dev);
 	if (ret) {
@@ -777,8 +773,6 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 
 err_cq_table_free:
 	hns_roce_cleanup_cq_table(hr_dev);
-
-err_mr_table_free:
 	hns_roce_cleanup_mr_table(hr_dev);
 
 err_pd_table_free:
-- 
2.8.1
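The CQN layout described above is easy to check in isolation: the low 2 bits select one of the 4 banks and the remaining bits are the per-bank ida index. A runnable demo (the two constants mirror the patch; the program itself is illustrative only):

#include <stdint.h>
#include <stdio.h>

#define CQ_BANKID_SHIFT 2
#define CQ_BANKID_MASK 0x3u /* GENMASK(1, 0) */

int main(void)
{
	uint32_t id = 5, bankid = 3;
	uint32_t cqn = (id << CQ_BANKID_SHIFT) | bankid;

	printf("cqn=%u bank=%u index=%u\n",
	       cqn, cqn & CQ_BANKID_MASK, cqn >> CQ_BANKID_SHIFT);
	return 0; /* prints: cqn=23 bank=3 index=5 */
}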