hulk inclusion
category: bugfix
bugzilla: NA
CVE: NA
--------------------------------
On other boards, applying the cntvct workaround on the VDSO path may cause unexpected errors, so do it only on D05 (a small illustrative sketch of the gating follows the diff below).
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/clocksource/arm_arch_timer.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 443079810300..50030326d27b 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -551,9 +551,14 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
 	 * late in the game (with a per-CPU erratum, for example), so
 	 * change both the default value and the vdso itself.
 	 */
 	if (wa->read_cntvct_el0) {
-		clocksource_counter.archdata.vdso_direct = true;
-		vdso_default = true;
-		vdso_fix = true;
+		if (wa->read_cntvct_el0 == hisi_161010101_read_cntvct_el0) {
+			clocksource_counter.archdata.vdso_direct = true;
+			vdso_default = true;
+			vdso_fix = true;
+		} else {
+			clocksource_counter.archdata.vdso_direct = false;
+			vdso_default = false;
+		}
 	}
 }
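For reference, a minimal standalone sketch of the gating intent above, with simplified stand-in types and a dummy hisi_161010101_read_cntvct_el0 hook (this is an illustration, not the driver code): the VDSO-direct counter read is kept only when the installed CNTVCT workaround is the HiSilicon erratum 161010101 hook used on D05, because only that erratum has a matching fix in the VDSO; any other CNTVCT workaround falls back to the syscall path.

#include <stdbool.h>
#include <stdint.h>

struct erratum_workaround {
	uint64_t (*read_cntvct_el0)(void);	/* erratum read hook, if any */
};

/* stand-in for the D05 (erratum 161010101) counter read hook */
static uint64_t hisi_161010101_read_cntvct_el0(void)
{
	return 0;
}

static bool vdso_direct, vdso_default, vdso_fix;

static void enable_vdso_workaround(const struct erratum_workaround *wa)
{
	if (!wa->read_cntvct_el0)
		return;

	if (wa->read_cntvct_el0 == hisi_161010101_read_cntvct_el0) {
		/* D05: the VDSO carries the matching fix, keep the fast path */
		vdso_direct = true;
		vdso_default = true;
		vdso_fix = true;
	} else {
		/* other boards: drop the VDSO-direct path, use the fallback */
		vdso_direct = false;
		vdso_default = false;
	}
}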
From: Yang Shunfeng <yangshunfeng2@huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
----------------------------------
This patch fixes the extended SGE error: the extended SGE buffer size is now aligned up to the page size before the SGE count is derived from it, instead of deriving the count from a single page (a small worked example follows the diff below).
Reviewed-by: Zhao Weibo <zhaoweibo3@huawei.com>
Reviewed-by: Hu Chunzhi <huchunzhi@huawei.com>
Signed-off-by: Yang Shunfeng <yangshunfeng2@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/infiniband/hw/hns/hns_roce_qp.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index b6017ab10ab6..520213d6461e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -435,6 +435,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
 {
 	u32 ex_sge_num;
 	u32 page_size;
+	u32 buf_size;
 	u32 max_cnt;
 	int ret;
 
@@ -491,8 +492,11 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
 					     hr_qp->sq.wqe_shift), PAGE_SIZE);
 	} else {
 		page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+		buf_size = ALIGN((hr_qp->sge.sge_cnt << HNS_ROCE_SGE_SHIFT),
+				 page_size);
 		hr_qp->sge.sge_cnt = ex_sge_num ?
-			max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num) : 0;
+			max(buf_size / (1 << hr_qp->sge.sge_shift),
+			    ex_sge_num) : 0;
 		hr_qp->buff_size = HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt <<
 					     hr_qp->rq.wqe_shift), page_size) +
 			   HNS_ROCE_ALIGN_UP((hr_qp->sge.sge_cnt <<
@@ -642,6 +646,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 					struct hns_roce_qp *hr_qp)
 {
 	struct device *dev = hr_dev->dev;
+	u32 buf_size;
 	u32 page_size;
 	u32 max_cnt;
 	int size;
@@ -689,7 +694,9 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 
 	if (hr_dev->caps.max_sq_sg > HNS_ROCE_MAX_SGE_NUM &&
 	    hr_qp->sge.sge_cnt) {
-		hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
+		buf_size = ALIGN((hr_qp->sge.sge_cnt << HNS_ROCE_SGE_SHIFT),
+				 page_size);
+		hr_qp->sge.sge_cnt = max(buf_size / (1 << hr_qp->sge.sge_shift),
 					 (u32)hr_qp->sge.sge_cnt);
 		hr_qp->sge.offset = size;
 		size += HNS_ROCE_ALIGN_UP(hr_qp->sge.sge_cnt <<
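To make the effect of the buf_size change concrete, here is a small worked example with made-up values (4 KiB page, 16-byte SGEs via HNS_ROCE_SGE_SHIFT = 4, and 384 requested extended SGEs; none of these numbers come from a real configuration): the old calculation only ever accounts for a single page of SGEs, while the new one rounds the requested SGE buffer up to whole pages before deriving the count.

#include <stdio.h>
#include <stdint.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((uint32_t)(a) - 1))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

int main(void)
{
	uint32_t page_size = 4096;	/* 1 << (mtt_buf_pg_sz + PAGE_SHIFT) */
	uint32_t sge_shift = 4;		/* HNS_ROCE_SGE_SHIFT: 16-byte SGEs */
	uint32_t sge_cnt   = 384;	/* requested extended SGEs (1.5 pages) */

	/* old: derive the count from a single page, then clamp upwards */
	uint32_t old_cnt = MAX(page_size / (1U << sge_shift), sge_cnt);

	/* new: align the requested SGE buffer up to whole pages first */
	uint32_t buf_size = ALIGN(sge_cnt << sge_shift, page_size);
	uint32_t new_cnt  = MAX(buf_size / (1U << sge_shift), sge_cnt);

	/* prints: old sge_cnt = 384 (1.5 pages), new sge_cnt = 512 (2 pages) */
	printf("old sge_cnt = %u, new sge_cnt = %u\n", old_cnt, new_cnt);
	return 0;
}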