From: Jian Zhang zhangjian210@huawei.com
hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I63SDZ
-------------------------------
In Ascend, we use tmp hugepages and disable the OOM killer. When an OOM occurs and, after some time, enough memory becomes available again, the process will not resume running normally on its own. In this case, we must use OOM recovery to let the process run again.
Signed-off-by: Jian Zhang zhangjian210@huawei.com --- mm/memcontrol.c | 11 +++++++++++ 1 file changed, 11 insertions(+)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a3617f0a0fd1..b2c4bc4bb591 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3116,12 +3116,23 @@ void __memcg_kmem_uncharge_page(struct page *page, int order) { struct obj_cgroup *objcg; unsigned int nr_pages = 1 << order; +#ifdef CONFIG_ASCEND_FEATURES + struct mem_cgroup *memcg; +#endif
if (!PageMemcgKmem(page)) return;
objcg = __page_objcg(page); obj_cgroup_uncharge_pages(objcg, nr_pages); + +#ifdef CONFIG_ASCEND_FEATURES + memcg = get_mem_cgroup_from_objcg(objcg); + if (!mem_cgroup_is_root(memcg)) + memcg_oom_recover(memcg); + css_put(&memcg->css); +#endif + page->memcg_data = 0; obj_cgroup_put(objcg); }
hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I63SDZ
-------------------------------
Some Taishan cores have sub-version 2, so add this new version to the match list for Taishan core adaptation.
Signed-off-by: Zhang Jian zhangjian210@huawei.com --- arch/arm64/kernel/cpu_errata.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index ba26ef1739a4..a4f258c83a1f 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -68,6 +68,7 @@ hisilicon_1980005_match(const struct arm64_cpu_capabilities *entry, static const struct midr_range idc_support_list[] = { MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), MIDR_REV(MIDR_HISI_TSV200, 1, 0), + MIDR_REV(MIDR_HISI_TSV200, 1, 2), { /* sentinel */ } };
From: z00512904 z00512904@huawei.com
hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I63SDZ
-------------------------------
When an OOM occurs with CONFIG_ASCEND_OOM enabled and panic_on_oom set, we should call oom_type_notifier_call() to notify other subscribers of this event before panicking.
Signed-off-by: z00512904 z00512904@huawei.com --- mm/oom_kill.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 417ff9574d19..0f77eb4c6644 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -1091,6 +1091,7 @@ static void check_panic_on_oom(struct oom_control *oc) if (is_sysrq_oom(oc)) return; dump_header(oc, NULL); + oom_type_notifier_call(0, oc); panic("Out of memory: %s panic_on_oom is enabled\n", sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); }
From: Baolin Wang baolin.wang@linux.alibaba.com
mainline inclusion from mainline-v5.13 commit 63f83b31f4f36d933e13bd8b9a25d6d9a0cf89dd category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I63SDZ CVE: NA
-------------------------------
If we did not reserve extra CMA memory, the log buffer can be easily filled up by CMA failure warning when the devices calling dmam_alloc_coherent() to alloc DMA memory. Thus we can use pr_err_ratelimited() instead to reduce the duplicate CMA warning.
Link: https://lkml.kernel.org/r/ce2251ef49e1727a9a40531d1996660b05462bd2.161527982... Signed-off-by: Baolin Wang baolin.wang@linux.alibaba.com Reviewed-by: David Hildenbrand david@redhat.com Acked-by: Minchan Kim minchan@kernel.org Signed-off-by: Andrew Morton akpm@linux-foundation.org Signed-off-by: Linus Torvalds torvalds@linux-foundation.org --- mm/cma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/cma.c b/mm/cma.c index 9361ecaf52be..09f3b1e264c0 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -486,8 +486,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, }
if (ret && !no_warn) { - pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n", - __func__, count, ret); + pr_err_ratelimited("%s: alloc failed, req-size: %zu pages, ret: %d\n", + __func__, count, ret); cma_debug_show_areas(cma); }
From: Zhou Guanghui zhouguanghui1@huawei.com
hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I63SDZ
-------------------------------
In the Ascend feature, some drivers' VMAs contain both hugepages and normal pages. In this case, we cannot rely on the VMA's information to allocate the page table entries. To fix this, allocate the huge-page PMD entry directly in this interface instead of using the VMA's information.
Signed-off-by: Zhou Guanghui zhouguanghui1@huawei.com --- mm/hugetlb.c | 56 ++++++++++++++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 6b96eda50977..bb97bbaac65a 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -6283,39 +6283,43 @@ struct page *hugetlb_alloc_hugepage(int nid, int flag) } EXPORT_SYMBOL_GPL(hugetlb_alloc_hugepage);
+static pte_t *hugetlb_huge_pte_alloc(struct mm_struct *mm, unsigned long addr, + unsigned long size) +{ + pgd_t *pgdp; + p4d_t *p4dp; + pud_t *pudp; + pte_t *ptep = NULL; + + pgdp = pgd_offset(mm, addr); + p4dp = p4d_offset(pgdp, addr); + pudp = pud_alloc(mm, p4dp, addr); + if (!pudp) + return NULL; + + ptep = (pte_t *)pmd_alloc(mm, pudp, addr); + + return ptep; +} + static int __hugetlb_insert_hugepage(struct mm_struct *mm, unsigned long addr, - pgprot_t prot, unsigned long pfn, bool special) + pgprot_t prot, unsigned long pfn) { int ret = 0; pte_t *ptep, entry; struct hstate *h; - struct vm_area_struct *vma; - struct address_space *mapping; spinlock_t *ptl;
h = size_to_hstate(PMD_SIZE); if (!h) return -EINVAL;
- if (!IS_ALIGNED(addr, PMD_SIZE)) - return -EINVAL; - - vma = find_vma(mm, addr); - if (!vma || !range_in_vma(vma, addr, addr + PMD_SIZE)) - return -EINVAL; - - mapping = vma->vm_file->f_mapping; - i_mmap_lock_read(mapping); - ptep = huge_pte_alloc(mm, addr, huge_page_size(h)); - if (!ptep) { - ret = -ENXIO; - goto out_unlock; - } + ptep = hugetlb_huge_pte_alloc(mm, addr, huge_page_size(h)); + if (!ptep) + return -ENXIO;
- if (WARN_ON(ptep && !pte_none(*ptep) && !pmd_huge(*(pmd_t *)ptep))) { - ret = -ENXIO; - goto out_unlock; - } + if (WARN_ON(ptep && !pte_none(*ptep) && !pmd_huge(*(pmd_t *)ptep))) + return -ENXIO;
entry = pfn_pte(pfn, prot); entry = huge_pte_mkdirty(entry); @@ -6323,31 +6327,27 @@ static int __hugetlb_insert_hugepage(struct mm_struct *mm, unsigned long addr, entry = huge_pte_mkwrite(entry); entry = pte_mkyoung(entry); entry = pte_mkhuge(entry); - if (special) - entry = pte_mkspecial(entry); + entry = pte_mkspecial(entry);
ptl = huge_pte_lockptr(h, mm, ptep); spin_lock(ptl); set_huge_pte_at(mm, addr, ptep, entry); spin_unlock(ptl);
-out_unlock: - i_mmap_unlock_read(mapping); - return ret; }
int hugetlb_insert_hugepage_pte(struct mm_struct *mm, unsigned long addr, pgprot_t prot, struct page *hpage) { - return __hugetlb_insert_hugepage(mm, addr, prot, page_to_pfn(hpage), false); + return __hugetlb_insert_hugepage(mm, addr, prot, page_to_pfn(hpage)); } EXPORT_SYMBOL_GPL(hugetlb_insert_hugepage_pte);
int hugetlb_insert_hugepage_pte_by_pa(struct mm_struct *mm, unsigned long addr, pgprot_t prot, unsigned long phy_addr) { - return __hugetlb_insert_hugepage(mm, addr, prot, phy_addr >> PAGE_SHIFT, true); + return __hugetlb_insert_hugepage(mm, addr, prot, phy_addr >> PAGE_SHIFT); } EXPORT_SYMBOL_GPL(hugetlb_insert_hugepage_pte_by_pa);
From: Jiankang Chen chenjiankang1@huawei.com
hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I63SDZ
-------------------
In the Ascend feature, the SMMU's SID is read from the ACPI table's "streamid" property. To enable the SMMU, we must get the SID from the ACPI table and set stall_enabled to true.
Signed-off-by: Jiankang Chen chenjiankang1@huawei.com Signed-off-by: Fang Lijun fanglijun3@huawei.com Reviewed-by: Hanjun Guo guohanjun@huawei.com Reviewed-by: Zhen Lei thunder.leizhen@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 12 ++++++++++++ 1 file changed, 12 insertions(+)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 9e702788e90f..74886facbcab 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -3249,6 +3249,9 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev) struct arm_smmu_device *smmu; struct arm_smmu_master *master; struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); +#ifdef CONFIG_ASCEND_FEATURES + u32 sid; +#endif
if (!fwspec || fwspec->ops != &arm_smmu_ops) return ERR_PTR(-ENODEV); @@ -3295,6 +3298,15 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev) smmu->features & ARM_SMMU_FEAT_STALL_FORCE) master->stall_enabled = true;
+#ifdef CONFIG_ASCEND_FEATURES + if (!acpi_dev_prop_read_single(ACPI_COMPANION(dev), + "streamid", DEV_PROP_U32, &sid)) { + if (iommu_fwspec_add_ids(dev, &sid, 1)) + dev_info(dev, "failed to add ids\n"); + master->stall_enabled = true; + master->ssid_bits = 0x10; + } +#endif arm_smmu_init_pri(master);
return &smmu->iommu;
From: Zhou Guanghui zhouguanghui1@huawei.com
hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I63SDZ
-------------------
The ECC feature adds a maximum of 64 reserved physical memory segments, which increases the number of memblock regions needed. So we raise INIT_MEMBLOCK_REGIONS to 256.
Signed-off-by: Zhou Guanghui zhouguanghui1@huawei.com --- mm/memblock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/memblock.c b/mm/memblock.c index e1fd07166a35..8abeb59c927d 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -22,7 +22,7 @@
#include "internal.h"
-#define INIT_MEMBLOCK_REGIONS 128 +#define INIT_MEMBLOCK_REGIONS 256 #define INIT_PHYSMEM_REGIONS 4
#ifndef INIT_MEMBLOCK_RESERVED_REGIONS