From: Ard Biesheuvel ard.biesheuvel@linaro.org
mainline inclusion from mainline-v4.20-rc2 commit 26a4676faa1ad5d99317e0cd701e5d6f3e716b77 category: feature bugzilla: NA CVE: NA ---------------------------
On arm64, there is no need to add 2 bytes of padding to the start of each network buffer just to make the IP header appear 32-bit aligned.
Since this might actually adversely affect DMA performance on some platforms, let's override NET_IP_ALIGN to 0 to get rid of this padding.
Acked-by: Ilias Apalodimas ilias.apalodimas@linaro.org Tested-by: Ilias Apalodimas ilias.apalodimas@linaro.org Acked-by: Mark Rutland mark.rutland@arm.com Acked-by: Will Deacon will.deacon@arm.com Signed-off-by: Ard Biesheuvel ard.biesheuvel@linaro.org Signed-off-by: Catalin Marinas catalin.marinas@arm.com Signed-off-by: Hanjun Guo guohanjun@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com Reviewed-by: Xie XiuQi xiexiuqi@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- arch/arm64/include/asm/processor.h | 8 ++++++++ 1 file changed, 8 insertions(+)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 98529c8b5d313..7695a5117ff20 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -24,6 +24,14 @@ #define KERNEL_DS UL(-1) #define USER_DS (TASK_SIZE_64 - 1)
+/* + * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is + * no point in shifting all network buffers by 2 bytes just to make some IP + * header fields appear aligned in memory, potentially sacrificing some DMA + * performance on some platforms. + */ +#define NET_IP_ALIGN 0 + #ifndef __ASSEMBLY__
/*
From: Hanjun Guo guohanjun@huawei.com
hulk inclusion category: feature bugzilla: NA CVE: NA ---------------------------
Add support for PHYTIUM topology detection. It would be better to use the PPTT ACPI table to report the topology, but we can live with this for now.
Signed-off-by: Hanjun Guo guohanjun@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com Reviewed-by: Xie XiuQi xiexiuqi@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- arch/arm64/include/asm/cputype.h | 1 + arch/arm64/kernel/topology.c | 6 ++++++ 2 files changed, 7 insertions(+)
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 3cd936b1c79c1..557d838f829c8 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -69,6 +69,7 @@ #define ARM_CPU_IMP_QCOM 0x51 #define ARM_CPU_IMP_NVIDIA 0x4E #define ARM_CPU_IMP_HISI 0x48 +#define ARM_CPU_IMP_PHYTIUM 0x70
#define ARM_CPU_PART_AEM_V8 0xD0F #define ARM_CPU_PART_FOUNDATION 0xD00 diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 6106c49f84bc8..d6fcafa22f310 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -286,6 +286,12 @@ void store_cpu_topology(unsigned int cpuid) cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) | MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 | MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16; + + if (read_cpuid_implementor() == ARM_CPU_IMP_PHYTIUM) { + cpuid_topo->thread_id = 0; + cpuid_topo->core_id = cpuid; + cpuid_topo->package_id = 0; + } }
pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
From: Hanjun Guo guohanjun@huawei.com
hulk inclusion category: feature bugzilla: NA CVE: NA ---------------------------
Add a workaround for Phytium as the firmware does not report the DMA size info.
Signed-off-by: Hanjun Guo guohanjun@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com Reviewed-by: Xie XiuQi xiexiuqi@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/usb/host/xhci-pci.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+)
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 1a6a23e57201d..1216f3783930d 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -302,6 +302,18 @@ static int xhci_pci_setup(struct usb_hcd *hcd) return xhci_pci_reinit(xhci, pdev); }
+#ifdef CONFIG_ARM64 +#include <asm/cputype.h> +static void phytium_xhci_pci_workaround(struct pci_dev *dev) +{ + /* Firmware bug, DMA mask is not reported by the firmware */ + if (read_cpuid_implementor() == ARM_CPU_IMP_PHYTIUM) + dma_set_mask(&dev->dev, DMA_BIT_MASK(64)); +} +#else +static inline void phytium_xhci_pci_workaround(struct pci_dev *dev) { } +#endif + /* * We need to register our own PCI probe function (instead of the USB core's * function) in order to create a second roothub under xHCI. @@ -315,6 +327,8 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
driver = (struct hc_driver *)id->driver_data;
+ phytium_xhci_pci_workaround(dev); + /* Prevent runtime suspending between USB-2 and USB-3 initialization */ pm_runtime_get_noresume(&dev->dev);
From: Zhou Guanghui zhouguanghui1@huawei.com
ascend inclusion category: feature Bugzilla: N/A CVE: N/A
-------------------------------------------------------------
When the driver gets huge pages by alloc_huge_page_node, it attempts to apply for migrate hugepages after the reserved memory hugepages are used up. We expect that the migrated hugepages that are applied for can be charged in memcg to limit the memory usage.
The __GFP_ACCOUNT flag is added to the gfp mask before we allocate migrate hugepages. Then, if a memcg is set by memalloc_use_memcg(), the allocated migrate hugepages will be charged to this memcg.
Signed-off-by: Zhou Guanghui zhouguanghui1@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Reviewed-by: Kefeng Wang wangkefeng.wang@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- arch/arm64/Kconfig | 11 +++++++++++ mm/hugetlb.c | 21 ++++++++++++++++++++- 2 files changed, 31 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a08fa3f1648c2..83344333670e2 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1389,6 +1389,17 @@ config ASCEND_IOPF_HIPRI CPU which processes IOPF work is the same as that which processes IOPF event interrupts.
+config ASCEND_CHARGE_MIGRATE_HUGEPAGES + bool "Enable support for migrate hugepages" + default y + help + When reserved hugepages are used up, we attempt to apply for migrate + hugepages. We expect that the migrated hugepages that are applied for + can be charged in memcg to limit the memory usage. + + This option enables the feature to charge migrate hugepages to memory + cgroup. + endif
endmenu diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 0eb0c943397fd..3460ab634941a 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -54,6 +54,7 @@ static struct hstate * __initdata parsed_hstate; static unsigned long __initdata default_hstate_max_huge_pages; static unsigned long __initdata default_hstate_size; static bool __initdata parsed_valid_hugepagesz = true; +static int enable_charge_mighp __read_mostly;
/* * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages, @@ -1710,8 +1711,12 @@ struct page *alloc_huge_page_node(struct hstate *h, int nid) page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL, NULL); spin_unlock(&hugetlb_lock);
- if (!page) + if (!page) { + if (enable_charge_mighp) + gfp_mask |= __GFP_ACCOUNT; + page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL); + }
return page; } @@ -5227,4 +5232,18 @@ int hugetlb_insert_hugepage_pte_by_pa(struct mm_struct *mm, return 0; } EXPORT_SYMBOL_GPL(hugetlb_insert_hugepage_pte_by_pa); + +#ifdef CONFIG_ASCEND_CHARGE_MIGRATE_HUGEPAGES + +static int __init ascend_enable_charge_migrate_hugepages(char *s) +{ + enable_charge_mighp = 1; + + pr_info("Ascend enable charge migrate hugepage\n"); + + return 1; +} +__setup("enable_charge_mighp", ascend_enable_charge_migrate_hugepages); + +#endif #endif
From: Zhou Guanghui zhouguanghui1@huawei.com
ascend inclusion category: feature bugzilla: NA CVE: NA
-----------------------------------------------
Enable charge migrate hugepages for hulk defconfig.
Signed-off-by: Zhou Guanghui zhouguanghui1@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- arch/arm64/configs/hulk_defconfig | 1 + 1 file changed, 1 insertion(+)
diff --git a/arch/arm64/configs/hulk_defconfig b/arch/arm64/configs/hulk_defconfig index 895a237f500a8..4d05a801cae9b 100644 --- a/arch/arm64/configs/hulk_defconfig +++ b/arch/arm64/configs/hulk_defconfig @@ -478,6 +478,7 @@ CONFIG_ASCEND_FEATURES=y CONFIG_ASCEND_DVPP_MMAP=y CONFIG_ASCEND_OOM=y CONFIG_ASCEND_IOPF_HIPRI=y +CONFIG_ASCEND_CHARGE_MIGRATE_HUGEPAGES=y
# # Boot options
From: Yang Yingliang yaingyingliang@huawei.com
ascend inclusion category: feature bugzilla: NA CVE: NA
------------
For the Ascend platform, other non-OS-managed GICRs need to be initialized in the OS.
Signed-off-by: Yang Yingliang yaingyingliang@huawei.com Signed-off-by: Xu Qiang xuqiang36@huawei.com Signed-off-by: Lijun Fang fanglijun3@huawei.com Reviewed-by: Hanjun Guo guohanjun@huawei.com Signed-off-by: Xu Qiang xuqiang36@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/irqchip/Kconfig | 10 ++ drivers/irqchip/irq-gic-v3-its.c | 212 ++++++++++++++++++++++++++++- drivers/irqchip/irq-gic-v3.c | 206 ++++++++++++++++++++++++++++ include/linux/irqchip/arm-gic-v3.h | 7 + 4 files changed, 433 insertions(+), 2 deletions(-)
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 8cb6800dbdfb7..bc34ee8c35c12 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -145,6 +145,16 @@ config HISILICON_IRQ_MBIGEN select ARM_GIC_V3 select ARM_GIC_V3_ITS
+if ASCEND_FEATURES + +config ASCEND_INIT_ALL_GICR + bool "Enable init all GICR for Ascend" + depends on ARM_GIC_V3 + depends on ARM_GIC_V3_ITS + default n + +endif + config IMGPDC_IRQ bool select GENERIC_IRQ_CHIP diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index bfedab2eabf48..dedc432550b0f 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -185,6 +185,14 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock);
static DEFINE_IDA(its_vpeid_ida);
+#ifdef CONFIG_ASCEND_INIT_ALL_GICR +static bool init_all_gicr; +static int nr_gicr; +#else +#define init_all_gicr false +#define nr_gicr 0 +#endif + #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) #define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu)) #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) @@ -1156,6 +1164,7 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { unsigned int cpu; + unsigned int max_cpu; const struct cpumask *cpu_mask = cpu_online_mask; struct its_device *its_dev = irq_data_get_irq_chip_data(d); struct its_collection *target_col; @@ -1175,8 +1184,14 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, }
cpu = cpumask_any_and(mask_val, cpu_mask); + max_cpu = nr_cpu_ids; + + if (init_all_gicr) { + cpu = find_first_bit(cpumask_bits(mask_val), NR_CPUS); + max_cpu = nr_gicr; + }
- if (cpu >= nr_cpu_ids) + if (cpu >= max_cpu) return -EINVAL;
/* don't set the affinity when the target cpu is same as current one */ @@ -2046,8 +2061,12 @@ static int its_alloc_tables(struct its_node *its) static int its_alloc_collections(struct its_node *its) { int i; + int cpu_nr = nr_cpu_ids;
- its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), + if (init_all_gicr) + cpu_nr = nr_gicr; + + its->collections = kcalloc(cpu_nr, sizeof(*its->collections), GFP_KERNEL); if (!its->collections) return -ENOMEM; @@ -2327,6 +2346,195 @@ static void its_cpu_init_collections(void) raw_spin_unlock(&its_lock); }
+#ifdef CONFIG_ASCEND_INIT_ALL_GICR +void its_set_gicr_nr(int nr) +{ + nr_gicr = nr; +} + +int its_gicr_nr(void) +{ + return nr_gicr; +} + +void its_enable_init_all_gicr(void) +{ + init_all_gicr = true; +} + +bool its_init_all_gicr(void) +{ + return init_all_gicr; +} + +static void its_cpu_init_lpis_others(void __iomem *rbase, int cpu) +{ + struct page *pend_page; + phys_addr_t paddr; + u64 val, tmp; + + if (!init_all_gicr) + return; + + val = readl_relaxed(rbase + GICR_CTLR); + if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) && + (val & GICR_CTLR_ENABLE_LPIS)) { + /* + * Check that we get the same property table on all + * RDs. If we don't, this is hopeless. + */ + paddr = gicr_read_propbaser(rbase + GICR_PROPBASER); + paddr &= GENMASK_ULL(51, 12); + if (WARN_ON(gic_rdists->prop_table_pa != paddr)) + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER); + paddr &= GENMASK_ULL(51, 16); + + WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ)); + its_free_pending_table(gic_data_rdist()->pend_page); + gic_data_rdist()->pend_page = NULL; + + goto out; + } + + /* If we didn't allocate the pending table yet, do it now */ + pend_page = its_allocate_pending_table(GFP_NOWAIT); + if (!pend_page) { + pr_err("Failed to allocate PENDBASE for GICR:%p\n", rbase); + return; + } + + paddr = page_to_phys(pend_page); + pr_info("GICR:%p using LPI pending table @%pa\n", + rbase, &paddr); + + WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ)); + + /* Disable LPIs */ + val = readl_relaxed(rbase + GICR_CTLR); + val &= ~GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + /* + * Make sure any change to the table is observable by the GIC. 
+ */ + dsb(sy); + + /* set PROPBASE */ + val = (gic_rdists->prop_table_pa | + GICR_PROPBASER_InnerShareable | + GICR_PROPBASER_RaWaWb | + ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); + + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); + + if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { + if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must + * remove the cacheability attributes as + * well. + */ + val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | + GICR_PROPBASER_CACHEABILITY_MASK); + val |= GICR_PROPBASER_nC; + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + } + pr_info_once("GIC: using cache flushing for LPI property table\n"); + gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; + } + + /* set PENDBASE */ + val = (page_to_phys(pend_page) | + GICR_PENDBASER_InnerShareable | + GICR_PENDBASER_RaWaWb); + + gicr_write_pendbaser(val, rbase + GICR_PENDBASER); + tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); + + if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must remove the + * cacheability attributes as well. + */ + val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | + GICR_PENDBASER_CACHEABILITY_MASK); + val |= GICR_PENDBASER_nC; + gicr_write_pendbaser(val, rbase + GICR_PENDBASER); + } + + /* Enable LPIs */ + val = readl_relaxed(rbase + GICR_CTLR); + val |= GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + /* Make sure the GIC has seen the above */ + dsb(sy); +out: + pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n", + cpu, pend_page ? 
"allocated" : "reserved", &paddr); +} + +static void its_cpu_init_collection_others(void __iomem *rbase, + phys_addr_t phys_base, int cpu) +{ + struct its_node *its; + + if (!init_all_gicr) + return; + + raw_spin_lock(&its_lock); + + list_for_each_entry(its, &its_nodes, entry) { + u64 target; + + /* + * We now have to bind each collection to its target + * redistributor. + */ + if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { + /* + * This ITS wants the physical address of the + * redistributor. + */ + target = phys_base; + } else { + /* + * This ITS wants a linear CPU number. + */ + target = gic_read_typer(rbase + GICR_TYPER); + target = GICR_TYPER_CPU_NUMBER(target) << 16; + } + + /* Perform collection mapping */ + its->collections[cpu].target_address = target; + its->collections[cpu].col_id = cpu; + + its_send_mapc(its, &its->collections[cpu], 1); + its_send_invall(its, &its->collections[cpu]); + } + + raw_spin_unlock(&its_lock); +} + +int its_cpu_init_others(void __iomem *base, phys_addr_t phys_base, int cpu) +{ + if (!list_empty(&its_nodes)) { + if (!(gic_read_typer(base + GICR_TYPER) & GICR_TYPER_PLPIS)) { + pr_err("GICR:%p: LPIs not supported\n", base); + return -ENXIO; + } + + its_cpu_init_lpis_others(base, cpu); + its_cpu_init_collection_others(base, phys_base, cpu); + } + + return 0; +} +#endif + static struct its_device *its_find_device(struct its_node *its, u32 dev_id) { struct its_device *its_dev = NULL, *tmp; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 0b6fe0ec2ac2f..99cc646819f5e 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -891,6 +891,208 @@ static void gic_cpu_init(void) gic_cpu_sys_reg_init(); }
+#ifdef CONFIG_ASCEND_INIT_ALL_GICR +struct workaround_oem_info { + char oem_id[ACPI_OEM_ID_SIZE + 1]; + char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; + u32 oem_revision; +}; + +static struct workaround_oem_info gicr_wkrd_info[] = { + { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0x300, + }, { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0x301, + }, { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0x400, + }, { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0x401, + }, { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0x402, + } +}; + +static void gic_check_hisi_workaround(void) +{ + struct acpi_table_header *tbl; + acpi_status status = AE_OK; + int i; + + status = acpi_get_table(ACPI_SIG_MADT, 0, &tbl); + if (ACPI_FAILURE(status) || !tbl) + return; + + for (i = 0; i < ARRAY_SIZE(gicr_wkrd_info); i++) { + if (!memcmp(gicr_wkrd_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && + !memcmp(gicr_wkrd_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && + gicr_wkrd_info[i].oem_revision == tbl->oem_revision) { + its_enable_init_all_gicr(); + break; + } + } +} + +static void gic_compute_nr_gicr(void) +{ + int i; + int sum = 0; + + for (i = 0; i < gic_data.nr_redist_regions; i++) { + u64 typer; + void __iomem *ptr = gic_data.redist_regions[i].redist_base; + + do { + typer = gic_read_typer(ptr + GICR_TYPER); + sum++; + + if (gic_data.redist_regions[i].single_redist) + break; + + if (gic_data.redist_stride) { + ptr += gic_data.redist_stride; + } else { + ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */ + if (typer & GICR_TYPER_VLPIS) + /* Skip VLPI_base + reserved page */ + ptr += SZ_64K * 2; + } + } while (!(typer & GICR_TYPER_LAST)); + } + + its_set_gicr_nr(sum); +} + +static void gic_enable_redist_others(void __iomem *rbase, bool enable) +{ + u32 count = 1000000; /* 1s! 
*/ + u32 val; + + val = readl_relaxed(rbase + GICR_WAKER); + if (enable) + /* Wake up this CPU redistributor */ + val &= ~GICR_WAKER_ProcessorSleep; + else + val |= GICR_WAKER_ProcessorSleep; + writel_relaxed(val, rbase + GICR_WAKER); + + if (!enable) { /* Check that GICR_WAKER is writeable */ + val = readl_relaxed(rbase + GICR_WAKER); + if (!(val & GICR_WAKER_ProcessorSleep)) + return; /* No PM support in this redistributor */ + } + + while (--count) { + val = readl_relaxed(rbase + GICR_WAKER); + if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) + break; + cpu_relax(); + udelay(1); + }; + if (!count) + pr_err_ratelimited("redistributor failed to %s...\n", + enable ? "wakeup" : "sleep"); +} + +static int gic_rdist_cpu(void __iomem *ptr, unsigned int cpu) +{ + unsigned long mpidr = cpu_logical_map(cpu); + u64 typer; + u32 aff; + + /* + * Convert affinity to a 32bit value that can be matched to + * GICR_TYPER bits [63:32]. + */ + aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 | + MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | + MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | + MPIDR_AFFINITY_LEVEL(mpidr, 0)); + + typer = gic_read_typer(ptr + GICR_TYPER); + if ((typer >> 32) == aff) + return 0; + + return 1; +} + +static int gic_rdist_cpus(void __iomem *ptr) +{ + unsigned int i; + + for (i = 0; i < nr_cpu_ids; i++) { + if (gic_rdist_cpu(ptr, i) == 0) + return 0; + } + + return 1; +} + +static void gic_cpu_init_others(void) +{ + int i, cpu = nr_cpu_ids; + int gicr_nr = its_gicr_nr(); + + if (!its_init_all_gicr()) + return; + + for (i = 0; i < gic_data.nr_redist_regions; i++) { + u64 typer; + void __iomem *redist_base = + gic_data.redist_regions[i].redist_base; + phys_addr_t phys_base = gic_data.redist_regions[i].phys_base; + + do { + typer = gic_read_typer(redist_base + GICR_TYPER); + + if (gic_rdist_cpus(redist_base) == 1) { + if (cpu >= gicr_nr) { + pr_err("CPU over GICR number.\n"); + break; + } + gic_enable_redist_others(redist_base, true); + + if (gic_dist_supports_lpis()) + 
its_cpu_init_others(redist_base, phys_base, cpu); + cpu++; + } + + if (gic_data.redist_regions[i].single_redist) + break; + + if (gic_data.redist_stride) { + redist_base += gic_data.redist_stride; + phys_base += gic_data.redist_stride; + } else { + /* Skip RD_base + SGI_base */ + redist_base += SZ_64K * 2; + phys_base += SZ_64K * 2; + if (typer & GICR_TYPER_VLPIS) { + /* Skip VLPI_base + reserved page */ + redist_base += SZ_64K * 2; + phys_base += SZ_64K * 2; + } + } + } while (!(typer & GICR_TYPER_LAST)); + } +} +#else +static inline void gic_check_hisi_workaround(void) {} + +static inline void gic_compute_nr_gicr(void) {} + +static inline void gic_cpu_init_others(void) {} +#endif + #ifdef CONFIG_SMP
#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT) @@ -1345,6 +1547,8 @@ static int __init gic_init_bases(void __iomem *dist_base, gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); gic_data.rdists.has_vlpis = true; gic_data.rdists.has_direct_lpi = true; + gic_check_hisi_workaround(); + gic_compute_nr_gicr();
if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { err = -ENOMEM; @@ -1386,6 +1590,8 @@ static int __init gic_init_bases(void __iomem *dist_base, its_cpu_init(); }
+ gic_cpu_init_others(); + return 0;
out_free: diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 83e47a2d5cdd9..fad51f2d887a6 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -608,6 +608,13 @@ struct rdists { struct irq_domain; struct fwnode_handle; int its_cpu_init(void); +#ifdef CONFIG_ASCEND_INIT_ALL_GICR +void its_set_gicr_nr(int nr); +int its_gicr_nr(void); +void its_enable_init_all_gicr(void); +bool its_init_all_gicr(void); +int its_cpu_init_others(void __iomem *base, phys_addr_t phys_base, int idx); +#endif int its_init(struct fwnode_handle *handle, struct rdists *rdists, struct irq_domain *domain); int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent);
From: Xu Qiang xuqiang36@huawei.com
ascend inclusion category: feature Bugzilla: N/A CVE: N/A
--------------------------------------------------------
Signed-off-by: Xu Qiang xuqiang36@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- arch/arm64/configs/hulk_defconfig | 1 + 1 file changed, 1 insertion(+)
diff --git a/arch/arm64/configs/hulk_defconfig b/arch/arm64/configs/hulk_defconfig index 4d05a801cae9b..e3c0ff85ea33c 100644 --- a/arch/arm64/configs/hulk_defconfig +++ b/arch/arm64/configs/hulk_defconfig @@ -4695,6 +4695,7 @@ CONFIG_ARM_GIC_V3=y CONFIG_ARM_GIC_V3_ITS=y CONFIG_ARM_GIC_V3_ITS_PCI=y CONFIG_HISILICON_IRQ_MBIGEN=y +CONFIG_ASCEND_INIT_ALL_GICR=y CONFIG_PARTITION_PERCPU=y CONFIG_QCOM_IRQ_COMBINER=y # CONFIG_QCOM_PDC is not set
From: Fang Lijun fanglijun3@huawei.com
ascend inclusion category: bugfix bugzilla: 39052 CVE: NA
---------------------------
This PMU uses the same interrupt for every four L3T and LPDDRC units, so the handlers must be registered with IRQF_SHARED.
Signed-off-by: Fang Lijun fanglijun3@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/perf/hisilicon/hisi_uncore_l3t_pmu.c | 2 +- drivers/perf/hisilicon/hisi_uncore_lpddrc_pmu.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3t_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3t_pmu.c index c736df5816a82..4c4952b9533db 100644 --- a/drivers/perf/hisilicon/hisi_uncore_l3t_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_l3t_pmu.c @@ -217,7 +217,7 @@ static int hisi_l3t_pmu_init_irq(struct hisi_pmu *l3t_pmu, }
ret = devm_request_irq(&pdev->dev, irq, hisi_l3t_pmu_isr, - IRQF_NOBALANCING | IRQF_NO_THREAD, + IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_SHARED, dev_name(&pdev->dev), l3t_pmu); if (ret < 0) { dev_err(&pdev->dev, diff --git a/drivers/perf/hisilicon/hisi_uncore_lpddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_lpddrc_pmu.c index b612203c9457a..462c33778d9fa 100644 --- a/drivers/perf/hisilicon/hisi_uncore_lpddrc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_lpddrc_pmu.c @@ -224,7 +224,7 @@ static int hisi_lpddrc_pmu_init_irq(struct hisi_pmu *lpddrc_pmu, }
ret = devm_request_irq(&pdev->dev, irq, hisi_lpddrc_pmu_isr, - IRQF_NOBALANCING | IRQF_NO_THREAD, + IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_SHARED, dev_name(&pdev->dev), lpddrc_pmu); if (ret < 0) { dev_err(&pdev->dev,
From: Chiqijun chiqijun@huawei.com
driver inclusion category: bugfix bugzilla: 4472
-----------------------------------------------------------------------
When the user mode tool sets ETS, an input tc id exceeding the maximum value of 7 will cause the array to be accessed out of bounds. Make the driver check the validity of the tc id.
Signed-off-by: Chiqijun chiqijun@huawei.com Reviewed-by: Zengweiliang zengweiliang.zengweiliang@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/huawei/hinic/hinic_nictool.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nictool.c b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c index 740ec4adf6baa..04d40e60d8bba 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_nictool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c @@ -769,8 +769,19 @@ int set_ets(struct hinic_nic_dev *nic_dev, void *buf_in, err = 0xff; goto exit; } - if (ets.flag_com.ets_flag.flag_ets_cos) + + if (ets.flag_com.ets_flag.flag_ets_cos) { + for (i = 0; i < HINIC_DCB_COS_MAX; i++) { + if (ets.tc[i] >= HINIC_DCB_TC_MAX) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "ETS tc id %d out of range\n", + ets.tc[i]); + err = 0xFF; + goto exit; + } + } hinic_dcbnl_set_ets_tc_tool(nic_dev->netdev, ets.tc, true); + }
if (ets.flag_com.ets_flag.flag_ets_percent) { for (i = support_tc; i < HINIC_DCB_TC_MAX; i++) {
From: Chiqijun chiqijun@huawei.com
driver inclusion category: bugfix bugzilla: 4472
-----------------------------------------------------------------------
The out_size in nictool is passed in from user mode. If out_size is not checked, too much kernel memory may be copied to user space.
Signed-off-by: Chiqijun chiqijun@huawei.com Reviewed-by: Zengweiliang zengweiliang.zengweiliang@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/net/ethernet/huawei/hinic/hinic_dbg.h | 2 +- .../net/ethernet/huawei/hinic/hinic_nic_dbg.c | 11 ++- .../net/ethernet/huawei/hinic/hinic_nictool.c | 86 ++++++++++--------- 3 files changed, 57 insertions(+), 42 deletions(-)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dbg.h b/drivers/net/ethernet/huawei/hinic/hinic_dbg.h index 1b4789fc1de27..415fe989ef33f 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_dbg.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_dbg.h @@ -83,7 +83,7 @@ int hinic_api_csr_wr64(void *hwdev, u8 dest, u32 addr, u64 val);
int hinic_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, u16 *out_size);
-u16 hinic_dbg_clear_hw_stats(void *hwdev); +u16 hinic_dbg_clear_hw_stats(void *hwdev, u32 *out_size);
void hinic_get_chip_fault_stats(const void *hwdev, u8 *chip_fault_stats, int offset); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nic_dbg.c b/drivers/net/ethernet/huawei/hinic/hinic_nic_dbg.c index 15a006e299270..7407241216f6f 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_nic_dbg.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_nic_dbg.c @@ -241,13 +241,20 @@ int hinic_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, u16 *out_size) return 0; }
-u16 hinic_dbg_clear_hw_stats(void *hwdev) +u16 hinic_dbg_clear_hw_stats(void *hwdev, u32 *out_size) { + if (*out_size != sizeof(struct hinic_hw_stats)) { + pr_err("Unexpect out buf size from user :%d, expect: %lu\n", + *out_size, sizeof(struct hinic_hw_stats)); + return -EFAULT; + } + memset((void *)&((struct hinic_hwdev *)hwdev)->hw_stats, 0, sizeof(struct hinic_hw_stats)); memset((void *)((struct hinic_hwdev *)hwdev)->chip_fault_stats, 0, HINIC_CHIP_FAULT_SIZE); - return sizeof(struct hinic_hw_stats); + + return 0; }
void hinic_get_chip_fault_stats(const void *hwdev, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nictool.c b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c index 04d40e60d8bba..c739914fab399 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_nictool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c @@ -342,8 +342,6 @@ static int get_inter_num(struct hinic_nic_dev *nic_dev, void *buf_in, } *(u16 *)buf_out = intr_num;
- *out_size = sizeof(u16); - return 0; }
@@ -361,7 +359,12 @@ static int clear_func_static(struct hinic_nic_dev *nic_dev, void *buf_in, { int i;
- *out_size = 0; + if (*out_size != 0) { + pr_err("Unexpect out buf size from user: %d, expect: 0\n", + *out_size); + return -EINVAL; + } + clean_nicdev_stats(nic_dev); for (i = 0; i < nic_dev->max_qps; i++) { hinic_rxq_clean_stats(&nic_dev->rxqs[i].rxq_stats); @@ -415,7 +418,12 @@ static int set_dcb_cos_up_map(struct hinic_nic_dev *nic_dev, void *buf_in, if (!buf_in || !out_size || in_size != sizeof(*map)) return -EINVAL;
- *out_size = sizeof(*map); + if (*out_size != sizeof(*map)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(*map)); + return -EINVAL; + }
return hinic_set_cos_up_map(nic_dev, map->cos_up); } @@ -589,7 +597,6 @@ static int set_loopback_mode(struct hinic_nic_dev *nic_dev, void *buf_in, u32 in_size, void *buf_out, u32 *out_size) { struct hinic_nic_loop_mode *mode = buf_in; - int err;
if (!test_bit(HINIC_INTF_UP, &nic_dev->flags)) { nicif_err(nic_dev, drv, nic_dev->netdev, @@ -600,13 +607,15 @@ static int set_loopback_mode(struct hinic_nic_dev *nic_dev, void *buf_in, if (!mode || !out_size || in_size != sizeof(*mode)) return -EFAULT;
- err = hinic_set_loopback_mode_ex(nic_dev->hwdev, mode->loop_mode, - mode->loop_ctrl); - if (err) - return err; + if (*out_size != sizeof(*mode)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(*mode)); + return -EINVAL; + }
- *out_size = sizeof(*mode); - return 0; + return hinic_set_loopback_mode_ex(nic_dev->hwdev, mode->loop_mode, + mode->loop_ctrl); }
static int set_link_mode(struct hinic_nic_dev *nic_dev, void *buf_in, @@ -624,6 +633,13 @@ static int set_link_mode(struct hinic_nic_dev *nic_dev, void *buf_in, if (!link || !out_size || in_size != sizeof(*link)) return -EFAULT;
+ if (*out_size != sizeof(*link)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %d, expect: %lu\n", + *out_size, sizeof(*link)); + return -EINVAL; + } + switch (*link) { case HINIC_LINK_MODE_AUTO: if (hinic_get_link_state(nic_dev->hwdev, &link_status)) @@ -649,7 +665,6 @@ static int set_link_mode(struct hinic_nic_dev *nic_dev, void *buf_in, return -EINVAL; }
- *out_size = sizeof(*link); return 0; }
@@ -676,7 +691,6 @@ static int set_dcb_cfg(struct hinic_nic_dev *nic_dev, void *buf_in, } dcb_ctl.dcb_data.err = (u8)err; *((u32 *)buf_out) = (u32)dcb_ctl.data; - *out_size = sizeof(u32);
return 0; } @@ -699,7 +713,6 @@ int get_pfc_info(struct hinic_nic_dev *nic_dev, void *buf_in, hinic_dcbnl_get_tc_num_tool(nic_dev->netdev, &pfc.pfc_data.num_of_tc); *((u32 *)buf_out) = (u32)pfc.data; - *out_size = sizeof(u32);
return 0; } @@ -732,7 +745,7 @@ int set_pfc_control(struct hinic_nic_dev *nic_dev, void *buf_in,
exit: *((u8 *)buf_out) = (u8)err; - *out_size = sizeof(u8); + return 0; }
@@ -807,7 +820,7 @@ int set_ets(struct hinic_nic_dev *nic_dev, void *buf_in, } exit: *((u8 *)buf_out) = err; - *out_size = sizeof(err); + return 0; }
@@ -874,7 +887,6 @@ int get_ets_info(struct hinic_nic_dev *nic_dev, void *buf_in, hinic_dcbnl_set_ets_strict_tool(nic_dev->netdev, &ets->strict, false); ets->err = 0;
- *out_size = sizeof(*ets); return 0; }
@@ -907,7 +919,6 @@ int set_pfc_priority(struct hinic_nic_dev *nic_dev, void *buf_in, } exit: *((u8 *)buf_out) = (u8)err; - *out_size = sizeof(u8);
return 0; } @@ -940,7 +951,6 @@ static int set_pf_bw_limit(struct hinic_nic_dev *nic_dev, void *buf_in, }
*((u8 *)buf_out) = (u8)err; - *out_size = sizeof(u8);
return 0; } @@ -992,15 +1002,16 @@ static int set_poll_weight(struct hinic_nic_dev *nic_dev, void *buf_in, { struct hinic_nic_poll_weight *weight_info = buf_in;
- if (!buf_in || in_size != sizeof(*weight_info)) { + if (!buf_in || in_size != sizeof(*weight_info) || + *out_size != sizeof(u32)) { nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect in buf size from user: %u, expect: %lu\n", - *out_size, sizeof(*weight_info)); + "Unexpect in buf size: %u or out buf size: %d from user, expect: %lu\n", + in_size, *out_size, sizeof(*weight_info)); return -EFAULT; }
nic_dev->poll_weight = weight_info->poll_weight; - *out_size = sizeof(u32); + return 0; }
@@ -1021,8 +1032,6 @@ static int get_homologue(struct hinic_nic_dev *nic_dev, void *buf_in, else homo->homo_state = HINIC_HOMOLOGUES_OFF;
- *out_size = sizeof(*homo); - return 0; }
@@ -1031,10 +1040,11 @@ static int set_homologue(struct hinic_nic_dev *nic_dev, void *buf_in, { struct hinic_homologues *homo = buf_in;
- if (!buf_in || in_size != sizeof(*homo)) { + if (!buf_in || in_size != sizeof(*homo) || + *out_size != sizeof(*homo)) { nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect in buf size from user: %d, expect: %lu\n", - *out_size, sizeof(*homo)); + "Unexpect in buf size: %d or out buf size: %d from user, expect: %lu\n", + in_size, *out_size, sizeof(*homo)); return -EFAULT; }
@@ -1047,8 +1057,6 @@ static int set_homologue(struct hinic_nic_dev *nic_dev, void *buf_in, return -EFAULT; }
- *out_size = sizeof(*homo); - return 0; }
@@ -1146,7 +1154,7 @@ static int get_func_id(void *hwdev, void *buf_in, u32 in_size,
func_id = hinic_global_func_id_hw(hwdev); *(u16 *)buf_out = func_id; - *out_size = sizeof(u16); + return 0; }
@@ -1179,8 +1187,7 @@ static int get_hw_stats(void *hwdev, void *buf_in, u32 in_size, static int clear_hw_stats(void *hwdev, void *buf_in, u32 in_size, void *buf_out, u32 *out_size) { - *out_size = hinic_dbg_clear_hw_stats(hwdev); - return 0; + return hinic_dbg_clear_hw_stats(hwdev, out_size); }
static int get_drv_version(void *hwdev, void *buf_in, u32 in_size, @@ -1231,7 +1238,6 @@ static int get_single_card_info(void *hwdev, void *buf_in, u32 in_size, }
hinic_get_card_info(hwdev, buf_out); - *out_size = in_size;
return 0; } @@ -1242,8 +1248,7 @@ static int get_device_id(void *hwdev, void *buf_in, u32 in_size, u16 dev_id; int err;
- if (!buf_out || !buf_in || *out_size != sizeof(u16) || - in_size != sizeof(u16)) { + if (!buf_out || *out_size != sizeof(u16)) { pr_err("Unexpect out buf size from user: %d, expect: %lu\n", *out_size, sizeof(u16)); return -EFAULT; @@ -1254,7 +1259,6 @@ static int get_device_id(void *hwdev, void *buf_in, u32 in_size, return err;
*((u32 *)buf_out) = dev_id; - *out_size = in_size;
return 0; } @@ -1294,8 +1298,6 @@ static int get_pf_id(void *hwdev, void *buf_in, u32 in_size, if (err) return err;
- *out_size = sizeof(*pf_info); - return 0; }
@@ -1478,6 +1480,12 @@ static int get_firmware_active_status(void *hwdev, void *buf_in, u32 in_size, { u32 loop_cnt = 0;
+ if (*out_size != 0) { + pr_err("Unexpect out buf size from user: %d, expect: 0\n", + *out_size); + return -EINVAL; + } + while (loop_cnt < GET_FIRMWARE_ACTIVE_STATUS_TIMEOUT) { if (!hinic_get_mgmt_channel_status(hwdev)) return 0;
From: Navid Emamdoost navid.emamdoost@gmail.com
hulk inclusion category: bugfix bugzilla: NA CVE: CVE-2019-16089
---------------------------
nla_nest_start may fail and return NULL. The check is inserted, and errno is selected based on other call sites within the same source code. Update: removed extra new line. v3 Update: added release reply, thanks to Michal Kubecek for pointing out.
Signed-off-by: Navid Emamdoost navid.emamdoost@gmail.com Reviewed-by: Michal Kubecek mkubecek@suse.cz Signed-off-by: Yang Yingliang yangyingliang@huawei.com Reviewed-by: Jason Yan yanaijie@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/block/nbd.c | 6 ++++++ 1 file changed, 6 insertions(+)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index a7e7820674a44..816188aa841f5 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -2220,6 +2220,12 @@ static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info) }
dev_list = nla_nest_start(reply, NBD_ATTR_DEVICE_LIST); + if (!dev_list) { + nlmsg_free(reply); + ret = -EMSGSIZE; + goto out; + } + if (index == -1) { ret = idr_for_each(&nbd_index_idr, &status_cb, reply); if (ret) {
From: Zhou Guanghui zhouguanghui1@huawei.com
ascend inclusion category: feature bugzilla: NA CVE: NA
------------------------------------------------------------------
The current SMMU driver supports SPI WireInterrupt and Message Based SPI. However, some hisilicon chips use the Message Based SPI. Therefore, a special attribute is added to identify this situation. Add an option "hisilicon,message-based-spi" and the addr of GICD_SETSPI to the dts node of the SMMU, like this: hisilicon,message-based-spi iommu-spi-base=<0x10 0x9000040> //Addr of GICD_SETSPI: 0x1009000040
Signed-off-by: Zhou Guanghui zhouguanghui1@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/iommu/arm-smmu-v3.c | 50 ++++++++++++++++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-)
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index c0728bc12cf87..e232c8f5e4479 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -43,6 +43,7 @@ #include <linux/platform_device.h> #include <linux/sched/mm.h>
+#include <linux/irq.h> #include <linux/amba/bus.h>
#include "io-pgtable.h" @@ -609,8 +610,11 @@ struct arm_smmu_device {
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) #define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1) +#define ARM_SMMU_OPT_MESSAGE_BASED_SPI (1 << 2) u32 options;
+ u64 spi_base; + struct arm_smmu_cmdq cmdq; struct arm_smmu_evtq evtq; struct arm_smmu_priq priq; @@ -722,6 +726,7 @@ struct arm_smmu_option_prop { static struct arm_smmu_option_prop arm_smmu_options[] = { { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" }, { ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"}, + { ARM_SMMU_OPT_MESSAGE_BASED_SPI, "hisilicon,message-based-spi"}, { 0, NULL}, };
@@ -1127,7 +1132,8 @@ static int __arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu) static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu) { int ret; - bool msi = (smmu->features & ARM_SMMU_FEAT_MSI) && + bool msi = !(smmu->options & ARM_SMMU_OPT_MESSAGE_BASED_SPI) && + (smmu->features & ARM_SMMU_FEAT_MSI) && (smmu->features & ARM_SMMU_FEAT_COHERENCY);
ret = msi ? __arm_smmu_cmdq_issue_sync_msi(smmu) @@ -3072,6 +3078,37 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) devm_add_action(dev, arm_smmu_free_msis, dev); }
+static void arm_smmu_setup_message_based_spi(struct arm_smmu_device *smmu) +{ + struct irq_desc *desc; + u32 event_hwirq, gerror_hwirq, pri_hwirq; + + desc = irq_to_desc(smmu->gerr_irq); + gerror_hwirq = desc->irq_data.hwirq; + writeq_relaxed(smmu->spi_base, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0); + writel_relaxed(gerror_hwirq, smmu->base + ARM_SMMU_GERROR_IRQ_CFG1); + writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, + smmu->base + ARM_SMMU_GERROR_IRQ_CFG2); + + desc = irq_to_desc(smmu->evtq.q.irq); + event_hwirq = desc->irq_data.hwirq; + writeq_relaxed(smmu->spi_base, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0); + writel_relaxed(event_hwirq, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG1); + writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, + smmu->base + ARM_SMMU_EVTQ_IRQ_CFG2); + + if (smmu->features & ARM_SMMU_FEAT_PRI) { + desc = irq_to_desc(smmu->priq.q.irq); + pri_hwirq = desc->irq_data.hwirq; + + writeq_relaxed(smmu->spi_base, + smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0); + writel_relaxed(pri_hwirq, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG1); + writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, + smmu->base + ARM_SMMU_PRIQ_IRQ_CFG2); + } +} + static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu) { int irq, ret; @@ -3150,6 +3187,9 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) if (smmu->features & ARM_SMMU_FEAT_PRI) irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
+ if (smmu->options & ARM_SMMU_OPT_MESSAGE_BASED_SPI) + arm_smmu_setup_message_based_spi(smmu); + /* Enable interrupt generation on the SMMU */ ret = arm_smmu_write_reg_sync(smmu, irqen_flags, ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK); @@ -3645,6 +3685,14 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
parse_driver_options(smmu);
+ if (smmu->options & ARM_SMMU_OPT_MESSAGE_BASED_SPI) { + if (of_property_read_u64(dev->of_node, "iommu-spi-base", + &smmu->spi_base)) { + dev_err(dev, "missing irq base address\n"); + ret = -EINVAL; + } + } + if (of_dma_is_coherent(dev->of_node)) smmu->features |= ARM_SMMU_FEAT_COHERENCY;
From: Zhou Guanghui zhouguanghui1@huawei.com
ascend inclusion category: feature bugzilla: NA CVE: NA
----------------------------------------------------
Add Message Base SPI optional property for hisilicon
Signed-off-by: Zhou Guanghui zhouguanghui1@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt | 8 ++++++++ 1 file changed, 8 insertions(+)
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt index c9abbf3e4f682..322f958939fb4 100644 --- a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt +++ b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt @@ -61,6 +61,14 @@ the PCIe specification. Set for Cavium ThunderX2 silicon that doesn't support SMMU page1 register space.
+- hisilicon,message-based-spi + : Message based SPI is used for Ascend310 silicon. The addr + of GICD_SETSPIR needs to be configured in the CFG_REG of + SMMU. + +- iommu-spi-base + : The addr of GICD_SETSPI + ** Example
smmu@2b400000 {
From: Zhou Guanghui zhouguanghui1@huawei.com
ascend inclusion category: feature bugzilla: NA CVE: NA
------------------------------------------------------------------
Normally, pages can not be allocated from a CDM node (except for explicit allocation requests from the kernel or a user process with an MPOL_BIND policy which has CDM nodes).
This situation may happen. Within the limit bytes range of the memcg, the CDM nodes have a large amount of free memory, and other nodes have no available free memory. Then, the kernel or user process can not get required memory resources normally.
For example: size of CDM : A mbytes size of non CDM : B mbytes limit mbytes of memcg : C mbytes. A,B < C < (A + B). If apps (user apps and OS service apps) used up non CDM memory, but a large amount of CDM memory is available, since an OS service app can't get pages from a CDM node, the allocation of pages will fail. This is not what we expect. We hope that the memcg can be used to restrict the memory used by some user apps to ensure that certain memory is available for system services.
Therefore, the CDM memory does not need to be charged in the memcg. The total size of CDM is already a limit.
Signed-off-by: Zhou Guanghui zhouguanghui1@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Reviewed-by: Kefeng Wang wangkefeng.wang@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- mm/memcontrol.c | 11 +++++++++++ 1 file changed, 11 insertions(+)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a63bfd73da9a1..1342b9540476d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2696,10 +2696,15 @@ int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
memcg = get_mem_cgroup_from_current(); if (!mem_cgroup_is_root(memcg)) { + if (is_cdm_node(page_to_nid(page))) + goto out; + ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg); if (!ret) __SetPageKmemcg(page); } + +out: css_put(&memcg->css); return ret; } @@ -6016,6 +6021,12 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, if (!memcg) memcg = get_mem_cgroup_from_mm(mm);
+ if (!mem_cgroup_is_root(memcg) && is_cdm_node(page_to_nid(page))) { + css_put(&memcg->css); + memcg = NULL; + goto out; + } + ret = try_charge(memcg, gfp_mask, nr_pages);
css_put(&memcg->css);
From: Wang Wensheng wangwensheng4@huawei.com
hulk inclusion category: feature bugzilla: NA CVE: NA
When the kernel panics we reset the timeout to pretimeout, then after pretimeout seconds the sbsa_gwdt resets the system.
The pretimeout to be set is configured via the watchdog-core, but the meaning is different from that of the watchdog-core. The pretimeout here defines the new timeout for the sbsa_gwdt after panic, while in the watchdog-core it is meaningful where the WDT would raise an interrupt before timeout and it defines the length of that period. That period of sbsa_gwdt cannot be changed separately, so the redefinition here doesn't cause trouble.
The pretimeout here follows the same limit as that of the watchdog-core, namely that the pretimeout shall be smaller than the timeout, since we prefer that the sbsa_gwdt would reset the system sooner on panic.
We add a new kconfig ARM_SBSA_WATCHDOG_PANIC_NOTIFIER to control the feature. Set action to 2 to enable the notifier.
Signed-off-by: Wang Wensheng wangwensheng4@huawei.com Reviewed-by: Xiongfeng Wang wangxiongfeng2@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/watchdog/Kconfig | 8 +++++ drivers/watchdog/sbsa_gwdt.c | 62 ++++++++++++++++++++++++++++++++++-- 2 files changed, 68 insertions(+), 2 deletions(-)
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 709d4de11f40f..6f64ac6e33775 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -273,6 +273,14 @@ config ARM_SBSA_WATCHDOG To compile this driver as module, choose M here: The module will be called sbsa_gwdt.
+config ARM_SBSA_WATCHDOG_PANIC_NOTIFIER + bool "Reset the WDT Timeout on Panic" + depends on ASCEND_FEATURES + depends on ARM_SBSA_WATCHDOG + help + This registers a panic notifier that reset the timeout of watchdog to + pretimeout set by user before. + config ASM9260_WATCHDOG tristate "Alphascale ASM9260 watchdog" depends on MACH_ASM9260 || COMPILE_TEST diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c index e8bd9887c5663..00723fb9e4cad 100644 --- a/drivers/watchdog/sbsa_gwdt.c +++ b/drivers/watchdog/sbsa_gwdt.c @@ -108,12 +108,13 @@ MODULE_PARM_DESC(timeout, * action refers to action taken when watchdog gets WS0 * 0 = skip * 1 = panic + * 2 = panic and reset timeout (need to enable CONFIG_ARM_SBSA_WATCHDOG_PANIC_NOTIFIER) * defaults to skip (0) */ static int action; module_param(action, int, 0); MODULE_PARM_DESC(action, "after watchdog gets WS0 interrupt, do: " - "0 = skip(*) 1 = panic"); + "0 = skip(*) 1 = panic 2 = panic_notifier");
static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, S_IRUGO); @@ -130,6 +131,11 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd, struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
wdd->timeout = timeout; +#ifdef CONFIG_ARM_SBSA_WATCHDOG_PANIC_NOTIFIER + /* Disable pretimeout if it doesn't fit the new timeout */ + if (action == 2 && wdd->pretimeout >= wdd->timeout) + wdd->pretimeout = 0; +#endif
if (action) writel(gwdt->clk * timeout, @@ -208,7 +214,7 @@ static irqreturn_t sbsa_gwdt_interrupt(int irq, void *dev_id) return IRQ_HANDLED; }
-static const struct watchdog_info sbsa_gwdt_info = { +static struct watchdog_info sbsa_gwdt_info = { .identity = WATCHDOG_NAME, .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | @@ -225,6 +231,44 @@ static const struct watchdog_ops sbsa_gwdt_ops = { .get_timeleft = sbsa_gwdt_get_timeleft, };
+#ifdef CONFIG_ARM_SBSA_WATCHDOG_PANIC_NOTIFIER +static struct sbsa_gwdt_notifier_s { + struct sbsa_gwdt *gwdt; + struct notifier_block panic_notifier; +} sbsa_gwdt_notifier; + +static int gwdt_reset_timeout(struct notifier_block *self, + unsigned long v, void *p) +{ + struct sbsa_gwdt *gwdt = + container_of(self, struct sbsa_gwdt_notifier_s, + panic_notifier)->gwdt; + unsigned int timeout = gwdt->wdd.pretimeout; + unsigned int div = 1U; + + /* + * If the panic occurred after when WSO was raised, the gwdt would + * reset the board after WOR. If WS0 was not raised WOR * 2 would + * take before gwdt would reset the board. + */ + if (!(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0)) + div = 2U; + writel(gwdt->clk * timeout / div, gwdt->control_base + SBSA_GWDT_WOR); + + return 0; +} + +static void sbsa_register_panic_notifier(struct sbsa_gwdt *gwdt) +{ + sbsa_gwdt_notifier.gwdt = gwdt; + sbsa_gwdt_notifier.panic_notifier.notifier_call = gwdt_reset_timeout; + sbsa_gwdt_notifier.panic_notifier.priority = INT_MAX; + + atomic_notifier_chain_register(&panic_notifier_list, + &sbsa_gwdt_notifier.panic_notifier); +} +#endif + static int sbsa_gwdt_probe(struct platform_device *pdev) { void __iomem *rf_base, *cf_base; @@ -317,6 +361,20 @@ static int sbsa_gwdt_probe(struct platform_device *pdev) if (ret) return ret;
+#ifdef CONFIG_ARM_SBSA_WATCHDOG_PANIC_NOTIFIER + if (action == 2) { + /* + * Since pretimeout should be smaller than timeout we initialize + * pretimeout to timeout-1. + * Add WDIOF_PRETIMEOUT flags to enable user to configure it. + */ + gwdt->wdd.pretimeout = gwdt->wdd.timeout - 1; + sbsa_gwdt_info.options |= WDIOF_PRETIMEOUT; + + sbsa_register_panic_notifier(gwdt); + } +#endif + dev_info(dev, "Initialized with %ds timeout @ %u Hz, action=%d.%s\n", wdd->timeout, gwdt->clk, action, status & SBSA_GWDT_WCS_EN ? " [enabled]" : "");
From: Wang Wensheng wangwensheng4@huawei.com
hulk inclusion category: bugfix bugzilla: NA CVE: NA
Enable ARM_SBSA_WATCHDOG_PANIC_NOTIFIER in hulk_defconfig.
Signed-off-by: Wang Wensheng wangwensheng4@huawei.com Reviewed-by: Xiongfeng Wang wangxiongfeng2@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- arch/arm64/configs/hulk_defconfig | 1 + 1 file changed, 1 insertion(+)
diff --git a/arch/arm64/configs/hulk_defconfig b/arch/arm64/configs/hulk_defconfig index e3c0ff85ea33c..8328dde4084bf 100644 --- a/arch/arm64/configs/hulk_defconfig +++ b/arch/arm64/configs/hulk_defconfig @@ -3454,6 +3454,7 @@ CONFIG_GPIO_WATCHDOG=m # CONFIG_ZIIRAVE_WATCHDOG is not set CONFIG_ARM_SP805_WATCHDOG=m CONFIG_ARM_SBSA_WATCHDOG=m +CONFIG_ARM_SBSA_WATCHDOG_PANIC_NOTIFIER=y # CONFIG_CADENCE_WATCHDOG is not set # CONFIG_DW_WATCHDOG is not set # CONFIG_MAX63XX_WATCHDOG is not set
From: Ding Tianhong dingtianhong@huawei.com
ascend inclusion category: feature bugzilla: NA CVE: NA
-------------------------------------------------
There are too many Ascend feature enable flags; all of them are used for all Ascend SoCs till now, so use a new enable flag to enable all of them for the Ascend platform by default, which would clean up and simplify the bootargs.
Also clean some code warning.
v2: modify the wrong config name.
v3: modify the wrong include head file.
Signed-off-by: Ding Tianhong dingtianhong@huawei.com Reviewed-by: Zefan Li lizefan@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- arch/arm64/mm/init.c | 20 ++++++++++++++++++++ drivers/iommu/io-pgfault.c | 2 +- include/linux/iommu.h | 2 ++ include/linux/mm.h | 2 ++ mm/hugetlb.c | 2 +- mm/mmap.c | 2 +- 6 files changed, 27 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index e43764db4c3a5..08cc25afd9857 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -40,6 +40,7 @@ #include <linux/mm.h> #include <linux/kexec.h> #include <linux/crash_dump.h> +#include <linux/iommu.h>
#include <asm/boot.h> #include <asm/fixmap.h> @@ -766,6 +767,25 @@ static int __init keepinitrd_setup(char *__unused) __setup("keepinitrd", keepinitrd_setup); #endif
+#ifdef CONFIG_ASCEND_FEATURES +static int __init ascend_enable_setup(char *__unused) +{ + if (IS_ENABLED(CONFIG_ASCEND_DVPP_MMAP)) + enable_map_dvpp = 1; + + if (IS_ENABLED(CONFIG_ASCEND_IOPF_HIPRI)) + enable_iopf_hipri = 1; + + if (IS_ENABLED(CONFIG_ASCEND_CHARGE_MIGRATE_HUGEPAGES)) + enable_charge_mighp = 1; + + return 1; +} + +__setup("ascend_enable_all", ascend_enable_setup); +#endif + + /* * Dump out memory limit information on panic. */ diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c index 271400633dae8..1029a22441bb0 100644 --- a/drivers/iommu/io-pgfault.c +++ b/drivers/iommu/io-pgfault.c @@ -48,7 +48,7 @@ struct iopf_group { struct work_struct work; };
-static int enable_iopf_hipri __read_mostly; +int enable_iopf_hipri __read_mostly;
static int iopf_complete(struct device *dev, struct iommu_fault_event *evt, enum page_response_code status) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 0af49bd0a6b13..72ff6611b72e3 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -1235,4 +1235,6 @@ static inline void __iommu_process_unbind_dev_all(struct iommu_domain *domain,
#endif /* CONFIG_IOMMU_PROCESS */
+extern int enable_iopf_hipri __read_mostly; + #endif /* __LINUX_IOMMU_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 794d21255bfc5..369dc740963a4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2868,5 +2868,7 @@ void __init setup_nr_node_ids(void); static inline void setup_nr_node_ids(void) {} #endif
+extern int enable_charge_mighp __read_mostly; + #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 3460ab634941a..009313824b8c0 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -54,7 +54,7 @@ static struct hstate * __initdata parsed_hstate; static unsigned long __initdata default_hstate_max_huge_pages; static unsigned long __initdata default_hstate_size; static bool __initdata parsed_valid_hugepagesz = true; -static int enable_charge_mighp __read_mostly; +int enable_charge_mighp __read_mostly;
/* * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages, diff --git a/mm/mmap.c b/mm/mmap.c index e1a4d3fa713ec..e8898a241a798 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3747,7 +3747,7 @@ subsys_initcall(init_reserve_notifier); /* * Enable the MAP_32BIT (mmaps and hugetlb). */ -int enable_map_dvpp __read_mostly = 0; +int enable_map_dvpp __read_mostly;
#ifdef CONFIG_ASCEND_DVPP_MMAP
ascend inclusion category: feature bugzilla: 14369 CVE: NA
--------------
Drop the iommu/process support patchset.
Signed-off-by: Yang Yingliang yangyingliang@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- arch/arm64/configs/euleros_defconfig | 1 - arch/arm64/configs/hulk_defconfig | 1 - arch/arm64/configs/openeuler_defconfig | 1 - arch/arm64/configs/syzkaller_defconfig | 1 - 4 files changed, 4 deletions(-)
diff --git a/arch/arm64/configs/euleros_defconfig b/arch/arm64/configs/euleros_defconfig index b93d668702b9c..a62af24c9faab 100644 --- a/arch/arm64/configs/euleros_defconfig +++ b/arch/arm64/configs/euleros_defconfig @@ -4637,7 +4637,6 @@ CONFIG_IOMMU_IO_PGTABLE_LPAE=y # CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set CONFIG_IOMMU_IOVA=y CONFIG_OF_IOMMU=y -# CONFIG_IOMMU_PROCESS is not set CONFIG_IOMMU_DMA=y CONFIG_ARM_SMMU=y CONFIG_ARM_SMMU_V3=y diff --git a/arch/arm64/configs/hulk_defconfig b/arch/arm64/configs/hulk_defconfig index 8328dde4084bf..295d8bcacde50 100644 --- a/arch/arm64/configs/hulk_defconfig +++ b/arch/arm64/configs/hulk_defconfig @@ -4603,7 +4603,6 @@ CONFIG_ARM_SMMU_V3_CONTEXT=y # CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set CONFIG_IOMMU_IOVA=y CONFIG_OF_IOMMU=y -CONFIG_IOMMU_PROCESS=y CONFIG_IOMMU_DMA=y CONFIG_IOMMU_SVA=y CONFIG_IOMMU_PAGE_FAULT=y diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index bc51b88b70305..e4aff6ad6a67f 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -4937,7 +4937,6 @@ CONFIG_ARM_SMMU_V3_CONTEXT=y # CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set CONFIG_IOMMU_IOVA=y CONFIG_OF_IOMMU=y -# CONFIG_IOMMU_PROCESS is not set CONFIG_IOMMU_DMA=y CONFIG_IOMMU_SVA=y CONFIG_IOMMU_PAGE_FAULT=y diff --git a/arch/arm64/configs/syzkaller_defconfig b/arch/arm64/configs/syzkaller_defconfig index ec82457288894..bc52e7eaabb5b 100644 --- a/arch/arm64/configs/syzkaller_defconfig +++ b/arch/arm64/configs/syzkaller_defconfig @@ -4565,7 +4565,6 @@ CONFIG_ARM_SMMU_V3_CONTEXT=y # CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set CONFIG_IOMMU_IOVA=y CONFIG_OF_IOMMU=y -CONFIG_IOMMU_PROCESS=y CONFIG_IOMMU_DMA=y CONFIG_IOMMU_SVA=y CONFIG_IOMMU_PAGE_FAULT=y
ascend inclusion category: feature bugzilla: 14369 CVE: NA
--------------
Drop the iommu/process support patchset.
Signed-off-by: Yang Yingliang yangyingliang@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/iommu/iommu-process.c | 445 +--------------------------------- drivers/iommu/iommu.c | 64 ----- include/linux/iommu.h | 53 ---- 3 files changed, 4 insertions(+), 558 deletions(-)
diff --git a/drivers/iommu/iommu-process.c b/drivers/iommu/iommu-process.c index 483821a9e93a3..626503b833549 100644 --- a/drivers/iommu/iommu-process.c +++ b/drivers/iommu/iommu-process.c @@ -21,9 +21,7 @@
#include <linux/idr.h> #include <linux/iommu.h> -#include <linux/mmu_notifier.h> #include <linux/slab.h> -#include <linux/sched/mm.h> #include <linux/spinlock.h>
/* Link between a domain and a process */ @@ -52,144 +50,21 @@ static DEFINE_IDR(iommu_process_idr); */ static DEFINE_SPINLOCK(iommu_process_lock);
-static struct mmu_notifier_ops iommu_process_mmu_notfier; - -/* - * Allocate a iommu_process structure for the given task. - * - * Ideally we shouldn't need the domain parameter, since iommu_process is - * system-wide, but we use it to retrieve the driver's allocation ops and a - * PASID range. - */ -static struct iommu_process * -iommu_process_alloc(struct iommu_domain *domain, struct task_struct *task) -{ - int err; - int pasid; - struct iommu_process *process; - - if (WARN_ON(!domain->ops->process_alloc || !domain->ops->process_free)) - return ERR_PTR(-ENODEV); - - process = domain->ops->process_alloc(task); - if (IS_ERR(process)) - return process; - if (!process) - return ERR_PTR(-ENOMEM); - - process->pid = get_task_pid(task, PIDTYPE_PID); - process->mm = get_task_mm(task); - process->notifier.ops = &iommu_process_mmu_notfier; - process->release = domain->ops->process_free; - INIT_LIST_HEAD(&process->domains); - - if (!process->pid) { - err = -EINVAL; - goto err_free_process; - } - - if (!process->mm) { - err = -EINVAL; - goto err_put_pid; - } - - idr_preload(GFP_KERNEL); - spin_lock(&iommu_process_lock); - pasid = idr_alloc_cyclic(&iommu_process_idr, process, domain->min_pasid, - domain->max_pasid + 1, GFP_ATOMIC); - process->pasid = pasid; - spin_unlock(&iommu_process_lock); - idr_preload_end(); - - if (pasid < 0) { - err = pasid; - goto err_put_mm; - } - - err = mmu_notifier_register(&process->notifier, process->mm); - if (err) - goto err_free_pasid; - - /* - * Now that the MMU notifier is valid, we can allow users to grab this - * process by setting a valid refcount. Before that it was accessible in - * the IDR but invalid. - * - * Users of the process structure obtain it with inc_not_zero, which - * provides a control dependency to ensure that they don't modify the - * structure if they didn't acquire the ref. So I think we need a write - * barrier here to pair with that control dependency (XXX probably - * nonsense.) 
- */ - smp_wmb(); - kref_init(&process->kref); - - /* A mm_count reference is kept by the notifier */ - mmput(process->mm); - - return process; - -err_free_pasid: - /* - * Even if the process is accessible from the IDR at this point, kref is - * 0 so no user could get a reference to it. Free it manually. - */ - spin_lock(&iommu_process_lock); - idr_remove(&iommu_process_idr, process->pasid); - spin_unlock(&iommu_process_lock); - -err_put_mm: - mmput(process->mm); - -err_put_pid: - put_pid(process->pid); - -err_free_process: - domain->ops->process_free(process); - - return ERR_PTR(err); -} - -static void iommu_process_free(struct rcu_head *rcu) -{ - struct iommu_process *process; - void (*release)(struct iommu_process *); - - process = container_of(rcu, struct iommu_process, rcu); - release = process->release; - - release(process); -} - static void iommu_process_release(struct kref *kref) { struct iommu_process *process; + void (*release)(struct iommu_process *);
assert_spin_locked(&iommu_process_lock);
process = container_of(kref, struct iommu_process, kref); + release = process->release; + WARN_ON(!list_empty(&process->domains));
idr_remove(&iommu_process_idr, process->pasid); put_pid(process->pid); - - /* - * If we're being released from process exit, the notifier callback - * ->release has already been called. Otherwise we don't need to go - * through there, the process isn't attached to anything anymore. Hence - * no_release. - */ - mmu_notifier_unregister_no_release(&process->notifier, process->mm); - - /* - * We can't free the structure here, because ->release might be - * attempting to grab it concurrently. And in the other case, if the - * structure is being released from within ->release, then - * __mmu_notifier_release expects to still have a valid mn when - * returning. So free the structure when it's safe, after the RCU grace - * period elapsed. - */ - mmu_notifier_call_srcu(&process->rcu, iommu_process_free); + release(process); }
/* @@ -248,318 +123,6 @@ struct iommu_process *iommu_process_find(int pasid) } EXPORT_SYMBOL_GPL(iommu_process_find);
-static int iommu_process_attach(struct iommu_domain *domain, struct device *dev, - struct iommu_process *process) -{ - int err; - int pasid = process->pasid; - struct iommu_context *context; - - if (WARN_ON(!domain->ops->process_attach || !domain->ops->process_detach || - !domain->ops->process_exit || !domain->ops->process_invalidate)) - return -ENODEV; - - if (pasid > domain->max_pasid || pasid < domain->min_pasid) - return -ENOSPC; - - context = kzalloc(sizeof(*context), GFP_KERNEL); - if (!context) - return -ENOMEM; - - context->process = process; - context->domain = domain; - refcount_set(&context->ref, 1); - - spin_lock(&iommu_process_lock); - err = domain->ops->process_attach(domain, dev, process, true); - if (err) { - kfree(context); - spin_unlock(&iommu_process_lock); - return err; - } - - list_add(&context->process_head, &process->domains); - list_add(&context->domain_head, &domain->processes); - spin_unlock(&iommu_process_lock); - - return 0; -} - -static void iommu_context_free(struct iommu_context *context) -{ - assert_spin_locked(&iommu_process_lock); - - if (WARN_ON(!context->process || !context->domain)) - return; - - list_del(&context->process_head); - list_del(&context->domain_head); - iommu_process_put_locked(context->process); - - kfree(context); -} - -/* Attach an existing context to the device */ -static int iommu_process_attach_locked(struct iommu_context *context, - struct device *dev) -{ - assert_spin_locked(&iommu_process_lock); - - refcount_inc(&context->ref); - return context->domain->ops->process_attach(context->domain, dev, - context->process, false); -} - -/* Detach device from context and release it if necessary */ -static void iommu_process_detach_locked(struct iommu_context *context, - struct device *dev) -{ - bool last = false; - struct iommu_domain *domain = context->domain; - - assert_spin_locked(&iommu_process_lock); - - if (refcount_dec_and_test(&context->ref)) - last = true; - - domain->ops->process_detach(domain, dev, 
context->process, last); - - if (last) - iommu_context_free(context); -} - -/* - * Called when the process exits. Might race with unbind or any other function - * dropping the last reference to the process. As the mmu notifier doesn't hold - * any reference to the process when calling ->release, try to take a reference. - */ -static void iommu_notifier_release(struct mmu_notifier *mn, struct mm_struct *mm) -{ - struct iommu_context *context, *next; - struct iommu_process *process = container_of(mn, struct iommu_process, notifier); - - /* - * If the process is exiting then domains are still attached to the - * process. A few things need to be done before it is safe to release - * - * 1) Tell the IOMMU driver to stop using this PASID (and forward the - * message to attached device drivers. It can then clear the PASID - * table and invalidate relevant TLBs. - * - * 2) Drop all references to this process, by freeing the contexts. - */ - spin_lock(&iommu_process_lock); - if (!iommu_process_get_locked(process)) { - /* Someone's already taking care of it. */ - spin_unlock(&iommu_process_lock); - return; - } - - list_for_each_entry_safe(context, next, &process->domains, process_head) { - context->domain->ops->process_exit(context->domain, process); - iommu_context_free(context); - } - spin_unlock(&iommu_process_lock); - - /* - * We're now reasonably certain that no more fault is being handled for - * this process, since we just flushed them all out of the fault queue. - * Release the last reference to free the process. 
- */ - iommu_process_put(process); -} - -static void iommu_notifier_invalidate_range(struct mmu_notifier *mn, struct mm_struct *mm, - unsigned long start, unsigned long end) -{ - struct iommu_context *context; - struct iommu_process *process = container_of(mn, struct iommu_process, notifier); - - spin_lock(&iommu_process_lock); - list_for_each_entry(context, &process->domains, process_head) { - context->domain->ops->process_invalidate(context->domain, - process, start, end - start); - } - spin_unlock(&iommu_process_lock); -} - -static int iommu_notifier_clear_flush_young(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long start, - unsigned long end) -{ - iommu_notifier_invalidate_range(mn, mm, start, end); - return 0; -} - -static void iommu_notifier_change_pte(struct mmu_notifier *mn, struct mm_struct *mm, - unsigned long address, pte_t pte) -{ - iommu_notifier_invalidate_range(mn, mm, address, address + PAGE_SIZE); -} - -static struct mmu_notifier_ops iommu_process_mmu_notfier = { - .release = iommu_notifier_release, - .clear_flush_young = iommu_notifier_clear_flush_young, - .change_pte = iommu_notifier_change_pte, - .invalidate_range = iommu_notifier_invalidate_range, -}; - -/** - * iommu_process_bind_device - Bind a process address space to a device - * @dev: the device - * @task: the process to bind - * @pasid: valid address where the PASID will be stored - * @flags: bond properties (IOMMU_PROCESS_BIND_*) - * - * Create a bond between device and task, allowing the device to access the - * process address space using the returned PASID. - * - * On success, 0 is returned and @pasid contains a valid ID. Otherwise, an error - * is returned. 
- */ -int iommu_process_bind_device(struct device *dev, struct task_struct *task, - int *pasid, int flags) -{ - int err, i; - int nesting; - struct pid *pid; - struct iommu_domain *domain; - struct iommu_process *process; - struct iommu_context *cur_context; - struct iommu_context *context = NULL; - - domain = iommu_get_domain_for_dev(dev); - if (WARN_ON(!domain)) - return -EINVAL; - - if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_NESTING, &nesting) && - nesting) - return -EINVAL; - - pid = get_task_pid(task, PIDTYPE_PID); - if (!pid) - return -EINVAL; - - /* If an iommu_process already exists, use it */ - spin_lock(&iommu_process_lock); - idr_for_each_entry(&iommu_process_idr, process, i) { - if (process->pid != pid) - continue; - - if (!iommu_process_get_locked(process)) { - /* Process is defunct, create a new one */ - process = NULL; - break; - } - - /* Great, is it also bound to this domain? */ - list_for_each_entry(cur_context, &process->domains, - process_head) { - if (cur_context->domain != domain) - continue; - - context = cur_context; - *pasid = process->pasid; - - /* Splendid, tell the driver and increase the ref */ - err = iommu_process_attach_locked(context, dev); - if (err) - iommu_process_put_locked(process); - - break; - } - break; - } - spin_unlock(&iommu_process_lock); - put_pid(pid); - - if (context) - return err; - - if (!process) { - process = iommu_process_alloc(domain, task); - if (IS_ERR(process)) - return PTR_ERR(process); - } - - err = iommu_process_attach(domain, dev, process); - if (err) { - iommu_process_put(process); - return err; - } - - *pasid = process->pasid; - - return 0; -} -EXPORT_SYMBOL_GPL(iommu_process_bind_device); - -/** - * iommu_process_unbind_device - Remove a bond created with - * iommu_process_bind_device. 
- * - * @dev: the device - * @pasid: the pasid returned by bind - */ -int iommu_process_unbind_device(struct device *dev, int pasid) -{ - struct iommu_domain *domain; - struct iommu_process *process; - struct iommu_context *cur_context; - struct iommu_context *context = NULL; - - domain = iommu_get_domain_for_dev(dev); - if (WARN_ON(!domain)) - return -EINVAL; - - spin_lock(&iommu_process_lock); - process = idr_find(&iommu_process_idr, pasid); - if (!process) { - spin_unlock(&iommu_process_lock); - return -ESRCH; - } - - list_for_each_entry(cur_context, &process->domains, process_head) { - if (cur_context->domain == domain) { - context = cur_context; - break; - } - } - - if (context) - iommu_process_detach_locked(context, dev); - spin_unlock(&iommu_process_lock); - - return context ? 0 : -ESRCH; -} -EXPORT_SYMBOL_GPL(iommu_process_unbind_device); - -/* - * __iommu_process_unbind_dev_all - Detach all processes attached to this - * device. - * - * When detaching @device from @domain, IOMMU drivers have to use this function. - */ -void __iommu_process_unbind_dev_all(struct iommu_domain *domain, struct device *dev) -{ - struct iommu_context *context, *next; - - /* Ask device driver to stop using all PASIDs */ - spin_lock(&iommu_process_lock); - if (domain->process_exit) { - list_for_each_entry(context, &domain->processes, domain_head) - domain->process_exit(domain, dev, - context->process->pasid, - domain->process_exit_token); - } - - list_for_each_entry_safe(context, next, &domain->processes, domain_head) - iommu_process_detach_locked(context, dev); - spin_unlock(&iommu_process_lock); -} -EXPORT_SYMBOL_GPL(__iommu_process_unbind_dev_all); - /** * iommu_set_process_exit_handler() - set a callback for stopping the use of * PASID in a device. 
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index bee6b8fcfe0ed..8cd0604049f01 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1852,70 +1852,6 @@ void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) } EXPORT_SYMBOL_GPL(iommu_detach_group);
-/* - * iommu_process_bind_group - Share process address space with all devices in - * the group. - * @group: the iommu group - * @task: the process to bind - * @pasid: valid address where the PASID will be stored - * @flags: bond properties (IOMMU_PROCESS_BIND_*) - * - * Create a bond between group and process, allowing devices in the group to - * access the process address space using @pasid. - * - * On success, 0 is returned and @pasid contains a valid ID. Otherwise, an error - * is returned. - */ -int iommu_process_bind_group(struct iommu_group *group, - struct task_struct *task, int *pasid, int flags) -{ - struct group_device *device; - int ret = -ENODEV; - - if (!pasid) - return -EINVAL; - - if (!group->domain) - return -EINVAL; - - mutex_lock(&group->mutex); - list_for_each_entry(device, &group->devices, list) { - ret = iommu_process_bind_device(device->dev, task, pasid, - flags); - if (ret) - break; - } - - if (ret) { - list_for_each_entry_continue_reverse(device, &group->devices, list) - iommu_process_unbind_device(device->dev, *pasid); - } - mutex_unlock(&group->mutex); - - return ret; -} -EXPORT_SYMBOL_GPL(iommu_process_bind_group); - -/** - * iommu_process_unbind_group - Remove a bond created with - * iommu_process_bind_group - * - * @group: the group - * @pasid: the pasid returned by bind - */ -int iommu_process_unbind_group(struct iommu_group *group, int pasid) -{ - struct group_device *device; - - mutex_lock(&group->mutex); - list_for_each_entry(device, &group->devices, list) - iommu_process_unbind_device(device->dev, pasid); - mutex_unlock(&group->mutex); - - return 0; -} -EXPORT_SYMBOL_GPL(iommu_process_unbind_group); - phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { if (unlikely(domain->ops->iova_to_phys == NULL)) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 72ff6611b72e3..7a4ec56b51766 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -284,11 +284,6 @@ struct iommu_sva_param { * 
@domain_free: free iommu domain * @attach_dev: attach device to an iommu domain * @detach_dev: detach device from an iommu domain - * @process_alloc: allocate iommu process - * @process_free: free iommu process - * @process_attach: attach iommu process to a domain - * @process_detach: detach iommu process from a domain. Remove PASID entry and - * flush associated TLB entries. * @process_invalidate: Invalidate a range of mappings for a process. * @process_exit: A process is exiting. Stop using the PASID, remove PASID entry * and flush associated TLB entries. @@ -335,12 +330,6 @@ struct iommu_ops {
int (*attach_dev)(struct iommu_domain *domain, struct device *dev); void (*detach_dev)(struct iommu_domain *domain, struct device *dev); - struct iommu_process *(*process_alloc)(struct task_struct *task); - void (*process_free)(struct iommu_process *process); - int (*process_attach)(struct iommu_domain *domain, struct device *dev, - struct iommu_process *process, bool first); - void (*process_detach)(struct iommu_domain *domain, struct device *dev, - struct iommu_process *process, bool last); void (*process_invalidate)(struct iommu_domain *domain, struct iommu_process *process, unsigned long iova, size_t size); @@ -714,10 +703,6 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, void iommu_fwspec_free(struct device *dev); int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids); const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode); -extern int iommu_process_bind_group(struct iommu_group *group, - struct task_struct *task, int *pasid, - int flags); -extern int iommu_process_unbind_group(struct iommu_group *group, int pasid);
extern int iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, int *pasid, unsigned long flags, void *drvdata); @@ -1074,19 +1059,6 @@ static inline int iommu_sva_unbind_device(struct device *dev, int pasid) return -ENODEV; }
-static inline int iommu_process_bind_group(struct iommu_group *group, - struct task_struct *task, int *pasid, - int flags) -{ - return -ENODEV; -} - -static inline int iommu_process_unbind_group(struct iommu_group *group, - int pasid) -{ - return -ENODEV; -} - #endif /* CONFIG_IOMMU_API */
#ifdef CONFIG_IOMMU_SVA @@ -1193,13 +1165,6 @@ extern void iommu_set_process_exit_handler(struct device *dev, extern struct iommu_process *iommu_process_find(int pasid); extern void iommu_process_put(struct iommu_process *process);
-extern int iommu_process_bind_device(struct device *dev, - struct task_struct *task, int *pasid, - int flags); -extern int iommu_process_unbind_device(struct device *dev, int pasid); -extern void __iommu_process_unbind_dev_all(struct iommu_domain *domain, - struct device *dev); - #else /* CONFIG_IOMMU_PROCESS */ static inline void iommu_set_process_exit_handler(struct device *dev, iommu_process_exit_handler_t cb, @@ -1215,24 +1180,6 @@ static inline struct iommu_process *iommu_process_find(int pasid) static inline void iommu_process_put(struct iommu_process *process) { } - -static inline int iommu_process_bind_device(struct device *dev, - struct task_struct *task, - int *pasid, int flags) -{ - return -ENODEV; -} - -static inline int iommu_process_unbind_device(struct device *dev, int pasid) -{ - return -ENODEV; -} - -static inline void __iommu_process_unbind_dev_all(struct iommu_domain *domain, - struct device *dev) -{ -} - #endif /* CONFIG_IOMMU_PROCESS */
extern int enable_iopf_hipri __read_mostly;
ascend inclusion category: feature bugzilla: 14369 CVE: NA
--------------
Drop the iommu/process support patchset: remove the mmu_notifier state (notifier, mm, rcu head) from struct iommu_process, and remove the process_invalidate and process_exit callbacks from struct iommu_ops.
Signed-off-by: Yang Yingliang yangyingliang@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- include/linux/iommu.h | 13 ------------- 1 file changed, 13 deletions(-)
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 7a4ec56b51766..2af66e8141558 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -122,15 +122,9 @@ struct iommu_process { int pasid; struct list_head domains; struct kref kref; -#ifdef CONFIG_MMU_NOTIFIER - struct mmu_notifier notifier; -#endif - struct mm_struct *mm;
/* Release callback for this process */ void (*release)(struct iommu_process *process); - /* For postponed release */ - struct rcu_head rcu; };
struct io_mm { @@ -284,9 +278,6 @@ struct iommu_sva_param { * @domain_free: free iommu domain * @attach_dev: attach device to an iommu domain * @detach_dev: detach device from an iommu domain - * @process_invalidate: Invalidate a range of mappings for a process. - * @process_exit: A process is exiting. Stop using the PASID, remove PASID entry - * and flush associated TLB entries. * @sva_device_init: initialize Shared Virtual Adressing for a device * @sva_device_shutdown: shutdown Shared Virtual Adressing for a device * @mm_alloc: allocate io_mm @@ -330,10 +321,6 @@ struct iommu_ops {
int (*attach_dev)(struct iommu_domain *domain, struct device *dev); void (*detach_dev)(struct iommu_domain *domain, struct device *dev); - void (*process_invalidate)(struct iommu_domain *domain, - struct iommu_process *process, - unsigned long iova, size_t size); - void (*process_exit)(struct iommu_domain *domain, struct iommu_process *process); int (*sva_device_init)(struct device *dev, struct iommu_sva_param *param); void (*sva_device_shutdown)(struct device *dev,
ascend inclusion category: feature bugzilla: 14369 CVE: NA
--------------
Drop the iommu/process support patchset: remove the PASID IDR, the iommu_process refcounting helpers, and the iommu_process_find()/iommu_process_put() API from iommu-process.c and its declarations from linux/iommu.h.
Signed-off-by: Yang Yingliang yangyingliang@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/iommu/iommu-process.c | 87 ----------------------------------- include/linux/iommu.h | 12 ----- 2 files changed, 99 deletions(-)
diff --git a/drivers/iommu/iommu-process.c b/drivers/iommu/iommu-process.c index 626503b833549..66ee91c140940 100644 --- a/drivers/iommu/iommu-process.c +++ b/drivers/iommu/iommu-process.c @@ -36,93 +36,6 @@ struct iommu_context { refcount_t ref; };
-/* - * Because we're using an IDR, PASIDs are limited to 31 bits (the sign bit is - * used for returning errors). In practice implementations will use at most 20 - * bits, which is the PCI limit. - */ -static DEFINE_IDR(iommu_process_idr); - -/* - * For the moment this is an all-purpose lock. It serializes - * access/modifications to contexts (process-domain links), access/modifications - * to the PASID IDR, and changes to process refcount as well. - */ -static DEFINE_SPINLOCK(iommu_process_lock); - -static void iommu_process_release(struct kref *kref) -{ - struct iommu_process *process; - void (*release)(struct iommu_process *); - - assert_spin_locked(&iommu_process_lock); - - process = container_of(kref, struct iommu_process, kref); - release = process->release; - - WARN_ON(!list_empty(&process->domains)); - - idr_remove(&iommu_process_idr, process->pasid); - put_pid(process->pid); - release(process); -} - -/* - * Returns non-zero if a reference to the process was successfully taken. - * Returns zero if the process is being freed and should not be used. - */ -static int iommu_process_get_locked(struct iommu_process *process) -{ - assert_spin_locked(&iommu_process_lock); - - if (process) - return kref_get_unless_zero(&process->kref); - - return 0; -} - -static void iommu_process_put_locked(struct iommu_process *process) -{ - assert_spin_locked(&iommu_process_lock); - - kref_put(&process->kref, iommu_process_release); -} - -/** - * iommu_process_put - Put reference to process, freeing it if necessary. - */ -void iommu_process_put(struct iommu_process *process) -{ - spin_lock(&iommu_process_lock); - iommu_process_put_locked(process); - spin_unlock(&iommu_process_lock); -} -EXPORT_SYMBOL_GPL(iommu_process_put); - -/** - * iommu_process_find - Find process associated to the given PASID - * - * Returns the IOMMU process corresponding to this PASID, or NULL if not found. - * A reference to the iommu_process is kept, and must be released with - * iommu_process_put. 
- */ -struct iommu_process *iommu_process_find(int pasid) -{ - struct iommu_process *process; - - spin_lock(&iommu_process_lock); - process = idr_find(&iommu_process_idr, pasid); - if (process) { - if (!iommu_process_get_locked(process)) - /* kref is 0, process is defunct */ - process = NULL; - } - spin_unlock(&iommu_process_lock); - - return process; -} -EXPORT_SYMBOL_GPL(iommu_process_find); - /** * iommu_set_process_exit_handler() - set a callback for stopping the use of * PASID in a device. diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 2af66e8141558..6542d22807140 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -1149,24 +1149,12 @@ static inline void iommu_debugfs_setup(void) {} extern void iommu_set_process_exit_handler(struct device *dev, iommu_process_exit_handler_t cb, void *token); -extern struct iommu_process *iommu_process_find(int pasid); -extern void iommu_process_put(struct iommu_process *process); - #else /* CONFIG_IOMMU_PROCESS */ static inline void iommu_set_process_exit_handler(struct device *dev, iommu_process_exit_handler_t cb, void *token) { } - -static inline struct iommu_process *iommu_process_find(int pasid) -{ - return NULL; -} - -static inline void iommu_process_put(struct iommu_process *process) -{ -} #endif /* CONFIG_IOMMU_PROCESS */
extern int enable_iopf_hipri __read_mostly;
ascend inclusion category: feature bugzilla: 14369 CVE: NA
--------------
Drop the iommu/process support patchset: remove the IOMMU_PROCESS Kconfig option and Makefile entry, delete the now-empty iommu-process.c, and remove struct iommu_process, the process exit handler hook, and related fields from struct iommu_domain in linux/iommu.h.
Signed-off-by: Yang Yingliang yangyingliang@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/iommu/Kconfig | 11 ------ drivers/iommu/Makefile | 1 - drivers/iommu/iommu-process.c | 68 ----------------------------------- include/linux/iommu.h | 32 ----------------- 4 files changed, 112 deletions(-) delete mode 100644 drivers/iommu/iommu-process.c
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 80aeff486e0c5..4f4132d0fca40 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -112,17 +112,6 @@ config IOMMU_DMA select IOMMU_IOVA select NEED_SG_DMA_LENGTH
-config IOMMU_PROCESS - bool "Process management API for the IOMMU" - depends on MMU_NOTIFIER - select IOMMU_API - help - Enable process management for the IOMMU API. In systems that support - it, device drivers can bind processes to devices and share their page - tables using this API. - - If unsure, say N here. - config IOMMU_SVA bool select IOMMU_API diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 1533a9ff47770..a6f94cc89f924 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -3,7 +3,6 @@ obj-$(CONFIG_IOMMU_API) += iommu.o obj-$(CONFIG_IOMMU_API) += iommu-traces.o obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o obj-$(CONFIG_IOMMU_DEBUGFS) += iommu-debugfs.o -obj-$(CONFIG_IOMMU_PROCESS) += iommu-process.o obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o obj-$(CONFIG_IOMMU_PAGE_FAULT) += io-pgfault.o diff --git a/drivers/iommu/iommu-process.c b/drivers/iommu/iommu-process.c deleted file mode 100644 index 66ee91c140940..0000000000000 --- a/drivers/iommu/iommu-process.c +++ /dev/null @@ -1,68 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Track processes bound to devices - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. - * - * Copyright (C) 2017 ARM Ltd. 
- * - * Author: Jean-Philippe Brucker jean-philippe.brucker@arm.com - */ - -#include <linux/idr.h> -#include <linux/iommu.h> -#include <linux/slab.h> -#include <linux/spinlock.h> - -/* Link between a domain and a process */ -struct iommu_context { - struct iommu_process *process; - struct iommu_domain *domain; - - struct list_head process_head; - struct list_head domain_head; - - /* Number of devices that use this context */ - refcount_t ref; -}; - -/** - * iommu_set_process_exit_handler() - set a callback for stopping the use of - * PASID in a device. - * @dev: the device - * @handler: exit handler - * @token: user data, will be passed back to the exit handler - * - * Users of the bind/unbind API should call this function to set a - * device-specific callback telling them when a process is exiting. - * - * After the callback returns, the device must not issue any more transaction - * with the PASIDs given as argument to the handler. It can be a single PASID - * value or the special IOMMU_PROCESS_EXIT_ALL. - * - * The handler itself should return 0 on success, and an appropriate error code - * otherwise. - */ -void iommu_set_process_exit_handler(struct device *dev, - iommu_process_exit_handler_t handler, - void *token) -{ - struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - - if (WARN_ON(!domain)) - return; - - domain->process_exit = handler; - domain->process_exit_token = token; -} -EXPORT_SYMBOL_GPL(iommu_set_process_exit_handler); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 6542d22807140..c5cc3c9e393c4 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -63,11 +63,6 @@ typedef int (*iommu_fault_handler_t)(struct iommu_domain *, typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault_event *, void *); typedef int (*iommu_mm_exit_handler_t)(struct device *dev, int pasid, void *);
-/* All process are being detached from this device */ -#define IOMMU_PROCESS_EXIT_ALL (-1) -typedef int (*iommu_process_exit_handler_t)(struct iommu_domain *, struct device *dev, - int pasid, void *); - #define IOMMU_SVA_FEAT_IOPF (1 << 0)
struct iommu_domain_geometry { @@ -106,27 +101,12 @@ struct iommu_domain { unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ iommu_fault_handler_t handler; void *handler_token; - iommu_process_exit_handler_t process_exit; - void *process_exit_token; struct iommu_domain_geometry geometry; void *iova_cookie;
- unsigned int min_pasid, max_pasid; - struct list_head processes; - struct list_head mm_list; };
-struct iommu_process { - struct pid *pid; - int pasid; - struct list_head domains; - struct kref kref; - - /* Release callback for this process */ - void (*release)(struct iommu_process *process); -}; - struct io_mm { int pasid; /* IOMMU_SVA_FEAT_* */ @@ -1145,18 +1125,6 @@ void iommu_debugfs_setup(void); static inline void iommu_debugfs_setup(void) {} #endif
-#ifdef CONFIG_IOMMU_PROCESS -extern void iommu_set_process_exit_handler(struct device *dev, - iommu_process_exit_handler_t cb, - void *token); -#else /* CONFIG_IOMMU_PROCESS */ -static inline void iommu_set_process_exit_handler(struct device *dev, - iommu_process_exit_handler_t cb, - void *token) -{ -} -#endif /* CONFIG_IOMMU_PROCESS */ - extern int enable_iopf_hipri __read_mostly;
#endif /* __LINUX_IOMMU_H */