hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7U78A
CVE: NA
--------------------------------
Fix the KABI change caused by mm_struct->tlb_flush_batched and
task_struct->tlb_ubc when batched unmap TLB flush is enabled on arm64.

On arm64, store tlb_flush_batched in a KABI reserved slot of mm_struct and
move the per-task unmap batch state into task_struct_resvd (as struct
tlbflush_unmap_batch_arm64), so the size and layout of mm_struct and
task_struct stay unchanged.
Signed-off-by: Jinjiang Tu <tujinjiang@huawei.com>
---
 include/linux/mm_types.h      |  4 +++-
 include/linux/mm_types_task.h | 25 ++++++++++++++++++++++++-
 include/linux/sched.h         |  3 +++
 mm/rmap.c                     | 14 +++++++++++---
 4 files changed, 41 insertions(+), 5 deletions(-)
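Note on the mm_struct change: the fix repurposes a previously reserved KABI
padding slot for tlb_flush_batched on arm64, so mm_struct keeps its size and
member offsets for existing KABI consumers. The snippet below is a minimal,
self-contained sketch of that size-preserving idea only; KABI_RESERVE() and
KABI_USE() are simplified stand-ins here, not the real in-tree macro
definitions, and the example struct names are made up.

#include <stdbool.h>

/* Simplified stand-ins for the in-tree KABI helpers (illustration only). */
#define KABI_RESERVE(n)		unsigned long kabi_reserved##n;
#define KABI_USE(n, _new)	union { _new; unsigned long kabi_reserved##n; };

struct example_old {		/* layout originally shipped to KABI consumers */
	int data;
	KABI_RESERVE(1)
};

struct example_new {		/* reserved slot repurposed, as mm_struct does below */
	int data;
	KABI_USE(1, bool tlb_flush_batched)
};

/* The anonymous union keeps the size and the offsets of all existing members. */
_Static_assert(sizeof(struct example_old) == sizeof(struct example_new),
	       "KABI layout preserved");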
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index e3eaf458787a..d1be389c0468 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -598,7 +598,7 @@ struct mm_struct {
 		 * moving a PROT_NONE or PROT_NUMA mapped page.
 		 */
 		atomic_t tlb_flush_pending;
-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+#if defined(CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH) && !defined(CONFIG_ARM64)
 		/* See flush_tlb_batched_pending() */
 		bool tlb_flush_batched;
 #endif
@@ -620,6 +620,8 @@ struct mm_struct {
 #if defined(CONFIG_X86_64)
 		KABI_USE(1, struct mm_struct_extend *mm_extend)
+#elif defined(CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH) && defined(CONFIG_ARM64)
+		KABI_USE(1, bool tlb_flush_batched)
 #else
 		KABI_RESERVE(1)
 #endif
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index 78bbded3b13f..e293d7037bfa 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -74,7 +74,7 @@ struct page_frag {

 /* Track pages that require TLB flushes */
 struct tlbflush_unmap_batch {
-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+#if defined(CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH) && !defined(CONFIG_ARM64)
 	/*
 	 * The arch code makes the following promise: generic code can modify a
 	 * PTE, then call arch_tlbbatch_add_pending() (which internally provides
@@ -96,4 +96,27 @@ struct tlbflush_unmap_batch {
 #endif
 };

+#if defined(CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH) && defined(CONFIG_ARM64)
+struct tlbflush_unmap_batch_arm64 {
+	/*
+	 * The arch code makes the following promise: generic code can modify a
+	 * PTE, then call arch_tlbbatch_add_pending() (which internally provides
+	 * all needed barriers), then call arch_tlbbatch_flush(), and the entries
+	 * will be flushed on all CPUs by the time that arch_tlbbatch_flush()
+	 * returns.
+	 */
+	struct arch_tlbflush_unmap_batch arch;
+
+	/* True if a flush is needed. */
+	bool flush_required;
+
+	/*
+	 * If true then the PTE was dirty when unmapped. The entry must be
+	 * flushed before IO is initiated or a stale TLB entry potentially
+	 * allows an update without redirtying the page.
+	 */
+	bool writable;
+};
+#endif
+
 #endif /* _LINUX_MM_TYPES_TASK_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index fdd3324cc858..5e413d309e77 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -685,6 +685,9 @@ struct task_struct_resvd {
 #ifdef CONFIG_MMU
 	struct timer_list oom_reaper_timer;
 #endif
+#if defined(CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH) && defined(CONFIG_ARM64)
+	struct tlbflush_unmap_batch_arm64 tlb_ubc;
+#endif
 };

 struct task_struct {
diff --git a/mm/rmap.c b/mm/rmap.c
index 816db3edc116..150803a7ffb5 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -596,6 +596,14 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 }

 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+
+#ifdef CONFIG_ARM64
+#define DEFINE_TLB_UBC(name)	struct tlbflush_unmap_batch_arm64 *name = \
+					&current->_resvd->tlb_ubc
+#else
+#define DEFINE_TLB_UBC(name)	struct tlbflush_unmap_batch *name = &current->tlb_ubc
+#endif
+
 /*
  * Flush TLB entries for recently unmapped pages from remote CPUs. It is
  * important if a PTE was dirty when it was unmapped that it's flushed
@@ -604,7 +612,7 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
  */
 void try_to_unmap_flush(void)
 {
-	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+	DEFINE_TLB_UBC(tlb_ubc);

 	if (!tlb_ubc->flush_required)
 		return;
@@ -617,7 +625,7 @@ void try_to_unmap_flush(void)
 /* Flush iff there are potentially writable TLB entries that can race with IO */
 void try_to_unmap_flush_dirty(void)
 {
-	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+	DEFINE_TLB_UBC(tlb_ubc);

 	if (tlb_ubc->writable)
 		try_to_unmap_flush();
@@ -626,7 +634,7 @@ void try_to_unmap_flush_dirty(void)
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable,
 				      unsigned long uaddr)
 {
-	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+	DEFINE_TLB_UBC(tlb_ubc);

 	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
 	tlb_ubc->flush_required = true;
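Note on the task_struct change: instead of growing task_struct itself, the
per-task batch state moves into task_struct_resvd, which is reached through
the _resvd pointer that already exists in task_struct, so sizeof(task_struct)
and all member offsets are untouched; the DEFINE_TLB_UBC() helper hides this
indirection from the rmap.c call sites. The standalone sketch below
illustrates only that indirection pattern; struct task, struct task_resvd and
struct ubc_state are made-up stand-ins, not the kernel definitions.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Made-up stand-ins for task_struct, task_struct_resvd and the batch state. */
struct ubc_state {
	bool flush_required;
	bool writable;
};

struct task_resvd {			/* side structure: free to grow, no KABI impact */
	struct ubc_state tlb_ubc;
};

struct task {				/* KABI-frozen structure: size must not change */
	int pid;
	struct task_resvd *resvd;	/* pointer slot that already existed */
};

int main(void)
{
	struct task t = { .pid = 1, .resvd = calloc(1, sizeof(*t.resvd)) };

	if (!t.resvd)
		return 1;

	/* Analogous to set_tlb_ubc_flush_pending(): record a deferred flush. */
	t.resvd->tlb_ubc.flush_required = true;

	/* Growing task_resvd never changes the KABI-visible size of struct task. */
	printf("sizeof(struct task) = %zu\n", sizeof(struct task));

	free(t.resvd);
	return 0;
}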