euleros inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7RO5Q
CVE: NA
----------------------------------------------------
Between Linux 5.10 and Linux 6.4, several kernel structures and interfaces
used by etmem changed. Adapt the etmem code to these changes so that the
etmem patch applies and builds on Linux 6.4 (an illustrative sketch of the
affected KVM MMU fields follows the diffstat below).
Signed-off-by: liubo <liubo254@huawei.com>
---
 fs/proc/etmem_scan.c | 49 ++++++++++++++++++++++++++------------------
 include/linux/swap.h |  1 +
 mm/internal.h        |  1 -
 mm/vmscan.c          | 49 +++++++++++++++++++-------------------
 4 files changed, 51 insertions(+), 49 deletions(-)
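Note for reviewers (not part of the commit message): a minimal, illustrative
sketch of the struct kvm_mmu field renames this patch adapts to. The
etmem_mmu_* macro names and the version check are hypothetical and only show
how the 5.10 and 6.4 layouts map onto each other; the patch itself simply
switches to the new field names directly. Similarly, kvm->mmu_lock is an
rwlock_t rather than a spinlock in this kernel, which is why the scan paths
now take read_lock()/read_unlock() and the TLB flush in page_scan_release()
takes the write side.

/*
 * Illustration only: hypothetical compatibility macros contrasting the
 * KVM MMU fields used on Linux 5.10 with their Linux 6.4 counterparts.
 */
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0)
/* 6.4: role and root state live in cpu_role, root and root_role */
# define etmem_mmu_ad_disabled(mmu)	((mmu)->cpu_role.base.ad_disabled)
# define etmem_mmu_root_hpa(mmu)	((mmu)->root.hpa)
# define etmem_mmu_root_level(mmu)	((mmu)->root_role.level)
#else
/* 5.10: the same information was in mmu_role, root_hpa and shadow_root_level */
# define etmem_mmu_ad_disabled(mmu)	((mmu)->mmu_role.base.ad_disabled)
# define etmem_mmu_root_hpa(mmu)	((mmu)->root_hpa)
# define etmem_mmu_root_level(mmu)	((mmu)->shadow_root_level)
#endif
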
diff --git a/fs/proc/etmem_scan.c b/fs/proc/etmem_scan.c
index 7c1af58bbf21..c3bac1bda1af 100644
--- a/fs/proc/etmem_scan.c
+++ b/fs/proc/etmem_scan.c
@@ -43,7 +43,7 @@
 #endif
 
 # define kvm_arch_mmu_pointer(vcpu) (vcpu->arch.mmu)
-# define kvm_mmu_ad_disabled(mmu) (mmu->mmu_role.base.ad_disabled)
+# define kvm_mmu_ad_disabled(mmu) (mmu->cpu_role.base.ad_disabled)
 #endif /*CONFIG_X86_64*/
 
 #ifdef CONFIG_ARM64
@@ -314,7 +314,7 @@ static int vm_walk_host_range(unsigned long long start,
 	unsigned long tmp_gpa_to_hva = pic->gpa_to_hva;
 
 	pic->gpa_to_hva = 0;
-	spin_unlock_irq(&pic->kvm->mmu_lock);
+	read_unlock(&pic->kvm->mmu_lock);
 	down_read(&walk->mm->mmap_lock);
 	local_irq_disable();
 	ret = walk_page_range(walk->mm, start + tmp_gpa_to_hva, end + tmp_gpa_to_hva,
@@ -537,28 +537,28 @@ static int ept_page_range(struct page_idle_ctrl *pic,
 	WARN_ON(addr >= end);
 
-	spin_lock_irq(&pic->kvm->mmu_lock);
+	read_lock(&pic->kvm->mmu_lock);
 
 	vcpu = kvm_get_vcpu(pic->kvm, 0);
 	if (!vcpu) {
 		pic->gpa_to_hva = 0;
 		set_restart_gpa(TASK_SIZE, "NO-VCPU");
-		spin_unlock_irq(&pic->kvm->mmu_lock);
+		read_unlock(&pic->kvm->mmu_lock);
 		return -EINVAL;
 	}
 
 	mmu = kvm_arch_mmu_pointer(vcpu);
-	if (!VALID_PAGE(mmu->root_hpa)) {
+	if (!VALID_PAGE(mmu->root.hpa)) {
 		pic->gpa_to_hva = 0;
 		set_restart_gpa(TASK_SIZE, "NO-HPA");
-		spin_unlock_irq(&pic->kvm->mmu_lock);
+		read_unlock(&pic->kvm->mmu_lock);
 		return -EINVAL;
 	}
 
-	ept_root = __va(mmu->root_hpa);
+	ept_root = __va(mmu->root.hpa);
 
 	/* Walk start at p4d when vm has 4 level table pages */
-	if (mmu->shadow_root_level != 4)
+	if (mmu->root_role.level != 4)
 		err = ept_pgd_range(pic, (pgd_t *)ept_root, addr, end, walk);
 	else
 		err = ept_p4d_range(pic, (p4d_t *)ept_root, addr, end, walk);
@@ -567,7 +567,7 @@ static int ept_page_range(struct page_idle_ctrl *pic,
 	 * and RET_RESCAN_FLAG will be set in ret value
 	 */
 	if (!(err & RET_RESCAN_FLAG))
-		spin_unlock_irq(&pic->kvm->mmu_lock);
+		read_unlock(&pic->kvm->mmu_lock);
 	else
 		err &= ~RET_RESCAN_FLAG;
 
@@ -584,23 +584,31 @@ static int ept_idle_supports_cpu(struct kvm *kvm)
 	if (!vcpu)
 		return -EINVAL;
 
-	spin_lock(&kvm->mmu_lock);
+	read_lock(&kvm->mmu_lock);
 	mmu = kvm_arch_mmu_pointer(vcpu);
 	if (kvm_mmu_ad_disabled(mmu)) {
 		pr_notice("CPU does not support EPT A/D bits tracking\n");
 		ret = -EINVAL;
-	} else if (mmu->shadow_root_level < 4 ||
-			(mmu->shadow_root_level == 5 && !pgtable_l5_enabled())) {
-		pr_notice("Unsupported EPT level %d\n", mmu->shadow_root_level);
+	} else if (mmu->root_role.level < 4 ||
+			(mmu->root_role.level == 5 && !pgtable_l5_enabled())) {
+		pr_notice("Unsupported EPT level %d\n", mmu->root_role.level);
 		ret = -EINVAL;
 	} else
 		ret = 0;
-	spin_unlock(&kvm->mmu_lock);
+	read_unlock(&kvm->mmu_lock);
 
 	return ret;
 }
 
 #else
+static inline phys_addr_t stage2_range_addr_end(phys_addr_t addr, phys_addr_t end)
+{
+	phys_addr_t size = kvm_granule_size(KVM_PGTABLE_MIN_BLOCK_LEVEL);
+	phys_addr_t boundary = ALIGN_DOWN(addr + size, size);
+
+	return (boundary - 1 < end - 1) ? boundary : end;
+}
+
 static int arm_pte_range(struct page_idle_ctrl *pic,
 			 pmd_t *pmd, unsigned long addr, unsigned long end)
 {
@@ -724,13 +732,13 @@ static int arm_page_range(struct page_idle_ctrl *pic,
 	WARN_ON(addr >= end);
 
-	spin_lock(&pic->kvm->mmu_lock);
+	read_lock(&pic->kvm->mmu_lock);
 	pgd = (pgd_t *)kvm->arch.mmu.pgt->pgd + pgd_index(addr);
-	spin_unlock(&pic->kvm->mmu_lock);
+	read_unlock(&pic->kvm->mmu_lock);
 
 	local_irq_disable();
 	do {
-		next = stage2_pgd_addr_end(kvm, addr, end);
+		next = stage2_range_addr_end(addr, end);
 		if (!pgd_present(*pgd)) {
 			set_restart_gpa(next, "PGD_HOLE");
 			continue;
 		}
@@ -773,11 +781,12 @@ static unsigned long vm_idle_find_gpa(struct page_idle_ctrl *pic,
 	struct kvm_memory_slot *memslot;
 	unsigned long hva_end;
 	gfn_t gfn;
+	int bkt;
 
 	*addr_range = ~0UL;
 	mutex_lock(&kvm->slots_lock);
 	slots = kvm_memslots(pic->kvm);
-	kvm_for_each_memslot(memslot, slots) {
+	kvm_for_each_memslot(memslot, bkt, slots) {
 		hva_end = memslot->userspace_addr +
 			  (memslot->npages << PAGE_SHIFT);
@@ -1045,9 +1054,9 @@ static int page_scan_release(struct inode *inode, struct file *file)
 		goto out;
 	}
 #ifdef CONFIG_X86_64
-	spin_lock(&kvm->mmu_lock);
+	write_lock(&kvm->mmu_lock);
 	kvm_flush_remote_tlbs(kvm);
-	spin_unlock(&kvm->mmu_lock);
+	write_unlock(&kvm->mmu_lock);
 #endif
 
 out:
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 45cbd02c909f..34df8c8d7c68 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -752,6 +752,7 @@ static inline bool mem_cgroup_swap_full(struct folio *folio)
 	return vm_swap_full();
 }
 #endif
+extern unsigned long reclaim_pages(struct list_head *folio_list);
 
 #endif /* __KERNEL__*/
 #endif /* _LINUX_SWAP_H */
diff --git a/mm/internal.h b/mm/internal.h
index 68410c6d97ac..ba568b48072c 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -783,7 +783,6 @@ extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
 	unsigned long, unsigned long);
 
 extern void set_pageblock_order(void);
-unsigned long reclaim_pages(struct list_head *folio_list);
 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 					    struct list_head *folio_list);
 
 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index dda21e824349..bdb83c784a26 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -8148,8 +8148,7 @@ int add_page_for_swap(struct page *page, struct list_head *pagelist)
 		return -EACCES;
 
 	head = compound_head(page);
-	err = isolate_lru_page(head);
-	if (err) {
+	if (!folio_isolate_lru(page_folio(head))) {
 		put_page(page);
 		return err;
 	}
@@ -8178,7 +8177,7 @@ struct page *get_page_from_vaddr(struct mm_struct *mm, unsigned long vaddr)
 		return NULL;
 	}
 
-	follflags = FOLL_GET | FOLL_DUMP;
+	follflags = FOLL_GET | FOLL_DUMP | FOLL_FORCE;
 	page = follow_page(vma, vaddr, follflags);
 	if (IS_ERR(page) || !page) {
 		up_read(&mm->mmap_lock);
@@ -8197,37 +8196,30 @@ static int add_page_for_reclaim_swapcache(struct page *page,
 	/* If the page is mapped by more than one process, do not swap it */
 	if (page_mapcount(page) > 1)
-		return -EACCES;
+		return -EINVAL;
 
 	if (PageHuge(page))
-		return -EACCES;
+		return -EINVAL;
 
 	head = compound_head(page);
+	if (!PageLRU(head) || PageUnevictable(head))
+		return -EBUSY;
 
-	switch (__isolate_lru_page_prepare(head, 0)) {
-	case 0:
-		if (unlikely(!get_page_unless_zero(page)))
-			return -1;
+	if (unlikely(!get_page_unless_zero(page)))
+		return -EBUSY;
 
-		if (!TestClearPageLRU(page)) {
-			/*
-			 * This page may in other isolation path,
-			 * but we still hold lru_lock.
-			 */
-			put_page(page);
-			return -1;
-		}
-
-		list_move(&head->lru, pagelist);
-		update_lru_size(lruvec, lru, page_zonenum(head), -thp_nr_pages(head));
-		break;
-
-	case -EBUSY:
-		return -1;
-	default:
-		break;
+	if (!TestClearPageLRU(page)) {
+		/*
+		 * This page may in other isolation path,
+		 * but we still hold lru_lock.
+		 */
+		put_page(page);
+		return -EBUSY;
 	}
 
+	list_move(&head->lru, pagelist);
+	update_lru_size(lruvec, lru, page_zonenum(head), -thp_nr_pages(head));
+
 	return 0;
 }
 
@@ -8238,6 +8230,7 @@ static unsigned long reclaim_swapcache_pages_from_list(int nid,
 		.may_unmap = 1,
 		.may_swap = 1,
 		.may_writepage = 1,
+		.no_demotion = 1,
 		.gfp_mask = GFP_KERNEL,
 	};
 	unsigned long nr_reclaimed = 0;
@@ -8269,7 +8262,7 @@ static unsigned long reclaim_swapcache_pages_from_list(int nid,
 	/* swap the pages */
 	if (pgdat)
-		nr_reclaimed = shrink_page_list(&swap_pages,
+		nr_reclaimed = shrink_folio_list(&swap_pages,
 						pgdat, &sc, &stat, true);
@@ -8408,7 +8401,7 @@ int do_swapcache_reclaim(unsigned long *swapcache_watermark,
 			continue;
 		}
 
-		if (!PageLRU(pos) || page_lru(pos) != LRU_INACTIVE_ANON) {
+		if (!PageLRU(pos) || folio_lru_list(page_folio(pos)) != LRU_INACTIVE_ANON) {
 			spin_unlock_irq(&lruvec->lru_lock);
 			continue;
 		}