From: Arun KS <arunks@codeaurora.org>
mainline inclusion
from mainline-v5.1-rc1
commit a9cd410a3d296846a8125aa43d97a573a354c472
category: feature
bugzilla: 182882
CVE: NA
-----------------------------------------------
When pages are freed with a higher order, the time the buddy allocator spends coalescing them is reduced. With a section size of 256MB, the hot-add latency of a single section improves from 50-60 ms to less than 1 ms, i.e. roughly a 60x improvement. External providers of the online callback are modified to match the new interface.
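For illustration, a small standalone sketch (not kernel code) of how a hot-added pfn range gets split into the largest possible buddy-sized chunks before being handed to the online callback, mirroring the online_pages_blocks() loop added below; PAGE_SHIFT, MAX_ORDER and the simplified get_order() are assumed stand-ins for the real kernel definitions:

/*
 * Illustrative sketch: split a pfn range into power-of-two chunks,
 * capped at MAX_ORDER - 1, the way online_pages_blocks() does.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define MAX_ORDER	11	/* typical x86-64 value in this kernel series */

/* Smallest order whose size covers 'size' bytes (simplified get_order()). */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long start = 0x40000;		/* first pfn of the range */
	unsigned long end = start + 2050;	/* deliberately not a power of two */

	while (start < end) {
		unsigned long bytes = (end - start) << PAGE_SHIFT;
		int order = get_order(bytes);

		if (order > MAX_ORDER - 1)
			order = MAX_ORDER - 1;
		printf("online pfn %#lx as order-%d block (%lu pages)\n",
		       start, order, 1UL << order);
		start += 1UL << order;
	}
	return 0;
}

Instead of 2050 order-0 frees, the callback is invoked three times here (two order-10 blocks and one order-1 block), which is where the reduced coalescing time comes from.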
[arunks@codeaurora.org: v11]
  Link: http://lkml.kernel.org/r/1547792588-18032-1-git-send-email-arunks@codeaurora...
[akpm@linux-foundation.org: remove unused local, per Arun]
[akpm@linux-foundation.org: avoid return of void-returning __free_pages_core(), per Oscar]
[akpm@linux-foundation.org: fix it for mm-convert-totalram_pages-and-totalhigh_pages-variables-to-atomic.patch]
[arunks@codeaurora.org: v8]
  Link: http://lkml.kernel.org/r/1547032395-24582-1-git-send-email-arunks@codeaurora...
[arunks@codeaurora.org: v9]
  Link: http://lkml.kernel.org/r/1547098543-26452-1-git-send-email-arunks@codeaurora...
Link: http://lkml.kernel.org/r/1538727006-5727-1-git-send-email-arunks@codeaurora....
Signed-off-by: Arun KS <arunks@codeaurora.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Cc: K. Y. Srinivasan <kys@microsoft.com>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mathieu Malaterre <malat@debian.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Aaron Lu <aaron.lu@intel.com>
Cc: Srivatsa Vaddagiri <vatsa@codeaurora.org>
Cc: Vinayak Menon <vinmenon@codeaurora.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Conflicts:
	mm/page_alloc.c
	mm/memory_hotplug.c
[Peng Liu: adjust context]
Signed-off-by: Peng Liu <liupeng256@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/hv/hv_balloon.c | 7 ++++---
 drivers/xen/balloon.c | 15 ++++++++++-----
 include/linux/memory_hotplug.h | 2 +-
 mm/internal.h | 1 +
 mm/memory_hotplug.c | 33 +++++++++++++++++++++------------
 mm/page_alloc.c | 8 ++++----
 6 files changed, 41 insertions(+), 25 deletions(-)
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index e5fc719a34e70..23d9156915271 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -771,7 +771,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size, } }
-static void hv_online_page(struct page *pg) +static void hv_online_page(struct page *pg, unsigned int order) { struct hv_hotadd_state *has; unsigned long flags; @@ -780,10 +780,11 @@ static void hv_online_page(struct page *pg) spin_lock_irqsave(&dm_device.ha_lock, flags); list_for_each_entry(has, &dm_device.ha_region_list, list) { /* The page belongs to a different HAS. */ - if ((pfn < has->start_pfn) || (pfn >= has->end_pfn)) + if ((pfn < has->start_pfn) || + (pfn + (1UL << order) > has->end_pfn)) continue;
- hv_page_online_one(has, pg); + hv_bring_pgs_online(has, pfn, 1UL << order); break; } spin_unlock_irqrestore(&dm_device.ha_lock, flags); diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index b23edf64c2b21..630fd49a7ce84 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -369,14 +369,19 @@ static enum bp_state reserve_additional_memory(void) return BP_ECANCELED; }
-static void xen_online_page(struct page *page) +static void xen_online_page(struct page *page, unsigned int order) { - __online_page_set_limits(page); + unsigned long i, size = (1 << order); + unsigned long start_pfn = page_to_pfn(page); + struct page *p;
+ pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn); mutex_lock(&balloon_mutex); - - __balloon_append(page); - + for (i = 0; i < size; i++) { + p = pfn_to_page(start_pfn + i); + __online_page_set_limits(p); + __balloon_append(p); + } mutex_unlock(&balloon_mutex); }
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 5653178768227..8782f0e993704 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -89,7 +89,7 @@ extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, unsigned long *valid_start, unsigned long *valid_end); extern void __offline_isolated_pages(unsigned long, unsigned long);
-typedef void (*online_page_callback_t)(struct page *page); +typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback); extern int restore_online_page_callback(online_page_callback_t callback); diff --git a/mm/internal.h b/mm/internal.h index 3bb7ca86e84eb..415a6a326bb4d 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -163,6 +163,7 @@ static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn, extern int __isolate_free_page(struct page *page, unsigned int order); extern void __free_pages_bootmem(struct page *page, unsigned long pfn, unsigned int order); +extern void __free_pages_core(struct page *page, unsigned int order); extern void prep_compound_page(struct page *page, unsigned int order); extern void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 967adfc17dd26..64a3ec9365911 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -48,7 +48,7 @@ * and restore_online_page_callback() for generic callback restore. */
-static void generic_online_page(struct page *page); +static void generic_online_page(struct page *page, unsigned int order);
static online_page_callback_t online_page_callback = generic_online_page; static DEFINE_MUTEX(online_page_callback_lock); @@ -580,26 +580,35 @@ void __online_page_free(struct page *page) } EXPORT_SYMBOL_GPL(__online_page_free);
-static void generic_online_page(struct page *page) +static void generic_online_page(struct page *page, unsigned int order) { - __online_page_set_limits(page); - __online_page_increment_counters(page); - __online_page_free(page); + __free_pages_core(page, order); + adjust_managed_page_count(page, 1UL << order); +} + +static int online_pages_blocks(unsigned long start, unsigned long nr_pages) +{ + unsigned long end = start + nr_pages; + int order, onlined_pages = 0; + + while (start < end) { + order = min(MAX_ORDER - 1, + get_order(PFN_PHYS(end) - PFN_PHYS(start))); + (*online_page_callback)(pfn_to_page(start), order); + + onlined_pages += (1UL << order); + start += (1UL << order); + } + return onlined_pages; }
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages, void *arg) { - unsigned long i; unsigned long onlined_pages = *(unsigned long *)arg; - struct page *page;
if (PageReserved(pfn_to_page(start_pfn))) - for (i = 0; i < nr_pages; i++) { - page = pfn_to_page(start_pfn + i); - (*online_page_callback)(page); - onlined_pages++; - } + onlined_pages += online_pages_blocks(start_pfn, nr_pages);
online_mem_sections(start_pfn, start_pfn + nr_pages);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 12e0899f3b9c8..e0002e5c3d06d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1288,7 +1288,7 @@ static void __free_pages_ok(struct page *page, unsigned int order) local_irq_restore(flags); }
-static void __init __free_pages_boot_core(struct page *page, unsigned int order) +void __free_pages_core(struct page *page, unsigned int order) { unsigned int nr_pages = 1 << order; struct page *p = page; @@ -1368,7 +1368,7 @@ void __init __free_pages_bootmem(struct page *page, unsigned long pfn, return;
page_zone(page)->managed_pages += 1UL << order; - return __free_pages_boot_core(page, order); + return __free_pages_core(page, order); }
/* @@ -1459,14 +1459,14 @@ static void __init deferred_free_range(unsigned long pfn, if (nr_pages == pageblock_nr_pages && (pfn & (pageblock_nr_pages - 1)) == 0) { set_pageblock_migratetype(page, MIGRATE_MOVABLE); - __free_pages_boot_core(page, pageblock_order); + __free_pages_core(page, pageblock_order); return; }
for (i = 0; i < nr_pages; i++, page++, pfn++) { if ((pfn & (pageblock_nr_pages - 1)) == 0) set_pageblock_migratetype(page, MIGRATE_MOVABLE); - __free_pages_boot_core(page, 0); + __free_pages_core(page, 0); } }
From: Alexander Duyck <alexander.h.duyck@linux.intel.com>
mainline inclusion
from mainline-v5.7-rc1
commit 624f58d8f4639676d2fa1238425ab0148d501c4a
category: feature
bugzilla: 182882
CVE: NA
-----------------------------------------------
There are cases where we would benefit from avoiding having to go through the allocation and free cycle to return an isolated page.
Examples of this include page poisoning, where we isolate a page and then put it back on the free list without ever actually allocating it.
This will also let us avoid notifiers for the upcoming free page reporting work, which needs to avoid re-triggering page reporting when returning pages that have already been reported.
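As a rough illustration of the intent, a plain C toy model (not the kernel implementation); run_alloc_hooks() and notify_page_reported() are hypothetical stand-ins for the post_alloc_hook()/__free_pages() round trip and the future free-page-reporting notification:

/*
 * Toy model: the old path had to pretend the page was allocated and
 * then free it again, running hooks and notifications on the way; the
 * new helper just links the isolated page straight back into the list.
 */
#include <stdio.h>

static void run_alloc_hooks(int pfn)      { printf("alloc hooks run for pfn %d\n", pfn); }
static void notify_page_reported(int pfn) { printf("reporting notified for pfn %d\n", pfn); }

/* Old way: allocation/free cycle. */
static void putback_via_alloc_free(int pfn)
{
	run_alloc_hooks(pfn);
	notify_page_reported(pfn);
	printf("pfn %d back on the freelist (after a full cycle)\n", pfn);
}

/* New way: __putback_isolated_page()-style direct return, no hooks. */
static void putback_direct(int pfn)
{
	printf("pfn %d back on the freelist (no hooks, no notification)\n", pfn);
}

int main(void)
{
	putback_via_alloc_free(42);
	putback_direct(42);
	return 0;
}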
Signed-off-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Nitesh Narayan Lal <nitesh@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pankaj Gupta <pagupta@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Wang <wei.w.wang@intel.com>
Cc: Yang Zhang <yang.zhang.wz@gmail.com>
Cc: wei qi <weiqi4@huawei.com>
Link: http://lkml.kernel.org/r/20200211224624.29318.89287.stgit@localhost.localdom...
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Conflicts:
	mm/internal.h
[Peng Liu: cherry-pick from 624f58d8f4639676d2fa1238425ab0148d501c4a]
Signed-off-by: Peng Liu <liupeng256@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/internal.h | 2 ++
 mm/page_alloc.c | 19 +++++++++++++++++++
 mm/page_isolation.c | 6 ++----
 3 files changed, 23 insertions(+), 4 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h index 415a6a326bb4d..5a142fa64908d 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -163,6 +163,8 @@ static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn, extern int __isolate_free_page(struct page *page, unsigned int order); extern void __free_pages_bootmem(struct page *page, unsigned long pfn, unsigned int order); +extern void __putback_isolated_page(struct page *page, unsigned int order, + int mt); extern void __free_pages_core(struct page *page, unsigned int order); extern void prep_compound_page(struct page *page, unsigned int order); extern void post_alloc_hook(struct page *page, unsigned int order, diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e0002e5c3d06d..de362e070e461 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2993,6 +2993,25 @@ int __isolate_free_page(struct page *page, unsigned int order) return 1UL << order; }
+/** + * __putback_isolated_page - Return a now-isolated page back where we got it + * @page: Page that was isolated + * @order: Order of the isolated page + * + * This function is meant to return a page pulled from the free lists via + * __isolate_free_page back to the free lists they were pulled from. + */ +void __putback_isolated_page(struct page *page, unsigned int order, int mt) +{ + struct zone *zone = page_zone(page); + + /* zone lock should be held when this function is called */ + lockdep_assert_held(&zone->lock); + + /* Return isolated page to tail of freelist. */ + __free_one_page(page, page_to_pfn(page), zone, order, mt); +} + /* * Update NUMA hit/miss statistics * diff --git a/mm/page_isolation.c b/mm/page_isolation.c index cdc33cd2203d1..88b635279e032 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -134,13 +134,11 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype) __mod_zone_freepage_state(zone, nr_pages, migratetype); } set_pageblock_migratetype(page, migratetype); + if (isolated_page) + __putback_isolated_page(page, order, migratetype); zone->nr_isolate_pageblock--; out: spin_unlock_irqrestore(&zone->lock, flags); - if (isolated_page) { - post_alloc_hook(page, order, __GFP_MOVABLE); - __free_pages(page, order); - } }
static inline struct page *
From: David Hildenbrand <david@redhat.com>
mainline inclusion
from mainline-v5.10-rc1
commit f04a5d5d913fa81b5a3456512c3991aea7227912
category: feature
bugzilla: 182882
CVE: NA
-----------------------------------------------
Patch series "mm: place pages to the freelist tail when onlining and undoing isolation", v2.
When adding separate memory blocks via add_memory*() and onlining them immediately, the metadata (especially the memmap) of the next block will be placed onto one of the just added+onlined blocks. This creates a chain of unmovable allocations: if the last memory block cannot get offlined+removed, neither can any of the blocks it depends on. We directly end up with unmovable allocations all over the place.
This can be observed quite easily with virtio-mem, but it can also be observed when using DIMMs. The freshly onlined pages will usually be placed at the head of the freelists, meaning they will be allocated next, which usually renders the just-added memory immediately un-removable. The fresh pages are also cold, so preferring to allocate other (possibly hot) pages first feels like the natural thing to do anyway.
The same applies to the Hyper-V balloon, the Xen balloon, and ppc64 dlpar: when adding separate, successive memory blocks, each memory block will end up with unmovable allocations on it - for example, gigantic pages will then fail to allocate.
While ZONE_NORMAL doesn't provide any guarantee that memory can get offlined+removed again (any kind of fragmentation with unmovable allocations is possible), there are many scenarios (hotplugging a lot of memory, running a workload, then hot-unplugging some memory/as much as possible) where we can offline+remove quite a lot with this patchset.
a) To visualize the problem, a very simple example:
Start a VM with 4GB and 8GB of virtio-mem memory:
[root@localhost ~]# lsmem
RANGE SIZE STATE REMOVABLE BLOCK
0x0000000000000000-0x00000000bfffffff 3G online yes 0-23
0x0000000100000000-0x000000033fffffff 9G online yes 32-103
Memory block size: 128M
Total online memory: 12G
Total offline memory: 0B
Then try to unplug as much as possible using virtio-mem. Observe which memory blocks are still around. Without this patch set:
[root@localhost ~]# lsmem
RANGE SIZE STATE REMOVABLE BLOCK
0x0000000000000000-0x00000000bfffffff 3G online yes 0-23
0x0000000100000000-0x000000013fffffff 1G online yes 32-39
0x0000000148000000-0x000000014fffffff 128M online yes 41
0x0000000158000000-0x000000015fffffff 128M online yes 43
0x0000000168000000-0x000000016fffffff 128M online yes 45
0x0000000178000000-0x000000017fffffff 128M online yes 47
0x0000000188000000-0x0000000197ffffff 256M online yes 49-50
0x00000001a0000000-0x00000001a7ffffff 128M online yes 52
0x00000001b0000000-0x00000001b7ffffff 128M online yes 54
0x00000001c0000000-0x00000001c7ffffff 128M online yes 56
0x00000001d0000000-0x00000001d7ffffff 128M online yes 58
0x00000001e0000000-0x00000001e7ffffff 128M online yes 60
0x00000001f0000000-0x00000001f7ffffff 128M online yes 62
0x0000000200000000-0x0000000207ffffff 128M online yes 64
0x0000000210000000-0x0000000217ffffff 128M online yes 66
0x0000000220000000-0x0000000227ffffff 128M online yes 68
0x0000000230000000-0x0000000237ffffff 128M online yes 70
0x0000000240000000-0x0000000247ffffff 128M online yes 72
0x0000000250000000-0x0000000257ffffff 128M online yes 74
0x0000000260000000-0x0000000267ffffff 128M online yes 76
0x0000000270000000-0x0000000277ffffff 128M online yes 78
0x0000000280000000-0x0000000287ffffff 128M online yes 80
0x0000000290000000-0x0000000297ffffff 128M online yes 82
0x00000002a0000000-0x00000002a7ffffff 128M online yes 84
0x00000002b0000000-0x00000002b7ffffff 128M online yes 86
0x00000002c0000000-0x00000002c7ffffff 128M online yes 88
0x00000002d0000000-0x00000002d7ffffff 128M online yes 90
0x00000002e0000000-0x00000002e7ffffff 128M online yes 92
0x00000002f0000000-0x00000002f7ffffff 128M online yes 94
0x0000000300000000-0x0000000307ffffff 128M online yes 96
0x0000000310000000-0x0000000317ffffff 128M online yes 98
0x0000000320000000-0x0000000327ffffff 128M online yes 100
0x0000000330000000-0x000000033fffffff 256M online yes 102-103
Memory block size: 128M
Total online memory: 8.1G
Total offline memory: 0B
With this patch set:
[root@localhost ~]# lsmem
RANGE SIZE STATE REMOVABLE BLOCK
0x0000000000000000-0x00000000bfffffff 3G online yes 0-23
0x0000000100000000-0x000000013fffffff 1G online yes 32-39
Memory block size: 128M
Total online memory: 4G
Total offline memory: 0B
All memory can get unplugged and all memory blocks can get removed. Of course, no workload ran and the system was basically idle, but it highlights the issue - the fairly deterministic chain of unmovable allocations. When a huge page for the 2MB memmap is needed, a just-onlined 4MB page will be split. The remaining 2MB page will be used for the memmap of the next memory block. So one memory block will hold the memmap of the two following memory blocks. Finally, the pages of the last-onlined memory block will get used for the next bigger allocations - if any such allocation is unmovable, all dependent memory blocks cannot get unplugged and removed until that allocation is gone.
Note that with bigger memory blocks (e.g., 256MB), *all* memory blocks are dependent and none can get unplugged again!
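For reference, the 2MB memmap figure can be sanity-checked with a quick calculation (this assumes 4KiB pages and a 64-byte struct page, which is typical but configuration dependent):

#include <stdio.h>

int main(void)
{
	unsigned long block_size  = 128UL << 20;	/* 128MB memory block */
	unsigned long page_size   = 4096;		/* assumed PAGE_SIZE */
	unsigned long struct_page = 64;			/* assumed sizeof(struct page) */

	unsigned long pages  = block_size / page_size;	/* 32768 pages */
	unsigned long memmap = pages * struct_page;	/* 2 MiB of memmap */

	printf("memmap per 128MB block: %lu MiB\n", memmap >> 20);
	return 0;
}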
b) Experiment with memory intensive workload
I performed an experiment with an older version of this patch set (before we used undo_isolate_page_range() in online_pages()): hotplug 56GB to a VM with an initial 4GB, onlining all memory to ZONE_NORMAL right from the kernel when adding it. I then ran various memory-intensive workloads that consumed most of the system memory for a total of 45 minutes. Once finished, I tried to unplug as much memory as possible.
With this change, I am able to remove 413 out of 448 added memory blocks via virtio-mem (adding individual 128MB memory blocks), and 380 out of 448 added memory blocks via individual (256MB) DIMMs. (I don't have numbers without this patchset, but looking at the example above, it's at most half of the 448 memory blocks for virtio-mem, and most probably none for DIMMs.)
Again, there are workloads that might behave very differently due to the nature of ZONE_NORMAL.
This change also affects (besides memory onlining):
- Other users of undo_isolate_page_range(): pages are always placed to the tail.
-- When memory offlining fails
-- When memory isolation fails after having isolated some pageblocks
-- When alloc_contig_range() either succeeds or fails
- Other users of __putback_isolated_page(): pages are always placed to the tail.
-- Free page reporting
- Other users of __free_pages_core()
-- AFAIK, any memory that is getting exposed to the buddy during boot. IIUC we will now usually allocate memory from lower addresses within a zone first (especially during boot).
- Other users of generic_online_page()
-- Hyper-V balloon
This patch (of 5):
Let's prepare for additional flags and avoid long parameter lists of bools. Follow-up patches will also make use of the flags in __free_pages_ok().
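A standalone sketch of the resulting pattern (the kernel version additionally carries __bitwise/__force annotations for sparse, omitted here so the snippet compiles as plain C; the behavior shown for the flag is simplified):

/*
 * One flags word replaces a growing list of bool parameters, and new
 * flags can be added without touching every caller.
 */
#include <stdio.h>

typedef unsigned int fpi_t;

#define FPI_NONE		((fpi_t)0)
#define FPI_SKIP_REPORT_NOTIFY	((fpi_t)(1u << 0))

static void free_one(unsigned long pfn, fpi_t fpi_flags)
{
	if (fpi_flags & FPI_SKIP_REPORT_NOTIFY)
		printf("pfn %#lx: freed, page reporting not notified\n", pfn);
	else
		printf("pfn %#lx: freed, page reporting notified\n", pfn);
}

int main(void)
{
	free_one(0x1000, FPI_NONE);
	free_one(0x2000, FPI_SKIP_REPORT_NOTIFY);
	return 0;
}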
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Wei Yang <richard.weiyang@linux.alibaba.com>
Reviewed-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Scott Cheloha <cheloha@linux.ibm.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Wei Liu <wei.liu@kernel.org>
Cc: Michal Hocko <mhocko@kernel.org>
Link: https://lkml.kernel.org/r/20201005121534.15649-1-david@redhat.com
Link: https://lkml.kernel.org/r/20201005121534.15649-2-david@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Conflicts:
	mm/page_alloc.c
[Peng Liu: cherry-pick from f04a5d5d913fa81b5a3456512c3991aea7227912]
Signed-off-by: Peng Liu <liupeng256@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/page_alloc.c | 25 +++++++++++++++++++++----
 1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index de362e070e461..4a15b87b7f89e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -74,6 +74,22 @@ #include <asm/div64.h> #include "internal.h"
+/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */ +typedef int __bitwise fpi_t; + +/* No special request */ +#define FPI_NONE ((__force fpi_t)0) + +/* + * Skip free page reporting notification for the (possibly merged) page. + * This does not hinder free page reporting from grabbing the page, + * reporting it and marking it "reported" - it only skips notifying + * the free page reporting infrastructure about a newly freed page. For + * example, used when temporarily pulling a page from a freelist and + * putting it back unmodified. + */ +#define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0)) + /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ static DEFINE_MUTEX(pcp_batch_high_lock); #define MIN_PERCPU_PAGELIST_FRACTION (8) @@ -802,7 +818,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy, static inline void __free_one_page(struct page *page, unsigned long pfn, struct zone *zone, unsigned int order, - int migratetype) + int migratetype, fpi_t fpi_flags) { unsigned long combined_pfn; unsigned long uninitialized_var(buddy_pfn); @@ -1184,7 +1200,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, if (unlikely(isolated_pageblocks)) mt = get_pageblock_migratetype(page);
- __free_one_page(page, page_to_pfn(page), zone, 0, mt); + __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE); trace_mm_page_pcpu_drain(page, 0, mt); } spin_unlock(&zone->lock); @@ -1200,7 +1216,7 @@ static void free_one_page(struct zone *zone, is_migrate_isolate(migratetype))) { migratetype = get_pfnblock_migratetype(page, pfn); } - __free_one_page(page, pfn, zone, order, migratetype); + __free_one_page(page, pfn, zone, order, migratetype, FPI_NONE); spin_unlock(&zone->lock); }
@@ -3009,7 +3025,8 @@ void __putback_isolated_page(struct page *page, unsigned int order, int mt) lockdep_assert_held(&zone->lock);
/* Return isolated page to tail of freelist. */ - __free_one_page(page, page_to_pfn(page), zone, order, mt); + __free_one_page(page, page_to_pfn(page), zone, order, mt, + FPI_SKIP_REPORT_NOTIFY); }
/*
From: David Hildenbrand <david@redhat.com>
mainline inclusion
from mainline-v5.10-rc1
commit 47b6a24a23825ae7b33ff11396980da7c353843d
category: feature
bugzilla: 182882
CVE: NA
-----------------------------------------------
__putback_isolated_page() already documents that pages will be placed to the tail of the freelist - this is, however, not what actually happens for "order >= MAX_ORDER - 2" (see buddy_merge_likely()), which should cover all existing users.
This change affects two users:
- free page reporting
- page isolation, when undoing the isolation (including memory onlining).
This behavior is desirable for pages that haven't really been touched lately, which is exactly the case for these two users: neither actually reads or writes page content, they merely move untouched pages around.
The new behavior is especially desirable for memory onlining, where we allow allocation of newly onlined pages via undo_isolate_page_range() in online_pages(). Right now, we always place them at the head of the freelist, resulting in undesirable behavior: assume we add individual memory chunks via add_memory() and online them right away to the NORMAL zone. We create a dependency chain of unmovable allocations, e.g., via the memmap. The memmap of the next chunk will be placed onto previous chunks - if the last block cannot get offlined+removed, all dependent ones cannot get offlined+removed either. While this can already be observed with individual DIMMs, it's more of an issue for virtio-mem (and I suspect also ppc64 DLPAR).
Document that this should only be used for optimizations, and that no code should rely on this behavior for correctness (in case the order within the freelists ever changes).
We won't care about page shuffling: memory onlining already properly shuffles after onlining. Free page reporting doesn't care about physically contiguous ranges, and there are already cases where page isolation will simply move (physically close) free pages to (currently) the head of the freelists via move_freepages_block() instead of shuffling. If this ever becomes relevant, we should shuffle the whole zone when undoing isolation of larger ranges, and after free_contig_range().
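To make the head-vs-tail effect concrete, a toy freelist model (plain C, not kernel code): allocation always takes the head, so a page returned to the head is handed out again immediately, while a page appended to the tail is not - which is the effect FPI_TO_TAIL is after for these untouched pages:

#include <stdio.h>

#define NPAGES 4

static int freelist[16];
static int nfree;

static void free_to_head(int pfn)
{
	for (int i = nfree; i > 0; i--)
		freelist[i] = freelist[i - 1];
	freelist[0] = pfn;
	nfree++;
}

static void free_to_tail(int pfn)
{
	freelist[nfree++] = pfn;
}

static int alloc_page(void)	/* always takes the head, like the fast path */
{
	int pfn = freelist[0];

	for (int i = 0; i < nfree - 1; i++)
		freelist[i] = freelist[i + 1];
	nfree--;
	return pfn;
}

int main(void)
{
	for (int pfn = 0; pfn < NPAGES; pfn++)
		free_to_tail(pfn);		/* "old" pages 0..3 */

	free_to_head(100);			/* fresh page, head placement */
	printf("next allocation (head placement): pfn %d\n", alloc_page());

	free_to_tail(101);			/* fresh page, tail placement */
	printf("next allocation (tail placement): pfn %d\n", alloc_page());
	return 0;
}

With head placement the fresh page (pfn 100) is allocated right away; with tail placement an older page (pfn 0) is handed out instead.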
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Wei Yang <richard.weiyang@linux.alibaba.com>
Reviewed-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Scott Cheloha <cheloha@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Wei Liu <wei.liu@kernel.org>
Link: https://lkml.kernel.org/r/20201005121534.15649-3-david@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Conflicts:
	mm/page_alloc.c
[Peng Liu: cherry-pick from 47b6a24a23825ae7b33ff11396980da7c353843d]
Signed-off-by: Peng Liu <liupeng256@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/page_alloc.c | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4a15b87b7f89e..a4806709e441f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -90,6 +90,18 @@ typedef int __bitwise fpi_t; */ #define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0))
+/* + * Place the (possibly merged) page to the tail of the freelist. Will ignore + * page shuffling (relevant code - e.g., memory onlining - is expected to + * shuffle the whole zone). + * + * Note: No code should rely on this flag for correctness - it's purely + * to allow for optimizations when handing back either fresh pages + * (memory onlining) or untouched pages (page isolation, free page + * reporting). + */ +#define FPI_TO_TAIL ((__force fpi_t)BIT(1)) + /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ static DEFINE_MUTEX(pcp_batch_high_lock); #define MIN_PERCPU_PAGELIST_FRACTION (8) @@ -889,7 +901,11 @@ static inline void __free_one_page(struct page *page,
done_merging: set_page_order(page, order); - + if (fpi_flags & FPI_TO_TAIL) { + list_add_tail(&page->lru, + &zone->free_area[order].free_list[migratetype]); + goto out; + } /* * If this is not the largest possible page, check if the buddy * of the next-highest order is free. If it is, it's possible @@ -3026,7 +3042,7 @@ void __putback_isolated_page(struct page *page, unsigned int order, int mt)
/* Return isolated page to tail of freelist. */ __free_one_page(page, page_to_pfn(page), zone, order, mt, - FPI_SKIP_REPORT_NOTIFY); + FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); }
/*
From: David Hildenbrand <david@redhat.com>
mainline inclusion
from mainline-v5.10-rc1
commit 293ffa5ebb9c08a77d8de458166c31b4d7b0cd65
category: feature
bugzilla: 182882
CVE: NA
-----------------------------------------------
Whenever we move pages between freelists via move_to_free_list()/move_freepages_block(), we don't actually touch the pages:
1. Page isolation doesn't actually touch the pages, it simply isolates pageblocks and moves all free pages to the MIGRATE_ISOLATE freelist. When undoing isolation, we move the pages back to the target list.
2. Page stealing (steal_suitable_fallback()) moves free pages directly between lists without touching them.
3. reserve_highatomic_pageblock()/unreserve_highatomic_pageblock() moves free pages directly between freelists without touching them.
We already place pages at the tail of the freelists when undoing isolation via __putback_isolated_page(); let's do it in any case (e.g., if order <= pageblock_order) and document the behavior. To simplify, move the pages to the tail for all move_to_free_list()/move_freepages_block() users.
In 2., the target list is empty, so there should be no change. In 3., we might observe a change; however, highatomic is more concerned about allocations succeeding than about cache hotness - if we ever find that this change degrades a workload, we can special-case this instance and add a proper comment.
This change results in all pages getting onlined via online_pages() to be placed to the tail of the freelist.
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Wei Yang <richard.weiyang@linux.alibaba.com>
Acked-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Scott Cheloha <cheloha@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Wei Liu <wei.liu@kernel.org>
Link: https://lkml.kernel.org/r/20201005121534.15649-4-david@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Conflicts:
	mm/page_alloc.c
[Peng Liu: cherry-pick from 293ffa5ebb9c08a77d8de458166c31b4d7b0cd65]
Signed-off-by: Peng Liu <liupeng256@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/page_alloc.c | 12 +++++++++---
 mm/page_isolation.c | 5 +++++
 2 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a4806709e441f..9baa829f6a29c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2122,7 +2122,7 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone, #endif
/* - * Move the free pages in a range to the free lists of the requested type. + * Move the free pages in a range to the freelist tail of the requested type. * Note that start_page and end_pages are not aligned on a pageblock * boundary. If alignment is required, use move_freepages_block() */ @@ -2174,8 +2174,14 @@ static int move_freepages(struct zone *zone, }
order = page_order(page); - list_move(&page->lru, - &zone->free_area[order].free_list[migratetype]); + /* + * Used for pages which are on another list. Move the pages to + * the tail of the list - so the moved pages won't immediately + * be considered for allocation again (e.g., optimization for + * memory onlining). + */ + list_move_tail(&page->lru, + &zone->free_area[order].free_list[migratetype]); page += 1 << order; pages_moved += 1 << order; } diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 88b635279e032..4639ef78fb151 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -128,6 +128,11 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype) * If we isolate freepage with more than pageblock_order, there * should be no freepage in the range, so we could avoid costly * pageblock scanning for freepage moving. + * + * We didn't actually touch any of the isolated pages, so place them + * to the tail of the freelist. This is an optimization for memory + * onlining - just onlined memory won't immediately be considered for + * allocation. */ if (!isolated_page) { nr_pages = move_freepages_block(zone, page, migratetype, NULL);
From: David Hildenbrand <david@redhat.com>
mainline inclusion
from mainline-v5.10-rc1
commit 7fef431be9c9ac255838a9578331567b9dba4477
category: feature
bugzilla: 182882
CVE: NA

-----------------------------------------------
__free_pages_core() is used when exposing fresh memory to the buddy during system boot and when onlining memory in generic_online_page().
generic_online_page() is used in two cases:
1. Direct memory onlining in online_pages().
2. Deferred memory onlining in memory-ballooning-like mechanisms (HyperV balloon and virtio-mem), when parts of a section are kept fake-offline to be fake-onlined later on.
In 1, we already place pages to the tail of the freelist. Pages will be freed to MIGRATE_ISOLATE lists first and moved to the tail of the freelists via undo_isolate_page_range().
In 2, we currently don't implement a proper rule. In the case of virtio-mem, where we currently always online MAX_ORDER - 1 pages, the pages will be placed at the HEAD of the freelist - undesirable. While the Hyper-V balloon calls generic_online_page() with single pages, it will usually call it on successive single pages within a larger block.
The pages are fresh, so place them at the tail of the freelist and avoid the PCP. In __free_pages_core(), remove the now superfluous call to set_page_refcounted() and add a comment regarding page initialization and the refcount.
Note: In 2. we currently don't shuffle. If ever relevant (page shuffling is usually of limited use in virtualized environments), we might want to shuffle after a sequence of generic_online_page() calls in the relevant callers.
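A minimal stand-in (plain C, with a simplified struct page carrying only the fields relevant here, not the kernel type) for the refcount handling the new comment documents: memmap initialization leaves every page with refcount 1, and __free_pages_core() drops all of them to 0 before handing the whole order-N block to the buddy:

#include <stdio.h>

#define ORDER 3
#define NR_PAGES (1u << ORDER)

struct page { int refcount; int reserved; };

int main(void)
{
	struct page block[NR_PAGES];

	/* __init_single_page(): every page starts "allocated"/"not free" */
	for (unsigned int i = 0; i < NR_PAGES; i++)
		block[i] = (struct page){ .refcount = 1, .reserved = 1 };

	/* __free_pages_core(): clear Reserved and drop refcounts to 0 */
	for (unsigned int i = 0; i < NR_PAGES; i++) {
		block[i].reserved = 0;
		block[i].refcount = 0;
	}

	/* the whole block then goes to the freelist tail via FPI_TO_TAIL */
	printf("order-%d block (%u pages) ready for the freelist\n",
	       ORDER, NR_PAGES);
	return 0;
}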
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Wei Yang <richard.weiyang@linux.alibaba.com>
Acked-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Haiyang Zhang <haiyangz@microsoft.com>
Cc: Stephen Hemminger <sthemmin@microsoft.com>
Cc: Wei Liu <wei.liu@kernel.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Scott Cheloha <cheloha@linux.ibm.com>
Link: https://lkml.kernel.org/r/20201005121534.15649-5-david@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Conflicts:
	mm/page_alloc.c
[Peng Liu: adjust context]
Signed-off-by: Peng Liu <liupeng256@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/page_alloc.c | 32 ++++++++++++++++++++++----------
 1 file changed, 22 insertions(+), 10 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9baa829f6a29c..a914fc3b589be 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -221,7 +221,8 @@ bool pm_suspended_storage(void) unsigned int pageblock_order __read_mostly; #endif
-static void __free_pages_ok(struct page *page, unsigned int order); +static void __free_pages_ok(struct page *page, unsigned int order, + fpi_t fpi_flags);
/* * results with 256, 32 in the lowmem_reserve sysctl: @@ -622,7 +623,7 @@ static void bad_page(struct page *page, const char *reason, void free_compound_page(struct page *page) { mem_cgroup_uncharge(page); - __free_pages_ok(page, compound_order(page)); + __free_pages_ok(page, compound_order(page), FPI_NONE); }
void prep_compound_page(struct page *page, unsigned int order) @@ -1225,14 +1226,14 @@ static void free_pcppages_bulk(struct zone *zone, int count, static void free_one_page(struct zone *zone, struct page *page, unsigned long pfn, unsigned int order, - int migratetype) + int migratetype, fpi_t fpi_flags) { spin_lock(&zone->lock); if (unlikely(has_isolate_pageblock(zone) || is_migrate_isolate(migratetype))) { migratetype = get_pfnblock_migratetype(page, pfn); } - __free_one_page(page, pfn, zone, order, migratetype, FPI_NONE); + __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); spin_unlock(&zone->lock); }
@@ -1304,7 +1305,8 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end) } }
-static void __free_pages_ok(struct page *page, unsigned int order) +static void __free_pages_ok(struct page *page, unsigned int order, + fpi_t fpi_flags) { unsigned long flags; int migratetype; @@ -1316,7 +1318,8 @@ static void __free_pages_ok(struct page *page, unsigned int order) migratetype = get_pfnblock_migratetype(page, pfn); local_irq_save(flags); __count_vm_events(PGFREE, 1 << order); - free_one_page(page_zone(page), page, pfn, order, migratetype); + free_one_page(page_zone(page), page, pfn, order, migratetype, + fpi_flags); local_irq_restore(flags); }
@@ -1326,6 +1329,11 @@ void __free_pages_core(struct page *page, unsigned int order) struct page *p = page; unsigned int loop;
+ /* + * When initializing the memmap, __init_single_page() sets the refcount + * of all pages to 1 ("allocated"/"not free"). We have to set the + * refcount of all involved pages to 0. + */ prefetchw(p); for (loop = 0; loop < (nr_pages - 1); loop++, p++) { prefetchw(p + 1); @@ -1335,8 +1343,11 @@ void __free_pages_core(struct page *page, unsigned int order) __ClearPageReserved(p); set_page_count(p, 0);
- set_page_refcounted(page); - __free_pages(page, order); + /* + * Bypass PCP and place fresh pages right to the tail, primarily + * relevant for memory onlining. + */ + __free_pages_ok(page, order, FPI_TO_TAIL); }
#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \ @@ -2892,7 +2903,8 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn) */ if (migratetype >= MIGRATE_PCPTYPES) { if (unlikely(is_migrate_isolate(migratetype))) { - free_one_page(zone, page, pfn, 0, migratetype); + free_one_page(zone, page, pfn, 0, migratetype, + FPI_NONE); return; } migratetype = MIGRATE_MOVABLE; @@ -4645,7 +4657,7 @@ static inline void free_the_page(struct page *page, unsigned int order) if (order == 0) /* Via pcp? */ free_unref_page(page); else - __free_pages_ok(page, order); + __free_pages_ok(page, order, FPI_NONE); }
void __free_pages(struct page *page, unsigned int order)