From: Dennis Zhou <dennis@kernel.org>
mainline inclusion
from mainline-v5.14-rc1
commit 1c29a3ceaf5f02919e0a89119a70382581453dbb
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4BE79
CVE: NA
-------------------------------------------------

This prepares for adding a to_depopulate list and a sidelined list after
the free slot in the set of lists in pcpu_slot.
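For context, here is a small userspace sketch (an editorial illustration, not part of the patch) of why an explicit pcpu_free_slot index is wanted once more slots are appended after the free slot. The sidelined and to_depopulate indices below are assumptions drawn from the commit message; they are introduced by later patches, not by this one.

#include <stdio.h>

int main(void)
{
	int nr_size_slots = 14;			/* hypothetical __pcpu_size_to_slot(unit_size) */
	int free_slot = nr_size_slots + 1;	/* fully free chunks live here */

	/* today: the free slot happens to be the last slot */
	int nr_slots_now = free_slot + 1;

	/*
	 * after the follow-up series: two more lists are assumed to sit
	 * *after* the free slot, so "pcpu_nr_slots - 1" would no longer
	 * point at it
	 */
	int sidelined_slot = free_slot + 1;
	int to_depopulate_slot = free_slot + 2;
	int nr_slots_later = to_depopulate_slot + 1;

	printf("now: free=%d of %d slots\n", free_slot, nr_slots_now);
	printf("later: free=%d, sidelined=%d, to_depopulate=%d of %d slots\n",
	       free_slot, sidelined_slot, to_depopulate_slot, nr_slots_later);
	return 0;
}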
Signed-off-by: Dennis Zhou <dennis@kernel.org>
Acked-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
(cherry picked from commit 1c29a3ceaf5f02919e0a89119a70382581453dbb)
Signed-off-by: Yuanzheng Song <songyuanzheng@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 mm/percpu.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index dd4e31934f3e..e43e1e418603 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -134,6 +134,7 @@ static int pcpu_unit_size __ro_after_init;
 static int pcpu_nr_units __ro_after_init;
 static int pcpu_atom_size __ro_after_init;
 int pcpu_nr_slots __ro_after_init;
+int pcpu_free_slot __ro_after_init;
 static size_t pcpu_chunk_struct_size __ro_after_init;
 
 /* cpus with the lowest and highest unit addresses */
@@ -236,7 +237,7 @@ static int __pcpu_size_to_slot(int size)
 static int pcpu_size_to_slot(int size)
 {
 	if (size == pcpu_unit_size)
-		return pcpu_nr_slots - 1;
+		return pcpu_free_slot;
 	return __pcpu_size_to_slot(size);
 }
 
@@ -1805,7 +1806,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 			goto fail;
 	}
 
-	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
+	if (list_empty(&pcpu_slot[pcpu_free_slot])) {
 		chunk = pcpu_create_chunk(type, pcpu_gfp);
 		if (!chunk) {
 			err = "failed to allocate new chunk";
@@ -1957,7 +1958,7 @@ static void pcpu_balance_free(enum pcpu_chunk_type type)
 {
 	LIST_HEAD(to_free);
 	struct list_head *pcpu_slot = pcpu_chunk_list(type);
-	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
+	struct list_head *free_head = &pcpu_slot[pcpu_free_slot];
 	struct pcpu_chunk *chunk, *next;
 
 	/*
@@ -2032,7 +2033,7 @@ static void pcpu_balance_populated(enum pcpu_chunk_type type)
 				  0, PCPU_EMPTY_POP_PAGES_HIGH);
 	}
 
-	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
+	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) {
 		unsigned int nr_unpop = 0, rs, re;
 
 		if (!nr_to_pop)
@@ -2139,7 +2140,7 @@ void free_percpu(void __percpu *ptr)
 	if (chunk->free_bytes == pcpu_unit_size) {
 		struct pcpu_chunk *pos;
 
-		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
+		list_for_each_entry(pos, &pcpu_slot[pcpu_free_slot], list)
 			if (pos != chunk) {
 				need_balance = true;
 				break;
@@ -2561,7 +2562,8 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	 * Allocate chunk slots.  The additional last slot is for
 	 * empty chunks.
 	 */
-	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
+	pcpu_free_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1;
+	pcpu_nr_slots = pcpu_free_slot + 1;
 	pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
 					  sizeof(pcpu_chunk_lists[0]) *
 					  PCPU_NR_CHUNK_TYPES,
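
A quick sanity check on the last hunk (editorial note, outside the patch): with the new definitions, pcpu_free_slot equals the old "pcpu_nr_slots - 1" and pcpu_nr_slots is unchanged, so the change is behaviorally neutral. The sketch below mirrors __pcpu_size_to_slot() in userspace under the assumption that PCPU_SLOT_BASE_SHIFT is 5, as in mm/percpu.c; fls_int() is a stand-in for the kernel's fls().

#include <assert.h>
#include <stdio.h>

#define PCPU_SLOT_BASE_SHIFT 5	/* assumed: 1-31 bytes share the same slot */

static int fls_int(unsigned int x)	/* stand-in for the kernel's fls() */
{
	int bit = 0;

	while (x) {
		bit++;
		x >>= 1;
	}
	return bit;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls_int(size);	/* size is in bytes */
	int slot = highbit - PCPU_SLOT_BASE_SHIFT + 2;

	return slot > 1 ? slot : 1;
}

int main(void)
{
	/* walk a range of plausible unit sizes (powers of two, 4K..1M) */
	for (int unit_size = 4 << 10; unit_size <= (1 << 20); unit_size <<= 1) {
		int old_nr_slots = __pcpu_size_to_slot(unit_size) + 2;
		int new_free_slot = __pcpu_size_to_slot(unit_size) + 1;
		int new_nr_slots = new_free_slot + 1;

		assert(new_free_slot == old_nr_slots - 1);
		assert(new_nr_slots == old_nr_slots);
		printf("unit %7d: free slot %2d, %2d slots\n",
		       unit_size, new_free_slot, new_nr_slots);
	}
	return 0;
}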