hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8UU53
---------------------------
Reserve space for the structures in the mm subsystem.
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 include/linux/mempolicy.h | 3 +++
 include/linux/mempool.h   | 4 ++++
 include/linux/memremap.h  | 9 +++++++++
 include/linux/mm.h        | 8 ++++++++
 include/linux/mm_types.h  | 11 +++++++++++
 include/linux/mmzone.h    | 14 ++++++++++++++
 include/linux/pagemap.h   | 4 ++++
 include/linux/shrinker.h  | 7 +++++++
 include/linux/swap.h      | 3 +++
 9 files changed, 63 insertions(+)
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index ca62e945e4f7..2e81ac87e6f6 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -52,6 +52,9 @@ struct mempolicy { nodemask_t cpuset_mems_allowed; /* relative to these nodes */ nodemask_t user_nodemask; /* nodemask passed by user */ } w; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) };
/* diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 4aae6c06c5f2..178959b2ee5c 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -7,6 +7,7 @@
#include <linux/wait.h> #include <linux/compiler.h> +#include <linux/kabi.h>
struct kmem_cache;
@@ -23,6 +24,9 @@ typedef struct mempool_s { mempool_alloc_t *alloc; mempool_free_t *free; wait_queue_head_t wait; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) } mempool_t;
static inline bool mempool_initialized(mempool_t *pool) diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 1314d9c5f05b..816532f7ae92 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -25,6 +25,8 @@ struct vmem_altmap { unsigned long free; unsigned long align; unsigned long alloc; + KABI_RESERVE(1) + KABI_RESERVE(2) };
/* @@ -99,6 +101,8 @@ struct dev_pagemap_ops { */ int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn, unsigned long nr_pages, int mf_flags); + KABI_RESERVE(1) + KABI_RESERVE(2) };
#define PGMAP_ALTMAP_VALID (1 << 0) @@ -133,6 +137,11 @@ struct dev_pagemap { const struct dev_pagemap_ops *ops; void *owner; int nr_range; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) + KABI_RESERVE(5) union { struct range range; DECLARE_FLEX_ARRAY(struct range, ranges); diff --git a/include/linux/mm.h b/include/linux/mm.h index f078aa6b493c..ffd323dda7cf 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -30,6 +30,7 @@ #include <linux/kasan.h> #include <linux/memremap.h> #include <linux/slab.h> +#include <linux/kabi.h>
struct mempolicy; struct anon_vma; @@ -562,6 +563,9 @@ struct vm_fault { * page table to avoid allocation from * atomic context. */ + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) };
/* @@ -641,6 +645,10 @@ struct vm_operations_struct { */ struct page *(*find_special_page)(struct vm_area_struct *vma, unsigned long addr); + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
#ifdef CONFIG_NUMA_BALANCING diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 0bc3c7c191a5..03a0a3e53986 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -22,6 +22,8 @@
#include <asm/mmu.h>
+#include <linux/kabi.h> + #ifndef AT_VECTOR_SIZE_ARCH #define AT_VECTOR_SIZE_ARCH 0 #endif @@ -677,6 +679,10 @@ struct vm_area_struct { #ifdef CONFIG_SHARE_POOL struct sp_area *spa; #endif + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) } __randomize_layout;
#ifdef CONFIG_SCHED_MM_CID @@ -939,6 +945,11 @@ struct mm_struct { #endif } __randomize_layout;
+ KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) + KABI_RESERVE(5) /* * The mm_cpumask needs to be at the end of mm_struct, because it * is dynamically sized based on nr_cpu_ids. diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 57086c57b8e4..0821282fccd3 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -637,6 +637,10 @@ struct lruvec { #ifdef CONFIG_MEMCG struct pglist_data *pgdat; #endif + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
/* Isolate unmapped pages */ @@ -997,6 +1001,12 @@ struct zone { bool contiguous;
CACHELINE_PADDING(_pad3_); + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) + /* Zone statistics */ atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; @@ -1410,6 +1420,10 @@ typedef struct pglist_data {
CACHELINE_PADDING(_pad2_);
+ KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + /* Per-node vmstats */ struct per_cpu_nodestat __percpu *per_cpu_nodestats; atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS]; diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 8c9608b217b0..d418f1c563ee 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -15,6 +15,7 @@ #include <linux/bitops.h> #include <linux/hardirq.h> /* for in_interrupt() */ #include <linux/hugetlb_inline.h> +#include <linux/kabi.h>
struct folio_batch;
@@ -1262,6 +1263,9 @@ struct readahead_control { struct file *file; struct address_space *mapping; struct file_ra_state *ra; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) /* private: use the readahead_* accessors instead */ pgoff_t _index; unsigned int _nr_pages; diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 224293b2dd06..335e7aa12fa5 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -4,6 +4,7 @@
#include <linux/atomic.h> #include <linux/types.h> +#include <linux/kabi.h>
/* * This struct is used to pass information from page reclaim to the shrinkers. @@ -34,6 +35,9 @@ struct shrink_control {
/* current memcg being shrunk (for memcg aware shrinkers) */ struct mem_cgroup *memcg; + + KABI_RESERVE(1) + KABI_RESERVE(2) };
#define SHRINK_STOP (~0UL) @@ -83,6 +87,9 @@ struct shrinker { #endif /* objs pending delete, per node */ atomic_long_t *nr_deferred; + + KABI_RESERVE(1) + KABI_RESERVE(2) }; #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
diff --git a/include/linux/swap.h b/include/linux/swap.h index 9dc160d6fd43..a0286656c231 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -334,6 +334,9 @@ struct swap_info_struct { */ struct work_struct discard_work; /* discard worker */ struct swap_cluster_list discard_clusters; /* discard clusters list */ + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) struct plist_node avail_lists[]; /* * entries in swap_avail_heads, one * entry per node.
反馈: 您发送到kernel@openeuler.org的补丁/补丁集,已成功转换为PR! PR链接地址: https://gitee.com/openeuler/kernel/pulls/3959 邮件列表地址:https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/7...
FeedBack: The patch(es) which you have sent to kernel@openeuler.org mailing list has been converted to a pull request successfully! Pull request link: https://gitee.com/openeuler/kernel/pulls/3959 Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/7...