v4: drop the KABI reserves for enums in include/linux/bpf.h
v3: change the KABI reserves for struct mem_cgroup_threshold_ary
v2: add some KABI reserves and fix the code conflict in include/linux/memcontrol.h
Lu Jialin (2):
  memcg/kabi: reserve space for memcg related structures
  cgroup_bpf/kabi: reserve space for cgroup_bpf related structures
 include/linux/bpf-cgroup-defs.h | 10 ++++++
 include/linux/memcontrol.h      | 54 +++++++++++++++++++++++++++++++++
 mm/memcontrol.c                 | 16 ++++++++++
 3 files changed, 80 insertions(+)
From: Lu Jialin <lujialin4@huawei.com>

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8TP4Z
--------------------------------
We reserve some fields beforehand for memcg-related structures that are prone to change, so that features of memcg can be hot added or changed later with this enhancement.

After reserving, cache behavior is normally unaffected, since the reserved fields are never accessed.
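To make the mechanism concrete, here is a minimal sketch of the reserve-then-consume pattern. The macro bodies below are simplified stand-ins assumed for illustration, not the real definitions in include/linux/kabi.h, and example_v1, example_v2 and new_feature are hypothetical names:

/* Simplified stand-ins for the macros in include/linux/kabi.h;
 * assumed semantics for illustration, not the real definitions. */
#include <assert.h>

/* Each reserve is one pointer-sized placeholder field. */
#define KABI_RESERVE(n)         unsigned long kabi_reserved##n;
/* A later update can overlay a new member on a placeholder. */
#define KABI_USE(n, _new)       union { unsigned long kabi_reserved##n; _new; };

/* As shipped: spare slots follow the real members. */
struct example_v1 {
        int id;
        KABI_RESERVE(1)
        KABI_RESERVE(2)
};

/* After hot adding a feature: reserve 1 is consumed, but every offset
 * and the total size are unchanged, so code built against the v1
 * layout keeps working. */
struct example_v2 {
        int id;
        KABI_USE(1, void *new_feature)
        KABI_RESERVE(2)
};

static_assert(sizeof(struct example_v1) == sizeof(struct example_v2),
              "consuming a reserve must not change the layout");

Consuming a reserve through the union keeps existing offsets and the total size stable, which is what allows a feature to be hot added without breaking code built against the older layout.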
Signed-off-by: Lu Jialin <lujialin4@huawei.com>
Signed-off-by: Xiang Yang <xiangyang3@huawei.com>
---
 include/linux/memcontrol.h | 54 ++++++++++++++++++++++++++++++++++++++
 mm/memcontrol.c            | 16 +++++++++++
 2 files changed, 70 insertions(+)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 649fbb5c1adc..0283fde53965 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -21,6 +21,7 @@
 #include <linux/vmstat.h>
 #include <linux/writeback.h>
 #include <linux/page-flags.h>
+#include <linux/kabi.h>
 
 struct mem_cgroup;
 struct obj_cgroup;
@@ -72,6 +73,9 @@ struct mem_cgroup_reclaim_cookie {
 struct mem_cgroup_id {
         int id;
         refcount_t ref;
+        KABI_RESERVE(1)
+        KABI_RESERVE(2)
+        KABI_RESERVE(3)
 };
 
 /*
@@ -93,6 +97,9 @@ struct mem_cgroup_reclaim_iter {
         struct mem_cgroup *position;
         /* scan generation, increased every round-trip */
         unsigned int generation;
+        KABI_RESERVE(1)
+        KABI_RESERVE(2)
+        KABI_RESERVE(3)
 };
 
 /*
@@ -104,6 +111,9 @@ struct shrinker_info {
         atomic_long_t *nr_deferred;
         unsigned long *map;
         int map_nr_max;
+        KABI_RESERVE(1)
+        KABI_RESERVE(2)
+        KABI_RESERVE(3)
 };
 
 struct lruvec_stats_percpu {
@@ -123,6 +133,15 @@ struct lruvec_stats {
 
         /* Pending child counts during tree propagation */
         long state_pending[NR_VM_NODE_STAT_ITEMS];
+
+        KABI_RESERVE(1)
+        KABI_RESERVE(2)
+        KABI_RESERVE(3)
+        KABI_RESERVE(4)
+        KABI_RESERVE(5)
+        KABI_RESERVE(6)
+        KABI_RESERVE(7)
+        KABI_RESERVE(8)
 };
 
 /*
@@ -146,11 +165,19 @@ struct mem_cgroup_per_node {
         bool on_tree;
         struct mem_cgroup *memcg;       /* Back pointer, we cannot */
                                         /* use container_of        */
+
+        KABI_RESERVE(1)
+        KABI_RESERVE(2)
+        KABI_RESERVE(3)
+        KABI_RESERVE(4)
 };
 
 struct mem_cgroup_threshold {
         struct eventfd_ctx *eventfd;
         unsigned long threshold;
+        KABI_RESERVE(1)
+        KABI_RESERVE(2)
+        KABI_RESERVE(3)
 };
 
 /* For threshold */
@@ -159,6 +186,9 @@ struct mem_cgroup_threshold_ary {
         int current_threshold;
         /* Size of entries[] */
         unsigned int size;
+        KABI_RESERVE(1)
+        KABI_RESERVE(2)
+        KABI_RESERVE(3)
         /* Array of thresholds */
         struct mem_cgroup_threshold entries[];
 };
@@ -172,6 +202,15 @@ struct mem_cgroup_thresholds {
          * It must be able to store at least primary->size - 1 entries.
          */
         struct mem_cgroup_threshold_ary *spare;
+
+        KABI_RESERVE(1)
+        KABI_RESERVE(2)
+        KABI_RESERVE(3)
+        KABI_RESERVE(4)
+        KABI_RESERVE(5)
+        KABI_RESERVE(6)
+        KABI_RESERVE(7)
+        KABI_RESERVE(8)
 };
 
 /*
@@ -189,6 +228,9 @@ struct memcg_cgwb_frn {
         int memcg_id;                   /* memcg->css.id of foreign inode */
         u64 at;                         /* jiffies_64 at the time of dirtying */
         struct wb_completion done;      /* tracks in-flight foreign writebacks */
+        KABI_RESERVE(1)
+        KABI_RESERVE(2)
+        KABI_RESERVE(3)
 };
 
 /*
@@ -205,6 +247,10 @@ struct obj_cgroup {
                 struct list_head list;  /* protected by objcg_lock */
                 struct rcu_head rcu;
         };
+        KABI_RESERVE(1)
+        KABI_RESERVE(2)
+        KABI_RESERVE(3)
+        KABI_RESERVE(4)
 };
 
 struct swap_device {
@@ -369,6 +415,14 @@ struct mem_cgroup {
         struct dynamic_pool *dpool;
 #endif
 
+        KABI_RESERVE(1)
+        KABI_RESERVE(2)
+        KABI_RESERVE(3)
+        KABI_RESERVE(4)
+        KABI_RESERVE(5)
+        KABI_RESERVE(6)
+        KABI_RESERVE(7)
+        KABI_RESERVE(8)
         struct mem_cgroup_per_node *nodeinfo[];
 };
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 346be8292294..0bfe95d9b9ba 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -764,6 +764,14 @@ struct memcg_vmstats_percpu {
         /* Cgroup1: threshold notifications & softlimit tree updates */
         unsigned long nr_page_events;
         unsigned long targets[MEM_CGROUP_NTARGETS];
+        KABI_RESERVE(1)
+        KABI_RESERVE(2)
+        KABI_RESERVE(3)
+        KABI_RESERVE(4)
+        KABI_RESERVE(5)
+        KABI_RESERVE(6)
+        KABI_RESERVE(7)
+        KABI_RESERVE(8)
 };
 
 struct memcg_vmstats {
@@ -778,6 +786,14 @@ struct memcg_vmstats {
         /* Pending child counts during tree propagation */
         long state_pending[MEMCG_NR_STAT];
         unsigned long events_pending[NR_MEMCG_EVENTS];
+        KABI_RESERVE(1)
+        KABI_RESERVE(2)
+        KABI_RESERVE(3)
+        KABI_RESERVE(4)
+        KABI_RESERVE(5)
+        KABI_RESERVE(6)
+        KABI_RESERVE(7)
+        KABI_RESERVE(8)
 };
 
 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
From: Lu Jialin <lujialin4@huawei.com>

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8TP4Z
--------------------------------
We reserve some fields beforehand for struct cgroup_bpf, which is prone to change, so that features of bpf cgroup can be hot added or changed later with this enhancement.

After reserving, cache behavior is normally unaffected, since the reserved fields are never accessed.
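As a hypothetical illustration of why the reserves are appended after release_work rather than inserted in the middle: trailing placeholders never move the members in front of them. The sketch below uses simplified stand-in types and names (cgroup_bpf_old, cgroup_bpf_new, effective as a plain pointer), not the real struct cgroup_bpf, and checks that property at compile time:

#include <assert.h>
#include <stddef.h>

/* Simplified stand-in for the macro in include/linux/kabi.h. */
#define KABI_RESERVE(n)         unsigned long kabi_reserved##n;

/* Trimmed stand-in for struct cgroup_bpf; the real structure keeps
 * its prog arrays, flags and refcnt ahead of release_work. */
struct cgroup_bpf_old {
        void *effective;        /* stand-in for the prog array pointers */
        long release_work;      /* stand-in for struct work_struct */
};

struct cgroup_bpf_new {
        void *effective;
        long release_work;
        KABI_RESERVE(1)
        KABI_RESERVE(2)
        /* ... KABI_RESERVE(3) through KABI_RESERVE(8) in the patch */
};

/* Appending reserves leaves every pre-existing offset untouched. */
static_assert(offsetof(struct cgroup_bpf_old, release_work) ==
              offsetof(struct cgroup_bpf_new, release_work),
              "trailing reserves must not move existing members");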
Signed-off-by: Lu Jialin <lujialin4@huawei.com>
Signed-off-by: Xiang Yang <xiangyang3@huawei.com>
---
 include/linux/bpf-cgroup-defs.h | 10 ++++++++++
 1 file changed, 10 insertions(+)
diff --git a/include/linux/bpf-cgroup-defs.h b/include/linux/bpf-cgroup-defs.h
index 7b121bd780eb..fb6adb1c3889 100644
--- a/include/linux/bpf-cgroup-defs.h
+++ b/include/linux/bpf-cgroup-defs.h
@@ -7,6 +7,7 @@
 #include <linux/list.h>
 #include <linux/percpu-refcount.h>
 #include <linux/workqueue.h>
+#include <linux/kabi.h>
 
 struct bpf_prog_array;
 
@@ -70,6 +71,15 @@ struct cgroup_bpf {
 
         /* cgroup_bpf is released using a work queue */
         struct work_struct release_work;
+
+        KABI_RESERVE(1)
+        KABI_RESERVE(2)
+        KABI_RESERVE(3)
+        KABI_RESERVE(4)
+        KABI_RESERVE(5)
+        KABI_RESERVE(6)
+        KABI_RESERVE(7)
+        KABI_RESERVE(8)
 };
 
 #else /* CONFIG_CGROUP_BPF */