v3: change kabi reserve for struct mem_cgroup_threshold_ary
v2: add some kabi and fix the code conflict in include/linux/memcontrol.h
Lu Jialin (2): memcg/kabi: reserve space for memcg related structures cgroup_bpf/kabi: reserve space for cgroup_bpf related structures
include/linux/bpf-cgroup-defs.h | 20 ++++++++++++ include/linux/bpf.h | 11 +++++++ include/linux/memcontrol.h | 54 +++++++++++++++++++++++++++++++++ mm/memcontrol.c | 16 ++++++++++ 4 files changed, 101 insertions(+)
-- 2.34.1
From: Lu Jialin lujialin4@huawei.com
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8TP4Z
--------------------------------
We reserve some fields beforehand for memcg-related structures that are prone to change. Therefore, we can hot add/change features of memcg with this enhancement.
After reserving, cache behavior is normally unaffected, as the reserved fields are not accessed at all.
Signed-off-by: Lu Jialin lujialin4@huawei.com Signed-off-by: Xiang Yang xiangyang3@huawei.com --- include/linux/memcontrol.h | 54 ++++++++++++++++++++++++++++++++++++++ mm/memcontrol.c | 16 +++++++++++ 2 files changed, 70 insertions(+)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 649fbb5c1adc..0283fde53965 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -21,6 +21,7 @@ #include <linux/vmstat.h> #include <linux/writeback.h> #include <linux/page-flags.h> +#include <linux/kabi.h>
struct mem_cgroup; struct obj_cgroup; @@ -72,6 +73,9 @@ struct mem_cgroup_reclaim_cookie { struct mem_cgroup_id { int id; refcount_t ref; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) };
/* @@ -93,6 +97,9 @@ struct mem_cgroup_reclaim_iter { struct mem_cgroup *position; /* scan generation, increased every round-trip */ unsigned int generation; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) };
/* @@ -104,6 +111,9 @@ struct shrinker_info { atomic_long_t *nr_deferred; unsigned long *map; int map_nr_max; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) };
struct lruvec_stats_percpu { @@ -123,6 +133,15 @@ struct lruvec_stats {
/* Pending child counts during tree propagation */ long state_pending[NR_VM_NODE_STAT_ITEMS]; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) + KABI_RESERVE(5) + KABI_RESERVE(6) + KABI_RESERVE(7) + KABI_RESERVE(8) };
/* @@ -146,11 +165,19 @@ struct mem_cgroup_per_node { bool on_tree; struct mem_cgroup *memcg; /* Back pointer, we cannot */ /* use container_of */ + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
struct mem_cgroup_threshold { struct eventfd_ctx *eventfd; unsigned long threshold; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) };
/* For threshold */ @@ -159,6 +186,9 @@ struct mem_cgroup_threshold_ary { int current_threshold; /* Size of entries[] */ unsigned int size; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) /* Array of thresholds */ struct mem_cgroup_threshold entries[]; }; @@ -172,6 +202,15 @@ struct mem_cgroup_thresholds { * It must be able to store at least primary->size - 1 entries. */ struct mem_cgroup_threshold_ary *spare; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) + KABI_RESERVE(5) + KABI_RESERVE(6) + KABI_RESERVE(7) + KABI_RESERVE(8) };
/* @@ -189,6 +228,9 @@ struct memcg_cgwb_frn { int memcg_id; /* memcg->css.id of foreign inode */ u64 at; /* jiffies_64 at the time of dirtying */ struct wb_completion done; /* tracks in-flight foreign writebacks */ + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) };
/* @@ -205,6 +247,10 @@ struct obj_cgroup { struct list_head list; /* protected by objcg_lock */ struct rcu_head rcu; }; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) };
struct swap_device { @@ -369,6 +415,14 @@ struct mem_cgroup { struct dynamic_pool *dpool; #endif
+ KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) + KABI_RESERVE(5) + KABI_RESERVE(6) + KABI_RESERVE(7) + KABI_RESERVE(8) struct mem_cgroup_per_node *nodeinfo[]; };
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 346be8292294..0bfe95d9b9ba 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -764,6 +764,14 @@ struct memcg_vmstats_percpu { /* Cgroup1: threshold notifications & softlimit tree updates */ unsigned long nr_page_events; unsigned long targets[MEM_CGROUP_NTARGETS]; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) + KABI_RESERVE(5) + KABI_RESERVE(6) + KABI_RESERVE(7) + KABI_RESERVE(8) };
struct memcg_vmstats { @@ -778,6 +786,14 @@ struct memcg_vmstats { /* Pending child counts during tree propagation */ long state_pending[MEMCG_NR_STAT]; unsigned long events_pending[NR_MEMCG_EVENTS]; + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) + KABI_RESERVE(5) + KABI_RESERVE(6) + KABI_RESERVE(7) + KABI_RESERVE(8) };
unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
From: Lu Jialin lujialin4@huawei.com
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8TP4Z
--------------------------------
We reserve some fields beforehand for enum cgroup_bpf_attach_type, enum bpf_cgroup_storage_type and struct cgroup_bpf, which are prone to change. Therefore, we can hot add/change features of bpf cgroup with this enhancement.
After reserving, cache behavior is normally unaffected, as the reserved fields are not accessed at all.
Signed-off-by: Lu Jialin lujialin4@huawei.com Signed-off-by: Xiang Yang xiangyang3@huawei.com --- include/linux/bpf-cgroup-defs.h | 20 ++++++++++++++++++++ include/linux/bpf.h | 11 +++++++++++ 2 files changed, 31 insertions(+)
diff --git a/include/linux/bpf-cgroup-defs.h b/include/linux/bpf-cgroup-defs.h index 7b121bd780eb..ed04fe751dd0 100644 --- a/include/linux/bpf-cgroup-defs.h +++ b/include/linux/bpf-cgroup-defs.h @@ -7,6 +7,7 @@ #include <linux/list.h> #include <linux/percpu-refcount.h> #include <linux/workqueue.h> +#include <linux/kabi.h>
struct bpf_prog_array;
@@ -44,6 +45,16 @@ enum cgroup_bpf_attach_type { CGROUP_INET_SOCK_RELEASE, CGROUP_LSM_START, CGROUP_LSM_END = CGROUP_LSM_START + CGROUP_LSM_NUM - 1, +#ifdef CONFIG_KABI_RESERVE + CGROUP_ATTACH_TYPE_KABI_RESERVE_1, + CGROUP_ATTACH_TYPE_KABI_RESERVE_2, + CGROUP_ATTACH_TYPE_KABI_RESERVE_3, + CGROUP_ATTACH_TYPE_KABI_RESERVE_4, + CGROUP_ATTACH_TYPE_KABI_RESERVE_5, + CGROUP_ATTACH_TYPE_KABI_RESERVE_6, + CGROUP_ATTACH_TYPE_KABI_RESERVE_7, + CGROUP_ATTACH_TYPE_KABI_RESERVE_8, +#endif MAX_CGROUP_BPF_ATTACH_TYPE };
@@ -70,6 +81,15 @@ struct cgroup_bpf {
/* cgroup_bpf is released using a work queue */ struct work_struct release_work; + + KABI_RESERVE(1) + KABI_RESERVE(2) + KABI_RESERVE(3) + KABI_RESERVE(4) + KABI_RESERVE(5) + KABI_RESERVE(6) + KABI_RESERVE(7) + KABI_RESERVE(8) };
#else /* CONFIG_CGROUP_BPF */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 392f581af2ce..5ac22a961534 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -29,6 +29,7 @@ #include <linux/rcupdate_trace.h> #include <linux/static_call.h> #include <linux/memcontrol.h> +#include <linux/kabi.h>
struct bpf_verifier_env; struct bpf_verifier_log; @@ -974,6 +975,16 @@ struct bpf_prog_offload { enum bpf_cgroup_storage_type { BPF_CGROUP_STORAGE_SHARED, BPF_CGROUP_STORAGE_PERCPU, +#ifdef CONFIG_KABI_RESERVE + BPF_CGROUP_STORAGE_KABI_RESERVE_1, + BPF_CGROUP_STORAGE_KABI_RESERVE_2, + BPF_CGROUP_STORAGE_KABI_RESERVE_3, + BPF_CGROUP_STORAGE_KABI_RESERVE_4, + BPF_CGROUP_STORAGE_KABI_RESERVE_5, + BPF_CGROUP_STORAGE_KABI_RESERVE_6, + BPF_CGROUP_STORAGE_KABI_RESERVE_7, + BPF_CGROUP_STORAGE_KABI_RESERVE_8, +#endif __BPF_CGROUP_STORAGE_MAX };