From: Guan Jing <guanjing6@huawei.com>

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4KAP1?from=project-issue
CVE: NA
--------

We reserve some fields beforehand in the sched structures that are
prone to change, so that sched features can later be hot-added or
changed on top of this enhancement. After reserving, cache behaviour
is normally unaffected, since the reserved fields are never accessed.
Signed-off-by: Guan Jing <guanjing6@huawei.com>
Reviewed-by: Chen Hui <judy.chenhui@huawei.com>
Reviewed-by: Cheng Jian <cj.chengjian@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 include/linux/delayacct.h      |  4 ++++
 include/linux/sched.h          | 30 ++++++++++++++++++++++++++++++
 include/linux/sched/signal.h   |  6 ++++++
 include/linux/sched/topology.h |  4 ++++
 include/linux/sched/user.h     |  4 ++++
 kernel/sched/sched.h           | 18 ++++++++++++++++++
 6 files changed, 66 insertions(+)
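
A note on the mechanism: KABI_RESERVE() burns a fixed-size placeholder
slot into a structure, so that a later feature can reuse the space
without shifting the offsets of existing members. Below is a minimal
sketch of the idea, assuming a RHEL-style definition; the authoritative
macro lives in include/linux/kabi.h and may differ in detail, and
struct example_state is purely hypothetical:

	/* Sketch only: each invocation adds one named u64 slot. */
	#define _KABI_RESERVE(n)	u64 kabi_reserved##n
	#define KABI_RESERVE(n)		_KABI_RESERVE(n);

	/* Usage as in this patch: spare slots inside a structure. */
	struct example_state {
		unsigned long	flags;
		KABI_RESERVE(1)
		KABI_RESERVE(2)
	};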
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 2d3bdcccf5eb..6c45a14c3eb2 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -8,6 +8,7 @@
 #define _LINUX_DELAYACCT_H
 
 #include <uapi/linux/taskstats.h>
+#include <linux/kabi.h>
 
 /*
  * Per-task flags relevant to delay accounting
@@ -53,6 +54,9 @@ struct task_delay_info {
 	u32 freepages_count;	/* total count of memory reclaim */
 	u32 thrashing_count;	/* total count of thrash waits */
+
+	KABI_RESERVE(1)
+	KABI_RESERVE(2)
 };
 #endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d0312bdc00a0..3bc2f15df252 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -35,6 +35,7 @@
 #include <linux/seqlock.h>
 #include <linux/kcsan.h>
 #include <linux/thread_bits.h>
+#include <linux/kabi.h>
 
 /* task_struct member predeclarations (sorted alphabetically): */
 struct audit_context;
@@ -307,6 +308,10 @@ struct sched_info {
 	/* When were we last queued to run? */
 	unsigned long long		last_queued;
+	KABI_RESERVE(1)
+	KABI_RESERVE(2)
+	KABI_RESERVE(3)
+	KABI_RESERVE(4)
 #endif /* CONFIG_SCHED_INFO */
 };
 
@@ -455,6 +460,11 @@ struct sched_statistics {
 	u64				nr_wakeups_affine_attempts;
 	u64				nr_wakeups_passive;
 	u64				nr_wakeups_idle;
+
+	KABI_RESERVE(1)
+	KABI_RESERVE(2)
+	KABI_RESERVE(3)
+	KABI_RESERVE(4)
 #endif
 };
 
@@ -494,6 +504,11 @@ struct sched_entity {
 	 */
 	struct sched_avg		avg;
 #endif
+
+	KABI_RESERVE(1)
+	KABI_RESERVE(2)
+	KABI_RESERVE(3)
+	KABI_RESERVE(4)
 };
 
 struct sched_rt_entity {
@@ -512,6 +527,9 @@ struct sched_rt_entity {
 	/* rq "owned" by this entity/group: */
 	struct rt_rq			*my_q;
 #endif
+
+	KABI_RESERVE(1)
+	KABI_RESERVE(2)
 } __randomize_layout;
 
 struct sched_dl_entity {
@@ -589,6 +607,9 @@ struct sched_dl_entity {
 	 */
 	struct sched_dl_entity		*pi_se;
 #endif
+
+	KABI_RESERVE(1)
+	KABI_RESERVE(2)
 };
 
 #ifdef CONFIG_UCLAMP_TASK
@@ -1369,6 +1390,15 @@ struct task_struct {
 	 */
 	randomized_struct_fields_end
 
+	KABI_RESERVE(1)
+	KABI_RESERVE(2)
+	KABI_RESERVE(3)
+	KABI_RESERVE(4)
+	KABI_RESERVE(5)
+	KABI_RESERVE(6)
+	KABI_RESERVE(7)
+	KABI_RESERVE(8)
+
 	/* CPU-specific state of this task: */
 	struct thread_struct		thread;
 
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 657640015b33..940be421d263 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -12,6 +12,7 @@
 #include <linux/posix-timers.h>
 #include <linux/mm_types.h>
 #include <asm/ptrace.h>
+#include <linux/kabi.h>
 
 /*
  * Types defining task->signal and task->sighand and APIs using them:
@@ -235,6 +236,11 @@ struct signal_struct {
 					 * and may have inconsistent
 					 * permissions.
 					 */
+
+	KABI_RESERVE(1)
+	KABI_RESERVE(2)
+	KABI_RESERVE(3)
+	KABI_RESERVE(4)
 } __randomize_layout;
 
 /*
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index a80826bfef44..bff39305271d 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -5,6 +5,7 @@
 #include <linux/topology.h>
 
 #include <linux/sched/idle.h>
+#include <linux/kabi.h>
 
 /*
  * sched-domains (multiprocessor balancing) declarations:
@@ -152,6 +153,9 @@ struct sched_domain {
 	};
 	struct sched_domain_shared *shared;
 
+	KABI_RESERVE(1)
+	KABI_RESERVE(2)
+
 	unsigned int span_weight;
 	/*
	 * Span of all CPUs in this domain.
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index a8ec3b6093fc..98cc770a9d2c 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -6,6 +6,7 @@
 #include <linux/atomic.h>
 #include <linux/refcount.h>
 #include <linux/ratelimit.h>
+#include <linux/kabi.h>
 
 /*
  * Some day this will be a full-fledged user tracking system..
@@ -42,6 +43,9 @@ struct user_struct {
 
 	/* Miscellaneous per-user rate limit */
 	struct ratelimit_state ratelimit;
+
+	KABI_RESERVE(1)
+	KABI_RESERVE(2)
 };
 
 extern int uids_sysfs_init(void);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ca7572c052f8..451440e36318 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -384,6 +384,8 @@ struct cfs_bandwidth {
 	int			nr_periods;
 	int			nr_throttled;
 	u64			throttled_time;
+
+	KABI_RESERVE(1)
 #endif
 };
@@ -618,6 +620,9 @@ struct cfs_rq {
 	struct list_head	throttled_list;
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
+
+	KABI_RESERVE(1)
+	KABI_RESERVE(2)
 };
 
 static inline int rt_bandwidth_enabled(void)
@@ -843,6 +848,11 @@ struct root_domain {
 	 * CPUs of the rd. Protected by RCU.
	 */
 	struct perf_domain __rcu *pd;
+
+	KABI_RESERVE(1)
+	KABI_RESERVE(2)
+	KABI_RESERVE(3)
+	KABI_RESERVE(4)
 };
 
 extern void init_defrootdomain(void);
@@ -1078,6 +1088,9 @@ struct rq {
 	/* Must be inspected within a rcu lock section */
 	struct cpuidle_state	*idle_state;
 #endif
+
+	KABI_RESERVE(1)
+	KABI_RESERVE(2)
 };
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1519,6 +1532,8 @@ struct sched_group {
 	struct sched_group_capacity *sgc;
 	int			asym_prefer_cpu;	/* CPU of highest priority in group */
 
+	KABI_RESERVE(1)
+	KABI_RESERVE(2)
 	/*
	 * The CPUs this group covers.
	 *
@@ -1870,6 +1885,9 @@ struct sched_class {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	void (*task_change_group)(struct task_struct *p, int type);
 #endif
+
+	KABI_RESERVE(1)
+	KABI_RESERVE(2)
 } __aligned(STRUCT_ALIGNMENT); /* STRUCT_ALIGN(), vmlinux.lds.h */
 
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
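
A closing note on how a reserved slot is consumed later: the usual
trick is to overlay the new member on the reserved field through an
anonymous union, so sizeof() and the offsets of all following members
stay put. openEuler's kabi.h provides helpers along these lines (e.g.
KABI_USE()); the macro and struct names below are illustrative only,
not the real helpers:

	#include <linux/types.h>	/* for u64 */

	/*
	 * Illustrative sketch: overlay 'new' on the reserved member
	 * 'orig'.  The real helpers additionally emit a build-time
	 * check that 'new' does not outgrow 'orig'.
	 */
	#define KABI_OVERLAY(orig, new)		\
		union {				\
			new;			\
			struct {		\
				orig;		\
			};			\
		}

	/* e.g. turning a reserved slot into a feature flag: */
	struct rq_example {
		KABI_OVERLAY(u64 kabi_reserved1, unsigned int new_feature_flag);
	};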