From: Zheng Zucheng <zhengzucheng@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9HSOM
CVE: NA
--------------------------------
Allocate a new task_struct_resvd object for the recently cloned task and rework grid feature use task_struct grid_qos field
Signed-off-by: Zheng Zucheng <zhengzucheng@huawei.com> --- include/linux/sched.h | 20 +++++++++++++++----- include/linux/sched/grid_qos.h | 2 +- init/init_task.c | 5 +++++ kernel/fork.c | 21 ++++++++++++++++++++- kernel/sched/grid/qos.c | 20 ++++++++++---------- 5 files changed, 51 insertions(+), 17 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h index 8fd8c5b7cdc6..2d59cba2fe64 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -620,6 +620,20 @@ typedef union { } fork_pid_t; #endif
+/* + * struct task_struct_resvd - KABI extension struct + */ +struct task_struct_resvd { + /* + * pointer back to the main task_struct + */ + struct task_struct *task; + +#ifdef CONFIG_QOS_SCHED_SMART_GRID + struct sched_grid_qos *grid_qos; +#endif +}; + struct task_struct { #ifdef CONFIG_THREAD_INFO_IN_TASK /* @@ -1279,11 +1293,7 @@ struct task_struct { #endif
#if !defined(__GENKSYMS__) -#if defined(CONFIG_QOS_SCHED_SMART_GRID) - struct sched_grid_qos *grid_qos; -#else - KABI_RESERVE(8) -#endif + struct task_struct_resvd *_resvd; #else KABI_RESERVE(8) #endif diff --git a/include/linux/sched/grid_qos.h b/include/linux/sched/grid_qos.h index 23d08dbb6ae6..93f663453e16 100644 --- a/include/linux/sched/grid_qos.h +++ b/include/linux/sched/grid_qos.h @@ -76,7 +76,7 @@ struct sched_grid_qos {
static inline int sched_qos_affinity_set(struct task_struct *p) { - return p->grid_qos->affinity_set(p); + return p->_resvd->grid_qos->affinity_set(p); }
int sched_grid_qos_fork(struct task_struct *p, struct task_struct *orig); diff --git a/init/init_task.c b/init/init_task.c index b312a045f4b9..db5b7461b9c3 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -50,6 +50,10 @@ static struct sighand_struct init_sighand = { .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(init_sighand.signalfd_wqh), };
+static struct task_struct_resvd init_task_struct_resvd = { + .task = &init_task, +}; + /* * Set up the first task table, touch at your own risk!. Base=0, * limit=0x1fffff (=2MB) @@ -188,6 +192,7 @@ struct task_struct init_task .fork_pid = 0, }, #endif + ._resvd = &init_task_struct_resvd, }; EXPORT_SYMBOL(init_task);
diff --git a/kernel/fork.c b/kernel/fork.c index bfc4534ff116..02b676d10054 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -161,6 +161,7 @@ static inline struct task_struct *alloc_task_struct_node(int node)
static inline void free_task_struct(struct task_struct *tsk) { + kfree(tsk->_resvd); kmem_cache_free(task_struct_cachep, tsk); } #endif @@ -845,6 +846,18 @@ void set_task_stack_end_magic(struct task_struct *tsk) *stackend = STACK_END_MAGIC; /* for overflow detection */ }
+static bool dup_resvd_task_struct(struct task_struct *dst, + struct task_struct *orig, int node) +{ + dst->_resvd = kzalloc_node(sizeof(struct task_struct_resvd), + GFP_KERNEL, node); + if (!dst->_resvd) + return false; + + dst->_resvd->task = dst; + return true; +} + static struct task_struct *dup_task_struct(struct task_struct *orig, int node) { struct task_struct *tsk; @@ -857,6 +870,12 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) tsk = alloc_task_struct_node(node); if (!tsk) return NULL; + /* + * before proceeding, we need to make tsk->_resvd = NULL, + * otherwise the error paths below, if taken, might end up causing + * a double-free for task_struct_resvd extension object. + */ + WRITE_ONCE(tsk->_resvd, NULL);
stack = alloc_thread_stack_node(tsk, node); if (!stack) @@ -882,7 +901,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) atomic_set(&tsk->stack_refcount, 1); #endif
- if (err) + if (err || !dup_resvd_task_struct(tsk, orig, node)) goto free_stack;
#ifdef CONFIG_SECCOMP diff --git a/kernel/sched/grid/qos.c b/kernel/sched/grid/qos.c index f0f10dfb9fd4..b3df69d91499 100644 --- a/kernel/sched/grid/qos.c +++ b/kernel/sched/grid/qos.c @@ -26,7 +26,7 @@ static inline int qos_affinity_set(struct task_struct *p) { int n; - struct sched_grid_qos_affinity *affinity = &p->grid_qos->affinity; + struct sched_grid_qos_affinity *affinity = &p->_resvd->grid_qos->affinity;
if (likely(affinity->prefer_cpus == p->select_cpus)) return 0; @@ -58,18 +58,18 @@ int sched_grid_qos_fork(struct task_struct *p, struct task_struct *orig) qos_stat_init(&qos->stat);
nodes_clear(qos->affinity.mem_preferred_node_mask); - if (likely(orig->grid_qos)) - qos->affinity = orig->grid_qos->affinity; + if (likely(orig->_resvd->grid_qos)) + qos->affinity = orig->_resvd->grid_qos->affinity; qos->affinity_set = qos_affinity_set; - p->grid_qos = qos; + p->_resvd->grid_qos = qos;
return 0; }
void sched_grid_qos_free(struct task_struct *p) { - kfree(p->grid_qos); - p->grid_qos = NULL; + kfree(p->_resvd->grid_qos); + p->_resvd->grid_qos = NULL; }
/* dynamic select a more appropriate preferred interleave nid for process */ @@ -80,9 +80,9 @@ int sched_grid_preferred_interleave_nid(struct mempolicy *policy) struct task_struct *me = current; nodemask_t *preferred_nmask = NULL;
- if (likely(me->grid_qos)) + if (likely(me->_resvd->grid_qos)) preferred_nmask = - &me->grid_qos->affinity.mem_preferred_node_mask; + &me->_resvd->grid_qos->affinity.mem_preferred_node_mask;
if (!preferred_nmask || !policy) return NUMA_NO_NODE; @@ -111,9 +111,9 @@ int sched_grid_preferred_nid(int preferred_nid, nodemask_t *nodemask) nodemask_t nmask, ndmask; nodemask_t *preferred_nmask = NULL;
- if (likely(current->grid_qos)) + if (likely(current->_resvd->grid_qos)) preferred_nmask = - &current->grid_qos->affinity.mem_preferred_node_mask; + &current->_resvd->grid_qos->affinity.mem_preferred_node_mask;
if (!preferred_nmask) return preferred_nid;