Offering: HULK hulk inclusion category: feature bugzilla: xxx
--------------------------------
Allocate a new task_struct_resvd object for the recently cloned task, and rework the grid feature to access grid_qos through the task_struct_resvd extension instead of a direct task_struct field
Signed-off-by: Zheng Zucheng <zhengzucheng@huawei.com> --- include/linux/sched.h | 22 +++++++++++++++++----- include/linux/sched/grid_qos.h | 2 +- init/init_task.c | 10 ++++++++++ kernel/fork.c | 32 ++++++++++++++++++++++++++++++++ kernel/sched/grid/qos.c | 20 ++++++++++---------- 5 files changed, 70 insertions(+), 16 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h index 8fd8c5b7cdc6..978fbd03a779 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -620,6 +620,22 @@ typedef union { } fork_pid_t; #endif
+#if !defined(__GENKSYMS__) +/* + * struct task_struct_resvd - KABI extension struct + */ +struct task_struct_resvd { + /* + * pointer back to the main task_struct + */ + struct task_struct *task; + +#ifdef CONFIG_QOS_SCHED_SMART_GRID + struct sched_grid_qos *grid_qos; +#endif +}; +#endif + struct task_struct { #ifdef CONFIG_THREAD_INFO_IN_TASK /* @@ -1279,11 +1295,7 @@ struct task_struct { #endif
#if !defined(__GENKSYMS__) -#if defined(CONFIG_QOS_SCHED_SMART_GRID) - struct sched_grid_qos *grid_qos; -#else - KABI_RESERVE(8) -#endif + struct task_struct_resvd *_resvd; #else KABI_RESERVE(8) #endif diff --git a/include/linux/sched/grid_qos.h b/include/linux/sched/grid_qos.h index 23d08dbb6ae6..93f663453e16 100644 --- a/include/linux/sched/grid_qos.h +++ b/include/linux/sched/grid_qos.h @@ -76,7 +76,7 @@ struct sched_grid_qos {
static inline int sched_qos_affinity_set(struct task_struct *p) { - return p->grid_qos->affinity_set(p); + return p->_resvd->grid_qos->affinity_set(p); }
int sched_grid_qos_fork(struct task_struct *p, struct task_struct *orig); diff --git a/init/init_task.c b/init/init_task.c index b312a045f4b9..791243c6fc5c 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -50,6 +50,12 @@ static struct sighand_struct init_sighand = { .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(init_sighand.signalfd_wqh), };
+#if !defined(__GENKSYMS__) +static struct task_struct_resvd init_task_struct_resvd = { + .task = &init_task, +}; +#endif + /* * Set up the first task table, touch at your own risk!. Base=0, * limit=0x1fffff (=2MB) @@ -188,6 +194,10 @@ struct task_struct init_task .fork_pid = 0, }, #endif + +#if !defined(__GENKSYMS__) + ._resvd = &init_task_struct_resvd, +#endif }; EXPORT_SYMBOL(init_task);
diff --git a/kernel/fork.c b/kernel/fork.c index bfc4534ff116..dd8cbd26068a 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -161,6 +161,9 @@ static inline struct task_struct *alloc_task_struct_node(int node)
static inline void free_task_struct(struct task_struct *tsk) { +#if !defined(__GENKSYMS__) + kfree(tsk->_resvd); +#endif kmem_cache_free(task_struct_cachep, tsk); } #endif @@ -463,6 +466,7 @@ void free_task(struct task_struct *tsk) #ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY sched_prefer_cpus_free(tsk); #endif + #ifdef CONFIG_QOS_SCHED_SMART_GRID sched_grid_qos_free(tsk); #endif @@ -845,6 +849,20 @@ void set_task_stack_end_magic(struct task_struct *tsk) *stackend = STACK_END_MAGIC; /* for overflow detection */ }
+#if !defined(__GENKSYMS__) +static bool dup_resvd_task_struct(struct task_struct *dst, + struct task_struct *orig, int node) +{ + dst->_resvd = kzalloc_node(sizeof(struct task_struct_resvd), + GFP_KERNEL, node); + if (!dst->_resvd) + return false; + + dst->_resvd->task = dst; + return true; +} +#endif + static struct task_struct *dup_task_struct(struct task_struct *orig, int node) { struct task_struct *tsk; @@ -858,6 +876,15 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) if (!tsk) return NULL;
+#if !defined(__GENKSYMS__) + /* + * before proceeding, we need to make tsk->_resvd = NULL, + * otherwise the error paths below, if taken, might end up causing + * a double-free for task_struct_resvd extension object. + */ + WRITE_ONCE(tsk->_resvd, NULL); +#endif + stack = alloc_thread_stack_node(tsk, node); if (!stack) goto free_tsk; @@ -885,6 +912,11 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) if (err) goto free_stack;
+#if !defined(__GENKSYMS__) + if (!dup_resvd_task_struct(tsk, orig, node)) + goto free_stack; +#endif + #ifdef CONFIG_SECCOMP /* * We must handle setting up seccomp filters once we're under diff --git a/kernel/sched/grid/qos.c b/kernel/sched/grid/qos.c index f0f10dfb9fd4..b3df69d91499 100644 --- a/kernel/sched/grid/qos.c +++ b/kernel/sched/grid/qos.c @@ -26,7 +26,7 @@ static inline int qos_affinity_set(struct task_struct *p) { int n; - struct sched_grid_qos_affinity *affinity = &p->grid_qos->affinity; + struct sched_grid_qos_affinity *affinity = &p->_resvd->grid_qos->affinity;
if (likely(affinity->prefer_cpus == p->select_cpus)) return 0; @@ -58,18 +58,18 @@ int sched_grid_qos_fork(struct task_struct *p, struct task_struct *orig) qos_stat_init(&qos->stat);
nodes_clear(qos->affinity.mem_preferred_node_mask); - if (likely(orig->grid_qos)) - qos->affinity = orig->grid_qos->affinity; + if (likely(orig->_resvd->grid_qos)) + qos->affinity = orig->_resvd->grid_qos->affinity; qos->affinity_set = qos_affinity_set; - p->grid_qos = qos; + p->_resvd->grid_qos = qos;
return 0; }
void sched_grid_qos_free(struct task_struct *p) { - kfree(p->grid_qos); - p->grid_qos = NULL; + kfree(p->_resvd->grid_qos); + p->_resvd->grid_qos = NULL; }
/* dynamic select a more appropriate preferred interleave nid for process */ @@ -80,9 +80,9 @@ int sched_grid_preferred_interleave_nid(struct mempolicy *policy) struct task_struct *me = current; nodemask_t *preferred_nmask = NULL;
- if (likely(me->grid_qos)) + if (likely(me->_resvd->grid_qos)) preferred_nmask = - &me->grid_qos->affinity.mem_preferred_node_mask; + &me->_resvd->grid_qos->affinity.mem_preferred_node_mask;
if (!preferred_nmask || !policy) return NUMA_NO_NODE; @@ -111,9 +111,9 @@ int sched_grid_preferred_nid(int preferred_nid, nodemask_t *nodemask) nodemask_t nmask, ndmask; nodemask_t *preferred_nmask = NULL;
- if (likely(current->grid_qos)) preferred_nmask = - &current->grid_qos->affinity.mem_preferred_node_mask; + if (likely(current->_resvd->grid_qos)) preferred_nmask = + &current->_resvd->grid_qos->affinity.mem_preferred_node_mask;
if (!preferred_nmask) return preferred_nid;