From: Li Hua <hucool.lihua@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I3UKOW
CVE: NA
-------------------------------------------------
This reverts commit 281caf306accfea76683f1dabc86749cc1de616a.
Remove the workaround for the NULL pointer dereference in membarrier_global_expedited(), to prepare for introducing the proper fix in the following patches.
Signed-off-by: Li Hua <hucool.lihua@huawei.com>
Reviewed-by: Jian Cheng <cj.chengjian@huawei.com>
Reviewed-by: Zhang Qiao <zhangqiao22@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 kernel/sched/membarrier.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index ea888ddb914f6..c4ea07e857985 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -115,7 +115,7 @@ static int membarrier_global_expedited(void)
 		 * scheduling a kthread.
 		 */
 		p = task_rcu_dereference(&cpu_rq(cpu)->curr);
-		if (p && p->flags & PF_KTHREAD)
+		if (p->flags & PF_KTHREAD)
 			continue;

 		__cpumask_set_cpu(cpu, tmpmask);
From: "Eric W. Biederman" ebiederm@xmission.com
mainline inclusion
from mainline-5.4-rc1
commit 3fbd7ee285b2bbc6eebd15a3c8786d9776a402a8
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I3UKOW
CVE: NA
-------------------------------------------------
Add a count of the number of RCU users (currently 1) of the task struct so that we can later add the scheduler case and get rid of the very subtle task_rcu_dereference(), and just use rcu_dereference().
As suggested by Oleg, have the count overlap rcu_head so that no additional space in task_struct is required.
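For illustration, here is a minimal user-space sketch of the overlap pattern; struct obj, fake_call_rcu() and friends are stand-ins, not kernel code. The count is only used while the object is alive, and the rcu_head is only needed after the last reference is dropped, so the two can share the same bytes:

	#include <stdatomic.h>
	#include <stddef.h>
	#include <stdlib.h>

	/* Stand-in for the kernel's struct rcu_head. */
	struct rcu_head {
		struct rcu_head *next;
		void (*func)(struct rcu_head *);
	};

	struct obj {
		int payload;
		union {
			atomic_int rcu_users;	/* used while the object is alive */
			struct rcu_head rcu;	/* reused once the count hits zero */
		};
	};

	static void obj_free(struct rcu_head *rhp)
	{
		struct obj *o = (struct obj *)((char *)rhp - offsetof(struct obj, rcu));
		free(o);
	}

	/* Stand-in for call_rcu(): runs the callback immediately rather
	 * than after a grace period. */
	static void fake_call_rcu(struct rcu_head *rhp, void (*func)(struct rcu_head *))
	{
		rhp->func = func;
		rhp->func(rhp);
	}

	static void obj_put(struct obj *o)
	{
		/* After the count hits zero nobody reads ->rcu_users again,
		 * so overlaying the rcu_head on the same storage is safe. */
		if (atomic_fetch_sub(&o->rcu_users, 1) == 1)
			fake_call_rcu(&o->rcu, obj_free);
	}

	int main(void)
	{
		struct obj *o = malloc(sizeof(*o));

		o->payload = 42;
		atomic_init(&o->rcu_users, 1);
		obj_put(o);	/* last put: the union becomes the rcu_head */
		return 0;
	}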
Inspired-by: Linus Torvalds <torvalds@linux-foundation.org>
Inspired-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King - ARM Linux admin <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/87woebdplt.fsf_-_@x220.int.ebiederm.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Conflicts: kernel/fork.c
Signed-off-by: Li Hua <hucool.lihua@huawei.com>
Reviewed-by: Jian Cheng <cj.chengjian@huawei.com>
Reviewed-by: Zhang Qiao <zhangqiao22@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/sched.h      | 5 ++++-
 include/linux/sched/task.h | 1 +
 kernel/exit.c              | 7 ++++++-
 kernel/fork.c              | 7 +++----
 4 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b4c6dfe5e1d9d..0ce6cd87e7509 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1087,7 +1087,10 @@ struct task_struct {

 	struct tlbflush_unmap_batch	tlb_ubc;

-	struct rcu_head			rcu;
+	union {
+		refcount_t		rcu_users;
+		struct rcu_head		rcu;
+	};

 	/* Cache last used pipe for splice(): */
 	struct pipe_inode_info		*splice_pipe;

diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index d744b385108e8..4afd357a5e0c5 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -105,6 +105,7 @@ static inline void put_task_struct_many(struct task_struct *t, int nr)
 }

 struct task_struct *task_rcu_dereference(struct task_struct **ptask);
+void put_task_struct_rcu_user(struct task_struct *task);

 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
 extern int arch_task_struct_size __read_mostly;

diff --git a/kernel/exit.c b/kernel/exit.c
index 378fdc19a1fbb..4c9ab2378082e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -172,6 +172,11 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
 	put_task_struct(tsk);
 }

+void put_task_struct_rcu_user(struct task_struct *task)
+{
+	if (refcount_dec_and_test(&task->rcu_users))
+		call_rcu(&task->rcu, delayed_put_task_struct);
+}
+
 void release_task(struct task_struct *p)
 {
@@ -212,7 +217,7 @@ void release_task(struct task_struct *p)

 	write_unlock_irq(&tasklist_lock);
 	release_thread(p);
-	call_rcu(&p->rcu, delayed_put_task_struct);
+	put_task_struct_rcu_user(p);

 	p = leader;
 	if (unlikely(zap_leader))

diff --git a/kernel/fork.c b/kernel/fork.c
index 80a7e29920cd1..f81a61d1e79c7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -896,10 +896,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	tsk->stack_canary = get_random_canary();
 #endif

-	/*
-	 * One for us, one for whoever does the "release_task()" (usually
-	 * parent)
-	 */
+	/* One for the user space visible state that goes away when reaped. */
+	refcount_set(&tsk->rcu_users, 1);
+	/* One for the rcu users, and one for the scheduler */
 	atomic_set(&tsk->usage, 2);
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	tsk->btrace_seq = 0;
From: "Eric W. Biederman" ebiederm@xmission.com
mainline inclusion
from mainline-5.4-rc1
commit 0ff7b2cfbae36ebcd216c6a5ad7f8534eebeaee2
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I3UKOW
CVE: NA
-------------------------------------------------
In the ordinary case today the RCU grace period for a task_struct is triggered when another process waits for its zombie and causes the kernel to call release_task(). As the waiting task has to receive a signal and then act upon it before this happens, typically this will occur after the original task has been removed from the runqueue.

Unfortunately, in some cases, such as self-reaping tasks, it can be shown that release_task() will be called, starting the grace period for the task_struct, long before the task leaves the runqueue.

Therefore use put_task_struct_rcu_user() in finish_task_switch() to guarantee that there is an RCU lifetime after the task leaves the runqueue.
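Schematically, the race for a self-reaping task before this change (a timing sketch, not verbatim kernel code):

	exiting task                        RCU reader
	------------                        ----------
	exit_notify()
	  release_task(current)
	    call_rcu(&p->rcu, ...)          rcu_read_lock()
	...grace period may elapse...       p = rcu_dereference(rq->curr)
	__schedule()                        use of p: potential use-after-free
	  finish_task_switch()              rcu_read_unlock()

With rcu_users initialized to 2, call_rcu() cannot run until both release_task() and finish_task_switch() have dropped a reference, so the grace period cannot begin while the task is still on the runqueue.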
Besides moving the start of the RCU grace period for the task_struct, this change may delay the calls to perf_event_delayed_put() and trace_sched_process_free(). The function perf_event_delayed_put() boils down to just a WARN_ON for cases that I assume never happen, so I don't see any problem with delaying it.

The function trace_sched_process_free() is a tracepoint and thus visible to user space. Occasionally userspace has the strangest dependencies, so this has a minuscule chance of causing a regression. This change only alters the timing of when the tracepoint is called, and the new timing arguably gives userspace a more accurate picture of what is going on. So I don't expect there to be a regression.

In the case where a task self-reaps, we are pretty much guaranteed that the RCU grace period is delayed, so we should get quite a bit of coverage of this worst case in a normal threaded workload. I expect any issues to turn up quickly or not at all.
I have lightly tested this change and everything appears to work fine.
Inspired-by: Linus Torvalds <torvalds@linux-foundation.org>
Inspired-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King - ARM Linux admin <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/87r24jdpl5.fsf_-_@x220.int.ebiederm.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Conflicts: kernel/fork.c
Signed-off-by: Li Hua <hucool.lihua@huawei.com>
Reviewed-by: Jian Cheng <cj.chengjian@huawei.com>
Reviewed-by: Zhang Qiao <zhangqiao22@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 kernel/fork.c       | 11 +++++++----
 kernel/sched/core.c |  2 +-
 2 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index f81a61d1e79c7..964a6f20efe76 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -896,10 +896,13 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	tsk->stack_canary = get_random_canary();
 #endif

-	/* One for the user space visible state that goes away when reaped. */
-	refcount_set(&tsk->rcu_users, 1);
-	/* One for the rcu users, and one for the scheduler */
-	atomic_set(&tsk->usage, 2);
+	/*
+	 * One for the user space visible state that goes away when reaped.
+	 * One for the scheduler.
+	 */
+	refcount_set(&tsk->rcu_users, 2);
+	/* One for the rcu users */
+	atomic_set(&tsk->usage, 1);
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	tsk->btrace_seq = 0;
 #endif

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 41fee321ef83e..64c6a8cb8c739 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2712,7 +2712,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 		/* Task is done with its stack. */
 		put_task_stack(prev);

-		put_task_struct(prev);
+		put_task_struct_rcu_user(prev);
 	}

 	tick_nohz_task_switch();
From: "Eric W. Biederman" ebiederm@xmission.com
mainline inclusion
from mainline-5.4-rc1
commit 154abafc68bfb7c2ef2ad5308a3b2de8968c3f61
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I3UKOW
CVE: NA
-------------------------------------------------
Remove workarounds that were written before there was a grace period after tasks left the runqueue in finish_task_switch().

In particular, now that tasks leaving the runqueue experience an RCU grace period, none of the work performed by task_rcu_dereference() except the rcu_dereference() itself is necessary, so replace task_rcu_dereference() with rcu_dereference().
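With that guarantee in place, a reader of rq->curr needs nothing beyond an ordinary RCU read-side critical section. For example, a sketch in the shape of the membarrier code below (illustrative only, mirroring the diff that follows):

	rcu_read_lock();
	p = rcu_dereference(cpu_rq(cpu)->curr);
	if (p && p->mm == mm)
		__cpumask_set_cpu(cpu, tmpmask);
	rcu_read_unlock();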
Remove the code in rcuwait_wait_event() that checks to ensure the current task has not exited. It is no longer necessary, as it is guaranteed that any running task will experience an RCU grace period after it leaves the runqueue.
Remove the comment in rcuwait_wake_up() as it is no longer relevant.
[Li Hua: change task_rcu_dereference to rcu_dereference in sync_runqueues_membarrier_state]
Ref: 8f95c90ceb54 ("sched/wait, RCU: Introduce rcuwait machinery")
Ref: 150593bf8693 ("sched/api: Introduce task_rcu_dereference() and try_get_task_struct()")
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King - ARM Linux admin <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/87lfurdpk9.fsf_-_@x220.int.ebiederm.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Conflicts: kernel/sched/membarrier.c
Signed-off-by: Li Hua <hucool.lihua@huawei.com>
Reviewed-by: Jian Cheng <cj.chengjian@huawei.com>
Reviewed-by: Zhang Qiao <zhangqiao22@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/rcuwait.h    | 20 +++---------
 include/linux/sched/task.h |  1 -
 kernel/exit.c              | 67 --------------------------------------
 kernel/sched/fair.c        |  2 +-
 kernel/sched/membarrier.c  |  6 ++--
 5 files changed, 8 insertions(+), 88 deletions(-)
diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h
index 90bfa3279a01c..810e1710a69c3 100644
--- a/include/linux/rcuwait.h
+++ b/include/linux/rcuwait.h
@@ -6,16 +6,11 @@

 /*
  * rcuwait provides a way of blocking and waking up a single
- * task in an rcu-safe manner; where it is forbidden to use
- * after exit_notify(). task_struct is not properly rcu protected,
- * unless dealing with rcu-aware lists, ie: find_task_by_*().
+ * task in an rcu-safe manner.
  *
- * Alternatively we have task_rcu_dereference(), but the return
- * semantics have different implications which would break the
- * wakeup side. The only time @task is non-nil is when a user is
- * blocked (or checking if it needs to) on a condition, and reset
- * as soon as we know that the condition has succeeded and are
- * awoken.
+ * The only time @task is non-nil is when a user is blocked (or
+ * checking if it needs to) on a condition, and reset as soon as we
+ * know that the condition has succeeded and are awoken.
  */
 struct rcuwait {
 	struct task_struct *task;
@@ -37,13 +32,6 @@ extern void rcuwait_wake_up(struct rcuwait *w);
  */
 #define rcuwait_wait_event(w, condition)				\
 ({									\
-	/*								\
-	 * Complain if we are called after do_exit()/exit_notify(),	\
-	 * as we cannot rely on the rcu critical region for the	\
-	 * wakeup side.							\
-	 */								\
-	WARN_ON(current->exit_state);					\
-									\
 	rcu_assign_pointer((w)->task, current);				\
 	for (;;) {							\
 		/*							\

diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 4afd357a5e0c5..d06776c264ed7 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -104,7 +104,6 @@ static inline void put_task_struct_many(struct task_struct *t, int nr)
 	__put_task_struct(t);
 }

-struct task_struct *task_rcu_dereference(struct task_struct **ptask);
 void put_task_struct_rcu_user(struct task_struct *task);

 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT

diff --git a/kernel/exit.c b/kernel/exit.c
index 4c9ab2378082e..92846d9d25eff 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -224,69 +224,6 @@ void release_task(struct task_struct *p)
 		goto repeat;
 }

-/*
- * Note that if this function returns a valid task_struct pointer (!NULL)
- * task->usage must remain >0 for the duration of the RCU critical section.
- */
-struct task_struct *task_rcu_dereference(struct task_struct **ptask)
-{
-	struct sighand_struct *sighand;
-	struct task_struct *task;
-
-	/*
-	 * We need to verify that release_task() was not called and thus
-	 * delayed_put_task_struct() can't run and drop the last reference
-	 * before rcu_read_unlock(). We check task->sighand != NULL,
-	 * but we can read the already freed and reused memory.
-	 */
-retry:
-	task = rcu_dereference(*ptask);
-	if (!task)
-		return NULL;
-
-	probe_kernel_address(&task->sighand, sighand);
-
-	/*
-	 * Pairs with atomic_dec_and_test() in put_task_struct(). If this task
-	 * was already freed we can not miss the preceding update of this
-	 * pointer.
-	 */
-	smp_rmb();
-	if (unlikely(task != READ_ONCE(*ptask)))
-		goto retry;
-
-	/*
-	 * We've re-checked that "task == *ptask", now we have two different
-	 * cases:
-	 *
-	 * 1. This is actually the same task/task_struct. In this case
-	 *    sighand != NULL tells us it is still alive.
-	 *
-	 * 2. This is another task which got the same memory for task_struct.
-	 *    We can't know this of course, and we can not trust
-	 *    sighand != NULL.
-	 *
-	 *    In this case we actually return a random value, but this is
-	 *    correct.
-	 *
-	 *    If we return NULL - we can pretend that we actually noticed that
-	 *    *ptask was updated when the previous task has exited. Or pretend
-	 *    that probe_slab_address(&sighand) reads NULL.
-	 *
-	 *    If we return the new task (because sighand is not NULL for any
-	 *    reason) - this is fine too. This (new) task can't go away before
-	 *    another gp pass.
-	 *
-	 *    And note: We could even eliminate the false positive if re-read
-	 *    task->sighand once again to avoid the falsely NULL. But this case
-	 *    is very unlikely so we don't care.
-	 */
-	if (!sighand)
-		return NULL;
-
-	return task;
-}
-
 void rcuwait_wake_up(struct rcuwait *w)
 {
 	struct task_struct *task;
@@ -306,10 +243,6 @@ void rcuwait_wake_up(struct rcuwait *w)
 	 */
 	smp_mb(); /* (B) */

-	/*
-	 * Avoid using task_rcu_dereference() magic as long as we are careful,
-	 * see comment in rcuwait_wait_event() regarding ->exit_state.
-	 */
 	task = rcu_dereference(w->task);
 	if (task)
 		wake_up_process(task);

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 08d877b57c4f2..43e5303eb6bc0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1668,7 +1668,7 @@ static void task_numa_compare(struct task_numa_env *env,
 		return;

 	rcu_read_lock();
-	cur = task_rcu_dereference(&dst_rq->curr);
+	cur = rcu_dereference(dst_rq->curr);
 	if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
 		cur = NULL;

diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index c4ea07e857985..8c4e14e6544a8 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -114,7 +114,7 @@ static int membarrier_global_expedited(void)
 		 * leaves the prior task mm in place as an optimization when
 		 * scheduling a kthread.
 		 */
-		p = task_rcu_dereference(&cpu_rq(cpu)->curr);
+		p = rcu_dereference(cpu_rq(cpu)->curr);
 		if (p->flags & PF_KTHREAD)
 			continue;
@@ -183,7 +183,7 @@ static int membarrier_private_expedited(int flags)
 		 */
 		if (cpu == raw_smp_processor_id())
 			continue;
-		p = task_rcu_dereference(&cpu_rq(cpu)->curr);
+		p = rcu_dereference(cpu_rq(cpu)->curr);
 		if (p && p->mm == mm)
 			__cpumask_set_cpu(cpu, tmpmask);
 	}
@@ -249,7 +249,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
 		struct rq *rq = cpu_rq(cpu);
 		struct task_struct *p;

-		p = task_rcu_dereference(&rq->curr);
+		p = rcu_dereference(rq->curr);
 		if (p && p->mm == mm)
 			__cpumask_set_cpu(cpu, tmpmask);
 	}
From: "Eric W. Biederman" ebiederm@xmission.com
mainline inclusion
from mainline-5.4-rc1
commit 5311a98fef7d0dc2e8040ae0e18f5568d6d1dd5a
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I3UKOW
CVE: NA
-------------------------------------------------
The current task on the runqueue is currently read with rcu_dereference().
To obtain ordinary RCU semantics for an rcu_dereference() of rq->curr, it needs to be paired with rcu_assign_pointer() of rq->curr, which provides the memory barrier necessary to order assignments to the task_struct and the assignment to rq->curr.

Unfortunately the assignment of rq->curr in __schedule() is a hot path, and it has already been shown that additional barriers in that code will reduce the performance of the scheduler. So I will attempt to describe below why you can effectively have ordinary RCU semantics without any additional barriers.

The assignment of rq->curr in init_idle() is a slow path called once per cpu and can use rcu_assign_pointer() without any concerns.

As I write this there are effectively two users of rcu_dereference() on rq->curr. There is the membarrier code in kernel/sched/membarrier.c that only looks at "->mm" after the rcu_dereference(). Then there is task_numa_compare() in kernel/sched/fair.c. My best reading of the code shows that task_numa_compare() only accesses: "->flags", "->cpus_ptr", "->numa_group", "->numa_faults[]", "->total_numa_faults", and "->se.cfs_rq".
The code in __schedule() essentially does:

	rq_lock(...);
	smp_mb__after_spinlock();

	next = pick_next_task(...);
	rq->curr = next;

	context_switch(prev, next);
At the start of the function the rq_lock/smp_mb__after_spinlock pair provides a full memory barrier. Further there is a full memory barrier in context_switch().
This means that any task that has already run and modified itself (the common case) has already seen two memory barriers before __schedule() runs and begins executing. A task that modifies itself then sees a third full memory barrier, paired with the rq_lock().

For a brand new task that is enqueued with wake_up_new_task(), there are the memory barriers from taking and releasing the pi_lock and the rq_lock as the process is enqueued, as well as the full memory barrier at the start of __schedule(), assuming __schedule() happens on the same cpu.

This means that by the time we reach the assignment of rq->curr, except for values on the task_struct modified in pick_next_task(), the code has the same guarantees as if it used rcu_assign_pointer().

Reading through all of the implementations of pick_next_task(), it appears pick_next_task() is limited to modifying the task_struct fields "->se", "->rt", and "->dl". These fields are the sched_entity structures of the various schedulers.

Further, "->se.cfs_rq" is only changed in cgroup attach/move operations initiated by userspace.

Unless I have missed something, this means that in practice the users of "rcu_dereference(rq->curr)" get the normal RCU semantics of rcu_dereference() for the fields they care about, despite the assignment of rq->curr in __schedule() not using rcu_assign_pointer().
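For reference, the two publication primitives differ only in their ordering guarantees. Roughly (simplified from the kernel's RCU headers, with type checking omitted):

	/*
	 * Release-ordered publish: all prior stores to the pointed-to
	 * object are visible to readers that see the new pointer.
	 */
	#define rcu_assign_pointer(p, v)	smp_store_release(&(p), (v))

	/*
	 * Plain store: no ordering; only safe when readers do not
	 * depend on seeing prior initialization of the object, which
	 * is exactly the argument made above for __schedule().
	 */
	#define RCU_INIT_POINTER(p, v)		WRITE_ONCE((p), (v))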
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King - ARM Linux admin <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20190903200603.GW2349@hirez.programming.kicks-ass....
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Li Hua <hucool.lihua@huawei.com>
Reviewed-by: Jian Cheng <cj.chengjian@huawei.com>
Reviewed-by: Zhang Qiao <zhangqiao22@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 kernel/sched/core.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 64c6a8cb8c739..414432beab48c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3495,7 +3495,11 @@ static void __sched notrace __schedule(bool preempt)

 	if (likely(prev != next)) {
 		rq->nr_switches++;
-		rq->curr = next;
+		/*
+		 * RCU users of rcu_dereference(rq->curr) may not see
+		 * changes to task_struct made by pick_next_task().
+		 */
+		RCU_INIT_POINTER(rq->curr, next);
 		/*
 		 * The membarrier system call requires each architecture
 		 * to have a full memory barrier after updating
@@ -5475,7 +5479,8 @@ void init_idle(struct task_struct *idle, int cpu)
 	__set_task_cpu(idle, cpu);
 	rcu_read_unlock();

-	rq->curr = rq->idle = idle;
+	rq->idle = idle;
+	rcu_assign_pointer(rq->curr, idle);
 	idle->on_rq = TASK_ON_RQ_QUEUED;
 #ifdef CONFIG_SMP
 	idle->on_cpu = 1;
From: Li Hua <hucool.lihua@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I3UKOW
CVE: NA
-------------------------------------------------
Commit c9dfd37f715a ("tasks: Add a count of task RCU users") added a new member to struct task_struct, which breaks KABI. This patch fixes it.
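The fix relies on two facts: genksyms computes symbol CRCs from the #else branch it sees under __GENKSYMS__, so the exported type signature is unchanged, and the union does not alter the size or the offset of any later field, since struct rcu_head (two pointers) is at least as large and as aligned as refcount_t (a single int). A stand-alone sketch of the layout argument, using illustrative stand-in types rather than the kernel headers:

	#include <assert.h>
	#include <stdatomic.h>

	/* Stand-ins for the kernel types. */
	struct rcu_head {
		struct rcu_head *next;
		void (*func)(struct rcu_head *);
	};
	typedef struct { atomic_int refs; } refcount_t;

	union rcu_overlay {
		refcount_t rcu_users;
		struct rcu_head rcu;
	};

	int main(void)
	{
		/*
		 * The union is exactly as large as its largest member, so
		 * swapping 'struct rcu_head rcu;' for the union keeps every
		 * later task_struct field at the same offset.
		 */
		static_assert(sizeof(union rcu_overlay) == sizeof(struct rcu_head),
			      "union must not change task_struct layout");
		return 0;
	}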
Signed-off-by: Li Hua <hucool.lihua@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Reviewed-by: Jian Cheng <cj.chengjian@huawei.com>
Reviewed-by: Zhang Qiao <zhangqiao22@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/sched.h | 4 ++++
 1 file changed, 4 insertions(+)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0ce6cd87e7509..ef03ea1450215 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1087,10 +1087,14 @@ struct task_struct {

 	struct tlbflush_unmap_batch	tlb_ubc;

+#ifndef __GENKSYMS__
 	union {
 		refcount_t		rcu_users;
 		struct rcu_head		rcu;
 	};
+#else
+	struct rcu_head			rcu;
+#endif

 	/* Cache last used pipe for splice(): */
 	struct pipe_inode_info		*splice_pipe;