From: Jens Axboe <axboe@kernel.dk>
commit b5e683d5cab8cd433b06ae178621f083cabd4f63 upstream.
eventfd use cases from aio and io_uring can deadlock due to circular or recursive calling when eventfd_signal() tries to grab the waitqueue lock. On top of that, it's also possible to construct notification chains that are deep enough that we could blow the stack.
Add a percpu counter that tracks the percpu recursion depth and warn if we exceed it. The counter is also exposed so that users of eventfd_signal() can do the right thing if it's non-zero in the context where it is called.
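A minimal caller-side sketch (not part of this patch) of how that check is meant to be used: if eventfd_signal_count() is non-zero on the local CPU, calling eventfd_signal() directly would trip the new WARN_ON_ONCE() and drop the event, so the signal has to be punted elsewhere. The defer_eventfd_signal() helper below is a hypothetical stand-in for whatever deferral mechanism (workqueue, task work, etc.) the calling subsystem already has:

	#include <linux/eventfd.h>

	/* Sketch only: defer_eventfd_signal() is a hypothetical helper. */
	static void signal_or_defer(struct eventfd_ctx *ctx)
	{
		if (eventfd_signal_count()) {
			/* Already inside an eventfd wakeup on this CPU, defer */
			defer_eventfd_signal(ctx);
			return;
		}
		eventfd_signal(ctx, 1);
	}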
Cc: stable@vger.kernel.org # 4.19+
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 fs/eventfd.c            | 15 +++++++++++++++
 include/linux/eventfd.h | 14 ++++++++++++++
 2 files changed, 29 insertions(+)
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 08d3bd6..ce1d171 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -22,6 +22,8 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
+DEFINE_PER_CPU(int, eventfd_wake_count);
+
 struct eventfd_ctx {
 	struct kref kref;
 	wait_queue_head_t wqh;
@@ -55,12 +57,25 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 {
 	unsigned long flags;
 
+	/*
+	 * Deadlock or stack overflow issues can happen if we recurse here
+	 * through waitqueue wakeup handlers. If the caller users potentially
+	 * nested waitqueues with custom wakeup handlers, then it should
+	 * check eventfd_signal_count() before calling this function. If
+	 * it returns true, the eventfd_signal() call should be deferred to a
+	 * safe context.
+	 */
+	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
+		return 0;
+
 	spin_lock_irqsave(&ctx->wqh.lock, flags);
+	this_cpu_inc(eventfd_wake_count);
 	if (ULLONG_MAX - ctx->count < n)
 		n = ULLONG_MAX - ctx->count;
 	ctx->count += n;
 	if (waitqueue_active(&ctx->wqh))
 		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
+	this_cpu_dec(eventfd_wake_count);
 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return n;
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index ffcc772..dc4fd8a 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -12,6 +12,8 @@
 #include <linux/fcntl.h>
 #include <linux/wait.h>
 #include <linux/err.h>
+#include <linux/percpu-defs.h>
+#include <linux/percpu.h>
 
 /*
  * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -40,6 +42,13 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
 				  __u64 *cnt);
 
+DECLARE_PER_CPU(int, eventfd_wake_count);
+
+static inline bool eventfd_signal_count(void)
+{
+	return this_cpu_read(eventfd_wake_count);
+}
+
 #else /* CONFIG_EVENTFD */
 
 /*
  *
@@ -68,6 +77,11 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
 	return -ENOSYS;
 }
 
+static inline bool eventfd_signal_count(void)
+{
+	return false;
+}
+
 #endif
 
 #endif /* _LINUX_EVENTFD_H */