
euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IC4GP7
CVE: NA

----------------------------------------

Add the kernel_ipc module to support fast switching and communication
between processes.

Signed-off-by: chenrenhui <chenrenhui1@huawei.com>
---
 Kconfig                                |   2 +
 arch/arm64/configs/openeuler_defconfig |   1 +
 arch/x86/configs/openeuler_defconfig   |   2 +
 include/linux/kernel_ipc.h             |  48 ++++
 ipc/Kconfig                            |   5 +
 ipc/Makefile                           |   2 +-
 ipc/kernel_ipc.c                       | 303 +++++++++++++++++++++++++
 7 files changed, 362 insertions(+), 1 deletion(-)
 create mode 100644 include/linux/kernel_ipc.h
 create mode 100644 ipc/Kconfig
 create mode 100644 ipc/kernel_ipc.c

diff --git a/Kconfig b/Kconfig
index 745bc773f567..38535ecc8aa6 100644
--- a/Kconfig
+++ b/Kconfig
@@ -21,6 +21,8 @@ source "drivers/Kconfig"
 
 source "fs/Kconfig"
 
+source "ipc/Kconfig"
+
 source "security/Kconfig"
 
 source "crypto/Kconfig"
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index fb9f92d11bde..4c7a26b1504a 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -6660,6 +6660,7 @@ CONFIG_FILE_MITIGATION_FALSE_SHARING=y
 # end of File systems
 
 CONFIG_RESCTRL=y
+CONFIG_KERNEL_IPC=m
 
 #
 # Security options
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index 47a79860bfb7..fee20534051d 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -7714,6 +7714,8 @@ CONFIG_DLM_DEBUG=y
 CONFIG_IO_WQ=y
 # end of File systems
 
+# CONFIG_KERNEL_IPC is not set
+
 #
 # Security options
 #
diff --git a/include/linux/kernel_ipc.h b/include/linux/kernel_ipc.h
new file mode 100644
index 000000000000..f4a5fd03d9f1
--- /dev/null
+++ b/include/linux/kernel_ipc.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ * Description: Kernel IPC header
+ * Author: yangyun
+ * Create: 2024-05-31
+ */
+#ifndef __KERNEL_IPC_H_
+#define __KERNEL_IPC_H_
+
+struct kernel_ipc_bind_info {
+	//unsigned int session_id;
+	unsigned int data_size;
+
+	struct task_struct *client_task;
+	struct task_struct *server_task;
+	// struct task_struct *server_task_get;
+
+	bool is_calling;
+	bool client_need_exit;
+	bool server_need_exit;
+
+	atomic_t nr_call;
+
+	//struct kref ref;
+	spinlock_t lock;
+	struct list_head node;
+};
+
+void kernel_ipc_wakeup_server_task(struct kernel_ipc_bind_info *bind_info);
+
+void *kernel_ipc_bind(struct task_struct *server_task);
+
+void kernel_ipc_unbind(struct kernel_ipc_bind_info *bind_info,
+		struct task_struct *server_task);
+
+ssize_t kernel_ipc_do_call(struct kernel_ipc_bind_info *bind_info,
+		struct task_struct *tsk);
+
+long kernel_ipc_ret_call(struct kernel_ipc_bind_info *bind_info,
+		struct task_struct *tsk);
+
+long kernel_ipc_wait_call(struct kernel_ipc_bind_info *bind_info,
+		struct task_struct *tsk);
+
+void kernel_ipc_release(struct kernel_ipc_bind_info *bind_info);
+
+#endif
diff --git a/ipc/Kconfig b/ipc/Kconfig
new file mode 100644
index 000000000000..3ba44d78d1ff
--- /dev/null
+++ b/ipc/Kconfig
@@ -0,0 +1,5 @@
+config KERNEL_IPC
+	tristate "Kernel IPC Call"
+	default n
+	help
+	  Inter-process call, used to switch threads quickly.
\ No newline at end of file
diff --git a/ipc/Makefile b/ipc/Makefile
index c2558c430f51..528a1233431a 100644
--- a/ipc/Makefile
+++ b/ipc/Makefile
@@ -9,4 +9,4 @@ obj-$(CONFIG_SYSVIPC_SYSCTL) += ipc_sysctl.o
 obj-$(CONFIG_POSIX_MQUEUE) += mqueue.o msgutil.o
 obj-$(CONFIG_IPC_NS) += namespace.o
 obj-$(CONFIG_POSIX_MQUEUE_SYSCTL) += mq_sysctl.o
-
+obj-$(CONFIG_KERNEL_IPC) += kernel_ipc.o
diff --git a/ipc/kernel_ipc.c b/ipc/kernel_ipc.c
new file mode 100644
index 000000000000..280de948d397
--- /dev/null
+++ b/ipc/kernel_ipc.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef pr_fmt
+# define pr_fmt(fmt) "kernel_ipc: " fmt
+#endif
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/printk.h>
+#include <linux/preempt.h>
+#include <linux/sched/signal.h>
+#include <linux/kernel_ipc.h>
+#include <linux/sched/debug.h>
+
+//#define IPC_DEBUG(fmt, ...) kernel_ipc_print(KERN_DEBUG fmt, ##__VA_ARGS__)
+#define IPC_DEBUG(fmt, ...)
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("yangyun");
+MODULE_DESCRIPTION("kernel ipc");
+MODULE_VERSION("1.0");
+
+static inline void bind_info_lock(struct kernel_ipc_bind_info *bind_info)
+{
+	spin_lock(&bind_info->lock);
+}
+
+static inline void bind_info_unlock(struct kernel_ipc_bind_info *bind_info)
+{
+	spin_unlock(&bind_info->lock);
+}
+
+static inline int kernel_ipc_check_task_consistency(struct task_struct *client,
+		struct task_struct *server)
+{
+	if (client->pid == server->pid) {
+		pr_err("error: client(%s/%d) and server(%s/%d) are the same\n", client->comm,
+			client->pid, server->comm, server->pid);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+static inline ssize_t
+kernel_ipc_call_check(struct kernel_ipc_bind_info *bind_info,
+		struct task_struct *tsk)
+{
+	ssize_t ret = 0;
+	struct task_struct *server_task;
+
+	if (!bind_info)
+		return -ENOENT;
+
+	if (bind_info->client_task) {
+		pr_err("error: bind_info already has client task: %s/%d, current is: %s/%d",
+			bind_info->client_task->comm, bind_info->client_task->pid,
+			tsk->comm, tsk->pid);
+		return -EEXIST;
+	}
+
+	server_task = bind_info->server_task;
+	if (!server_task) {
+		pr_err("error: server thread does not exist\n");
+		return -ESRCH;
+	}
+
+	return ret;
+}
+
+static inline void kernel_ipc_client_init(
+		struct kernel_ipc_bind_info *bind_info, struct task_struct *tsk)
+{
+	bind_info->client_task = tsk;
+}
+
+
+static inline void kernel_ipc_client_exit(
+		struct kernel_ipc_bind_info *bind_info, struct task_struct *tsk)
+{
+	bind_info->client_task = NULL;
+}
+
+static inline int kernel_ipc_get_client_exit_code(
+		const struct kernel_ipc_bind_info *bind_info)
+{
+	return bind_info->client_need_exit ?
+		-ESRCH : 0;
+}
+
+static inline void kernel_ipc_wakeup_client_task(
+		struct kernel_ipc_bind_info *bind_info)
+{
+	struct task_struct *client_task;
+
+	client_task = bind_info->client_task;
+	bind_info->client_need_exit = true;
+	wake_up_process(client_task);
+}
+
+void kernel_ipc_wakeup_server_task(struct kernel_ipc_bind_info *bind_info)
+{
+	struct task_struct *server_task;
+
+	server_task = bind_info->server_task;
+	bind_info->server_need_exit = true;
+	wake_up_process(server_task);
+}
+EXPORT_SYMBOL_GPL(kernel_ipc_wakeup_server_task);
+
+void *kernel_ipc_bind(struct task_struct *server_task)
+{
+	struct kernel_ipc_bind_info *bind_info = NULL;
+
+	bind_info = kcalloc(1, sizeof(struct kernel_ipc_bind_info), GFP_KERNEL);
+	if (!bind_info) {
+		pr_err("error: alloc kernel_ipc_bind_info failed\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	bind_info->server_task = server_task;
+
+	return (void *) bind_info;
+}
+EXPORT_SYMBOL_GPL(kernel_ipc_bind);
+
+void kernel_ipc_release(struct kernel_ipc_bind_info *bind_info)
+{
+	if (bind_info) {
+		if (bind_info->client_task && bind_info->is_calling)
+			kernel_ipc_wakeup_client_task(bind_info);
+		kfree(bind_info);
+	}
+}
+EXPORT_SYMBOL_GPL(kernel_ipc_release);
+
+void kernel_ipc_unbind(struct kernel_ipc_bind_info *bind_info,
+		struct task_struct *server_task)
+{
+	if (bind_info) {
+		if (bind_info->server_task == server_task) {
+			bind_info->server_task = NULL;
+			if (bind_info->client_task && bind_info->is_calling)
+				kernel_ipc_wakeup_client_task(bind_info);
+			kfree(bind_info);
+		}
+	}
+
+}
+EXPORT_SYMBOL_GPL(kernel_ipc_unbind);
+
+ssize_t kernel_ipc_do_call(struct kernel_ipc_bind_info *bind_info,
+		struct task_struct *tsk)
+{
+	struct task_struct *server_task;
+	ssize_t ret;
+
+	ret = kernel_ipc_call_check(bind_info, tsk);
+	if (ret) {
+		pr_err("kernel ipc call check and init failed, errno: %zd\n", ret);
+		return ret;
+	}
+
+	kernel_ipc_client_init(bind_info, tsk);
+
+	server_task = bind_info->server_task;
+
+	bind_info->client_need_exit = false;
+	bind_info->is_calling = true;
+
+	preempt_disable(); /* optimize performance if preemption occurs */
+	smp_mb();
+	wake_up_process(server_task);
+	preempt_enable();
+	IPC_DEBUG("[cpu/%d][%s/%d] ipc do call server(%s/%d)\n",
+		smp_processor_id(), tsk->comm, tsk->pid, server_task->comm,
+		server_task->pid);
+
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (bind_info->is_calling) {
+		IPC_DEBUG("[cpu/%d][%s/%d] client begin schedule\n", smp_processor_id(),
+			tsk->comm, tsk->pid);
+		schedule();
+		IPC_DEBUG("[cpu/%d][%s/%d] client schedule end\n", smp_processor_id(),
+			tsk->comm, tsk->pid);
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			pr_err("[cpu/%d][%s/%d] client has signal pending, break\n",
+				smp_processor_id(), tsk->comm, tsk->pid);
+			break;
+		}
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+	set_current_state(TASK_RUNNING);
+
+	if (bind_info->is_calling) {
+		pr_err("[cpu/%d][%s/%d] server is still calling, but client was woken up\n",
+			smp_processor_id(), tsk->comm, tsk->pid);
+		pr_err("[cpu/%d][%s/%d] server task(%s/%d) is running on cpu %d\n",
+			smp_processor_id(), tsk->comm, tsk->pid, server_task->comm,
+			server_task->pid, task_cpu(server_task));
+		//show_stack(server_task, NULL, KERN_DEBUG);
+		pr_err("[cpu/%d][%s/%d] show_stack end in %s\n",
+			smp_processor_id(), tsk->comm, tsk->pid, __func__);
+	}
+
+	kernel_ipc_client_exit(bind_info, tsk);
+
+	if (ret == -EINTR)
+		return ret;
+	ret = kernel_ipc_get_client_exit_code(bind_info);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kernel_ipc_do_call);
+
+long
+kernel_ipc_ret_call(struct kernel_ipc_bind_info *bind_info,
+		struct task_struct *tsk)
+{
+	struct task_struct *client_task;
+
+	if (!bind_info->is_calling)
+		return 0;
+
+	bind_info_lock(bind_info);
+	client_task = bind_info->client_task;
+	if (!client_task) {
+		bind_info_unlock(bind_info);
+		return -ESRCH;
+	}
+
+	bind_info_unlock(bind_info);
+
+	bind_info->is_calling = false;
+	preempt_disable();
+	/* memory barrier for preempt */
+	smp_mb();
+	wake_up_process(client_task);
+	preempt_enable();
+	IPC_DEBUG("[CPU/%d][%s/%d] client task pid: %d, state: %d\n",
+		smp_processor_id(), tsk->comm, tsk->pid, client_task->pid,
+		client_task->state);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kernel_ipc_ret_call);
+
+long kernel_ipc_wait_call(struct kernel_ipc_bind_info *bind_info,
+		struct task_struct *tsk)
+{
+	long ret = 0;
+	sigset_t pending_signals;
+
+	for (;;) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (bind_info->is_calling)
+			break;
+
+		if (bind_info->server_need_exit) {
+			ret = -ENODEV;
+			break;
+		}
+
+		schedule();
+
+		if (signal_pending_state(TASK_INTERRUPTIBLE, tsk)
+				&& !bind_info->is_calling) {
+			if (fatal_signal_pending(tsk)) {
+				pr_err("[CPU/%d][%s/%d] current task has SIGKILL\n",
+					smp_processor_id(), tsk->comm, tsk->pid);
+			}
+
+			pending_signals = current->pending.signal;
+			ret = -ERESTARTSYS;
+			break;
+		}
+	}
+
+	set_current_state(TASK_RUNNING);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kernel_ipc_wait_call);
+
+static int __init
+kernel_ipc_init(void)
+{
+	pr_info("kernel ipc init\n");
+	return 0;
+}
+
+static void __exit
+kernel_ipc_exit(void)
+{
+	pr_info("kernel ipc exit\n");
+}
+
+
+module_init(kernel_ipc_init);
+module_exit(kernel_ipc_exit);
-- 
2.33.0
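
For reviewers, a minimal sketch of how a consumer module might drive the
exported API (bind/wait on the server thread, do_call from the client,
ret_call to hand control back). The demo_* names are hypothetical; only the
kernel_ipc_* calls come from this patch.

/* Illustrative only; demo_* identifiers are assumptions, not part of the patch. */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/kernel_ipc.h>

static struct kernel_ipc_bind_info *demo_bind;

/* Server side: bind the current kthread, then service calls until stopped. */
static int demo_server_fn(void *unused)
{
	demo_bind = kernel_ipc_bind(current);
	if (IS_ERR(demo_bind))
		return PTR_ERR(demo_bind);

	while (!kthread_should_stop()) {
		/* Sleep until a client issues kernel_ipc_do_call(). */
		if (kernel_ipc_wait_call(demo_bind, current))
			break;

		/* ... service the request here ... */

		/* Clear is_calling and wake the blocked client. */
		kernel_ipc_ret_call(demo_bind, current);
	}

	kernel_ipc_unbind(demo_bind, current);
	return 0;
}

/* Client side: switch to the server thread and block until it returns. */
static ssize_t demo_client_call(void)
{
	/* Assumes demo_bind has already been published by the server. */
	return kernel_ipc_do_call(demo_bind, current);
}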