From: Liao Chang <liaochang1@huawei.com>

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/release-management/issues/ID5CMS

--------------------------------

In xcall 2.0, each process is associated with a unique xcall area.
During mmap, associate an xcall area with any executable file that
matches a registered xcall and populate its system call table,
preparing to hijack the default system calls and replace them with
custom implementations.

Signed-off-by: Liao Chang <liaochang1@huawei.com>
Signed-off-by: Zheng Xinyu <zhengxinyu6@huawei.com>
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
 arch/arm64/include/asm/xcall.h |  17 ++++++
 arch/arm64/kernel/xcall/core.c | 106 +++++++++++++++++++++++++++++++++
 include/linux/mm_types.h       |   4 ++
 include/linux/xcall.h          |  13 ++++
 kernel/fork.c                  |   2 +
 mm/mmap.c                      |  14 ++++-
 6 files changed, 153 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/xcall.h b/arch/arm64/include/asm/xcall.h
index a35e3803efad..27aaf4344d0f 100644
--- a/arch/arm64/include/asm/xcall.h
+++ b/arch/arm64/include/asm/xcall.h
@@ -4,13 +4,16 @@
 
 #include <linux/atomic.h>
 #include <linux/jump_label.h>
+#include <linux/mm_types.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/types.h>
 #include <linux/xcall.h>
+#include <linux/refcount.h>
 
 #include <asm/actlr.h>
 #include <asm/cpufeature.h>
+#include <asm/syscall.h>
 
 struct xcall_comm {
 	char *name;
@@ -30,9 +33,23 @@ struct xcall {
 	char *name;
 };
 
+struct xcall_area {
+	/*
+	 * 0 ... NR_syscalls - 1: function pointers that hijack default syscalls
+	 * NR_syscalls ... NR_syscalls * 2 - 1: function pointers from the kernel module
+	 */
+	unsigned long sys_call_table[NR_syscalls * 2];
+	refcount_t ref;
+	struct xcall *xcall;
+};
+
 #ifdef CONFIG_DYNAMIC_XCALL
 extern int xcall_attach(struct xcall_comm *info);
 extern int xcall_detach(struct xcall_comm *info);
+
+#define mm_xcall_area(mm) ((struct xcall_area *)((mm)->xcall))
+#else
+#define mm_xcall_area(mm) (NULL)
 #endif /* CONFIG_DYNAMIC_XCALL */
 
 DECLARE_STATIC_KEY_FALSE(xcall_enable);
diff --git a/arch/arm64/kernel/xcall/core.c b/arch/arm64/kernel/xcall/core.c
index ccfc8df323aa..15fb2d4424ef 100644
--- a/arch/arm64/kernel/xcall/core.c
+++ b/arch/arm64/kernel/xcall/core.c
@@ -42,6 +42,13 @@ static struct xcall_prog *get_xcall_prog_locked(const char *module)
 	return ret;
 }
 
+static long inv_xcall(struct pt_regs *regs)
+{
+	return -ENOSYS;
+}
+
+#define inv_xcall_syscall ((unsigned long)inv_xcall)
+
 static struct xcall *get_xcall(struct xcall *xcall)
 {
 	refcount_inc(&xcall->ref);
@@ -126,6 +133,105 @@ static int init_xcall(struct xcall *xcall, struct xcall_comm *comm)
 	return 0;
 }
 
+static int fill_xcall_syscall(struct xcall_area *area, struct xcall *xcall)
+{
+	struct xcall_prog_object *obj;
+	unsigned int scno_offset;
+
+	obj = xcall->program->objs;
+	while (obj->func) {
+		scno_offset = NR_syscalls + obj->scno;
+		if (area->sys_call_table[scno_offset] != inv_xcall_syscall)
+			return -EINVAL;
+
+		area->sys_call_table[scno_offset] = obj->func;
+		obj += 1;
+	}
+
+	return 0;
+}
+
+static struct xcall_area *create_xcall_area(struct mm_struct *mm)
+{
+	struct xcall_area *area;
+	int i;
+
+	area = kzalloc(sizeof(*area), GFP_KERNEL);
+	if (!area)
+		return NULL;
+
+	refcount_set(&area->ref, 1);
+
+	for (i = 0; i < NR_syscalls; i++) {
+		area->sys_call_table[i] = inv_xcall_syscall;
+		area->sys_call_table[i + NR_syscalls] = inv_xcall_syscall;
+	}
+
+	smp_store_release(&mm->xcall, area);
+	return area;
+}
+
+/*
+ * Initialize the xcall data of the mm_struct and register the xcall
+ * into the address space, which includes creating the xcall_area
+ * associated with the mm_struct.
+ */
+int xcall_mmap(struct vm_area_struct *vma, struct mm_struct *mm)
+{
+	struct xcall_area *area;
+	struct xcall *xcall;
+
+	if (list_empty(&xcalls_list))
+		return 0;
+
+	spin_lock(&xcall_list_lock);
+	xcall = find_xcall(NULL, file_inode(vma->vm_file));
+	if (!xcall || !xcall->program) {
+		spin_unlock(&xcall_list_lock);
+		return -EINVAL;
+	}
+	spin_unlock(&xcall_list_lock);
+
+	area = mm_xcall_area(mm);
+	if (!area && !create_xcall_area(mm)) {
+		put_xcall(xcall);
+		return -ENOMEM;
+	}
+
+	area = (struct xcall_area *)READ_ONCE(mm->xcall);
+	/* Each process is allowed to be associated with only one xcall. */
+	if (!cmpxchg(&area->xcall, NULL, xcall) && !fill_xcall_syscall(area, xcall))
+		return 0;
+
+	put_xcall(xcall);
+	return -EINVAL;
+}
+
+void mm_init_xcall_area(struct mm_struct *mm, struct task_struct *p)
+{
+	struct xcall_area *area = mm_xcall_area(mm);
+
+	if (area)
+		refcount_inc(&area->ref);
+}
+
+void clear_xcall_area(struct mm_struct *mm)
+{
+	struct xcall_area *area = mm_xcall_area(mm);
+
+	if (!area)
+		return;
+
+	if (!refcount_dec_and_test(&area->ref))
+		return;
+
+	if (area->xcall)
+		put_xcall(area->xcall);
+
+	kfree(area);
+	mm->xcall = NULL;
+}
+
 int xcall_attach(struct xcall_comm *comm)
 {
 	struct xcall *xcall;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 64c38b09e18d..633283dce0a7 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1016,7 +1016,11 @@ struct mm_struct {
 #else
 		KABI_RESERVE(1)
 #endif
+#ifdef CONFIG_DYNAMIC_XCALL
+		KABI_USE(2, void *xcall)
+#else
 		KABI_RESERVE(2)
+#endif
 		KABI_RESERVE(3)
 		KABI_RESERVE(4)
 		KABI_RESERVE(5)
diff --git a/include/linux/xcall.h b/include/linux/xcall.h
index b7110d02c6bd..5b0242f5a6a6 100644
--- a/include/linux/xcall.h
+++ b/include/linux/xcall.h
@@ -9,6 +9,10 @@
 #include <linux/module.h>
 #include <linux/path.h>
 
+struct vm_area_struct;
+struct mm_struct;
+struct inode;
+
 struct xcall_prog_object {
 	unsigned long scno;
 	unsigned long func;
@@ -28,12 +32,21 @@ struct xcall_prog {
 #ifdef CONFIG_DYNAMIC_XCALL
 extern int xcall_prog_register(struct xcall_prog *prog);
 extern void xcall_prog_unregister(struct xcall_prog *prog);
+extern void mm_init_xcall_area(struct mm_struct *mm, struct task_struct *p);
+extern void clear_xcall_area(struct mm_struct *mm);
+extern int xcall_mmap(struct vm_area_struct *vma, struct mm_struct *mm);
 #else /* !CONFIG_DYNAMIC_XCALL */
 static inline int xcall_prog_register(struct xcall_prog *prog)
 {
 	return -EINVAL;
 }
 static inline void xcall_prog_unregister(struct xcall_prog *prog) {}
+static inline void mm_init_xcall_area(struct mm_struct *mm, struct task_struct *p) {}
+static inline void clear_xcall_area(struct mm_struct *mm) {}
+static inline int xcall_mmap(struct vm_area_struct *vma, struct mm_struct *mm)
+{
+	return 0;
+}
 #endif /* CONFIG_DYNAMIC_XCALL */
 
 #endif /* _LINUX_XCALL_H */
diff --git a/kernel/fork.c b/kernel/fork.c
index 78663ca68160..f659f24b9ba2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1367,6 +1367,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 #if defined(CONFIG_DAMON_MEM_SAMPLING)
 	mm->damon_fifo = NULL;
 #endif
+	mm_init_xcall_area(mm, p);
 	mm_init_uprobes_state(mm);
 	hugetlb_count_init(mm);
 
@@ -1420,6 +1421,7 @@ static inline void __mmput(struct mm_struct *mm)
 {
 	VM_BUG_ON(atomic_read(&mm->mm_users));
 
+	clear_xcall_area(mm);
 	uprobe_clear_state(mm);
 	exit_aio(mm);
 	ksm_exit(mm);
diff --git a/mm/mmap.c b/mm/mmap.c
index fb54df419ea2..27f8e4dd8d72 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -48,6 +48,7 @@
 #include <linux/sched/mm.h>
 #include <linux/ksm.h>
 #include <linux/share_pool.h>
+#include <linux/xcall.h>
 #include <linux/uaccess.h>
 
 #include <asm/cacheflush.h>
@@ -590,9 +591,12 @@ static inline void vma_complete(struct vma_prepare *vp,
 
 	if (!vp->skip_vma_uprobe) {
 		uprobe_mmap(vp->vma);
+		xcall_mmap(vp->vma, mm);
 
-		if (vp->adj_next)
+		if (vp->adj_next) {
 			uprobe_mmap(vp->adj_next);
+			xcall_mmap(vp->adj_next, mm);
+		}
 	}
 }
 
@@ -622,8 +626,10 @@ static inline void vma_complete(struct vma_prepare *vp,
 			goto again;
 		}
 	}
 
-	if (vp->insert && vp->file)
+	if (vp->insert && vp->file) {
 		uprobe_mmap(vp->insert);
+		xcall_mmap(vp->insert, mm);
+	}
 	validate_mm(mm);
 }
@@ -2943,8 +2949,10 @@ static unsigned long __mmap_region(struct mm_struct *mm, struct file *file,
 		mm->locked_vm += (len >> PAGE_SHIFT);
 	}
 
-	if (file)
+	if (file) {
 		uprobe_mmap(vma);
+		xcall_mmap(vma, mm);
+	}
 
 	/*
 	 * New (or expanded) vma always get soft dirty status.
-- 
2.34.1
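
For reviewers, a minimal, hypothetical sketch of what a client module might
look like, based only on what is visible in this patch: struct
xcall_prog_object carrying a syscall number and replacement function, an objs
array terminated by an entry whose func is 0 (as fill_xcall_syscall()
expects), and xcall_prog_register()/xcall_prog_unregister(). The exact layout
of struct xcall_prog is not shown here, so the initialization below, the use
of .objs as a pointer, and the handler prototype (struct pt_regs *, mirroring
inv_xcall()) are assumptions, not part of the series:

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/xcall.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>

/* Hypothetical replacement handler; prototype assumed from inv_xcall(). */
static long my_getpid(struct pt_regs *regs)
{
	return task_tgid_vnr(current);
}

/* Terminated by an entry with .func == 0, as fill_xcall_syscall() expects. */
static struct xcall_prog_object my_objs[] = {
	{ .scno = __NR_getpid, .func = (unsigned long)my_getpid },
	{},
};

/* Assumes struct xcall_prog exposes .objs as a pointer to the object array. */
static struct xcall_prog my_prog = {
	.objs = my_objs,
};

static int __init my_xcall_init(void)
{
	return xcall_prog_register(&my_prog);
}

static void __exit my_xcall_exit(void)
{
	xcall_prog_unregister(&my_prog);
}

module_init(my_xcall_init);
module_exit(my_xcall_exit);
MODULE_LICENSE("GPL");

Once such a program is registered and attached to a binary, xcall_mmap()
above associates it with the process the first time that binary is mapped.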