From: Wang Wensheng <wangwensheng4@huawei.com>
Provide a char device so that userspace can reach the pool-management operations supported by the kernel via ioctl().
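For illustration, a minimal (untested) userspace sequence, assuming the misc device shows up as /dev/xshmem_dev and using the built-in empty_algo (algorithm number 0):

  /*
   * Illustrative only: exercises the ioctl interface added by this patch.
   * The device name ("xshmem_dev") and the algorithm number (0, the
   * built-in empty_algo) follow the defaults registered in this series.
   */
  #include <fcntl.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/xshmem_framework.h>

  int main(void)
  {
          char key[] = "example-pool";
          struct xshm_reg_arg reg = {
                  .algo = 0,                /* empty_algo, for testing */
                  .pool_size = 4096,
                  .key_len = strlen(key),
                  .key = key,
          };
          int fd, pool_id, offset;

          fd = open("/dev/xshmem_dev", O_RDWR);
          if (fd < 0)
                  return 1;

          /* Register (or look up) the POOL; returns the pool id */
          pool_id = ioctl(fd, XSHMEM_POOL_REGISTER, &reg);
          if (pool_id >= 0) {
                  struct xshm_alloc_arg alloc = { .pool_id = pool_id, .size = 256 };
                  struct xshm_free_arg fr = { .pool_id = pool_id };

                  /* Allocate a BLOCK; returns its offset inside the POOL */
                  offset = ioctl(fd, XSHMEM_POOL_ALLOC, &alloc);
                  if (offset >= 0) {
                          fr.offset = offset;
                          ioctl(fd, XSHMEM_POOL_FREE, &fr);
                  }

                  /* Detach from the POOL */
                  ioctl(fd, XSHMEM_POOL_UNREGISTER, pool_id);
          }

          close(fd);
          return 0;
  }

XSHMEM_POOL_REGISTER returns the pool id and XSHMEM_POOL_ALLOC returns the block offset, so both are read straight from the ioctl() return value.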
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
---
 include/uapi/linux/xshmem_framework.h |  33 +++
 lib/Kconfig                           |   7 +
 lib/Makefile                          |   1 +
 lib/xshmem/Makefile                   |   1 +
 lib/xshmem/xshmem_framework.c         | 514 ++++++++++++++++++++++++++++++++++
 lib/xshmem/xshmem_framework.h         |  46 +++
 6 files changed, 602 insertions(+)
 create mode 100644 include/uapi/linux/xshmem_framework.h
 create mode 100644 lib/xshmem/Makefile
 create mode 100644 lib/xshmem/xshmem_framework.c
 create mode 100644 lib/xshmem/xshmem_framework.h
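For reference, below is a minimal sketch of how a backend allocator could plug into the framework. The module and its bump-pointer policy are hypothetical and not part of this patch; only the xshm_pool_algo hooks and xshmem_register_algo() from xshmem_framework.h are assumed.

  /*
   * Hypothetical example, not part of this patch: a trivial bump-pointer
   * backend registered with the xshmem framework.  Hook signatures follow
   * struct xshm_pool_algo in lib/xshmem/xshmem_framework.h.
   */
  #include <linux/errno.h>
  #include <linux/module.h>

  #include "xshmem_framework.h"

  static int bump_pool_init(struct xshm_pool *xp)
  {
          xp->private = (void *)0;        /* next free offset */
          return 0;
  }

  static int bump_pool_free(struct xshm_pool *xp)
  {
          return 0;
  }

  static int bump_block_alloc(struct xshm_pool *xp, int size)
  {
          long next = (long)xp->private;

          if (size <= 0 || next + size > xp->size)
                  return -ENOMEM;

          xp->private = (void *)(next + size);
          return next;                    /* offset handed back to userspace */
  }

  static int bump_block_free(struct xshm_pool *xp, int offset)
  {
          return 0;                       /* bump allocator never reclaims */
  }

  static struct xshm_pool_algo bump_algo = {
          .num              = 1,          /* hypothetical id; 0 is empty_algo */
          .name             = "bump_algo",
          .xshm_pool_init   = bump_pool_init,
          .xshm_pool_free   = bump_pool_free,
          .xshm_block_alloc = bump_block_alloc,
          .xshm_block_free  = bump_block_free,
  };

  static int __init bump_algo_init(void)
  {
          return xshmem_register_algo(&bump_algo);
  }
  module_init(bump_algo_init);

  MODULE_LICENSE("GPL v2");

The framework serializes the alloc and free hooks with xp_block_mutex, so such a backend needs no extra locking for its per-pool state.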
diff --git a/include/uapi/linux/xshmem_framework.h b/include/uapi/linux/xshmem_framework.h
new file mode 100644
index 0000000..caf782b
--- /dev/null
+++ b/include/uapi/linux/xshmem_framework.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) Huawei Technologies Co., Ltd. 2021. All rights reserved.
+ * Author: Huawei OS Kernel Lab
+ * Create: Thu Jun 17 03:19:19 2021
+ */
+#ifndef __USR_XSHMEM_FRAMEWORK_H
+#define __USR_XSHMEM_FRAMEWORK_H
+
+#define XSHM_KEY_SIZE	253U
+
+struct xshm_reg_arg {
+	int algo;
+	unsigned int pool_size;
+	unsigned int key_len;
+	char *key;
+};
+
+struct xshm_alloc_arg {
+	int pool_id;
+	int size;
+};
+
+struct xshm_free_arg {
+	int pool_id;
+	unsigned int offset;
+};
+
+#define XSHMEM_POOL_REGISTER	_IOW('X', 1, struct xshm_reg_arg)
+#define XSHMEM_POOL_UNREGISTER	_IO ('X', 2)
+#define XSHMEM_POOL_ALLOC	_IOW('X', 3, struct xshm_alloc_arg)
+#define XSHMEM_POOL_FREE	_IOW('X', 4, struct xshm_free_arg)
+
+#endif
diff --git a/lib/Kconfig b/lib/Kconfig
index edb7d40..070b3df 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -632,3 +632,10 @@ config GENERIC_LIB_CMPDI2
 
 config GENERIC_LIB_UCMPDI2
 	bool
+
+config XSHMEM_FRAMEWORK
+	tristate "A region-management mechanism in the kernel"
+	help
+	  Supply a region-management mechanism in the kernel. The user can
+	  register a region and alloc/get/put/delete BLOCKs from the region
+	  via ioctls on the associated chardev.
diff --git a/lib/Makefile b/lib/Makefile
index f5ee8e8..57a7897 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -285,3 +285,4 @@ obj-$(CONFIG_GENERIC_LIB_LSHRDI3) += lshrdi3.o
 obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o
 obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o
 obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o
+obj-$(CONFIG_XSHMEM_FRAMEWORK) += xshmem/
diff --git a/lib/xshmem/Makefile b/lib/xshmem/Makefile
new file mode 100644
index 0000000..a60db51
--- /dev/null
+++ b/lib/xshmem/Makefile
@@ -0,0 +1 @@
+obj-m += xshmem_framework.o
diff --git a/lib/xshmem/xshmem_framework.c b/lib/xshmem/xshmem_framework.c
new file mode 100644
index 0000000..7209cc0
--- /dev/null
+++ b/lib/xshmem/xshmem_framework.c
@@ -0,0 +1,514 @@
+/*
+ * Copyright (C) Huawei Technologies Co., Ltd. 2021. All rights reserved.
+ * Author: Huawei OS Kernel Lab
+ * Create: Tue Jun 15 06:34:28 2021
+ */
+#define pr_fmt(fmt) "XSHMEM: " fmt
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/mutex.h>
+#include <linux/idr.h>
+#include <linux/fs.h>
+#include <linux/hashtable.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/jhash.h>
+#include <linux/rcupdate.h>
+
+#include "xshmem_framework.h"
+
+#define XSHM_HLIST_TABLE_BIT	13
+#define XSHMEM_MAX_ID		(1 << 14)
+
+/* Protect the lifetime of all POOLs */
+static DEFINE_MUTEX(xshmem_mutex);
+static DEFINE_IDR(xshmem_idr);
+static DEFINE_HASHTABLE(xshmem_key_list, XSHM_HLIST_TABLE_BIT);
+static int next_xshmem_id;
+
+static struct miscdevice xshmem_dev;
+
+/* FIXME: use a lock to protect the list_head */
+static LIST_HEAD(registered_algo);
+
+static struct xshm_pool_algo *xshmem_find_algo(int algo)
+{
+	struct xshm_pool_algo *tmp;
+
+	list_for_each_entry(tmp, &registered_algo, algo_node)
+		if (tmp->num == algo)
+			return tmp;
+
+	return NULL;
+}
+
+static struct hlist_head *hash_bucket(char *key, int len)
+{
+	u32 bucket = jhash(key, len, 0);
+	return &xshmem_key_list[bucket & ((1U << XSHM_HLIST_TABLE_BIT) - 1U)];
+}
+
+struct xshm_task_pool_node {
+	struct list_head task_node;	/* list node in TASK */
+	struct list_head pool_node;	/* list node in POOL */
+	struct xshm_task *task;
+	struct xshm_pool *pool;
+};
+
+struct xshm_task {
+	pid_t pid;
+	int pool_count;
+	struct list_head list_head;
+	struct mutex task_mutex;
+};
+
+static int xshmem_open(struct inode *inode, struct file *file)
+{
+	struct xshm_task *ptask;
+
+	ptask = kmalloc(sizeof(*ptask), GFP_KERNEL);
+	if (unlikely(!ptask))
+		return -ENOMEM;
+
+	ptask->pid = current->pid;
+	ptask->pool_count = 0;
+	INIT_LIST_HEAD(&ptask->list_head);
+	mutex_init(&ptask->task_mutex);
+
+	file->private_data = ptask;
+
+	return 0;
+}
+
+static bool task_is_attached(struct xshm_task *task, struct xshm_pool *xp)
+{
+	struct xshm_task_pool_node *node;
+
+	list_for_each_entry(node, &task->list_head, task_node)
+		if (node->pool == xp)
+			return true;
+
+	return false;
+}
+
+/* the refcnt of the xp is increased by one on success */
+static int xshmem_pool_attach(struct xshm_task *task, struct xshm_pool *xp)
+{
+	struct xshm_task_pool_node *node;
+
+	if (task_is_attached(task, xp)) {
+		pr_err("TASK has been attached already\n");
+		return -EEXIST;
+	}
+
+	node = kmalloc(sizeof(*node), GFP_KERNEL);
+	if (unlikely(!node)) {
+		pr_err("alloc xshm_task_pool_node failed\n");
+		return -ENOMEM;
+	}
+
+	node->task = task;
+	node->pool = xp;
+	list_add_tail(&node->task_node, &task->list_head);
+	spin_lock(&xp->xp_task_spinlock);
+	list_add_tail(&node->pool_node, &xp->list_head);
+	spin_unlock(&xp->xp_task_spinlock);
+
+	atomic_inc(&xp->refcnt);
+	task->pool_count++;
+
+	return 0;
+}
+
+static int xshmem_pool_detach(struct xshm_task *task, struct xshm_pool *xp)
+{
+	struct xshm_task_pool_node *node;
+
+	list_for_each_entry(node, &task->list_head, task_node)
+		if (node->pool == xp) {
+			spin_lock(&xp->xp_task_spinlock);
+			list_del(&node->pool_node);
+			spin_unlock(&xp->xp_task_spinlock);
+
+			list_del(&node->task_node);
+			/* Use xshmem_pool_put() ?? */
+			atomic_dec(&xp->refcnt);
+			task->pool_count--;
+			kfree(node);
+			return 0;
+		}
+
+	pr_err("the POOL has already been detached\n");
+	return -ESRCH;
+}
+
+/*
+ * Get the POOL specified by key, create one if it does not exist.
+ */
+static struct xshm_pool *xshmem_pool_create(char *key, unsigned int key_len,
+					    struct xshm_pool_algo *algo, unsigned int pool_size)
+{
+	int id, ret;
+	struct xshm_pool *xp;
+	struct hlist_head *bucket;
+
+	bucket = hash_bucket(key, key_len);
+
+	mutex_lock(&xshmem_mutex);
+	hlist_for_each_entry(xp, bucket, hnode)
+		if (key_len == xp->key_len && !strncmp(key, xp->key, key_len)) {
+			if (xp->size != pool_size || xp->algo != algo) {
+				mutex_unlock(&xshmem_mutex);
+				pr_err("the pool size or algorithm invalid\n");
+				return ERR_PTR(-EINVAL);
+			} else
+				goto exist;
+		}
+
+	xp = kmalloc(sizeof(*xp) + key_len + 1, GFP_KERNEL);
+	if (unlikely(!xp)) {
+		mutex_unlock(&xshmem_mutex);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	xp->algo = algo;
+	xp->size = pool_size;
+	ret = algo->xshm_pool_init(xp);
+	if (ret < 0) {
+		mutex_unlock(&xshmem_mutex);
+		kfree(xp);
+		pr_err("init hook failed\n");
+		return ERR_PTR(ret);
+	}
+
+	id = idr_alloc(&xshmem_idr, xp, next_xshmem_id, XSHMEM_MAX_ID, GFP_KERNEL);
+	if (id < 0) {
+		if (next_xshmem_id)
+			id = idr_alloc(&xshmem_idr, xp, 0, next_xshmem_id, GFP_KERNEL);
+		if (id < 0) {
+			mutex_unlock(&xshmem_mutex);
+			algo->xshm_pool_free(xp);
+			kfree(xp);
+			pr_err("idr alloc failed\n");
+			return ERR_PTR(id);
+		}
+	}
+
+	next_xshmem_id = (id + 1) & (XSHMEM_MAX_ID - 1);
+
+	xp->key_len = key_len;
+	xp->pool_id = id;
+	atomic_set(&xp->refcnt, 0);
+	INIT_LIST_HEAD(&xp->list_head);
+	strncpy(xp->key, key, key_len);
+	xp->key[key_len] = '\0';
+	mutex_init(&xp->xp_block_mutex);
+	spin_lock_init(&xp->xp_task_spinlock);
+
+	hlist_add_head(&xp->hnode, bucket);
+
+exist:
+	/*
+	 * Increase the POOL's refcnt by one here, for the convenience of the
+	 * error fallback in ioctl_xshmem_pool_register().
+	 */
+	atomic_inc(&xp->refcnt);
+	mutex_unlock(&xshmem_mutex);
+
+	return xp;
+}
+
+static struct xshm_pool *xshmem_pool_get(int pool_id)
+{
+	struct xshm_pool *xp;
+
+	mutex_lock(&xshmem_mutex);
+	xp = idr_find(&xshmem_idr, pool_id);
+	if (xp)
+		atomic_inc(&xp->refcnt);
+	mutex_unlock(&xshmem_mutex);
+
+	return xp;
+}
+
+static void xshmem_pool_put(struct xshm_pool *xp)
+{
+	mutex_lock(&xshmem_mutex);
+	if (!atomic_dec_and_test(&xp->refcnt)) {
+		mutex_unlock(&xshmem_mutex);
+		return;
+	}
+
+	hlist_del(&xp->hnode);
+	idr_remove(&xshmem_idr, xp->pool_id);
+	mutex_unlock(&xshmem_mutex);
+
+	xp->algo->xshm_pool_free(xp);
+	kfree(xp);
+}
+
+static int ioctl_xshmem_pool_register(struct xshm_task *task, unsigned long arg)
+{
+	int ret;
+	struct xshm_pool *xp;
+	unsigned int key_len;
+	char key[XSHM_KEY_SIZE];
+	struct xshm_reg_arg reg_arg;
+	struct xshm_pool_algo *algo;
+
+	if (copy_from_user(&reg_arg, (struct xshm_reg_arg __user *)arg, sizeof(reg_arg))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	algo = xshmem_find_algo(reg_arg.algo);
+	if (unlikely(!algo)) {
+		pr_err("unsupported algorithm\n");
+		return -ENODEV;
+	}
+
+	key_len = reg_arg.key_len;
+	if (!key_len || key_len > XSHM_KEY_SIZE)
+		return -EINVAL;
+
+	if (copy_from_user(key, (char __user *)reg_arg.key, key_len)) {
+		pr_err("copy key from user failed\n");
+		return -EFAULT;
+	}
+
+	xp = xshmem_pool_create(key, key_len, algo, reg_arg.pool_size);
+	if (IS_ERR(xp))
+		return PTR_ERR(xp);
+
+	ret = xshmem_pool_attach(task, xp);
+	xshmem_pool_put(xp);
+
+	return ret < 0 ? ret : xp->pool_id;
+}
+
+static int ioctl_xshmem_pool_unregister(struct xshm_task *task, unsigned long arg)
+{
+	int ret;
+	struct xshm_pool *xp;
+
+	xp = xshmem_pool_get(arg);
+	if (!xp) {
+		pr_err("couldn't find the pool\n");
+		return -ENODEV;
+	}
+
+	ret = xshmem_pool_detach(task, xp);
+
+	xshmem_pool_put(xp);
+
+	return ret;
+}
+
+static int ioctl_xshmem_pool_alloc(struct xshm_task *task, unsigned long arg)
+{
+	int ret;
+	struct xshm_pool *xp;
+	struct xshm_alloc_arg alloc_arg;
+
+	if (copy_from_user(&alloc_arg, (struct xshm_alloc_arg __user *)arg, sizeof(alloc_arg))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	xp = xshmem_pool_get(alloc_arg.pool_id);
+	if (!xp) {
+		pr_err("invalid pool_id\n");
+		return -EINVAL;
+	}
+
+	if (!task_is_attached(task, xp)) {
+		xshmem_pool_put(xp);
+		pr_err("TASK is not attached to POOL\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&xp->xp_block_mutex);
+	ret = xp->algo->xshm_block_alloc(xp, alloc_arg.size);
+	mutex_unlock(&xp->xp_block_mutex);
+
+	xshmem_pool_put(xp);
+
+	return ret;
+}
+
+static int ioctl_xshmem_pool_free(struct xshm_task *task, unsigned long arg)
+{
+	int ret;
+	struct xshm_pool *xp;
+	struct xshm_free_arg free_arg;
+
+	if (copy_from_user(&free_arg, (struct xshm_free_arg __user *)arg, sizeof(free_arg))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	xp = xshmem_pool_get(free_arg.pool_id);
+	if (!xp) {
+		pr_err("invalid pool_id\n");
+		return -EINVAL;
+	}
+
+	if (!task_is_attached(task, xp)) {
+		xshmem_pool_put(xp);
+		pr_err("TASK is not attached to POOL\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&xp->xp_block_mutex);
+	ret = xp->algo->xshm_block_free(xp, free_arg.offset);
+	mutex_unlock(&xp->xp_block_mutex);
+
+	xshmem_pool_put(xp);
+
+	return ret;
+}
+
+static long xshmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int ret;
+	struct xshm_task *task = file->private_data;
+
+	mutex_lock(&task->task_mutex);
+	switch (cmd) {
+	case XSHMEM_POOL_REGISTER:
+		ret = ioctl_xshmem_pool_register(task, arg);
+		break;
+	case XSHMEM_POOL_UNREGISTER:
+		ret = ioctl_xshmem_pool_unregister(task, arg);
+		break;
+	case XSHMEM_POOL_ALLOC:
+		ret = ioctl_xshmem_pool_alloc(task, arg);
+		break;
+	case XSHMEM_POOL_FREE:
+		ret = ioctl_xshmem_pool_free(task, arg);
+		break;
+	default:
+		mutex_unlock(&task->task_mutex);
+		pr_err("unsupported command\n");
+		return -EINVAL;
+	}
+	mutex_unlock(&task->task_mutex);
+
+	return ret;
+}
+
+static int xshmem_release(struct inode *inode, struct file *file)
+{
+	struct xshm_task_pool_node *node, *tmp;
+	struct xshm_task *task = file->private_data;
+
+	list_for_each_entry_safe(node, tmp, &task->list_head, task_node) {
+		list_del(&node->task_node);
+		spin_lock(&node->pool->xp_task_spinlock);
+		list_del(&node->pool_node);
+		spin_unlock(&node->pool->xp_task_spinlock);
+		xshmem_pool_put(node->pool);
+		kfree(node);
+	}
+
+	kfree(task);
+
+	return 0;
+}
+
+static const struct file_operations xshmem_fops = {
+	.owner		= THIS_MODULE,
+	.open		= xshmem_open,
+	.release	= xshmem_release,
+	.unlocked_ioctl	= xshmem_ioctl,
+};
+
+int xshmem_register_algo(struct xshm_pool_algo *algo)
+{
+	struct xshm_pool_algo *tmp;
+
+	if (!algo || !algo->xshm_pool_init || !algo->xshm_pool_free ||
+	    !algo->xshm_block_alloc || !algo->xshm_block_free)
+		return -EINVAL;
+
+	list_for_each_entry(tmp, &registered_algo, algo_node)
+		if (algo->num == tmp->num) {
+			pr_err("algorithm with the same id(%d,%s) has been registered\n",
+			       tmp->num, tmp->name);
+			return -EEXIST;
+		}
+
+	list_add_tail(&algo->algo_node, &registered_algo);
+
+	pr_info("algo: no: %d, name: %s, registered\n", algo->num, algo->name);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xshmem_register_algo);
+
+static int empty_algo_pool_init(struct xshm_pool *xp)
+{
+	pr_info("pool_init_hook: algo:%s, pool_size: %d\n", xp->algo->name, xp->size);
+	return 0;
+}
+
+static int empty_algo_pool_free(struct xshm_pool *xp)
+{
+	pr_info("pool_free_hook: algo:%s, pool_size: %d, pool_id:%d\n", xp->algo->name, xp->size, xp->pool_id);
+	return 0;
+}
+
+static int empty_algo_block_alloc(struct xshm_pool *xp, int size)
+{
+	pr_info("block_alloc_hook: pool_id:%d, alloc_size:%d\n", xp->pool_id, size);
+	return 0;
+}
+
+static int empty_algo_block_free(struct xshm_pool *xp, int offset)
+{
+	pr_info("block_free_hook: pool_id:%d, offset:%d\n", xp->pool_id, offset);
+	return 0;
+}
+
+/* Used just for test */
+static struct xshm_pool_algo empty_algo = {
+	.num			= 0,
+	.name			= "empty_algo",
+	.xshm_pool_init		= empty_algo_pool_init,
+	.xshm_pool_free		= empty_algo_pool_free,
+	.xshm_block_alloc	= empty_algo_block_alloc,
+	.xshm_block_free	= empty_algo_block_free,
+};
+
+static int __init xshmem_init(void)
+{
+	int ret;
+
+	xshmem_dev.minor = MISC_DYNAMIC_MINOR;
+	xshmem_dev.name = "xshmem_dev";
+	xshmem_dev.fops = &xshmem_fops;
+
+	ret = misc_register(&xshmem_dev);
+	if (ret) {
+		pr_err("misc_register failed, %d\n", ret);
+		return ret;
+	}
+
+	xshmem_register_algo(&empty_algo);
+
+	return 0;
+}
+module_init(xshmem_init);
+
+static void __exit xshmem_exit(void)
+{
+	misc_deregister(&xshmem_dev);
+	pr_info("module exit\n");
+}
+module_exit(xshmem_exit);
+
+MODULE_AUTHOR("Wang Wensheng <wangwensheng4@huawei.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/lib/xshmem/xshmem_framework.h b/lib/xshmem/xshmem_framework.h
new file mode 100644
index 0000000..7308ce4
--- /dev/null
+++ b/lib/xshmem/xshmem_framework.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) Huawei Technologies Co., Ltd. 2021. All rights reserved.
+ * Author: Huawei OS Kernel Lab
+ * Create: Tue Jun 15 06:49:07 2021
+ */
+#ifndef __XSHMEM_FRAMEWORK_H
+#define __XSHMEM_FRAMEWORK_H
+
+#include <linux/types.h>
+#include <linux/xshmem_framework.h>
+
+struct xshm_pool {
+	int pool_id;
+	int size;
+	atomic_t refcnt;
+
+	/* Used to protect the list of TASK attached */
+	spinlock_t xp_task_spinlock;
+	struct list_head list_head;
+	struct hlist_node hnode;
+
+	struct xshm_pool_algo *algo;
+
+	/* Used to serialize the alloc and free operation on the POOL */
+	struct mutex xp_block_mutex;
+	void *private;
+
+	int key_len;
+	/* MUST be the last element */
+	char key[0];
+};
+
+#define ALGO_NAME_MAX	20
+struct xshm_pool_algo {
+	int num;
+	char name[ALGO_NAME_MAX];
+	struct list_head algo_node;
+	int (*xshm_pool_init)(struct xshm_pool *xp);
+	int (*xshm_pool_free)(struct xshm_pool *xp);
+	int (*xshm_block_alloc)(struct xshm_pool *xp, int size);
+	int (*xshm_block_free)(struct xshm_pool *xp, int offset);
+};
+
+int xshmem_register_algo(struct xshm_pool_algo *algo);
+
+#endif