From: Wang Wensheng <wangwensheng4@huawei.com>
Use get and put operations to change the refcnt of a BLOCK, and delete the BLOCK automatically when its refcnt reaches zero.
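
For illustration, a minimal user-space sketch of the intended flow on an
already-opened xshmem fd whose POOL has been registered and attached (the
function name, the <linux/xshmem_framework.h> include path, the 4096 size and
the omitted error handling are assumptions for the example, not part of this
patch):

        #include <sys/ioctl.h>
        #include <linux/xshmem_framework.h>

        static void block_refcnt_example(int fd, int pool_id)
        {
                struct xshm_block_arg arg = { .pool_id = pool_id };
                int offset;

                /* ALLOC creates the BLOCK with refcnt = 1 and returns its offset */
                arg.size = 4096;
                offset = ioctl(fd, XSHMEM_BLOCK_ALLOC, &arg);

                /* GET and PUT raise and drop the refcnt of an existing BLOCK */
                arg.offset = offset;
                ioctl(fd, XSHMEM_BLOCK_GET, &arg);      /* refcnt: 1 -> 2 */
                ioctl(fd, XSHMEM_BLOCK_PUT, &arg);      /* refcnt: 2 -> 1 */

                /* the last PUT drops the refcnt to zero and the BLOCK is freed */
                ioctl(fd, XSHMEM_BLOCK_PUT, &arg);
        }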
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
---
 include/uapi/linux/xshmem_framework.h |  17 ++--
 lib/xshmem/xshmem_framework.c         | 152 +++++++++++++++++++++++++---------
 lib/xshmem/xshmem_framework.h         |  14 +++-
 lib/xshmem/xshmem_fsc.c               |  26 +++---
 4 files changed, 141 insertions(+), 68 deletions(-)
diff --git a/include/uapi/linux/xshmem_framework.h b/include/uapi/linux/xshmem_framework.h
index caf782b..79a9815 100644
--- a/include/uapi/linux/xshmem_framework.h
+++ b/include/uapi/linux/xshmem_framework.h
@@ -15,19 +15,18 @@ struct xshm_reg_arg {
         char *key;
 };
-struct xshm_alloc_arg {
+struct xshm_block_arg {
         int pool_id;
-        int size;
-};
-
-struct xshm_free_arg {
-        int pool_id;
-        unsigned int offset;
+        union {
+                int size;               // for alloc
+                unsigned int offset;    // for get and put
+        };
 };
 #define XSHMEM_POOL_REGISTER    _IOW('X', 1, struct xshm_reg_arg)
 #define XSHMEM_POOL_UNREGISTER  _IO ('X', 2)
-#define XSHMEM_POOL_ALLOC       _IOW('X', 3, struct xshm_alloc_arg)
-#define XSHMEM_POOL_FREE        _IOW('X', 4, struct xshm_free_arg)
+#define XSHMEM_BLOCK_ALLOC      _IOW('X', 3, struct xshm_block_arg)
+#define XSHMEM_BLOCK_GET        _IOW('X', 4, struct xshm_block_arg)
+#define XSHMEM_BLOCK_PUT        _IOW('X', 5, struct xshm_block_arg)
 #endif
diff --git a/lib/xshmem/xshmem_framework.c b/lib/xshmem/xshmem_framework.c
index f6e0772..9b4b390 100644
--- a/lib/xshmem/xshmem_framework.c
+++ b/lib/xshmem/xshmem_framework.c
@@ -94,7 +94,10 @@ static bool task_is_attached(struct xshm_task *task, struct xshm_pool *xp)
         return false;
 }
-/* the refcnt of the xp should be increased by one on success */
+/*
+ * The refcnt of the xp should be increased by one on success.
+ * We don't need to hold the xshmem_mutex here since xp->refcnt has already been increased.
+ */
 static int xshmem_pool_attach(struct xshm_task *task, struct xshm_pool *xp)
 {
         struct xshm_task_pool_node *node;
@@ -123,6 +126,7 @@ static int xshmem_pool_attach(struct xshm_task *task, struct xshm_pool *xp)
         return 0;
 }
+/* The caller should increase xp->refcnt to protect the POOL from being destroyed. */
 static int xshmem_pool_detach(struct xshm_task *task, struct xshm_pool *xp)
 {
         struct xshm_task_pool_node *node;
@@ -134,7 +138,6 @@ static int xshmem_pool_detach(struct xshm_task *task, struct xshm_pool *xp)
         spin_unlock(&xp->xp_task_spinlock);
         list_del(&node->task_node);
-        /* Use xshmem_pool_put() ?? */
         atomic_dec(&xp->refcnt);
         task->pool_count--;
         kfree(node);
@@ -199,21 +202,25 @@ static struct xshm_pool *xshmem_pool_create(char *key, unsigned int key_len,
         next_xshmem_id = (id + 1) & (XSHMEM_MAX_ID - 1);
-        xp->key_len = key_len;
         xp->pool_id = id;
         atomic_set(&xp->refcnt, 0);
+        spin_lock_init(&xp->xp_task_spinlock);
         INIT_LIST_HEAD(&xp->list_head);
+
+        mutex_init(&xp->xp_block_mutex);
+        INIT_LIST_HEAD(&xp->block_head);
+
+        xp->key_len = key_len;
         strncpy(xp->key, key, key_len);
         xp->key[key_len] = '\0';
-        mutex_init(&xp->xp_block_mutex);
-        spin_lock_init(&xp->xp_task_spinlock);
         hlist_add_head(&xp->hnode, bucket);
 exist:
         /*
-         * Here increase the POOL's refcnt by one, for the convenience of fallback in
-         * error branch in ioctl_xshmem_pool_register().
+         * Here increase the POOL's refcnt by one: it allows fallback in the error
+         * branch of ioctl_xshmem_pool_register() and also guarantees that the POOL
+         * cannot be deleted in the later attach routine.
          */
         atomic_inc(&xp->refcnt);
         mutex_unlock(&xshmem_mutex);
@@ -236,6 +243,8 @@ static struct xshm_pool *xshmem_pool_get(int pool_id)
 static void xshmem_pool_put(struct xshm_pool *xp)
 {
+        struct xshm_block *blk, *tmp;
+
         mutex_lock(&xshmem_mutex);
         if (!atomic_dec_and_test(&xp->refcnt)) {
                 mutex_unlock(&xshmem_mutex);
@@ -246,6 +255,12 @@ static void xshmem_pool_put(struct xshm_pool *xp)
         idr_remove(&xshmem_idr, xp->pool_id);
         mutex_unlock(&xshmem_mutex);
+        list_for_each_entry_safe(blk, tmp, &xp->block_head, block_node) {
+                list_del(&blk->block_node);
+                WARN(blk->refcnt, "POOL deleted with referenced BLOCK\n");
+                kfree(blk);
+        }
+
         xp->algo->xshm_pool_free(xp);
         kfree(xp);
 }
@@ -260,7 +275,7 @@ static int ioctl_xshmem_pool_register(struct xshm_task *task, unsigned long arg)
         struct xshm_pool_algo *algo;
         if (copy_from_user(&reg_arg, (struct xshmem_reg_arg __user *)arg, sizeof(reg_arg))) {
-                pr_err("copy_from_user failed\n");
+                pr_err("register: copy_from_user failed\n");
                 return -EFAULT;
         }
@@ -271,8 +286,10 @@ static int ioctl_xshmem_pool_register(struct xshm_task *task, unsigned long arg)
         }
         key_len = reg_arg.key_len;
-        if (!key_len || key_len > XSHM_KEY_SIZE)
+        if (!key_len || key_len > XSHM_KEY_SIZE) {
+                pr_err("key_len invalid\n");
                 return -EINVAL;
+        }
         if (copy_from_user(key, (char __user *)reg_arg.key, key_len)) {
                 pr_err("copy key from user failed\n");
@@ -307,50 +324,96 @@ static int ioctl_xshmem_pool_unregister(struct xshm_task *task, unsigned long ar
         return ret;
 }
-static int ioctl_xshmem_pool_alloc(struct xshm_task *task, unsigned long arg)
+/* The caller must hold xp->xp_block_mutex */
+static struct xshm_block *xshmem_find_block(struct xshm_pool *xp, u32 offset)
+{
+        struct xshm_block *blk;
+
+        list_for_each_entry(blk, &xp->block_head, block_node)
+                if (blk->offset == offset)
+                        return blk;
+
+        return NULL;
+}
+
+static int ioctl_xshmem_block_alloc(struct xshm_pool *xp, int size)
 {
         int ret;
-        struct xshm_pool *xp;
-        struct xshm_alloc_arg alloc_arg;
+        struct xshm_block *blk;
-        if (copy_from_user(&alloc_arg, (struct xshm_alloc_arg __user*)arg, sizeof(alloc_arg))) {
-                pr_err("copy_from_user failed\n");
-                return -EFAULT;
+        blk = kmalloc(sizeof(*blk), GFP_KERNEL);
+        if (unlikely(!blk)) {
+                pr_err("alloc xshm_block memory failed\n");
+                return -ENOMEM;
         }
-        xp = xshmem_pool_get(alloc_arg.pool_id);
-        if (!xp) {
-                pr_err("invalid pool_id\n");
-                return -EINVAL;
+        mutex_lock(&xp->xp_block_mutex);
+        ret = xp->algo->xshm_block_alloc(xp, blk, size);
+        if (ret < 0)
+                kfree(blk);
+        else {
+                blk->refcnt = 1;
+                blk->offset = ret;
+                list_add_tail(&blk->block_node, &xp->block_head);
         }
+        mutex_unlock(&xp->xp_block_mutex);
-        if (!task_is_attached(task, xp)) {
-                xshmem_pool_put(xp);
-                pr_err("TASK is not attached to POOL\n");
-                return -EINVAL;
-        }
+        return ret;
+}
+
+static int ioctl_xshmem_block_get(struct xshm_pool *xp, u32 offset)
+{
+        int ret = 0;
+        struct xshm_block *blk;
         mutex_lock(&xp->xp_block_mutex);
-        ret = xp->algo->xshm_block_alloc(xp, alloc_arg.size);
+        blk = xshmem_find_block(xp, offset);
+        if (blk)
+                blk->refcnt++;
+        else {
+                pr_err("get unalloced block\n");
+                ret = -ENODEV;
+        }
         mutex_unlock(&xp->xp_block_mutex);
-        xshmem_pool_put(xp);
+        return ret;
+}
+
+static int ioctl_xshmem_block_put(struct xshm_pool *xp, u32 offset)
+{
+        int ret = 0;
+        struct xshm_block *blk;
+
+        mutex_lock(&xp->xp_block_mutex);
+        blk = xshmem_find_block(xp, offset);
+        if (blk) {
+                blk->refcnt--;
+                if (!blk->refcnt) {
+                        ret = xp->algo->xshm_block_free(xp, blk); // should not fail
+                        list_del(&blk->block_node);
+                        kfree(blk);
+                }
+        } else {
+                pr_err("free unalloced block\n");
+                ret = -ENODEV;
+        }
+        mutex_unlock(&xp->xp_block_mutex);
         return ret;
 }
-static int ioctl_xshmem_pool_free(struct xshm_task *task, unsigned long arg)
+static int ioctl_xshmem_block_common(struct xshm_task *task, unsigned int cmd, unsigned long arg)
 {
         int ret;
         struct xshm_pool *xp;
-        struct xshm_free_arg free_arg;
+        struct xshm_block_arg block_arg;
- if (copy_from_user(&free_arg, (struct xshm_free_arg __user*)arg, sizeof(free_arg))) { + if (copy_from_user(&block_arg, (struct xshm_free_arg __user*)arg, sizeof(block_arg))) { pr_err("copy_from_user failed\n"); return -EFAULT; }
- xp = xshmem_pool_get(free_arg.pool_id); + xp = xshmem_pool_get(block_arg.pool_id); if (!xp) { pr_err("invalid pool_id\n"); return -EINVAL; @@ -362,9 +425,17 @@ static int ioctl_xshmem_pool_free(struct xshm_task *task, unsigned long arg) return -EINVAL; }
-        mutex_lock(&xp->xp_block_mutex);
-        ret = xp->algo->xshm_block_free(xp, free_arg.offset);
-        mutex_unlock(&xp->xp_block_mutex);
+        switch (cmd) {
+        case XSHMEM_BLOCK_ALLOC:
+                ret = ioctl_xshmem_block_alloc(xp, block_arg.size);
+                break;
+        case XSHMEM_BLOCK_PUT:
+                ret = ioctl_xshmem_block_put(xp, block_arg.offset);
+                break;
+        case XSHMEM_BLOCK_GET:
+                ret = ioctl_xshmem_block_get(xp, block_arg.offset);
+                break;
+        }
         xshmem_pool_put(xp);
@@ -384,11 +455,10 @@ static long xshmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
         case XSHMEM_POOL_UNREGISTER:
                 ret = ioctl_xshmem_pool_unregister(task, arg);
                 break;
-        case XSHMEM_POOL_ALLOC:
-                ret = ioctl_xshmem_pool_alloc(task, arg);
-                break;
-        case XSHMEM_POOL_FREE:
-                ret = ioctl_xshmem_pool_free(task, arg);
+        case XSHMEM_BLOCK_ALLOC:
+        case XSHMEM_BLOCK_PUT:
+        case XSHMEM_BLOCK_GET:
+                ret = ioctl_xshmem_block_common(task, cmd, arg);
                 break;
         default:
                 mutex_unlock(&task->task_mutex);
@@ -461,15 +531,15 @@ static int empty_algo_pool_free(struct xshm_pool *xp)
         return 0;
 }
-static int empty_algo_block_alloc(struct xshm_pool *xp, u32 size)
+static int empty_algo_block_alloc(struct xshm_pool *xp, struct xshm_block *blk, u32 size)
 {
         pr_info("block_alloc_hook:pool_id:%d, alloc_size:%d\n", xp->pool_id, size);
         return 0;
 }
-static int empty_algo_block_free(struct xshm_pool *xp, u32 offset)
+static int empty_algo_block_free(struct xshm_pool *xp, struct xshm_block *blk)
 {
-        pr_info("block_free_hook:pool_id:%d, offset:%d\n", xp->pool_id, offset);
+        pr_info("block_free_hook:pool_id:%d, offset:%d\n", xp->pool_id, blk->offset);
         return 0;
 }
diff --git a/lib/xshmem/xshmem_framework.h b/lib/xshmem/xshmem_framework.h
index c781e95..317547f 100644
--- a/lib/xshmem/xshmem_framework.h
+++ b/lib/xshmem/xshmem_framework.h
@@ -23,6 +23,7 @@ struct xshm_pool {
         /* Used to serialize the alloc and free operation on the POOL */
         struct mutex xp_block_mutex;
+        struct list_head block_head;    /* for all alloced blocks */
         void *private;
         int key_len;
@@ -30,6 +31,15 @@ struct xshm_pool {
         char key[0];
 };
+struct xshm_block {
+        int refcnt;
+        u32 offset;     /* The framework does not track the block size;
+                           the specific algorithm should manage it
+                           properly. */
+        struct list_head block_node;
+        void *private;
+};
+
 #define ALGO_NAME_MAX 20
 struct xshm_pool_algo {
         int num;
@@ -37,8 +47,8 @@ struct xshm_pool_algo {
         struct list_head algo_node;
         int (*xshm_pool_init)(struct xshm_pool *xp);
         int (*xshm_pool_free)(struct xshm_pool *xp);
-        int (*xshm_block_alloc)(struct xshm_pool *xp, u32 size);
-        int (*xshm_block_free)(struct xshm_pool *xp, u32 offset);
+        int (*xshm_block_alloc)(struct xshm_pool *xp, struct xshm_block *blk, u32 size);
+        int (*xshm_block_free)(struct xshm_pool *xp, struct xshm_block *blk);
 };
 int xshmem_register_algo(struct xshm_pool_algo *algo);
diff --git a/lib/xshmem/xshmem_fsc.c b/lib/xshmem/xshmem_fsc.c
index bb01926..87d7533 100644
--- a/lib/xshmem/xshmem_fsc.c
+++ b/lib/xshmem/xshmem_fsc.c
@@ -166,8 +166,9 @@ static int split_new_block(struct fsc_ctrl *ctrl, struct fsc_block *block, u32 s
         }
 }
-static int fsc_algo_block_alloc(struct xshm_pool *xp, u32 size)
+static int fsc_algo_block_alloc(struct xshm_pool *xp, struct xshm_block *blk, u32 size)
 {
+        int ret;
         u32 aligned_size;
         struct fsc_block *block;
         struct fsc_ctrl *ctrl = xp->private;
@@ -187,7 +188,12 @@ static int fsc_algo_block_alloc(struct xshm_pool *xp, u32 size)
         if (!block)
                 return -ENOSPC;
-        return split_new_block(ctrl, block, aligned_size);
+        ret = split_new_block(ctrl, block, aligned_size);
+        if (ret >= 0)
+                /* split success */
+                blk->private = block;
+
+        return ret;
 }
 static void check_and_combine_next_block(struct fsc_ctrl *ctrl, struct fsc_block *block)
@@ -227,22 +233,10 @@ static void check_and_combine_prev_block(struct fsc_ctrl *ctrl, struct fsc_block
         kfree(prev);
 }
-static int fsc_algo_block_free(struct xshm_pool *xp, u32 offset)
+static int fsc_algo_block_free(struct xshm_pool *xp, struct xshm_block *blk)
 {
-        bool found = false;
-        struct fsc_block *block;
         struct fsc_ctrl *ctrl = xp->private;
-
-        list_for_each_entry(block, &ctrl->block_head, block_node)
-                if (block->start == offset) {
-                        found = true;
-                        break;
-                }
-
-        if (!found) {
-                pr_err("the block is not alloced\n");
-                return -EINVAL;
-        }
+        struct fsc_block *block = blk->private;
         if (block->is_free) {
                 pr_err("free unalloced block\n");