From: Wang Wensheng <wangwensheng4@huawei.com>
The user should get the BLOCK before using it and put it after use. The kernel puts all the BLOCKs associated with a TASK when the TASK exits.
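
For illustration, the intended userspace flow is roughly the sketch below. The pool_id field of struct xshm_block_arg and the way the device fd is obtained are assumptions made for the example, not definitions introduced by this patch:

  #include <sys/ioctl.h>
  #include <linux/xshmem_framework.h>

  int example(int fd, int pool_id)  /* fd: open xshmem device, POOL already attached */
  {
          struct xshm_block_arg arg = { .pool_id = pool_id, .size = 4096 }; /* .pool_id assumed */
          int off = ioctl(fd, XSHMEM_BLOCK_ALLOC, &arg); /* BLOCK refcnt = 1, returns offset */

          if (off < 0)
                  return off;

          arg.offset = off;
          if (ioctl(fd, XSHMEM_BLOCK_GET, &arg) < 0)     /* get before use, refcnt = 2 */
                  return -1;
          /* ... use the BLOCK ... */
          ioctl(fd, XSHMEM_BLOCK_PUT, &arg);             /* put after use, refcnt = 1 */
          ioctl(fd, XSHMEM_BLOCK_PUT, &arg);             /* drop the ALLOC ref; last put frees */
          return 0;
  }

A TASK that exits without the final PUT no longer leaks the BLOCK: the release path drops every reference the TASK still holds.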
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
---
 include/uapi/linux/xshmem_framework.h |   4 +
 lib/xshmem/xshmem_framework.c         | 165 ++++++++++++++++++++++++++--------
 lib/xshmem/xshmem_fsc.c               |   2 +-
 3 files changed, 133 insertions(+), 38 deletions(-)
diff --git a/include/uapi/linux/xshmem_framework.h b/include/uapi/linux/xshmem_framework.h
index 79a9815..2ac2467 100644
--- a/include/uapi/linux/xshmem_framework.h
+++ b/include/uapi/linux/xshmem_framework.h
@@ -29,4 +29,8 @@ struct xshm_block_arg {
 #define XSHMEM_BLOCK_GET	_IOW('X', 4, struct xshm_block_arg)
 #define XSHMEM_BLOCK_PUT	_IOW('X', 5, struct xshm_block_arg)
 
+#define XSHMEM_ALGO_EMPTY	-1
+#define XSHMEM_ALGO_BLOCK	0
+#define XSHMEM_ALGO_FSC		1
+
 #endif
diff --git a/lib/xshmem/xshmem_framework.c b/lib/xshmem/xshmem_framework.c
index 9b4b390..561f235 100644
--- a/lib/xshmem/xshmem_framework.c
+++ b/lib/xshmem/xshmem_framework.c
@@ -51,18 +51,27 @@ static struct hlist_head *hash_bucket(char *key, int len)
 	return &xshmem_key_list[bucket & ((1U << XSHM_HLIST_TABLE_BIT) - 1U)];
 }
 
+struct xshm_task_block_cnt {
+	struct list_head	list;
+	struct xshm_block	*blk;
+	int			refcnt;
+};
+
 struct xshm_task_pool_node {
-	struct list_head task_node; // list node in TASK
-	struct list_head pool_node; // list node in POOL
-	struct xshm_task *task;
-	struct xshm_pool *pool;
+	struct list_head	task_node;	// list node in TASK
+	struct list_head	pool_node;	// list node in POOL
+	struct xshm_task	*task;
+	struct xshm_pool	*pool;
+
+	/* list_head for all BLOCKs' refcnt in the POOL allocated by the TASK */
+	struct list_head	block_head;
 };
 
 struct xshm_task {
-	pid_t pid;
-	int pool_count;
-	struct list_head list_head;
-	struct mutex task_mutex;
+	pid_t			pid;
+	int			pool_count;
+	struct list_head	list_head;
+	struct mutex		task_mutex;
 };
 
 static int xshmem_open(struct inode *inode, struct file *file)
@@ -83,15 +92,15 @@ static int xshmem_open(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static bool task_is_attached(struct xshm_task *task, struct xshm_pool *xp)
+static struct xshm_task_pool_node *find_task_pool_node(struct xshm_task *task, struct xshm_pool *xp)
 {
 	struct xshm_task_pool_node *node;
 
 	list_for_each_entry(node, &task->list_head, task_node)
 		if (node->pool == xp)
-			return true;
+			return node;
 
-	return false;
+	return NULL;
 }
 
 /*
@@ -102,7 +111,7 @@ static int xshmem_pool_attach(struct xshm_task *task, struct xshm_pool *xp)
 {
 	struct xshm_task_pool_node *node;
 
-	if (task_is_attached(task, xp)) {
+	if (find_task_pool_node(task, xp)) {
 		pr_err("TASK has been attached already\n");
 		return -EEXIST;
 	}
@@ -116,6 +125,9 @@ static int xshmem_pool_attach(struct xshm_task *task, struct xshm_pool *xp)
 	node->task = task;
 	node->pool = xp;
 	list_add_tail(&node->task_node, &task->list_head);
+	INIT_LIST_HEAD(&node->block_head);
+
+	/* Do not add node to xp->list_head until all its elements are initialized */
 	spin_lock(&xp->xp_task_spinlock);
 	list_add_tail(&node->pool_node, &xp->list_head);
 	spin_unlock(&xp->xp_task_spinlock);
@@ -126,6 +138,31 @@ static int xshmem_pool_attach(struct xshm_task *task, struct xshm_pool *xp)
 	return 0;
 }
 
+static void xshmem_block_destroy(struct xshm_pool *xp, struct xshm_block *blk)
+{
+	xp->algo->xshm_block_free(xp, blk);	// should not fail
+	list_del(&blk->block_node);
+	kfree(blk);
+}
+
+static void xshmem_task_clear_block(struct xshm_task_pool_node *node)
+{
+	struct xshm_pool *xp = node->pool;
+	struct xshm_task_block_cnt *cnt, *tmp;
+
+	mutex_lock(&xp->xp_block_mutex);
+	list_for_each_entry_safe(cnt, tmp, &node->block_head, list) {
+		struct xshm_block *blk = cnt->blk;
+
+		blk->refcnt -= cnt->refcnt;
+		if (!blk->refcnt)
+			xshmem_block_destroy(xp, blk);
+
+		kfree(cnt);
+	}
+	mutex_unlock(&xp->xp_block_mutex);
+}
+
 /* The caller should increase xp->refcnt to protect the POOL from being destroyed. */
 static int xshmem_pool_detach(struct xshm_task *task, struct xshm_pool *xp)
 {
@@ -137,6 +174,8 @@ static int xshmem_pool_detach(struct xshm_task *task, struct xshm_pool *xp)
 	list_del(&node->pool_node);
 	spin_unlock(&xp->xp_task_spinlock);
 
+	xshmem_task_clear_block(node);
+
 	list_del(&node->task_node);
 	atomic_dec(&xp->refcnt);
 	task->pool_count--;
@@ -243,8 +282,6 @@ static struct xshm_pool *xshmem_pool_get(int pool_id)
 
 static void xshmem_pool_put(struct xshm_pool *xp)
 {
-	struct xshm_block *blk, *tmp;
-
 	mutex_lock(&xshmem_mutex);
 	if (!atomic_dec_and_test(&xp->refcnt)) {
 		mutex_unlock(&xshmem_mutex);
@@ -255,11 +292,7 @@ static void xshmem_pool_put(struct xshm_pool *xp)
 	idr_remove(&xshmem_idr, xp->pool_id);
 	mutex_unlock(&xshmem_mutex);
 
-	list_for_each_entry_safe(blk, tmp, &xp->block_head, block_node) {
-		list_del(&blk->block_node);
-		WARN(blk->refcnt, "POOL deleted with referenced BLOCK\n");
-		kfree(blk);
-	}
+	WARN(!list_empty(&xp->block_head), "POOL deleted with referenced BLOCK\n");
 
 	xp->algo->xshm_pool_free(xp);
 	kfree(xp);
@@ -336,10 +369,11 @@ static struct xshm_block *xshmem_find_block(struct xshm_pool *xp, u32 offset)
 	return NULL;
 }
 
-static int ioctl_xshmem_block_alloc(struct xshm_pool *xp, int size)
+static int ioctl_xshmem_block_alloc(struct xshm_pool *xp, struct xshm_task_pool_node *node, int size)
 {
 	int ret;
 	struct xshm_block *blk;
+	struct xshm_task_block_cnt *cnt;
 
 	blk = kmalloc(sizeof(*blk), GFP_KERNEL);
 	if (unlikely(!blk)) {
@@ -347,30 +381,71 @@ static int ioctl_xshmem_block_alloc(struct xshm_pool *xp, int size)
 		return -ENOMEM;
 	}
 
+	cnt = kmalloc(sizeof(*cnt), GFP_KERNEL);
+	if (unlikely(!cnt)) {
+		pr_err("alloc xshm_task_block_cnt memory failed\n");
+		kfree(blk);
+		return -ENOMEM;
+	}
+
 	mutex_lock(&xp->xp_block_mutex);
 	ret = xp->algo->xshm_block_alloc(xp, blk, size);
-	if (ret < 0)
+	if (ret < 0) {
 		kfree(blk);
-	else {
+		kfree(cnt);
+	} else {
 		blk->refcnt = 1;
 		blk->offset = ret;
 		list_add_tail(&blk->block_node, &xp->block_head);
+
+		cnt->refcnt = 1;
+		cnt->blk = blk;
+		list_add_tail(&cnt->list, &node->block_head);
 	}
 	mutex_unlock(&xp->xp_block_mutex);
 
 	return ret;
 }
 
-static int ioctl_xshmem_block_get(struct xshm_pool *xp, u32 offset)
+static struct xshm_task_block_cnt *
+find_task_block_cnt(struct xshm_task_pool_node *node, struct xshm_block *blk)
+{
+	struct xshm_task_block_cnt *cnt;
+
+	list_for_each_entry(cnt, &node->block_head, list)
+		if (cnt->blk == blk)
+			return cnt;
+
+	return NULL;
+}
+
+static int ioctl_xshmem_block_get(struct xshm_pool *xp, struct xshm_task_pool_node *node, u32 offset)
 {
 	int ret = 0;
 	struct xshm_block *blk;
 
 	mutex_lock(&xp->xp_block_mutex);
 	blk = xshmem_find_block(xp, offset);
-	if (blk)
+	if (blk) {
+		struct xshm_task_block_cnt *cnt;
+
+		cnt = find_task_block_cnt(node, blk);
+		if (!cnt) {
+			cnt = kmalloc(sizeof(*cnt), GFP_KERNEL);
+			if (!cnt) {
+				mutex_unlock(&xp->xp_block_mutex);
+				pr_err("get:alloc xshm_task_block_cnt memory failed\n");
+				return -ENOMEM;
+			}
+
+			cnt->refcnt = 1;
+			cnt->blk = blk;
+			list_add_tail(&cnt->list, &node->block_head);
+		} else
+			cnt->refcnt++;
+
 		blk->refcnt++;
-	else {
+	} else {
 		pr_err("get unalloced block\n");
 		ret = -ENODEV;
 	}
@@ -379,7 +454,7 @@ static int ioctl_xshmem_block_get(struct xshm_pool *xp, u32 offset)
 	return ret;
 }
 
-static int ioctl_xshmem_block_put(struct xshm_pool *xp, u32 offset)
+static int ioctl_xshmem_block_put(struct xshm_pool *xp, struct xshm_task_pool_node *node, u32 offset)
 {
 	int ret = 0;
 	struct xshm_block *blk;
@@ -387,12 +462,24 @@
 	mutex_lock(&xp->xp_block_mutex);
 	blk = xshmem_find_block(xp, offset);
 	if (blk) {
-		blk->refcnt--;
-		if (!blk->refcnt) {
-			ret = xp->algo->xshm_block_free(xp, blk); // should not fail
-			list_del(&blk->block_node);
-			kfree(blk);
+		struct xshm_task_block_cnt *cnt;
+
+		cnt = find_task_block_cnt(node, blk);
+		if (!cnt) {
+			mutex_unlock(&xp->xp_block_mutex);
+			pr_err("the TASK cannot put the block\n");
+			return -EPERM;
 		}
+
+		cnt->refcnt--;
+		if (!cnt->refcnt) {
+			list_del(&cnt->list);
+			kfree(cnt);
+		}
+
+		blk->refcnt--;
+		if (!blk->refcnt)
+			xshmem_block_destroy(xp, blk);
 	} else {
 		pr_err("free unalloced block\n");
 		ret = -ENODEV;
@@ -407,6 +494,7 @@ static int ioctl_xshmem_block_common(struct xshm_task *task, unsigned int cmd, u
 	int ret;
 	struct xshm_pool *xp;
 	struct xshm_block_arg block_arg;
+	struct xshm_task_pool_node *node;
 
 	if (copy_from_user(&block_arg, (struct xshm_free_arg __user*)arg, sizeof(block_arg))) {
 		pr_err("copy_from_user failed\n");
@@ -419,7 +507,7 @@ static int ioctl_xshmem_block_common(struct xshm_task *task, unsigned int cmd, u
 		return -EINVAL;
 	}
 
-	if (!task_is_attached(task, xp)) {
+	if (!(node = find_task_pool_node(task, xp))) {
 		xshmem_pool_put(xp);
 		pr_err("TASK is not attached to POOL\n");
 		return -EINVAL;
@@ -427,13 +515,13 @@ static int ioctl_xshmem_block_common(struct xshm_task *task, unsigned int cmd, u
 
 	switch(cmd) {
 	case XSHMEM_BLOCK_ALLOC:
-		ret = ioctl_xshmem_block_alloc(xp, block_arg.size);
+		ret = ioctl_xshmem_block_alloc(xp, node, block_arg.size);
 		break;
 	case XSHMEM_BLOCK_PUT:
-		ret = ioctl_xshmem_block_put(xp, block_arg.offset);
+		ret = ioctl_xshmem_block_put(xp, node, block_arg.offset);
 		break;
 	case XSHMEM_BLOCK_GET:
-		ret = ioctl_xshmem_block_get(xp, block_arg.offset);
+		ret = ioctl_xshmem_block_get(xp, node, block_arg.offset);
 		break;
 	}
 
@@ -476,10 +564,13 @@ static int xshmem_release(struct inode *inode, struct file *file)
 	struct xshm_task *task = file->private_data;
 
 	list_for_each_entry_safe(node, tmp, &task->list_head, task_node) {
-		list_del(&node->task_node);
 		spin_lock(&node->pool->xp_task_spinlock);
 		list_del(&node->pool_node);
 		spin_unlock(&node->pool->xp_task_spinlock);
+
+		xshmem_task_clear_block(node);
+
+		list_del(&node->task_node);
 		xshmem_pool_put(node->pool);
 		kfree(node);
 	}
@@ -545,7 +636,7 @@ static int empty_algo_block_free(struct xshm_pool *xp, struct xshm_block *blk)
 
 /* Use just for test */
 static struct xshm_pool_algo empty_algo = {
-	.num = 0,
+	.num = XSHMEM_ALGO_EMPTY,
 	.name = "empty_algo",
 	.xshm_pool_init = empty_algo_pool_init,
 	.xshm_pool_free = empty_algo_pool_free,
diff --git a/lib/xshmem/xshmem_fsc.c b/lib/xshmem/xshmem_fsc.c
index 87d7533..2b18a25 100644
--- a/lib/xshmem/xshmem_fsc.c
+++ b/lib/xshmem/xshmem_fsc.c
@@ -252,7 +252,7 @@ static int fsc_algo_block_free(struct xshm_pool *xp, struct xshm_block *blk)
 }
 
 struct xshm_pool_algo fsc_algo = {
-	.num = 1,
+	.num = XSHMEM_ALGO_FSC,
 	.name = "fsc_algo",
 	.xshm_pool_init = fsc_algo_pool_init,
 	.xshm_pool_free = fsc_algo_pool_free,