From: Wang Wensheng <wangwensheng4@huawei.com>
We allow a TASK to register the same POOL more than once. The register count is kept in the kernel and decreased by one each time the user unregisters the POOL; only when that count drops to zero is the POOL actually put.
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
---
 lib/xshmem/xshmem_framework.c | 81 ++++++++++++++++++++++++-------------------
 1 file changed, 45 insertions(+), 36 deletions(-)
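For reference, here is a minimal, self-contained userspace sketch of that counting rule. It is illustrative only: demo_node, demo_attach and demo_detach are made-up names, not the xshmem structures or functions touched by this patch; they merely mirror the semantics the patch gives xshmem_pool_attach()/xshmem_pool_detach().

/*
 * Standalone sketch of the register-count rule described above.
 * All names here are hypothetical stand-ins, not kernel APIs.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_node {
	int pool_id;               /* which POOL the node refers to */
	int regcnt;                /* how many times the task registered it */
	struct demo_node *next;
};

static struct demo_node *demo_find(struct demo_node *head, int pool_id)
{
	for (; head; head = head->next)
		if (head->pool_id == pool_id)
			return head;
	return NULL;
}

/* Register: reuse an existing node and bump regcnt, or create a new one. */
static int demo_attach(struct demo_node **head, int pool_id)
{
	struct demo_node *node = demo_find(*head, pool_id);

	if (node) {
		node->regcnt++;
		return 0;
	}

	node = malloc(sizeof(*node));
	if (!node)
		return -1;
	node->pool_id = pool_id;
	node->regcnt = 1;
	node->next = *head;
	*head = node;
	return 0;
}

/* Unregister: the node is only unlinked and freed when regcnt hits zero. */
static int demo_detach(struct demo_node **head, int pool_id)
{
	struct demo_node **pp, *node;

	for (pp = head; (node = *pp) != NULL; pp = &node->next) {
		if (node->pool_id != pool_id)
			continue;
		if (--node->regcnt)
			return 0;        /* still registered by this task */
		*pp = node->next;        /* last registration gone: drop it */
		free(node);
		return 0;
	}
	return -1;                       /* was never registered */
}

int main(void)
{
	struct demo_node *head = NULL;

	demo_attach(&head, 42);
	demo_attach(&head, 42);          /* second register of the same pool */
	demo_detach(&head, 42);          /* node survives, regcnt drops to 1 */
	printf("regcnt after one detach: %d\n", demo_find(head, 42)->regcnt);
	demo_detach(&head, 42);          /* count reaches zero, node is freed */
	printf("still registered: %s\n", demo_find(head, 42) ? "yes" : "no");
	return 0;
}

Run standalone, it prints a regcnt of 1 after the first detach and reports the node gone after the second, which is the behaviour the patch implements below.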
diff --git a/lib/xshmem/xshmem_framework.c b/lib/xshmem/xshmem_framework.c
index 0d4e819..9966b48 100644
--- a/lib/xshmem/xshmem_framework.c
+++ b/lib/xshmem/xshmem_framework.c
@@ -62,8 +62,9 @@ struct xshm_task_block_cnt {
 };
 
 struct xshm_task_pool_node {
-	struct list_head task_node;	// list node in TASK
-	struct list_head pool_node;	// list node in POOL
+	int regcnt;			/* register count of the TASK registering to the POOL */
+	struct list_head task_node;	/* list node in TASK */
+	struct list_head pool_node;	/* list node in POOL */
 	struct xshm_task *task;
 	struct xshm_pool *pool;
@@ -160,31 +161,32 @@ static int xshmem_pool_attach(struct xshm_task *task, struct xshm_pool *xp)
 {
 	struct xshm_task_pool_node *node;
 
-	if (find_task_pool_node(task, xp)) {
-		pr_err("TASK has been attached already\n");
-		return -EEXIST;
-	}
+	node = find_task_pool_node(task, xp);
 
-	node = kmalloc(sizeof(*node), GFP_KERNEL);
-	if (unlikely(!node)) {
-		pr_err("alloc xshm_task_pool_node failed\n");
-		return -ENOMEM;
-	}
+	if (!node) {
+		node = kmalloc(sizeof(*node), GFP_KERNEL);
+		if (unlikely(!node)) {
+			pr_err("alloc xshm_task_pool_node failed\n");
+			return -ENOMEM;
+		}
 
-	node->task = task;
-	node->pool = xp;
-	atomic64_set(&node->alloc_size, 0);
-	atomic64_set(&node->real_alloc_size, 0);
-	list_add_tail(&node->task_node, &task->list_head);
-	INIT_LIST_HEAD(&node->list_head);
+		node->regcnt = 1;
+		node->task = task;
+		node->pool = xp;
+		atomic64_set(&node->alloc_size, 0);
+		atomic64_set(&node->real_alloc_size, 0);
+		list_add_tail(&node->task_node, &task->list_head);
+		INIT_LIST_HEAD(&node->list_head);
 
-	/* Do not add node to xp->list_head until all its elements initilized */
-	spin_lock(&xp->xp_task_spinlock);
-	list_add_tail(&node->pool_node, &xp->list_head);
-	spin_unlock(&xp->xp_task_spinlock);
+		/* Do not add node to xp->list_head until all its elements initilized */
+		spin_lock(&xp->xp_task_spinlock);
+		list_add_tail(&node->pool_node, &xp->list_head);
+		spin_unlock(&xp->xp_task_spinlock);
 
-	atomic_inc(&xp->refcnt);
-	task->pool_count++;
+		atomic_inc(&xp->refcnt);
+		task->pool_count++;
+	} else
+		node->regcnt++;
 	return 0;
 }
@@ -220,20 +222,24 @@ static int xshmem_pool_detach(struct xshm_task *task, struct xshm_pool *xp)
 {
 	struct xshm_task_pool_node *node;
 
-	list_for_each_entry(node, &task->list_head, task_node)
-		if (node->pool == xp) {
-			spin_lock(&xp->xp_task_spinlock);
-			list_del(&node->pool_node);
-			spin_unlock(&xp->xp_task_spinlock);
+	node = find_task_pool_node(task, xp);
+	if (node) {
+		node->regcnt--;
+		if (node->regcnt)
+			return 0;
 
-			xshmem_task_clear_block(node);
+		spin_lock(&xp->xp_task_spinlock);
+		list_del(&node->pool_node);
+		spin_unlock(&xp->xp_task_spinlock);
 
-			list_del(&node->task_node);
-			atomic_dec(&xp->refcnt);
-			task->pool_count--;
-			kfree(node);
-			return 0;
-		}
+		xshmem_task_clear_block(node);
+
+		list_del(&node->task_node);
+		atomic_dec(&xp->refcnt);
+		task->pool_count--;
+		kfree(node);
+		return 0;
+	}
pr_err("the POOL has already been detached\n"); return -ESRCH; @@ -639,14 +645,17 @@ static int xshmem_release(struct inode *inode, struct file *file) mutex_unlock(&task_mutex);
list_for_each_entry_safe(node, tmp, &task->list_head, task_node) { + if (node->regcnt > 1) + pr_warn("Not all user unregister the POOL, id:%d\n", node->pool->pool_id); + spin_lock(&node->pool->xp_task_spinlock); list_del(&node->pool_node); spin_unlock(&node->pool->xp_task_spinlock);
xshmem_task_clear_block(node); - list_del(&node->task_node); xshmem_pool_put(node->pool); + kfree(node); }