From: Wang Wensheng <wangwensheng4@huawei.com>
All allocated BLOCKs are contained in their POOL, and every put/get/delete operation must first look the BLOCK up by its offset. Using an rbtree as the container makes that lookup O(log n) instead of an O(n) walk over a linked list.
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
---
 lib/xshmem/xshmem_framework.c | 57 +++++++++++++++++++++++++++++++++++--------
 lib/xshmem/xshmem_framework.h |  5 ++--
 2 files changed, 50 insertions(+), 12 deletions(-)
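[Note for reviewers: the patch switches the per-POOL BLOCK container to the kernel rbtree from <linux/rbtree.h>, keyed by block offset. For anyone unfamiliar with that API, below is a minimal self-contained sketch of the search/insert idiom the patch follows. struct demo_block, demo_find() and demo_add() are hypothetical names for illustration only, not part of the patch.]

#include <linux/rbtree.h>
#include <linux/types.h>

/* Hypothetical stand-in for xshm_block, keyed by offset. */
struct demo_block {
        u32 offset;
        struct rb_node rb_node;
};

/* Lookup is an O(log n) descent comparing the key at each node,
 * the same shape as xshmem_find_block() below. */
static struct demo_block *demo_find(struct rb_root *root, u32 offset)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct demo_block *blk = rb_entry(node, struct demo_block, rb_node);

                if (offset > blk->offset)
                        node = node->rb_right;
                else if (offset < blk->offset)
                        node = node->rb_left;
                else
                        return blk;
        }

        return NULL;
}

/* Insert finds the leaf position, links the node, then lets the rbtree
 * core rebalance, mirroring xshmem_add_block() below (which additionally
 * WARNs on a duplicate offset). */
static void demo_add(struct rb_root *root, struct demo_block *new)
{
        struct rb_node *parent = NULL;
        struct rb_node **link = &root->rb_node;

        while (*link) {
                struct demo_block *tmp = rb_entry(*link, struct demo_block, rb_node);

                parent = *link;
                link = new->offset > tmp->offset ? &parent->rb_right : &parent->rb_left;
        }

        rb_link_node(&new->rb_node, parent, link);      /* attach as a leaf */
        rb_insert_color(&new->rb_node, root);           /* rebalance/recolor */
}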
diff --git a/lib/xshmem/xshmem_framework.c b/lib/xshmem/xshmem_framework.c
index 392abaa..cf8f91e 100644
--- a/lib/xshmem/xshmem_framework.c
+++ b/lib/xshmem/xshmem_framework.c
@@ -194,7 +194,7 @@ static int xshmem_pool_attach(struct xshm_task *task, struct xshm_pool *xp)
 static void xshmem_block_destroy(struct xshm_pool *xp, struct xshm_block *blk)
 {
         xp->algo->xshm_block_free(xp, blk); // should not fail
-        list_del(&blk->block_node);
+        rb_erase(&blk->rb_node, &xp->block_root);
         kfree(blk);
 }
 
@@ -293,7 +293,7 @@ static struct xshm_pool *xshmem_pool_create(struct xshm_pool_algo *algo, struct
         INIT_LIST_HEAD(&xp->list_head);
 
         mutex_init(&xp->xp_block_mutex);
-        INIT_LIST_HEAD(&xp->block_head);
+        xp->block_root.rb_node = NULL;
 
         xp->key_len = key_len;
         strncpy(xp->key, key, key_len);
@@ -366,7 +366,7 @@ static int xshmem_pool_put(struct xshm_pool *xp)
         idr_remove(&xshmem_idr, xp->pool_id);
         mutex_unlock(&xshmem_mutex);
 
-        WARN(!list_empty(&xp->block_head), "POOL deleted with referenced BLOCK\n");
+        WARN(!RB_EMPTY_ROOT(&xp->block_root), "POOL deleted with referenced BLOCK\n");
 
         xp->algo->xshm_pool_free(xp);
         kfree(xp);
@@ -437,15 +437,47 @@ static int ioctl_xshmem_pool_unregister(struct xshm_task *task, unsigned long ar
 /* The caller must hold xp->xp_block_mutex */
 static struct xshm_block *xshmem_find_block(struct xshm_pool *xp, u32 offset)
 {
-        struct xshm_block *blk;
+        struct rb_node *rb_node = xp->block_root.rb_node;
+
+        while (rb_node) {
+                struct xshm_block *blk = rb_entry(rb_node, struct xshm_block, rb_node);
 
-        list_for_each_entry(blk, &xp->block_head, block_node)
-                if (blk->offset == offset)
+                if (offset > blk->offset)
+                        rb_node = rb_node->rb_right;
+                else if (offset < blk->offset)
+                        rb_node = rb_node->rb_left;
+                else
                         return blk;
+        }
 
         return NULL;
 }
 
+static void xshmem_add_block(struct xshm_pool *xp, struct xshm_block *blk)
+{
+        struct rb_node *parent = NULL;
+        struct rb_node **link = &xp->block_root.rb_node;
+
+        while (*link) {
+                struct xshm_block *tmp = NULL;
+
+                parent = *link;
+                tmp = rb_entry(*link, struct xshm_block, rb_node);
+
+                if (blk->offset > tmp->offset)
+                        link = &parent->rb_right;
+                else if (blk->offset < tmp->offset)
+                        link = &parent->rb_left;
+                else {
+                        WARN(1, "alloc one block more than once, the POOL's algo got broken\n");
+                        return;
+                }
+        }
+
+        rb_link_node(&blk->rb_node, parent, link);
+        rb_insert_color(&blk->rb_node, &xp->block_root);
+}
+
 static int ioctl_xshmem_block_alloc(struct xshm_pool *xp, struct xshm_task_pool_node *node,
                                     struct xshm_block_arg *arg)
 {
@@ -476,7 +508,7 @@ static int ioctl_xshmem_block_alloc(struct xshm_pool *xp, struct xshm_task_pool_
                 blk->refcnt = 1;
                 arg->offset = blk->offset;
                 INIT_LIST_HEAD(&blk->list_head);
-                list_add_tail(&blk->block_node, &xp->block_head);
+                xshmem_add_block(xp, blk);
 
                 task_link_blk(node, blk, cnt);
         }
@@ -784,16 +816,18 @@ int xshmem_register_algo(struct xshm_pool_algo *algo)
 }
 EXPORT_SYMBOL_GPL(xshmem_register_algo);
 
+#define EMPTY_ALGO_OFFSET_SHIFT         32
+#define EMPTY_ALGO_POOL_SIZE_MASK       ((1UL << 32) - 1)
 static bool empty_algo_pool_same(struct xshm_pool *xp, struct xshm_reg_arg *arg)
 {
         pr_info("pool_same_hook: algo:%s, pool_size: %ld\n", xp->algo->name, arg->pool_size);
-        return (unsigned long)xp->private == arg->pool_size;
+        return ((unsigned long)xp->private & EMPTY_ALGO_POOL_SIZE_MASK) == arg->pool_size;
 }
 
 static int empty_algo_pool_init(struct xshm_pool *xp, struct xshm_reg_arg *arg)
 {
         pr_info("pool_init_hook: algo:%s, pool_size: %ld\n", xp->algo->name, arg->pool_size);
-        xp->private = (void *)arg->pool_size;
+        xp->private = (void *)(arg->pool_size & EMPTY_ALGO_POOL_SIZE_MASK);
         return 0;
 }
 
@@ -808,7 +842,10 @@ static int empty_algo_block_alloc(struct xshm_pool *xp, struct xshm_block *blk)
 {
         pr_info("block_alloc_hook:pool_id:%d, alloc_size:%lu\n", xp->pool_id, blk->alloc_size);
         blk->real_size = blk->alloc_size;
-        blk->offset = 0;
+        blk->offset = (unsigned long)xp->private >> EMPTY_ALGO_OFFSET_SHIFT;
+        xp->private = (void *)(((unsigned long)xp->private & EMPTY_ALGO_POOL_SIZE_MASK) |
+                               ((((unsigned long)xp->private >> EMPTY_ALGO_OFFSET_SHIFT) + 1))
+                               << EMPTY_ALGO_OFFSET_SHIFT);
         return 0;
 }
diff --git a/lib/xshmem/xshmem_framework.h b/lib/xshmem/xshmem_framework.h
index 6234047..7498b8e 100644
--- a/lib/xshmem/xshmem_framework.h
+++ b/lib/xshmem/xshmem_framework.h
@@ -8,6 +8,7 @@
 
 #include <linux/types.h>
 #include <linux/xshmem_framework.h>
+#include <linux/rbtree.h>
 #include <linux/seq_file.h>
 
 struct xshm_pool {
@@ -23,7 +24,7 @@ struct xshm_pool {
 
         /* Used to serialize the alloc and free operation on the POOL */
         struct mutex xp_block_mutex;
-        struct list_head block_head;    /* for all alloced block */
+        struct rb_root block_root;      /* for all alloced block */
         void *private;
 
         int key_len;
@@ -37,7 +38,7 @@ struct xshm_block {
         unsigned long alloc_size;
         unsigned long real_size;
 
-        struct list_head block_node;
+        struct rb_node rb_node;
         void *private;
 
         struct list_head list_head;
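
[Aside on the empty-algo hunks at the end of xshmem_framework.c: xp->private now carries two values in one unsigned long, the pool size in the low 32 bits and a monotonically increasing next-offset counter in the high 32 bits, so that repeated test allocations no longer all return offset 0 and trip the duplicate-offset WARN in xshmem_add_block(). The open-coded shift/mask arithmetic is equivalent to the sketch below; the helper names are hypothetical, and the scheme assumes a 64-bit unsigned long and pool sizes below 4 GiB.]

#define EMPTY_ALGO_OFFSET_SHIFT         32
#define EMPTY_ALGO_POOL_SIZE_MASK       ((1UL << 32) - 1)

/* Encoding used by the empty algo: private = (next_offset << 32) | pool_size */
static inline unsigned long empty_algo_pool_size(void *private)
{
        return (unsigned long)private & EMPTY_ALGO_POOL_SIZE_MASK;
}

static inline unsigned long empty_algo_next_offset(void *private)
{
        return (unsigned long)private >> EMPTY_ALGO_OFFSET_SHIFT;
}

static inline void *empty_algo_pack(unsigned long pool_size, unsigned long next_offset)
{
        return (void *)((next_offset << EMPTY_ALGO_OFFSET_SHIFT) |
                        (pool_size & EMPTY_ALGO_POOL_SIZE_MASK));
}

[With these helpers, empty_algo_block_alloc() reduces to reading blk->offset = empty_algo_next_offset(xp->private) and then repacking xp->private with the counter incremented, which is exactly what the @@ -808,7 +842,10 @@ hunk open-codes.]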