From: Wang Wensheng wangwensheng4@huawei.com
We split the free range into a series of BLOCKs of the same size and use a bitmap to mark whether each BLOCK is free.
When allocating, we return the first free BLOCK and mark it as in use, provided the requested size is no larger than the BLOCK size.
Signed-off-by: Wang Wensheng wangwensheng4@huawei.com --- include/uapi/linux/xshmem_framework.h | 1 + lib/xshmem/Makefile | 2 +- lib/xshmem/xshmem_blk.c | 115 ++++++++++++++++++++++++++++++++++ lib/xshmem/xshmem_framework.c | 2 + 4 files changed, 119 insertions(+), 1 deletion(-) create mode 100644 lib/xshmem/xshmem_blk.c
diff --git a/include/uapi/linux/xshmem_framework.h b/include/uapi/linux/xshmem_framework.h index 08ac069..ffb3546 100644 --- a/include/uapi/linux/xshmem_framework.h +++ b/include/uapi/linux/xshmem_framework.h @@ -11,6 +11,7 @@ struct xshm_reg_arg { int algo; unsigned int pool_size; + unsigned int block_size; /* used for blk algorithm */ unsigned int key_len; char *key; }; diff --git a/lib/xshmem/Makefile b/lib/xshmem/Makefile index e879f82..b7cf800 100644 --- a/lib/xshmem/Makefile +++ b/lib/xshmem/Makefile @@ -1,2 +1,2 @@ obj-m+=xshmem.o -xshmem-objs+=xshmem_fsc.o xshmem_framework.o +xshmem-objs+=xshmem_fsc.o xshmem_framework.o xshmem_blk.o diff --git a/lib/xshmem/xshmem_blk.c b/lib/xshmem/xshmem_blk.c new file mode 100644 index 0000000..a3d93a3 --- /dev/null +++ b/lib/xshmem/xshmem_blk.c @@ -0,0 +1,115 @@ +/* + * Copyright (C) Huawei Technologies Co., Ltd. 2021. All rights reserved. + * Author: Huawei OS Kernel Lab + * Create: Tue Jun 29 02:15:10 2021 + */ +#define pr_fmt(fmt) "XSHMEM_FSC: " fmt + +#include <linux/types.h> +#include <linux/printk.h> +#include <linux/slab.h> +#include <linux/module.h> + +#include "xshmem_framework.h" + +#define XSHMEM_BLK_ALIGN 16 + +struct blk_ctrl { + int pool_size; + int block_size; + + int total_block; + int next_free; + unsigned long map[0]; +}; + +static bool blk_algo_pool_same(struct xshm_pool *xp, struct xshm_reg_arg *arg) +{ + struct blk_ctrl *ctrl = xp->private; + + return ctrl->pool_size == arg->pool_size && ctrl->block_size == arg->block_size; +} + +static int blk_algo_pool_init(struct xshm_pool *xp, struct xshm_reg_arg *arg) +{ + int block_cnt; + struct blk_ctrl *ctrl; + + if (!IS_ALIGNED(arg->pool_size, XSHMEM_BLK_ALIGN) || + !IS_ALIGNED(arg->block_size, XSHMEM_BLK_ALIGN) || + arg->pool_size < arg->block_size || + !arg->pool_size) + return -EINVAL; + + block_cnt = arg->pool_size / arg->block_size; + /* should we use vmalloc here? 
*/ + ctrl = kzalloc(sizeof(*ctrl) + sizeof(long) * BITS_TO_LONGS(block_cnt), GFP_KERNEL); + if (!ctrl) { + pr_err("alloc memory for blk_ctrl failed\n"); + return -ENOMEM; + } + + ctrl->pool_size = arg->pool_size; + ctrl->block_size = arg->block_size; + ctrl->total_block = block_cnt; + ctrl->next_free = 0; + + xp->private = ctrl; + + return 0; +} + +static int blk_algo_pool_free(struct xshm_pool *xp) +{ + kfree(xp->private); + return 0; +} + +static int blk_algo_block_alloc(struct xshm_pool *xp, struct xshm_block *blk, u32 size) +{ + int idx; + struct blk_ctrl *ctrl = xp->private; + + if (size > ctrl->block_size) { + pr_err("the alloc size too big\n"); + return -EINVAL; + } + + idx = find_next_zero_bit(ctrl->map, ctrl->total_block, ctrl->next_free); + if (idx == ctrl->total_block) { + idx = find_next_zero_bit(ctrl->map, ctrl->next_free, 0); + if (idx == ctrl->next_free) + idx = -ENOSPC; + } + + if (idx < 0) { + pr_err("no free BLOCK left in the POOL\n"); + return idx; + } + + set_bit(idx, ctrl->map); + ctrl->next_free = (idx + 1) % ctrl->total_block; + + blk->private = (void *)(long)idx; + + return idx * ctrl->block_size; +} + +static int blk_algo_block_free(struct xshm_pool *xp, struct xshm_block *blk) +{ + int idx = (long)blk->private; + struct blk_ctrl *ctrl = xp->private; + + clear_bit(idx, ctrl->map); + return 0; +} + +struct xshm_pool_algo blk_algo = { + .num = XSHMEM_ALGO_BLOCK, + .name = "blk_algo", + .xshm_pool_same = blk_algo_pool_same, + .xshm_pool_init = blk_algo_pool_init, + .xshm_pool_free = blk_algo_pool_free, + .xshm_block_alloc = blk_algo_block_alloc, + .xshm_block_free = blk_algo_block_free, +}; diff --git a/lib/xshmem/xshmem_framework.c b/lib/xshmem/xshmem_framework.c index 61fe44a..74bce3b 100644 --- a/lib/xshmem/xshmem_framework.c +++ b/lib/xshmem/xshmem_framework.c @@ -684,6 +684,7 @@ static struct xshm_pool_algo empty_algo = { };
extern struct xshm_pool_algo fsc_algo; +extern struct xshm_pool_algo blk_algo;
static int __init xshmem_init(void) { @@ -700,6 +701,7 @@ static int __init xshmem_init(void) }
xshmem_register_algo(&fsc_algo); + xshmem_register_algo(&blk_algo); xshmem_register_algo(&empty_algo);
return 0;