From: Wang Wensheng <wangwensheng4@huawei.com>
The FSC algorithm organizes all free blocks into several free lists, indexed by the size of the free blocks.
On allocation, we take the first block from the lowest-indexed free list that can satisfy the requested size, split it, and return any remainder to the proper free list.
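
For illustration only (not part of this patch): a minimal userspace sketch of the size-class mapping, assuming the same fls()-style bucketing that fsc_size2type() uses below; __builtin_clz() stands in for the kernel's fls().

#include <stdio.h>

#define MIN_SHIFT 4 /* mirrors XSHMEM_FSC_MIN_SHIFT: 16-byte granularity */

/* Same computation as fsc_size2type(): index of the highest set bit,
 * shifted down so that a 16..31 byte block lands in list 0. */
static int size2type(unsigned int size)
{
        return (31 - __builtin_clz(size)) - MIN_SHIFT; /* == fls(size) - MIN_SHIFT - 1 */
}

int main(void)
{
        unsigned int sizes[] = { 16, 31, 32, 4096 };
        int i;

        for (i = 0; i < 4; i++)
                printf("size %-4u -> free_list[%d]\n", sizes[i], size2type(sizes[i]));
        /* prints: 16 -> 0, 31 -> 0, 32 -> 1, 4096 -> 8 */
        return 0;
}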
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
---
 lib/xshmem/Makefile           |   3 +-
 lib/xshmem/xshmem_framework.c |   7 +-
 lib/xshmem/xshmem_framework.h |   4 +-
 lib/xshmem/xshmem_fsc.c       | 267 ++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 276 insertions(+), 5 deletions(-)
 create mode 100644 lib/xshmem/xshmem_fsc.c

diff --git a/lib/xshmem/Makefile b/lib/xshmem/Makefile
index a60db51..e879f82 100644
--- a/lib/xshmem/Makefile
+++ b/lib/xshmem/Makefile
@@ -1 +1,2 @@
-obj-m+=xshmem_framework.o
+obj-m+=xshmem.o
+xshmem-objs+=xshmem_fsc.o xshmem_framework.o
diff --git a/lib/xshmem/xshmem_framework.c b/lib/xshmem/xshmem_framework.c
index 7209cc0..f6e0772 100644
--- a/lib/xshmem/xshmem_framework.c
+++ b/lib/xshmem/xshmem_framework.c
@@ -461,13 +461,13 @@ static int empty_algo_pool_free(struct xshm_pool *xp)
         return 0;
 }
 
-static int empty_algo_block_alloc(struct xshm_pool *xp, int size)
+static int empty_algo_block_alloc(struct xshm_pool *xp, u32 size)
 {
         pr_info("block_alloc_hook:pool_id:%d, alloc_size:%d\n", xp->pool_id, size);
         return 0;
 }
 
-static int empty_algo_block_free(struct xshm_pool *xp, int offset)
+static int empty_algo_block_free(struct xshm_pool *xp, u32 offset)
 {
         pr_info("block_free_hook:pool_id:%d, offset:%d\n", xp->pool_id, offset);
         return 0;
@@ -483,6 +483,8 @@ static struct xshm_pool_algo empty_algo = {
         .xshm_block_free = empty_algo_block_free,
 };
 
+extern struct xshm_pool_algo fsc_algo;
+
 static int __init xshmem_init(void)
 {
         int ret;
@@ -497,6 +499,7 @@ static int __init xshmem_init(void)
                 return ret;
         }
 
+        xshmem_register_algo(&fsc_algo);
         xshmem_register_algo(&empty_algo);
 
         return 0;
diff --git a/lib/xshmem/xshmem_framework.h b/lib/xshmem/xshmem_framework.h
index 7308ce4..c781e95 100644
--- a/lib/xshmem/xshmem_framework.h
+++ b/lib/xshmem/xshmem_framework.h
@@ -37,8 +37,8 @@ struct xshm_pool_algo {
         struct list_head algo_node;
         int (*xshm_pool_init)(struct xshm_pool *xp);
         int (*xshm_pool_free)(struct xshm_pool *xp);
-        int (*xshm_block_alloc)(struct xshm_pool *xp, int size);
-        int (*xshm_block_free)(struct xshm_pool *xp, int offset);
+        int (*xshm_block_alloc)(struct xshm_pool *xp, u32 size);
+        int (*xshm_block_free)(struct xshm_pool *xp, u32 offset);
 };
 
 int xshmem_register_algo(struct xshm_pool_algo *algo);
diff --git a/lib/xshmem/xshmem_fsc.c b/lib/xshmem/xshmem_fsc.c
new file mode 100644
index 0000000..bb01926
--- /dev/null
+++ b/lib/xshmem/xshmem_fsc.c
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) Huawei Technologies Co., Ltd. 2021. All rights reserved.
+ * Author: Huawei OS Kernel Lab
+ * Create: Fri Jun 18 07:38:11 2021
+ */
+#define pr_fmt(fmt) "XSHMEM_FSC: " fmt
+
+#include <linux/types.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include "xshmem_framework.h"
+
+#define XSHMEM_FSC_MAX_SHIFT 31U
+#define XSHMEM_FSC_MIN_SHIFT 4U
+#define XSHMEM_FSC_MAX_TYPE (XSHMEM_FSC_MAX_SHIFT - XSHMEM_FSC_MIN_SHIFT + 1) /* exclusive upper bound */
+#define XSHMEM_FSC_ALIGN (1U << XSHMEM_FSC_MIN_SHIFT)
+#define XSHMEM_FSC_MAX_ALLOC_SIZE (1U << XSHMEM_FSC_MAX_SHIFT) /* exclusive upper bound */
+
+/*
+ * The valid input size is at least 0b10000 (XSHMEM_FSC_ALIGN);
+ * valid return values are 0, 1, ..., XSHMEM_FSC_MAX_TYPE - 1.
+ */
+static int fsc_size2type(u32 size)
+{
+        return fls(size) - XSHMEM_FSC_MIN_SHIFT - 1;
+}
+
+/* the block region is [start, start + size) */
+struct fsc_block {
+        bool is_free;
+        u32 start;
+        u32 size;
+        struct list_head block_node;
+        struct list_head free_node;
+};
+
+struct fsc_ctrl {
+        u32 total_size;
+        u32 free_size;
+        DECLARE_BITMAP(free_map, XSHMEM_FSC_MAX_TYPE); /* bit0~bit27 represent type0~type27 */
+        struct list_head block_head;
+        struct list_head free_list[XSHMEM_FSC_MAX_TYPE];
+};
+
+/* insert the block into the proper free_list */
+static void fsc_insert_block(struct fsc_ctrl *ctrl, struct fsc_block *block)
+{
+        int type = fsc_size2type(block->size);
+
+        block->is_free = true;
+        list_add_tail(&block->free_node, &ctrl->free_list[type]);
+        ctrl->free_size += block->size;
+        set_bit(type, ctrl->free_map);
+}
+
+/* delete the block from its free_list */
+static void fsc_delete_block(struct fsc_ctrl *ctrl, struct fsc_block *block)
+{
+        int type = fsc_size2type(block->size);
+
+        block->is_free = false;
+        list_del(&block->free_node);
+        ctrl->free_size -= block->size;
+        if (list_empty(&ctrl->free_list[type]))
+                clear_bit(type, ctrl->free_map);
+}
+
+static int fsc_algo_pool_init(struct xshm_pool *xp)
+{
+        int i;
+        struct fsc_ctrl *ctrl;
+        struct fsc_block *block;
+
+        if (!IS_ALIGNED(xp->size, XSHMEM_FSC_ALIGN) || !xp->size) {
+                pr_err("input xshm_pool size invalid\n");
+                return -EINVAL;
+        }
+
+        ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+        block = kmalloc(sizeof(*block), GFP_KERNEL);
+        if (!ctrl || !block) {
+                kfree(ctrl);
+                kfree(block);
+                return -ENOMEM;
+        }
+
+        ctrl->total_size = xp->size;
+        ctrl->free_size = 0;
+        INIT_LIST_HEAD(&ctrl->block_head);
+        for (i = 0; i < ARRAY_SIZE(ctrl->free_list); i++)
+                INIT_LIST_HEAD(&ctrl->free_list[i]);
+
+        block->start = 0;
+        block->size = xp->size;
+        list_add_tail(&block->block_node, &ctrl->block_head);
+        fsc_insert_block(ctrl, block);
+
+        xp->private = ctrl;
+
+        return 0;
+}
+
+static int fsc_algo_pool_free(struct xshm_pool *xp)
+{
+        struct fsc_block *block, *tmp;
+        struct fsc_ctrl *ctrl = xp->private;
+
+        WARN(ctrl->total_size != ctrl->free_size, "Not all allocated blocks have been freed\n");
+
+        list_for_each_entry_safe(block, tmp, &ctrl->block_head, block_node) {
+                list_del(&block->block_node);
+                kfree(block);
+        }
+
+        kfree(ctrl);
+
+        return 0;
+}
+
+static struct fsc_block *find_free_block(struct fsc_ctrl *ctrl, u32 size)
+{
+        u32 type;
+        struct fsc_block *block;
+
+        type = fsc_size2type(size);
+        type = find_next_bit(ctrl->free_map, XSHMEM_FSC_MAX_TYPE, type + 1);
+        if (type == XSHMEM_FSC_MAX_TYPE) {
+                /* No list for a larger size class has a free block, so
+                 * traverse the list for the same size class and look for
+                 * one that is big enough. */
+                type = fsc_size2type(size);
+                list_for_each_entry(block, &ctrl->free_list[type], free_node)
+                        if (block->size >= size)
+                                return block;
+
+                pr_err("cannot find a xshmem_block\n");
+                return NULL;
+        } else
+                return list_first_entry(&ctrl->free_list[type], struct fsc_block, free_node);
+}
+
+static int split_new_block(struct fsc_ctrl *ctrl, struct fsc_block *block, u32 size)
+{
+        fsc_delete_block(ctrl, block);
+
+        if (block->size < size + XSHMEM_FSC_ALIGN)
+                /* Remaining space is not enough for a new block */
+                return block->start;
+        else {
+                struct fsc_block *new;
+
+                new = kmalloc(sizeof(*new), GFP_KERNEL);
+                if (!new) {
+                        fsc_insert_block(ctrl, block);
+                        pr_err("split: alloc new block memory failed\n");
+                        return -ENOMEM;
+                }
+                new->start = block->start + size;
+                new->size = block->size - size;
+                block->size = size;
+
+                list_add(&new->block_node, &block->block_node);
+                fsc_insert_block(ctrl, new);
+
+                return block->start;
+        }
+}
+
+static int fsc_algo_block_alloc(struct xshm_pool *xp, u32 size)
+{
+        u32 aligned_size;
+        struct fsc_block *block;
+        struct fsc_ctrl *ctrl = xp->private;
+
+        if (!size) {
+                pr_err("alloc size invalid\n");
+                return -EINVAL;
+        }
+
+        aligned_size = ALIGN(size, XSHMEM_FSC_ALIGN);
+        if (aligned_size < size || aligned_size >= XSHMEM_FSC_MAX_ALLOC_SIZE) {
+                pr_err("alloc size too big\n");
+                return -EOVERFLOW;
+        }
+
+        block = find_free_block(ctrl, aligned_size);
+        if (!block)
+                return -ENOSPC;
+
+        return split_new_block(ctrl, block, aligned_size);
+}
+
+static void check_and_combine_next_block(struct fsc_ctrl *ctrl, struct fsc_block *block)
+{
+        struct fsc_block *next;
+
+        if (list_is_last(&block->block_node, &ctrl->block_head))
+                return;
+
+        next = list_next_entry(block, block_node);
+        if (!next->is_free)
+                return;
+
+        fsc_delete_block(ctrl, next);
+        block->size += next->size;
+
+        list_del(&next->block_node);
+        kfree(next);
+}
+
+static void check_and_combine_prev_block(struct fsc_ctrl *ctrl, struct fsc_block *block)
+{
+        struct fsc_block *prev;
+
+        if (block->block_node.prev == &ctrl->block_head)
+                return;
+
+        prev = list_prev_entry(block, block_node);
+        if (!prev->is_free)
+                return;
+
+        fsc_delete_block(ctrl, prev);
+        block->size += prev->size;
+        block->start = prev->start;
+
+        list_del(&prev->block_node);
+        kfree(prev);
+}
+
+static int fsc_algo_block_free(struct xshm_pool *xp, u32 offset)
+{
+        bool found = false;
+        struct fsc_block *block;
+        struct fsc_ctrl *ctrl = xp->private;
+
+        list_for_each_entry(block, &ctrl->block_head, block_node)
+                if (block->start == offset) {
+                        found = true;
+                        break;
+                }
+
+        if (!found) {
+                pr_err("the block is not allocated\n");
+                return -EINVAL;
+        }
+
+        if (block->is_free) {
+                pr_err("free of an unallocated block\n");
+                return -EFAULT;
+        }
+
+        check_and_combine_next_block(ctrl, block);
+        check_and_combine_prev_block(ctrl, block);
+
+        fsc_insert_block(ctrl, block);
+
+        return 0;
+}
+
+struct xshm_pool_algo fsc_algo = {
+        .num = 1,
+        .name = "fsc_algo",
+        .xshm_pool_init = fsc_algo_pool_init,
+        .xshm_pool_free = fsc_algo_pool_free,
+        .xshm_block_alloc = fsc_algo_block_alloc,
+        .xshm_block_free = fsc_algo_block_free,
+};
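
For reviewers, a worked example of the intended behaviour (illustration only, not part of the patch), assuming the framework forwards allocation and free requests to these hooks unchanged, on a small 64-byte pool:

    fsc_algo.xshm_pool_init(xp)        -> one free block [0, 64) on free_list[2]
    fsc_algo.xshm_block_alloc(xp, 16)  -> returns offset 0; free block [16, 64) remains
    fsc_algo.xshm_block_alloc(xp, 20)  -> size rounded up to 32; returns offset 16; free block [48, 64) remains
    fsc_algo.xshm_block_free(xp, 0)    -> block [0, 16) goes back to free_list[0]
    fsc_algo.xshm_block_free(xp, 16)   -> coalesces with both neighbours; the pool is one free block [0, 64) again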