From: Wang Wensheng <wangwensheng4@huawei.com>
Add a /proc/xshmem directory that exposes information about the POOLs and TASKs.
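For example, reading the two new files might produce output along these
lines (the values are illustrative only; the layout follows the
seq_printf() calls in this patch):

  $ cat /proc/xshmem/pool_info
  POOL:0
      key:pool_0, refcnt:2, algo:fsc
      total_size: 0x100000, free_size: 0xff000

  $ cat /proc/xshmem/task_info
  TASK:1234
      POOL:0    0x1000   0x1000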
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
---
 lib/xshmem/xshmem_blk.c       |  20 ++++-
 lib/xshmem/xshmem_framework.c | 174 +++++++++++++++++++++++++++++++++++-------
 lib/xshmem/xshmem_framework.h |  11 ++-
 lib/xshmem/xshmem_fsc.c       |  18 ++++-
 4 files changed, 185 insertions(+), 38 deletions(-)
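For quick manual testing, a userspace helper along these lines can dump
both files (an illustrative sketch, not part of this patch; the files are
created with mode 0400, so it must run as root):

#include <stdio.h>

/* Hypothetical test helper: copy one proc file to stdout. */
static void dump(const char *path)
{
	char buf[256];
	FILE *fp = fopen(path, "r");

	if (!fp) {
		perror(path);
		return;
	}
	while (fgets(buf, sizeof(buf), fp))
		fputs(buf, stdout);
	fclose(fp);
}

int main(void)
{
	dump("/proc/xshmem/pool_info");
	dump("/proc/xshmem/task_info");
	return 0;
}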
diff --git a/lib/xshmem/xshmem_blk.c b/lib/xshmem/xshmem_blk.c
index 81012e2..b8ce7df 100644
--- a/lib/xshmem/xshmem_blk.c
+++ b/lib/xshmem/xshmem_blk.c
@@ -19,6 +19,7 @@ struct blk_ctrl {
 	int pool_size;
 	int block_size;
 	int total_block;
+	int free_block;
 	int next_free;
 	unsigned long map[0];
 };
@@ -53,6 +54,7 @@ static int blk_algo_pool_init(struct xshm_pool *xp, struct xshm_reg_arg *arg)
 	ctrl->block_size = arg->block_size;
 	ctrl->total_block = block_cnt;
 	ctrl->next_free = 0;
+	ctrl->free_block = block_cnt;
 
 	xp->private = ctrl;
@@ -65,9 +67,18 @@ static int blk_algo_pool_free(struct xshm_pool *xp)
 	return 0;
 }
 
-static long blk_algo_block_alloc(struct xshm_pool *xp, struct xshm_block *blk, unsigned long size)
+static void blk_algo_pool_show(struct xshm_pool *xp, struct seq_file *seq)
+{
+	struct blk_ctrl *ctrl = xp->private;
+
+	seq_printf(seq, "    pool_size:%#x, block_size:%#x, free_block:%d\n",
+		   ctrl->pool_size, ctrl->block_size, ctrl->free_block);
+}
+
+static int blk_algo_block_alloc(struct xshm_pool *xp, struct xshm_block *blk)
 {
 	long idx;
+	unsigned long size = blk->alloc_size;
 	struct blk_ctrl *ctrl = xp->private;
 
 	if (size > ctrl->block_size) {
@@ -89,10 +100,13 @@ static long blk_algo_block_alloc(struct xshm_pool *xp, struct xshm_block *blk, u
 	set_bit(idx, ctrl->map);
 	ctrl->next_free = (idx + 1) % ctrl->total_block;
+	ctrl->free_block--;
 
 	blk->private = (void *)(long)idx;
+	blk->offset = idx * ctrl->block_size;
+	blk->real_size = ctrl->block_size;
 
-	return idx * ctrl->block_size;
+	return 0;
 }
 
 static int blk_algo_block_free(struct xshm_pool *xp, struct xshm_block *blk)
 {
@@ -100,6 +114,7 @@ static int blk_algo_block_free(struct xshm_pool *xp, struct xshm_block *blk)
 	long idx = (long)blk->private;
 	struct blk_ctrl *ctrl = xp->private;
 
+	ctrl->free_block++;
 	clear_bit(idx, ctrl->map);
 	return 0;
 }
@@ -110,6 +125,7 @@ struct xshm_pool_algo blk_algo = {
 	.xshm_pool_same = blk_algo_pool_same,
 	.xshm_pool_init = blk_algo_pool_init,
 	.xshm_pool_free = blk_algo_pool_free,
+	.xshm_pool_show = blk_algo_pool_show,
 	.xshm_block_alloc = blk_algo_block_alloc,
 	.xshm_block_free = blk_algo_block_free,
 };
diff --git a/lib/xshmem/xshmem_framework.c b/lib/xshmem/xshmem_framework.c
index 97c44e3..0d4e819 100644
--- a/lib/xshmem/xshmem_framework.c
+++ b/lib/xshmem/xshmem_framework.c
@@ -17,6 +17,8 @@
 #include <linux/uaccess.h>
 #include <linux/jhash.h>
 #include <linux/rcupdate.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 
 #include "xshmem_framework.h"
 
@@ -59,27 +61,61 @@ struct xshm_task_block_cnt {
 	struct xshm_task_pool_node *node;
 };
 
-static void task_block_cnt_destroy(struct xshm_task_block_cnt *cnt)
-{
-	list_del(&cnt->node_list);
-	list_del(&cnt->blk_list);
-	kfree(cnt);
-}
-
 struct xshm_task_pool_node {
 	struct list_head task_node;	// list node in TASK
 	struct list_head pool_node;	// list node in POOL
 	struct xshm_task *task;
 	struct xshm_pool *pool;
+	atomic64_t alloc_size;
+	atomic64_t real_alloc_size;
 	/* list_head for all BLOCKs' refcnt in the POOL allocated by the TASK */
 	struct list_head list_head;
 };
 
+static void task_alloc_size_update(struct xshm_task_pool_node *node,
+				   struct xshm_block *blk, bool inc)
+{
+	if (inc) {
+		atomic64_add(blk->alloc_size, &node->alloc_size);
+		atomic64_add(blk->real_size, &node->real_alloc_size);
+	} else {
+		atomic64_sub(blk->alloc_size, &node->alloc_size);
+		atomic64_sub(blk->real_size, &node->real_alloc_size);
+	}
+}
+
+static void task_link_blk(struct xshm_task_pool_node *node,
+			  struct xshm_block *blk,
+			  struct xshm_task_block_cnt *cnt)
+{
+	cnt->refcnt = 1;
+	cnt->blk = blk;
+	cnt->node = node;
+	list_add_tail(&cnt->node_list, &node->list_head);
+	list_add_tail(&cnt->blk_list, &blk->list_head);
+
+	task_alloc_size_update(node, blk, true);
+}
+
+static void task_unlink_blk(struct xshm_task_block_cnt *cnt)
+{
+	list_del(&cnt->node_list);
+	list_del(&cnt->blk_list);
+
+	task_alloc_size_update(cnt->node, cnt->blk, false);
+
+	kfree(cnt);
+}
+
+static DEFINE_MUTEX(task_mutex);
+static LIST_HEAD(all_task);
+
 struct xshm_task {
 	pid_t pid;
 	int pool_count;
 	struct list_head list_head;
+	struct list_head list;
 	struct mutex task_mutex;
 };
 
@@ -98,6 +134,10 @@ static int xshmem_open(struct inode *inode, struct file *file)
 	file->private_data = ptask;
 
+	mutex_lock(&task_mutex);
+	list_add_tail(&ptask->list, &all_task);
+	mutex_unlock(&task_mutex);
+
 	return 0;
 }
 
@@ -133,6 +173,8 @@ static int xshmem_pool_attach(struct xshm_task *task, struct xshm_pool *xp)
 	node->task = task;
 	node->pool = xp;
+	atomic64_set(&node->alloc_size, 0);
+	atomic64_set(&node->real_alloc_size, 0);
 	list_add_tail(&node->task_node, &task->list_head);
 	INIT_LIST_HEAD(&node->list_head);
 
@@ -164,10 +206,11 @@ static void xshmem_task_clear_block(struct xshm_task_pool_node *node)
 		struct xshm_block *blk = cnt->blk;
 
 		blk->refcnt -= cnt->refcnt;
+
+		task_unlink_blk(cnt);
+
 		if (!blk->refcnt)
 			xshmem_block_destroy(xp, blk);
-
-		task_block_cnt_destroy(cnt);
 	}
 	mutex_unlock(&xp->xp_block_mutex);
 }
@@ -384,7 +427,7 @@ static struct xshm_block *xshmem_find_block(struct xshm_pool *xp, u32 offset)
 static int ioctl_xshmem_block_alloc(struct xshm_pool *xp, struct xshm_task_pool_node *node,
 				    struct xshm_block_arg *arg)
 {
-	long ret;
+	int ret;
 	struct xshm_block *blk;
 	struct xshm_task_block_cnt *cnt;
 
@@ -401,27 +444,23 @@ static int ioctl_xshmem_block_alloc(struct xshm_pool *xp, struct xshm_task_pool_
 		return -ENOMEM;
 	}
 
+	blk->alloc_size = arg->size;
 	mutex_lock(&xp->xp_block_mutex);
-	ret = xp->algo->xshm_block_alloc(xp, blk, arg->size);
+	ret = xp->algo->xshm_block_alloc(xp, blk);
 	if (ret < 0) {
 		kfree(blk);
 		kfree(cnt);
 	} else {
 		blk->refcnt = 1;
-		blk->offset = ret;
-		arg->offset = ret;
+		arg->offset = blk->offset;
 		INIT_LIST_HEAD(&blk->list_head);
 		list_add_tail(&blk->block_node, &xp->block_head);
 
-		cnt->refcnt = 1;
-		cnt->blk = blk;
-		cnt->node = node;
-		list_add_tail(&cnt->node_list, &node->list_head);
-		list_add_tail(&cnt->blk_list, &blk->list_head);
+		task_link_blk(node, blk, cnt);
 	}
 	mutex_unlock(&xp->xp_block_mutex);
 
-	return ret < 0 ? ret : 0;
+	return ret;
 }
 
 static struct xshm_task_block_cnt *
@@ -455,12 +494,7 @@ static int ioctl_xshmem_block_get(struct xshm_pool *xp, struct xshm_task_pool_no
 			return -ENOMEM;
 		}
 
-		cnt->refcnt = 1;
-		cnt->blk = blk;
-		cnt->node = node;
-
-		list_add_tail(&cnt->node_list, &node->list_head);
-		list_add_tail(&cnt->blk_list, &blk->list_head);
+		task_link_blk(node, blk, cnt);
 	} else
 		cnt->refcnt++;
 
@@ -495,7 +529,7 @@ static int ioctl_xshmem_block_put(struct xshm_pool *xp, struct xshm_task_pool_no
 	if (force_del) {
 		list_for_each_entry_safe(cnt, tmp, &blk->list_head, blk_list) {
 			blk->refcnt -= cnt->refcnt;
-			task_block_cnt_destroy(cnt);
+			task_unlink_blk(cnt);
 		}
 
 		WARN(blk->refcnt, "refcnt for BLOCK broken\n");
@@ -503,7 +537,7 @@ static int ioctl_xshmem_block_put(struct xshm_pool *xp, struct xshm_task_pool_no
 	} else {
 		cnt->refcnt--;
 		if (!cnt->refcnt)
-			task_block_cnt_destroy(cnt);
+			task_unlink_blk(cnt);
 
 		blk->refcnt--;
 		if (!blk->refcnt)
@@ -600,6 +634,10 @@ static int xshmem_release(struct inode *inode, struct file *file)
 	struct xshm_task_pool_node *node, *tmp;
 	struct xshm_task *task = file->private_data;
 
+	mutex_lock(&task_mutex);
+	list_del(&task->list);
+	mutex_unlock(&task_mutex);
+
 	list_for_each_entry_safe(node, tmp, &task->list_head, task_node) {
 		spin_lock(&node->pool->xp_task_spinlock);
 		list_del(&node->pool_node);
@@ -624,6 +662,79 @@ static struct file_operations xshmem_fops = {
 	.unlocked_ioctl = xshmem_ioctl,
 };
 
+static void per_task_info_show(struct seq_file *seq, struct xshm_task *task)
+{
+	struct xshm_task_pool_node *node;
+
+	mutex_lock(&task->task_mutex);
+
+	seq_printf(seq, "TASK:%d\n", task->pid);
+	list_for_each_entry(node, &task->list_head, task_node)
+		seq_printf(seq, "    POOL:%-4d %#-8llx %#-8llx\n", node->pool->pool_id,
+			   (u64)atomic64_read(&node->alloc_size),
+			   (u64)atomic64_read(&node->real_alloc_size));
+
+	mutex_unlock(&task->task_mutex);
+}
+
+static int task_info_show(struct seq_file *seq, void *offset)
+{
+	struct xshm_task *task;
+
+	mutex_lock(&task_mutex);
+	list_for_each_entry(task, &all_task, list)
+		per_task_info_show(seq, task);
+	mutex_unlock(&task_mutex);
+
+	return 0;
+}
+
+static int per_pool_info_show(int id, void *p, void *data)
+{
+	struct xshm_pool *xp = p;
+	struct seq_file *seq = data;
+
+	seq_printf(seq, "POOL:%d\n    key:%s, refcnt:%d, algo:%s\n",
+		   id, xp->key, atomic_read(&xp->refcnt), xp->algo->name);
+	if (xp->algo->xshm_pool_show) {
+		mutex_lock(&xp->xp_block_mutex);
+		xp->algo->xshm_pool_show(xp, seq);
+		mutex_unlock(&xp->xp_block_mutex);
+	}
+
+	return 0;
+}
+
+static int pool_info_show(struct seq_file *seq, void *offset)
+{
+	int ret;
+
+	mutex_lock(&xshmem_mutex);
+	ret = idr_for_each(&xshmem_idr, per_pool_info_show, seq);
+	mutex_unlock(&xshmem_mutex);
+
+	return ret;
+}
+
+static void __init xshmem_proc_fs_init(void)
+{
+	struct proc_dir_entry *entry;
+
+	entry = proc_mkdir("xshmem", NULL);
+	if (!entry) {
+		pr_err("create proc dir failed\n");
+		return;
+	}
+
+	if (!proc_create_single("task_info", 0400, entry, task_info_show))
+		pr_warn("task_info file create failed\n");
+
+	if (!proc_create_single("pool_info", 0400, entry, pool_info_show))
+		pr_warn("pool_info file create failed\n");
+
+	return;
+}
+
 int xshmem_register_algo(struct xshm_pool_algo *algo)
 {
 	struct xshm_pool_algo *tmp;
@@ -668,9 +779,11 @@ static int empty_algo_pool_free(struct xshm_pool *xp)
 	return 0;
 }
 
-static long empty_algo_block_alloc(struct xshm_pool *xp, struct xshm_block *blk, unsigned long size)
+static int empty_algo_block_alloc(struct xshm_pool *xp, struct xshm_block *blk)
 {
-	pr_info("block_alloc_hook:pool_id:%d, alloc_size:%lu\n", xp->pool_id, size);
+	pr_info("block_alloc_hook:pool_id:%d, alloc_size:%lu\n", xp->pool_id, blk->alloc_size);
+	blk->real_size = blk->alloc_size;
+	blk->offset = 0;
 	return 0;
 }
 
@@ -712,12 +825,15 @@ static int __init xshmem_init(void)
 	xshmem_register_algo(&blk_algo);
 	xshmem_register_algo(&empty_algo);
 
+	xshmem_proc_fs_init();
+
 	return 0;
 };
 module_init(xshmem_init);
 
 static void __exit xshmem_exit(void)
 {
+	remove_proc_subtree("xshmem", NULL);
 	misc_deregister(&xshmem_dev);
 	pr_info("module exit\n");
 }
diff --git a/lib/xshmem/xshmem_framework.h b/lib/xshmem/xshmem_framework.h
index a939a69..6234047 100644
--- a/lib/xshmem/xshmem_framework.h
+++ b/lib/xshmem/xshmem_framework.h
@@ -8,6 +8,7 @@
 #include <linux/types.h>
 #include <linux/xshmem_framework.h>
+#include <linux/seq_file.h>
 
 struct xshm_pool {
 	int pool_id;
@@ -32,9 +33,10 @@ struct xshm_pool {
 
 struct xshm_block {
 	int refcnt;
-	unsigned long offset;	/* the size of the block is not cared in the
-				   framework, the specified algorithm should
-				   manage it properly */
+	unsigned long offset;
+	unsigned long alloc_size;
+	unsigned long real_size;
+
 	struct list_head block_node;
 	void *private;
 
@@ -49,7 +51,8 @@ struct xshm_pool_algo {
 	bool (*xshm_pool_same)(struct xshm_pool *xp, struct xshm_reg_arg *arg);
 	int (*xshm_pool_init)(struct xshm_pool *xp, struct xshm_reg_arg *arg);
 	int (*xshm_pool_free)(struct xshm_pool *xp);
-	long (*xshm_block_alloc)(struct xshm_pool *xp, struct xshm_block *blk, unsigned long size);
+	void (*xshm_pool_show)(struct xshm_pool *xp, struct seq_file *seq);
+	int (*xshm_block_alloc)(struct xshm_pool *xp, struct xshm_block *blk);
 	int (*xshm_block_free)(struct xshm_pool *xp, struct xshm_block *blk);
 };
 
diff --git a/lib/xshmem/xshmem_fsc.c b/lib/xshmem/xshmem_fsc.c
index fac8b12..5e52da3 100644
--- a/lib/xshmem/xshmem_fsc.c
+++ b/lib/xshmem/xshmem_fsc.c
@@ -124,6 +124,13 @@ static int fsc_algo_pool_free(struct xshm_pool *xp)
 	return 0;
 }
 
+static void fsc_algo_pool_show(struct xshm_pool *xp, struct seq_file *seq)
+{
+	struct fsc_ctrl *ctrl = xp->private;
+
+	seq_printf(seq, "    total_size: %#x, free_size: %#x\n", ctrl->total_size, ctrl->free_size);
+}
+
 static struct fsc_block *find_free_block(struct fsc_ctrl *ctrl, u32 size)
 {
 	u32 type;
@@ -173,10 +180,10 @@ static int split_new_block(struct fsc_ctrl *ctrl, struct fsc_block *block, u32 s
 	}
 }
 
-static long fsc_algo_block_alloc(struct xshm_pool *xp, struct xshm_block *blk, unsigned long size)
+static int fsc_algo_block_alloc(struct xshm_pool *xp, struct xshm_block *blk)
 {
 	int ret;
-	u32 aligned_size;
+	u32 aligned_size, size = blk->alloc_size;
 	struct fsc_block *block;
 	struct fsc_ctrl *ctrl = xp->private;
 
@@ -196,9 +203,13 @@ static long fsc_algo_block_alloc(struct xshm_pool *xp, struct xshm_block *blk, u
 		return -ENOSPC;
 
 	ret = split_new_block(ctrl, block, aligned_size);
-	if (ret >= 0)
+	if (ret >= 0) { /* split success */
 		blk->private = block;
+		blk->offset = ret;
+		blk->real_size = block->size;
+		return 0;
+	}
 
 	return ret;
 }
@@ -264,6 +275,7 @@ struct xshm_pool_algo fsc_algo = {
 	.xshm_pool_same = fsc_algo_pool_same,
 	.xshm_pool_init = fsc_algo_pool_init,
 	.xshm_pool_free = fsc_algo_pool_free,
+	.xshm_pool_show = fsc_algo_pool_show,
 	.xshm_block_alloc = fsc_algo_block_alloc,
 	.xshm_block_free = fsc_algo_block_free,
 };