From: Zhou Guanghui <zhouguanghui1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5DS9S
CVE: NA
-------------------------------------------------
struct sp_mapping is used to manage the address space of a shared pool. A normal address space is created during share pool initialization and is used for memory allocations from the current shared pool.
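For context, the sketch below (userspace-only, not part of this patch) illustrates the address-window layout that sp_mapping_range_init() in the diff establishes: a normal mapping spans one shared window for every device index, while a DVPP mapping without a configured device address gets a per-device window above it. MAX_DEVID, the constants and the helper names here are placeholders chosen for illustration, not the kernel's definitions.

  /*
   * Userspace-only illustration of the sp_mapping range layout.
   * The constants and MAX_DEVID are placeholders, not kernel values.
   */
  #include <stdio.h>

  #define MAX_DEVID        4
  #define SPM_DVPP         0x1   /* mirrors SP_MAPPING_DVPP   */
  #define SPM_NORMAL       0x2   /* mirrors SP_MAPPING_NORMAL */

  #define POOL_START       0x100000000000UL  /* stands in for MMAP_SHARE_POOL_START     */
  #define POOL_16G_START   0x140000000000UL  /* stands in for MMAP_SHARE_POOL_16G_START */

  struct range { unsigned long start, end; };

  /* Same arithmetic as the default branch of sp_mapping_range_init(). */
  static struct range mapping_range(unsigned long flag, int dev)
  {
  	struct range r;

  	if (flag & SPM_NORMAL) {
  		/* every device index sees the same normal window */
  		r.start = POOL_START;
  		r.end = POOL_16G_START;
  	} else {
  		/* per-device DVPP window above the normal range */
  		r.start = POOL_16G_START + dev * POOL_16G_START;
  		r.end = r.start + POOL_16G_START;
  	}
  	return r;
  }

  int main(void)
  {
  	int i;

  	for (i = 0; i < MAX_DEVID; i++) {
  		struct range n = mapping_range(SPM_NORMAL, i);
  		struct range d = mapping_range(SPM_DVPP, i);

  		printf("dev %d: normal [%#lx, %#lx) dvpp [%#lx, %#lx)\n",
  		       i, n.start, n.end, d.start, d.end);
  	}
  	return 0;
  }

Built with an ordinary C compiler, this prints one normal window plus one per-device DVPP window for each device index, matching the ranges the new sp_mapping hands out.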
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 include/linux/share_pool.h | 18 +++++++++++++
 mm/share_pool.c            | 52 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 70 insertions(+)
diff --git a/include/linux/share_pool.h b/include/linux/share_pool.h
index 022e61bb6ce4..654dc8cc2922 100644
--- a/include/linux/share_pool.h
+++ b/include/linux/share_pool.h
@@ -101,6 +101,17 @@ struct sp_proc_stat {
 	atomic64_t k2u_size;
 };
 
+/*
+ * address space management
+ */
+struct sp_mapping {
+	unsigned long flag;
+	atomic_t user;
+	unsigned long start[MAX_DEVID];
+	unsigned long end[MAX_DEVID];
+	struct rb_root area_root;
+};
+
 /* Processes in the same sp_group can share memory.
  * Memory layout for share pool:
  *
@@ -142,6 +153,8 @@ struct sp_group {
 	atomic_t use_count;
 	/* protect the group internal elements, except spa_list */
 	struct rw_semaphore rw_lock;
+	struct sp_mapping *dvpp;
+	struct sp_mapping *normal;
 };
 
 /* a per-process(per mm) struct which manages a sp_group_node list */
@@ -155,6 +168,11 @@ struct sp_group_master {
 	struct list_head node_list;
 	struct mm_struct *mm;
 	struct sp_proc_stat *stat;
+	/*
+	 * Used to apply for the shared pool memory of the current process.
+	 * For example, sp_alloc non-share memory or k2task.
+	 */
+	struct sp_group *local;
 };
 
 /*
diff --git a/mm/share_pool.c b/mm/share_pool.c
index 076243713f83..6c70ff72b7af 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -130,6 +130,48 @@ static DECLARE_RWSEM(sp_spg_stat_sem);
 /* for kthread buff_module_guard_work */
 static struct sp_proc_stat kthread_stat;
 
+#define SP_MAPPING_DVPP		0x1
+#define SP_MAPPING_NORMAL	0x2
+static struct sp_mapping *sp_mapping_normal;
+
+static void sp_mapping_range_init(struct sp_mapping *spm)
+{
+	int i;
+
+	for (i = 0; i < MAX_DEVID; i++) {
+		if (spm->flag & SP_MAPPING_NORMAL) {
+			spm->start[i] = MMAP_SHARE_POOL_START;
+			spm->end[i] = MMAP_SHARE_POOL_16G_START;
+			continue;
+		}
+
+		if (!is_sp_dev_addr_enabled(i)) {
+			spm->start[i] = MMAP_SHARE_POOL_16G_START +
+					i * MMAP_SHARE_POOL_16G_START;
+			spm->end[i] = spm->start[i] + MMAP_SHARE_POOL_16G_START;
+		} else {
+			spm->start[i] = sp_dev_va_start[i];
+			spm->end[i] = spm->start[i] + sp_dev_va_size[i];
+		}
+	}
+}
+
+static struct sp_mapping *sp_mapping_create(unsigned long flag)
+{
+	struct sp_mapping *spm;
+
+	spm = kzalloc(sizeof(struct sp_mapping), GFP_KERNEL);
+	if (!spm)
+		return ERR_PTR(-ENOMEM);
+
+	spm->flag = flag;
+	sp_mapping_range_init(spm);
+	atomic_set(&spm->user, 0);
+	spm->area_root = RB_ROOT;
+
+	return spm;
+}
+
 /* The caller must hold sp_group_sem */
 static struct sp_group_master *sp_init_group_master_locked(
 		struct mm_struct *mm, bool *exist)
@@ -4432,12 +4474,22 @@ static void __init sp_device_number_detect(void)
 
 static int __init share_pool_init(void)
 {
+	if (!sp_is_enabled())
+		return 0;
+
 	/* lockless, as init kthread has no sp operation else */
 	spg_none = create_spg(GROUP_NONE);
 	/* without free spg_none, not a serious problem */
 	if (IS_ERR(spg_none) || !spg_none)
 		goto fail;
 
+	sp_mapping_normal = sp_mapping_create(SP_MAPPING_NORMAL);
+	if (IS_ERR(sp_mapping_normal)) {
+		sp_group_drop(spg_none);
+		goto fail;
+	}
+	atomic_inc(&sp_mapping_normal->user);
+
 	sp_device_number_detect();
 	proc_sharepool_init();