From: Zhou Guanghui zhouguanghui1@huawei.com
ascend inclusion category: feature bugzilla: NA CVE: NA
--------------------------------------------------
If memcg oom_kill_disable is false (i.e. the OOM killer is enabled for the memcg) while sysctl enable_oom_killer is false (i.e. the OOM killer is disabled globally), the memory allocated for this memcg can exceed the limit. This means that the memcg cannot limit the memory usage.
Therefore, before invoking the memcg OOM killer, ensure that sysctl enable_oom_killer is not false. Otherwise, wait for memory resources to be freed.
Signed-off-by: Zhou Guanghui zhouguanghui1@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- mm/memcontrol.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6ad8fafcdf87..aa8f6f75eea1 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1942,7 +1942,11 @@ bool mem_cgroup_oom_synchronize(bool handle) if (locked) mem_cgroup_oom_notify(memcg);
- if (locked && !memcg->oom_kill_disable) { + if (locked && +#ifdef CONFIG_ASCEND_OOM + sysctl_enable_oom_killer != 0 && +#endif + !memcg->oom_kill_disable) { mem_cgroup_unmark_under_oom(memcg); finish_wait(&memcg_oom_waitq, &owait.wait); mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
From: Zhou Guanghui zhouguanghui1@huawei.com
ascend inclusion category: feature bugzilla: NA CVE: NA
-------------------------------------
The function of kmem cgroup is enabled by default for ascend.
Signed-off-by: Zhou Guanghui zhouguanghui1@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- arch/arm64/mm/init.c | 5 +++++ mm/memcontrol.c | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index a6b9048ea1a4..0773205251ca 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -789,6 +789,11 @@ void ascend_enable_all_features(void) if (IS_ENABLED(CONFIG_PMU_WATCHDOG)) pmu_nmi_enable = true;
+ if (IS_ENABLED(CONFIG_MEMCG_KMEM)) { + extern bool cgroup_memory_nokmem; + cgroup_memory_nokmem = false; + } + #ifdef CONFIG_ARM64_PSEUDO_NMI enable_pseudo_nmi = true; #endif diff --git a/mm/memcontrol.c b/mm/memcontrol.c index aa8f6f75eea1..dfc09dcced67 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -86,7 +86,7 @@ struct mem_cgroup *root_mem_cgroup __read_mostly; static bool cgroup_memory_nosocket;
/* Kernel memory accounting disabled */ -static bool cgroup_memory_nokmem = true; +bool cgroup_memory_nokmem = true;
/* Whether the swap controller is active */ #ifdef CONFIG_MEMCG_SWAP
From: Cheng Jian cj.chengjian@huawei.com
hulk inclusion category: bugfix bugzilla: 38261 CVE: NA
---------------------------
If we disable CONFIG_SMP, try_steal will lose its definition, resulting in a compile error as follows.
kernel/sched/fair.c: In function ‘pick_next_task_fair’: kernel/sched/fair.c:7001:15: error: implicit declaration of function ‘try_steal’ [-Werror=implicit-function-declaration] new_tasks = try_steal(rq, rf); ^~~~~~~~~
We can use allnoconfig to reproduce this problem.
Signed-off-by: Cheng Jian cj.chengjian@huawei.com Reviewed-by: Bin Li huawei.libin@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- kernel/sched/fair.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 97e956012a60..7258a270ea15 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3901,6 +3901,7 @@ static inline int idle_balance(struct rq *rq, struct rq_flags *rf)
static inline void rq_idle_stamp_update(struct rq *rq) {} static inline void rq_idle_stamp_clear(struct rq *rq) {} +static inline int try_steal(struct rq *this_rq, struct rq_flags *rf) { return 0; } static inline void overload_clear(struct rq *rq) {} static inline void overload_set(struct rq *rq) {}
From: Tang Yizhou tangyizhou@huawei.com
ascend inclusion category: feature bugzilla: 46867 CVE: NA
------------------------------------------------- Statistical functions of sharepool are separated in several places and should be put together.
Signed-off-by: Tang Yizhou tangyizhou@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- mm/share_pool.c | 86 ++++++++++++++++++++++++++----------------------- 1 file changed, 45 insertions(+), 41 deletions(-)
diff --git a/mm/share_pool.c b/mm/share_pool.c index 4fa539e452ef..d5d3d40d49e5 100644 --- a/mm/share_pool.c +++ b/mm/share_pool.c @@ -2393,6 +2393,8 @@ static int __init enable_share_k2u_to_group(char *s) } __setup("enable_sp_share_k2u_spg", enable_share_k2u_to_group);
+/*** Statistical and maintenance functions ***/ + int proc_sp_group_state(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { @@ -2418,35 +2420,6 @@ int proc_sp_group_state(struct seq_file *m, struct pid_namespace *ns, return 0; }
-static int idr_proc_stat_cb(int id, void *p, void *data) -{ - struct sp_group *spg; - struct sp_proc_stat *stat = p; - struct seq_file *seq = data; - - mutex_lock(&sp_mutex); - spg = __sp_find_spg(id, SPG_ID_DEFAULT); - if (spg_valid(spg)) { - seq_printf(seq, "%-12d %-10d %-18ld\n", - id, spg->id, byte2kb(stat->amount)); - } - mutex_unlock(&sp_mutex); - - return 0; -} - -static int proc_stat_show(struct seq_file *seq, void *offset) -{ - /* print the file header */ - seq_printf(seq, "%-12s %-10s %-18s\n", - "Process ID", "Group ID", "Aligned Apply(KB)"); - /* print kthread buff_module_guard_work */ - seq_printf(seq, "%-12s %-10s %-18ld\n", - "guard", "-", byte2kb(kthread_stat.amount)); - idr_for_each(&sp_stat_idr, idr_proc_stat_cb, seq); - return 0; -} - static void rb_spa_stat_show(struct seq_file *seq) { struct rb_node *node; struct sp_area *spa; @@ -2565,6 +2538,49 @@ static int spa_stat_show(struct seq_file *seq, void *offset) return 0; }
+static int idr_proc_stat_cb(int id, void *p, void *data) +{ + struct sp_group *spg; + struct sp_proc_stat *stat = p; + struct seq_file *seq = data; + + mutex_lock(&sp_mutex); + spg = __sp_find_spg(id, SPG_ID_DEFAULT); + if (spg_valid(spg)) { + seq_printf(seq, "%-12d %-10d %-18ld\n", + id, spg->id, byte2kb(stat->amount)); + } + mutex_unlock(&sp_mutex); + + return 0; +} + +static int proc_stat_show(struct seq_file *seq, void *offset) +{ + /* print the file header */ + seq_printf(seq, "%-12s %-10s %-18s\n", + "Process ID", "Group ID", "Aligned Apply(KB)"); + /* print kthread buff_module_guard_work */ + seq_printf(seq, "%-12s %-10s %-18ld\n", + "guard", "-", byte2kb(kthread_stat.amount)); + idr_for_each(&sp_stat_idr, idr_proc_stat_cb, seq); + return 0; +} + +/* + * Called by proc_root_init() to initialize the /proc/sharepool subtree + */ +void __init proc_sharepool_init(void) +{ + if (!proc_mkdir("sharepool", NULL)) + return; + + proc_create_single_data("sharepool/proc_stat", 0400, NULL, proc_stat_show, NULL); + proc_create_single_data("sharepool/spa_stat", 0400, NULL, spa_stat_show, NULL); +} + +/*** End of statistical and maintenance functions ***/ + vm_fault_t sharepool_no_page(struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, @@ -2653,18 +2669,6 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm, } EXPORT_SYMBOL(sharepool_no_page);
-/* - * Called by proc_root_init() to initialize the /proc/sharepool subtree - */ -void __init proc_sharepool_init(void) -{ - if (!proc_mkdir("sharepool", NULL)) - return; - - proc_create_single_data("sharepool/proc_stat", S_IRUSR, NULL, proc_stat_show, NULL); - proc_create_single_data("sharepool/spa_stat", S_IRUSR, NULL, spa_stat_show, NULL); -} - struct page *sp_alloc_pages(struct vm_struct *area, gfp_t mask, unsigned int page_order, int node) {
From: Tang Yizhou tangyizhou@huawei.com
ascend inclusion category: feature bugzilla: 46867 CVE: NA
------------------------------------------------- spg_overview() will show how much normal page memory and hugepage memory is allocated for an sp_group.
Signed-off-by: Tang Yizhou tangyizhou@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- include/linux/share_pool.h | 12 ++++++++---- mm/share_pool.c | 28 ++++++++++++++++++++++------ 2 files changed, 30 insertions(+), 10 deletions(-)
diff --git a/include/linux/share_pool.h b/include/linux/share_pool.h index 3c5a41ae5bd1..eb0358bb6633 100644 --- a/include/linux/share_pool.h +++ b/include/linux/share_pool.h @@ -66,6 +66,8 @@ extern bool vmap_allow_huge; */ struct sp_group { int id; + /* record the number of hugepage allocation failures */ + int hugepage_failures; struct file *file; struct file *file_hugetlb; /* list head of processes */ @@ -76,12 +78,14 @@ struct sp_group { atomic_t spa_num; /* total size of all sp_area from sp_alloc and k2u(spg) */ atomic64_t size; - /* record the number of hugepage allocation failures */ - int hugepage_failures; - /* is_alive == false means it's being destroyed */ - bool is_alive; + /* total size of all sp_area from sp_alloc normal page */ + atomic64_t alloc_nsize; + /* total size of all sp_area from sp_alloc hugepage */ + atomic64_t alloc_hsize; /* we define the creator process of a sp_group as owner */ struct task_struct *owner; + /* is_alive == false means it's being destroyed */ + bool is_alive; /* dvpp_multi_spaces == true means multiple dvpp 16G spaces are set */ bool dvpp_multi_spaces; unsigned long dvpp_va_start; diff --git a/mm/share_pool.c b/mm/share_pool.c index d5d3d40d49e5..20da5a4d2735 100644 --- a/mm/share_pool.c +++ b/mm/share_pool.c @@ -52,8 +52,8 @@ #define spg_valid(spg) ((spg) && ((spg)->is_alive == true)) #define ESPGMMEXIT 4000
-#define byte2kb(size) ((size) / 1024) -#define byte2mb(size) ((size) / 1024 / 1024) +#define byte2kb(size) ((size) >> 10) +#define byte2mb(size) ((size) >> 20)
/* mdc scene hack */ int enable_mdc_default_group; @@ -366,6 +366,8 @@ static struct sp_group *find_or_alloc_sp_group(int spg_id) spg->id = spg_id; atomic_set(&spg->spa_num, 0); atomic64_set(&spg->size, 0); + atomic64_set(&spg->alloc_nsize, 0); + atomic64_set(&spg->alloc_hsize, 0); spg->is_alive = true; spg->hugepage_failures = 0; spg->dvpp_multi_spaces = false; @@ -925,6 +927,12 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags, if (spa->spg) { atomic_inc(&spg->spa_num); atomic64_add(size, &spg->size); + if (type == SPA_TYPE_ALLOC) { + if (spa->is_hugepage) + atomic64_add(size, &spg->alloc_hsize); + else + atomic64_add(size, &spg->alloc_nsize); + } atomic_inc(&spg_stat.spa_total_num); atomic64_add(size, &spg_stat.spa_total_size); list_add_tail(&spa->link, &spg->spa_list); @@ -1003,6 +1011,12 @@ static void sp_free_area(struct sp_area *spa) if (spa->spg) { atomic_dec(&spa->spg->spa_num); atomic64_sub(spa->real_size, &spa->spg->size); + if (spa->type == SPA_TYPE_ALLOC) { + if (spa->is_hugepage) + atomic64_sub(spa->real_size, &spa->spg->alloc_hsize); + else + atomic64_sub(spa->real_size, &spa->spg->alloc_nsize); + } atomic_dec(&spg_stat.spa_total_num); atomic64_sub(spa->real_size, &spg_stat.spa_total_size); list_del(&spa->link); @@ -2509,16 +2523,18 @@ static int idr_spg_stat_cb(int id, void *p, void *data) struct sp_group *spg = p; struct seq_file *seq = data;
- seq_printf(seq, "Group %-10d size: %13ld KB, spa num: %d.\n", - id, byte2kb(atomic64_read(&spg->size)), - atomic_read(&spg->spa_num)); + seq_printf(seq, "Group %6d size: %ld KB, spa num: %d, normal alloc: %ld KB, " + "huge alloc: %ld KB\n", + id, byte2kb(atomic64_read(&spg->size)), atomic_read(&spg->spa_num), + byte2kb(atomic64_read(&spg->alloc_nsize)), + byte2kb(atomic64_read(&spg->alloc_hsize)));
return 0; }
static void spg_overview_show(struct seq_file *seq) { - seq_printf(seq, "Share pool total size: %13ld KB, spa total num: %d.\n", + seq_printf(seq, "Share pool total size: %ld KB, spa total num: %d.\n", byte2kb(atomic64_read(&spg_stat.spa_total_size)), atomic_read(&spg_stat.spa_total_num)); mutex_lock(&sp_mutex);
From: Tang Yizhou tangyizhou@huawei.com
ascend inclusion category: feature bugzilla: 46867 CVE: NA
------------------------------------------------- Some new fields will be shown in /proc/sharepool/proc_stat, including: 1. VIRT, the virtual memory amount. 2. RES, resident memory size. 3. Shm, resident shared memory size which is a part of RES. 4. Non-SP_RES, resident memory size excluding share pool memory. 5. Non-SP_Shm, resident shared memory size excluding share pool memory.
Signed-off-by: Tang Yizhou tangyizhou@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- mm/share_pool.c | 72 ++++++++++++++++++++++++++++++++++--------------- 1 file changed, 51 insertions(+), 21 deletions(-)
diff --git a/mm/share_pool.c b/mm/share_pool.c index 20da5a4d2735..e55f0d8755bc 100644 --- a/mm/share_pool.c +++ b/mm/share_pool.c @@ -54,6 +54,7 @@
#define byte2kb(size) ((size) >> 10) #define byte2mb(size) ((size) >> 20) +#define page2kb(page_num) ((page_num) << (PAGE_SHIFT - 10))
/* mdc scene hack */ int enable_mdc_default_group; @@ -83,12 +84,13 @@ static DEFINE_IDR(sp_stat_idr);
/* per process memory usage statistics indexed by tgid */ struct sp_proc_stat { + struct mm_struct *mm; char comm[TASK_COMM_LEN]; /* * alloc amount minus free amount, may be negative when freed by * another task in the same sp group. */ - long amount; + long alloc_size; };
/* for kthread buff_module_guard_work */ @@ -100,7 +102,8 @@ static struct sp_proc_stat kthread_stat = {0}; */ static struct sp_proc_stat *sp_init_proc_stat(struct task_struct *tsk) { struct sp_proc_stat *stat; - int id = tsk->mm->sp_stat_id; + struct mm_struct *mm = tsk->mm; + int id = mm->sp_stat_id; int tgid = tsk->tgid; int ret;
@@ -118,7 +121,8 @@ static struct sp_proc_stat *sp_init_proc_stat(struct task_struct *tsk) { return ERR_PTR(-ENOMEM); }
- stat->amount = 0; + stat->alloc_size = 0; + stat->mm = mm; get_task_comm(stat->comm, tsk); ret = idr_alloc(&sp_stat_idr, stat, tgid, tgid + 1, GFP_KERNEL); if (ret < 0) { @@ -128,7 +132,7 @@ static struct sp_proc_stat *sp_init_proc_stat(struct task_struct *tsk) { return ERR_PTR(ret); }
- tsk->mm->sp_stat_id = ret; + mm->sp_stat_id = ret; return stat; }
@@ -746,11 +750,11 @@ void sp_group_post_exit(struct mm_struct *mm) * * We decide to print a info when seeing both of the scenarios. */ - if (stat && stat->amount != 0) + if (stat && stat->alloc_size != 0) pr_info("share pool: process %s(%d) of sp group %d exits. " "It applied %ld aligned KB\n", stat->comm, mm->sp_stat_id, - mm->sp_group->id, byte2kb(stat->amount)); + mm->sp_group->id, byte2kb(stat->alloc_size));
idr_remove(&sp_stat_idr, mm->sp_stat_id);
@@ -1200,11 +1204,11 @@ int sp_free(unsigned long addr)
/* pointer stat may be invalid because of kthread buff_module_guard_work */ if (current->mm == NULL) { - kthread_stat.amount -= spa->real_size; + kthread_stat.alloc_size -= spa->real_size; } else { stat = idr_find(&sp_stat_idr, current->mm->sp_stat_id); if (stat) - stat->amount -= spa->real_size; + stat->alloc_size -= spa->real_size; else BUG(); } @@ -1445,7 +1449,7 @@ void *sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id) if (!IS_ERR(p)) { stat = idr_find(&sp_stat_idr, current->mm->sp_stat_id); if (stat) - stat->amount += size_aligned; + stat->alloc_size += size_aligned; }
out: @@ -2424,10 +2428,10 @@ int proc_sp_group_state(struct seq_file *m, struct pid_namespace *ns, mutex_unlock(&sp_mutex); return 0; } - seq_printf(m, "%-10s %-18s %-15s\n", - "Group ID", "Aligned Apply(KB)", "HugePage Fails"); - seq_printf(m, "%-10d %-18ld %-15d\n", - spg->id, byte2kb(stat->amount), spg->hugepage_failures); + seq_printf(m, "%-8s %-9s %-13s\n", + "Group_ID", "SP_ALLOC", "HugePage Fail"); + seq_printf(m, "%-8d %-9ld %-13d\n", + spg->id, byte2kb(stat->alloc_size), spg->hugepage_failures); } mutex_unlock(&sp_mutex);
@@ -2559,13 +2563,36 @@ static int idr_proc_stat_cb(int id, void *p, void *data) struct sp_group *spg; struct sp_proc_stat *stat = p; struct seq_file *seq = data; + struct mm_struct *mm = stat->mm; + unsigned long anon, file, shmem, total_rss; + /* + * non_sp_res: resident memory size excluding share pool memory + * non_sp_shm: resident shared memory size excluding share pool + * memory + */ + long sp_alloc_nsize, non_sp_res, non_sp_shm;
mutex_lock(&sp_mutex); spg = __sp_find_spg(id, SPG_ID_DEFAULT); - if (spg_valid(spg)) { - seq_printf(seq, "%-12d %-10d %-18ld\n", - id, spg->id, byte2kb(stat->amount)); - } + if (!spg_valid(spg) || !mmget_not_zero(mm)) + goto out_unlock; + + sp_alloc_nsize = byte2kb(atomic64_read(&spg->alloc_nsize)); + anon = get_mm_counter(mm, MM_ANONPAGES); + file = get_mm_counter(mm, MM_FILEPAGES); + shmem = get_mm_counter(mm, MM_SHMEMPAGES); + total_rss = anon + file + shmem; + non_sp_res = page2kb(total_rss) - sp_alloc_nsize; + non_sp_shm = page2kb(shmem) - sp_alloc_nsize; + non_sp_shm = non_sp_shm < 0 ? 0 : non_sp_shm; /* to be investigated */ + + seq_printf(seq, "%-8d %-8d %-9ld %-10ld %-8ld %-7ld %-7ld %-10ld\n", + id, spg->id, byte2kb(stat->alloc_size), non_sp_res, + page2kb(mm->total_vm), page2kb(total_rss), page2kb(shmem), + non_sp_shm); + mmput(mm); + +out_unlock: mutex_unlock(&sp_mutex);
return 0; @@ -2573,12 +2600,15 @@ static int idr_proc_stat_cb(int id, void *p, void *data)
static int proc_stat_show(struct seq_file *seq, void *offset) { + spg_overview_show(seq); + spa_overview_show(seq); /* print the file header */ - seq_printf(seq, "%-12s %-10s %-18s\n", - "Process ID", "Group ID", "Aligned Apply(KB)"); + seq_printf(seq, "%-8s %-8s %-9s %-10s %-8s %-7s %-7s %-10s\n", + "PID", "Group_ID", "SP_ALLOC", "Non-SP_RES", "VIRT", "RES", + "Shm", "Non-SP_Shm"); /* print kthread buff_module_guard_work */ - seq_printf(seq, "%-12s %-10s %-18ld\n", - "guard", "-", byte2kb(kthread_stat.amount)); + seq_printf(seq, "%-8s %-8s %-9ld\n", + "guard", "-", byte2kb(kthread_stat.alloc_size)); idr_for_each(&sp_stat_idr, idr_proc_stat_cb, seq); return 0; }
From: Tang Yizhou tangyizhou@huawei.com
ascend inclusion category: feature bugzilla: 46867 CVE: NA
------------------------------------------------- 1. Addresses can't be printed for security issue. 2. Some small printing fixes.
Signed-off-by: Tang Yizhou tangyizhou@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- mm/share_pool.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/mm/share_pool.c b/mm/share_pool.c index e55f0d8755bc..88afaeb999ba 100644 --- a/mm/share_pool.c +++ b/mm/share_pool.c @@ -635,7 +635,7 @@ int sp_group_add_task(int pid, int spg_id) if (printk_ratelimit()) { pr_warn("share pool: task add group failed when mm populate " "failed (potential no enough memory): %d " - "spa flag is %d\n", ret, spa->type); + "spa type is %d\n", ret, spa->type); } sp_munmap_task_areas(mm, spa->link.next); spin_lock(&sp_area_lock); @@ -1516,8 +1516,10 @@ static unsigned long sp_remap_kva_to_vma(unsigned long kva, struct sp_area *spa, unsigned long addr, buf, offset;
if (spg_valid(spa->spg)) { + /* k2u to group */ file = spa_file(spa); } else { + /* k2u to task */ if (spa->is_hugepage) { file = hugetlb_file_setup(HUGETLB_ANON_FILE, spa_size(spa), VM_NORESERVE, &user, HUGETLB_ANONHUGE_INODE, hsize_log); @@ -1550,7 +1552,7 @@ static unsigned long sp_remap_kva_to_vma(unsigned long kva, struct sp_area *spa, if (ret) { do_munmap(mm, ret_addr, spa_size(spa), NULL); pr_err("share pool: remap vmalloc hugepage failed, " - "ret %d, kva is %lx\n", ret, kva); + "ret %d, kva is %pK\n", ret, (void *)kva); ret_addr = ret; goto put_mm; } @@ -1721,7 +1723,7 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size, } else if (ret == 0) { /* do nothing */ } else { - pr_err("it is not vmalloc address\n"); + pr_err("share pool: k2u kva not vmalloc address\n"); return ERR_PTR(ret); } /* aligned down kva is convenient for caller to start with any valid kva */ @@ -1749,7 +1751,7 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size, }
if (!vmalloc_area_set_flag(spa, kva_aligned, VM_SHAREPOOL)) { - pr_err("%s: the kva %ld is not valid\n", __func__, kva_aligned); + pr_err("share pool: %s: the kva %pK is not valid\n", __func__, (void *)kva_aligned); goto out; }
@@ -1778,7 +1780,7 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size, }
if (!vmalloc_area_set_flag(spa, kva_aligned, VM_SHAREPOOL)) { - pr_err("%s: the kva %ld is not valid\n", __func__, kva_aligned); + pr_err("share pool: %s: the kva %pK is not valid\n", __func__, (void *)kva_aligned); goto out; }
@@ -1797,8 +1799,8 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size, } else { /* associate vma and spa */ if (!vmalloc_area_clr_flag(spa, kva_aligned, VM_SHAREPOOL)) - pr_warn("share pool: %s: the kva %ld is not valid \n", - __func__, kva_aligned); + pr_warn("share pool: %s: the kva %pK is not valid\n", + __func__, (void *)kva_aligned); }
out:
From: Tang Yizhou tangyizhou@huawei.com
ascend inclusion category: feature bugzilla: 46867 CVE: NA
------------------------------------------------- To collect maintenance information of k2u more conveniently in the future, we should leverage input parameter pid. 1. Check sanity of pid in sp_make_share_k2u(), then get the mm pointer via get_task_mm(). 2. The input param pid of sp_make_share_kva_to_task() is replaced by mm.
Signed-off-by: Tang Yizhou tangyizhou@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- mm/share_pool.c | 110 +++++++++++++++++++++++++----------------------- 1 file changed, 57 insertions(+), 53 deletions(-)
diff --git a/mm/share_pool.c b/mm/share_pool.c index 88afaeb999ba..d4f721de6b58 100644 --- a/mm/share_pool.c +++ b/mm/share_pool.c @@ -1586,45 +1586,27 @@ static unsigned long sp_remap_kva_to_vma(unsigned long kva, struct sp_area *spa, return ret_addr; }
+/** + * Share kernel memory to a specified task + * @kva: the VA of shared kernel memory + * @spa: the sp area associated with the shared user address + * @mm: mm_struct of target task + * + * Return: the shared user address to start at + */ static void *sp_make_share_kva_to_task(unsigned long kva, struct sp_area *spa, - int pid) + struct mm_struct *mm) { - struct task_struct *tsk; unsigned long ret_addr; - void *p = ERR_PTR(-ENODEV); - int ret = 0; - - rcu_read_lock(); - tsk = find_task_by_vpid(pid); - if (!tsk || (tsk->flags & PF_EXITING)) - ret = -ESRCH; - else - get_task_struct(tsk); - - rcu_read_unlock(); - if (ret) - return ERR_PTR(ret);
- ret_addr = sp_remap_kva_to_vma(kva, spa, tsk->mm); + ret_addr = sp_remap_kva_to_vma(kva, spa, mm); if (IS_ERR_VALUE(ret_addr)) { pr_err("share pool: remap k2u to task failed, ret %ld\n", ret_addr); - p = ERR_PTR(ret_addr); - goto out; + return ERR_PTR(ret_addr); }
- p = (void *)ret_addr; - - task_lock(tsk); - if (tsk->mm == NULL) { - sp_munmap(tsk->mm, spa->va_start, spa_size(spa)); - p = ERR_PTR(-ESRCH); - } else { - spa->mm = tsk->mm; - } - task_unlock(tsk); -out: - put_task_struct(tsk); - return p; + spa->mm = mm; + return (void *)ret_addr; }
static void *sp_make_share_kva_to_spg(unsigned long kva, struct sp_area *spa, @@ -1708,7 +1690,9 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size, unsigned long kva_aligned; unsigned long size_aligned; unsigned int page_size = PAGE_SIZE; - int ret; + struct task_struct *tsk; + struct mm_struct *mm; + int ret = 0, is_hugepage;
if (sp_flags & ~SP_DVPP) { if (printk_ratelimit()) @@ -1716,53 +1700,71 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size, return ERR_PTR(-EINVAL); }
- ret = is_vmap_hugepage(kva); - if (ret > 0) { + is_hugepage = is_vmap_hugepage(kva); + if (is_hugepage > 0) { sp_flags |= SP_HUGEPAGE; page_size = PMD_SIZE; - } else if (ret == 0) { + } else if (is_hugepage == 0) { /* do nothing */ } else { pr_err("share pool: k2u kva not vmalloc address\n"); - return ERR_PTR(ret); + return ERR_PTR(is_hugepage); } + /* aligned down kva is convenient for caller to start with any valid kva */ kva_aligned = ALIGN_DOWN(kva, page_size); size_aligned = ALIGN(kva + size, page_size) - kva_aligned;
+ rcu_read_lock(); + tsk = find_task_by_vpid(pid); + if (!tsk || (tsk->flags & PF_EXITING)) + ret = -ESRCH; + else + get_task_struct(tsk); + + rcu_read_unlock(); + if (ret) + return ERR_PTR(ret); + + mm = get_task_mm(tsk); + if (mm == NULL) { + uva = ERR_PTR(-ESRCH); + goto out_put_task; + } + mutex_lock(&sp_mutex); spg = __sp_find_spg(pid, SPG_ID_DEFAULT); if (spg == NULL) { /* k2u to task */ if (spg_id != SPG_ID_NONE && spg_id != SPG_ID_DEFAULT) { - mutex_unlock(&sp_mutex); if (printk_ratelimit()) pr_err("share pool: k2task invalid spg id %d\n", spg_id); - return ERR_PTR(-EINVAL); + uva = ERR_PTR(-EINVAL); + goto out_unlock; } spa = sp_alloc_area(size_aligned, sp_flags, NULL, SPA_TYPE_K2TASK); if (IS_ERR(spa)) { - mutex_unlock(&sp_mutex); if (printk_ratelimit()) pr_err("share pool: k2u(task) failed due to alloc spa failure " "(potential no enough virtual memory when -75): %ld\n", PTR_ERR(spa)); - return spa; + uva = spa; + goto out_unlock; }
if (!vmalloc_area_set_flag(spa, kva_aligned, VM_SHAREPOOL)) { pr_err("share pool: %s: the kva %pK is not valid\n", __func__, (void *)kva_aligned); - goto out; + goto out_drop_spa; }
- uva = sp_make_share_kva_to_task(kva_aligned, spa, pid); + uva = sp_make_share_kva_to_task(kva_aligned, spa, mm); } else if (spg_valid(spg)) { /* k2u to group */ if (spg_id != SPG_ID_DEFAULT && spg_id != spg->id) { - mutex_unlock(&sp_mutex); if (printk_ratelimit()) pr_err("share pool: k2spg invalid spg id %d\n", spg_id); - return ERR_PTR(-EINVAL); + uva = ERR_PTR(-EINVAL); + goto out_unlock; }
if (enable_share_k2u_spg) @@ -1771,27 +1773,26 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size, spa = sp_alloc_area(size_aligned, sp_flags, NULL, SPA_TYPE_K2TASK);
if (IS_ERR(spa)) { - mutex_unlock(&sp_mutex); if (printk_ratelimit()) pr_err("share pool: k2u(spg) failed due to alloc spa failure " "(potential no enough virtual memory when -75): %ld\n", PTR_ERR(spa)); - return spa; + uva = spa; + goto out_unlock; }
if (!vmalloc_area_set_flag(spa, kva_aligned, VM_SHAREPOOL)) { pr_err("share pool: %s: the kva %pK is not valid\n", __func__, (void *)kva_aligned); - goto out; + goto out_drop_spa; }
if (spa->spg) uva = sp_make_share_kva_to_spg(kva_aligned, spa, spg); else - uva = sp_make_share_kva_to_task(kva_aligned, spa, pid); + uva = sp_make_share_kva_to_task(kva_aligned, spa, mm); } else { - mutex_unlock(&sp_mutex); - pr_err("share pool: failed to make k2u\n"); - return NULL; + /* group is dead, return -ENODEV */ + pr_err("share pool: failed to make k2u, sp group is dead\n"); }
if (!IS_ERR(uva)) { @@ -1803,12 +1804,15 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size, __func__, (void *)kva_aligned); }
-out: +out_drop_spa: __sp_area_drop(spa); +out_unlock: mutex_unlock(&sp_mutex); + mmput(mm); +out_put_task: + put_task_struct(tsk);
sp_dump_stack(); - return uva; } EXPORT_SYMBOL_GPL(sp_make_share_k2u);
From: Tang Yizhou tangyizhou@huawei.com
ascend inclusion category: feature bugzilla: 46867 CVE: NA
------------------------------------------------- Add a new column SP_K2U in /proc/sharepool/proc_stat which means the k2u size for a task. If a task is the target of k2u (to task) but was never added to an sp group, its sp group id is shown as '-'.
Signed-off-by: Tang Yizhou tangyizhou@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- mm/share_pool.c | 77 +++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 61 insertions(+), 16 deletions(-)
diff --git a/mm/share_pool.c b/mm/share_pool.c index d4f721de6b58..d77bcf59c7cc 100644 --- a/mm/share_pool.c +++ b/mm/share_pool.c @@ -91,6 +91,7 @@ struct sp_proc_stat { * another task in the same sp group. */ long alloc_size; + long k2u_size; };
/* for kthread buff_module_guard_work */ @@ -121,7 +122,7 @@ static struct sp_proc_stat *sp_init_proc_stat(struct task_struct *tsk) { return ERR_PTR(-ENOMEM); }
- stat->alloc_size = 0; + stat->alloc_size = stat->k2u_size = 0; stat->mm = mm; get_task_comm(stat->comm, tsk); ret = idr_alloc(&sp_stat_idr, stat, tgid, tgid + 1, GFP_KERNEL); @@ -750,11 +751,13 @@ void sp_group_post_exit(struct mm_struct *mm) * * We decide to print a info when seeing both of the scenarios. */ - if (stat && stat->alloc_size != 0) + if (stat && (stat->alloc_size != 0 || stat->k2u_size != 0)) pr_info("share pool: process %s(%d) of sp group %d exits. " - "It applied %ld aligned KB\n", + "It applied %ld aligned KB, k2u shared %ld aligned " + "KB\n", stat->comm, mm->sp_stat_id, - mm->sp_group->id, byte2kb(stat->alloc_size)); + mm->sp_group->id, byte2kb(stat->alloc_size), + byte2kb(stat->k2u_size));
idr_remove(&sp_stat_idr, mm->sp_stat_id);
@@ -1692,6 +1695,7 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size, unsigned int page_size = PAGE_SIZE; struct task_struct *tsk; struct mm_struct *mm; + struct sp_proc_stat *stat; int ret = 0, is_hugepage;
if (sp_flags & ~SP_DVPP) { @@ -1733,6 +1737,18 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size, }
mutex_lock(&sp_mutex); + /* + * Process statistics initialization. if the target process has been + * added to a sp group, then stat will be returned immediately. + * I believe there is no need to free stat in error handling branches. + */ + stat = sp_init_proc_stat(tsk); + if (IS_ERR(stat)) { + uva = stat; + pr_err("share pool: init proc stat failed, ret %lx\n", PTR_ERR(stat)); + goto out_unlock; + } + spg = __sp_find_spg(pid, SPG_ID_DEFAULT); if (spg == NULL) { /* k2u to task */ @@ -1797,6 +1813,7 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size,
if (!IS_ERR(uva)) { uva = uva + (kva - kva_aligned); + stat->k2u_size += size_aligned; } else { /* associate vma and spa */ if (!vmalloc_area_clr_flag(spa, kva_aligned, VM_SHAREPOOL)) @@ -2082,6 +2099,7 @@ static int sp_unshare_uva(unsigned long uva, unsigned long size, int pid, int sp unsigned long uva_aligned; unsigned long size_aligned; unsigned int page_size; + struct sp_proc_stat *stat;
mutex_lock(&sp_mutex); /* @@ -2202,6 +2220,16 @@ static int sp_unshare_uva(unsigned long uva, unsigned long size, int pid, int sp }
sp_dump_stack(); + /* pointer stat may be invalid because of kthread buff_module_guard_work */ + if (current->mm == NULL) { + kthread_stat.k2u_size -= spa->real_size; + } else { + stat = idr_find(&sp_stat_idr, current->mm->sp_stat_id); + if (stat) + stat->k2u_size -= spa->real_size; + else + WARN(1, "share_pool: %s: null process stat\n", __func__); + }
out_clr_flag: /* deassociate vma and spa */ @@ -2566,6 +2594,7 @@ static int spa_stat_show(struct seq_file *seq, void *offset)
static int idr_proc_stat_cb(int id, void *p, void *data) { + int spg_id; struct sp_group *spg; struct sp_proc_stat *stat = p; struct seq_file *seq = data; @@ -2579,11 +2608,21 @@ static int idr_proc_stat_cb(int id, void *p, void *data) long sp_alloc_nsize, non_sp_res, non_sp_shm;
mutex_lock(&sp_mutex); - spg = __sp_find_spg(id, SPG_ID_DEFAULT); - if (!spg_valid(spg) || !mmget_not_zero(mm)) + if (!mmget_not_zero(mm)) goto out_unlock; + /* + * a task which is the target of k2u(to task) but without adding to a + * sp group should be handled correctly. + */ + spg = __sp_find_spg(id, SPG_ID_DEFAULT); + if (!spg_valid(spg)) { + spg_id = 0; + sp_alloc_nsize = 0; + } else { + spg_id = spg->id; + sp_alloc_nsize = byte2kb(atomic64_read(&spg->alloc_nsize)); + }
- sp_alloc_nsize = byte2kb(atomic64_read(&spg->alloc_nsize)); anon = get_mm_counter(mm, MM_ANONPAGES); file = get_mm_counter(mm, MM_FILEPAGES); shmem = get_mm_counter(mm, MM_SHMEMPAGES); @@ -2592,10 +2631,15 @@ static int idr_proc_stat_cb(int id, void *p, void *data) non_sp_shm = page2kb(shmem) - sp_alloc_nsize; non_sp_shm = non_sp_shm < 0 ? 0 : non_sp_shm; /* to be investigated */
- seq_printf(seq, "%-8d %-8d %-9ld %-10ld %-8ld %-7ld %-7ld %-10ld\n", - id, spg->id, byte2kb(stat->alloc_size), non_sp_res, - page2kb(mm->total_vm), page2kb(total_rss), page2kb(shmem), - non_sp_shm); + seq_printf(seq, "%-8d ", id); + if (spg_id == 0) + seq_printf(seq, "%-8c ", '-'); + else + seq_printf(seq, "%-8d ", spg_id); + seq_printf(seq, "%-9ld %-9ld %-10ld %-8ld %-7ld %-7ld %-10ld\n", + byte2kb(stat->alloc_size), byte2kb(stat->k2u_size), + non_sp_res, page2kb(mm->total_vm), page2kb(total_rss), + page2kb(shmem), non_sp_shm); mmput(mm);
out_unlock: @@ -2609,12 +2653,13 @@ static int proc_stat_show(struct seq_file *seq, void *offset) spg_overview_show(seq); spa_overview_show(seq); /* print the file header */ - seq_printf(seq, "%-8s %-8s %-9s %-10s %-8s %-7s %-7s %-10s\n", - "PID", "Group_ID", "SP_ALLOC", "Non-SP_RES", "VIRT", "RES", - "Shm", "Non-SP_Shm"); + seq_printf(seq, "%-8s %-8s %-9s %-9s %-10s %-8s %-7s %-7s %-10s\n", + "PID", "Group_ID", "SP_ALLOC", "SP_K2U", "Non-SP_RES", + "VIRT", "RES", "Shm", "Non-SP_Shm"); /* print kthread buff_module_guard_work */ - seq_printf(seq, "%-8s %-8s %-9ld\n", - "guard", "-", byte2kb(kthread_stat.alloc_size)); + seq_printf(seq, "%-8s %-8s %-9ld %-9ld\n", + "guard", "-", byte2kb(kthread_stat.alloc_size), + byte2kb(kthread_stat.k2u_size)); idr_for_each(&sp_stat_idr, idr_proc_stat_cb, seq); return 0; }
From: Tang Yizhou tangyizhou@huawei.com
ascend inclusion category: feature bugzilla: 46867 CVE: NA
------------------------------------------------- Add a new column SP_RES to /proc/sharepool/proc_stat, which shows the sp_alloc size for a task. It includes the sp_alloc memory applied by other tasks in the same sp_group.
Signed-off-by: Tang Yizhou tangyizhou@huawei.com Reviewed-by: Ding Tianhong dingtianhong@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- include/linux/share_pool.h | 2 ++ mm/share_pool.c | 24 ++++++++++++++++-------- 2 files changed, 18 insertions(+), 8 deletions(-)
diff --git a/include/linux/share_pool.h b/include/linux/share_pool.h index eb0358bb6633..70b841d0eb8e 100644 --- a/include/linux/share_pool.h +++ b/include/linux/share_pool.h @@ -82,6 +82,8 @@ struct sp_group { atomic64_t alloc_nsize; /* total size of all sp_area from sp_alloc hugepage */ atomic64_t alloc_hsize; + /* total size of all sp_area from ap_alloc */ + atomic64_t alloc_size; /* we define the creator process of a sp_group as owner */ struct task_struct *owner; /* is_alive == false means it's being destroyed */ diff --git a/mm/share_pool.c b/mm/share_pool.c index d77bcf59c7cc..4aeb82e8958b 100644 --- a/mm/share_pool.c +++ b/mm/share_pool.c @@ -373,6 +373,7 @@ static struct sp_group *find_or_alloc_sp_group(int spg_id) atomic64_set(&spg->size, 0); atomic64_set(&spg->alloc_nsize, 0); atomic64_set(&spg->alloc_hsize, 0); + atomic64_set(&spg->alloc_size, 0); spg->is_alive = true; spg->hugepage_failures = 0; spg->dvpp_multi_spaces = false; @@ -939,6 +940,7 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags, atomic64_add(size, &spg->alloc_hsize); else atomic64_add(size, &spg->alloc_nsize); + atomic64_add(size, &spg->alloc_size); } atomic_inc(&spg_stat.spa_total_num); atomic64_add(size, &spg_stat.spa_total_size); @@ -1023,6 +1025,7 @@ static void sp_free_area(struct sp_area *spa) atomic64_sub(spa->real_size, &spa->spg->alloc_hsize); else atomic64_sub(spa->real_size, &spa->spg->alloc_nsize); + atomic64_sub(spa->real_size, &spa->spg->alloc_size); } atomic_dec(&spg_stat.spa_total_num); atomic64_sub(spa->real_size, &spg_stat.spa_total_size); @@ -2561,9 +2564,10 @@ static int idr_spg_stat_cb(int id, void *p, void *data) struct sp_group *spg = p; struct seq_file *seq = data;
- seq_printf(seq, "Group %6d size: %ld KB, spa num: %d, normal alloc: %ld KB, " - "huge alloc: %ld KB\n", + seq_printf(seq, "Group %6d size: %ld KB, spa num: %d, total alloc: %ld KB, " + "normal alloc: %ld KB, huge alloc: %ld KB\n", id, byte2kb(atomic64_read(&spg->size)), atomic_read(&spg->spa_num), + byte2kb(atomic64_read(&spg->alloc_size)), byte2kb(atomic64_read(&spg->alloc_nsize)), byte2kb(atomic64_read(&spg->alloc_hsize)));
@@ -2602,10 +2606,12 @@ static int idr_proc_stat_cb(int id, void *p, void *data) unsigned long anon, file, shmem, total_rss; /* * non_sp_res: resident memory size excluding share pool memory + * sp_res: resident memory size of share pool, including normal + * page and hugepage memory * non_sp_shm: resident shared memory size size excluding share pool * memory */ - long sp_alloc_nsize, non_sp_res, non_sp_shm; + long sp_alloc_nsize, non_sp_res, sp_res, non_sp_shm;
mutex_lock(&sp_mutex); if (!mmget_not_zero(mm)) @@ -2618,9 +2624,11 @@ static int idr_proc_stat_cb(int id, void *p, void *data) if (!spg_valid(spg)) { spg_id = 0; sp_alloc_nsize = 0; + sp_res = 0; } else { spg_id = spg->id; sp_alloc_nsize = byte2kb(atomic64_read(&spg->alloc_nsize)); + sp_res = byte2kb(atomic64_read(&spg->alloc_size)); }
anon = get_mm_counter(mm, MM_ANONPAGES); @@ -2636,8 +2644,8 @@ static int idr_proc_stat_cb(int id, void *p, void *data) seq_printf(seq, "%-8c ", '-'); else seq_printf(seq, "%-8d ", spg_id); - seq_printf(seq, "%-9ld %-9ld %-10ld %-8ld %-7ld %-7ld %-10ld\n", - byte2kb(stat->alloc_size), byte2kb(stat->k2u_size), + seq_printf(seq, "%-9ld %-9ld %-9ld %-10ld %-8ld %-7ld %-7ld %-10ld\n", + byte2kb(stat->alloc_size), byte2kb(stat->k2u_size), sp_res, non_sp_res, page2kb(mm->total_vm), page2kb(total_rss), page2kb(shmem), non_sp_shm); mmput(mm); @@ -2653,9 +2661,9 @@ static int proc_stat_show(struct seq_file *seq, void *offset) spg_overview_show(seq); spa_overview_show(seq); /* print the file header */ - seq_printf(seq, "%-8s %-8s %-9s %-9s %-10s %-8s %-7s %-7s %-10s\n", - "PID", "Group_ID", "SP_ALLOC", "SP_K2U", "Non-SP_RES", - "VIRT", "RES", "Shm", "Non-SP_Shm"); + seq_printf(seq, "%-8s %-8s %-9s %-9s %-9s %-10s %-8s %-7s %-7s %-10s\n", + "PID", "Group_ID", "SP_ALLOC", "SP_K2U", "SP_RES", + "Non-SP_RES", "VIRT", "RES", "Shm", "Non-SP_Shm"); /* print kthread buff_module_guard_work */ seq_printf(seq, "%-8s %-8s %-9ld %-9ld\n", "guard", "-", byte2kb(kthread_stat.alloc_size),
From: Xie XiuQi xiexiuqi@huawei.com
hulk inclusion category: bugfix bugzilla: 46892 CVE: NA
Add a get_idle_time() stub function when CONFIG_PROC_FS is disabled, to avoid the following compile error:
$ make tinyconfig && make
kernel/sched/cputime.o: In function `sched_idle_time_adjust': cputime.c:(.text+0x3bb): undefined reference to `get_idle_time' make: *** [vmlinux] Error 1
Signed-off-by: Xie XiuQi xiexiuqi@huawei.com Reviewed-by: Hanjun Guo guohanjun@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- include/linux/sched/cputime.h | 5 +++++ kernel/sched/cputime.c | 6 ++++++ 2 files changed, 11 insertions(+)
diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h index 1ebbeec02051..6b1793606fc9 100644 --- a/include/linux/sched/cputime.h +++ b/include/linux/sched/cputime.h @@ -189,6 +189,11 @@ task_sched_runtime(struct task_struct *task); extern int use_sched_idle_time; extern int sched_idle_time_adjust(int cpu, u64 *utime, u64 *stime); extern unsigned long long sched_get_idle_time(int cpu); + +#ifdef CONFIG_PROC_FS extern u64 get_idle_time(int cpu); +#else +static inline u64 get_idle_time(int cpu) { return -1ULL; } +#endif
#endif /* _LINUX_SCHED_CPUTIME_H */ diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index a10b51259b6a..159e4c467773 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -593,6 +593,12 @@ int sched_idle_time_adjust(int cpu, u64 *utime, u64 *stime)
raw_spin_lock(&rq_cputime->lock);
+ /* If failed to get idle time, drop adjustment */ + if (get_idle_time(cpu) == -1ULL) { + raw_spin_unlock(&rq_cputime->lock); + return 0; + } + #ifdef CONFIG_IRQ_TIME_ACCOUNTING if (sched_clock_irqtime) { hi = kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
From: qinyu qinyu16@huawei.com
euleros inclusion category: bugfix bugzilla: 46887
--------------------------------
This patch fixes the following ShellCheck warnings:
1. tools/perf/perf-completion.sh:268:10: error: Can only return 0-255. Other data should be written to stdout. [SC2152] 2. tools/testing/selftests/ntb/ntb_test.sh:558:11: error: Argument to implicit -n is always true due to literal strings. [SC2157] 3. tools/testing/selftests/cpufreq/cpufreq.sh:225:10: error: -n doesn't work with unquoted arguments. Quote or use [[ ]]. [SC2070] 4. tools/testing/selftests/efivarfs/efivarfs.sh:80:16: error: You need a space before the ]. [SC1020] 5. tools/testing/selftests/bpf/test_lirc_mode2.sh:27:9: error: -n doesn't work with unquoted arguments. Quote or use [[ ]]. [SC2070]
Signed-off-by: luochunsheng luochunsheng@huawei.com Signed-off-by: qinyu qinyu16@huawei.com Reviewed-by: Zou Wei zou_wei@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- tools/perf/perf-completion.sh | 2 +- tools/testing/selftests/bpf/test_lirc_mode2.sh | 2 +- tools/testing/selftests/cpufreq/cpufreq.sh | 2 +- tools/testing/selftests/efivarfs/efivarfs.sh | 2 +- tools/testing/selftests/ntb/ntb_test.sh | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/tools/perf/perf-completion.sh b/tools/perf/perf-completion.sh index fdf75d45efff..b1673abe5efe 100644 --- a/tools/perf/perf-completion.sh +++ b/tools/perf/perf-completion.sh @@ -265,7 +265,7 @@ if [[ -n ${ZSH_VERSION-} ]]; then let cword=CURRENT-1 emulate ksh -c __perf_main let _ret && _default && _ret=0 - return _ret + return $_ret }
compdef _perf perf diff --git a/tools/testing/selftests/bpf/test_lirc_mode2.sh b/tools/testing/selftests/bpf/test_lirc_mode2.sh index 677686198df3..52e994987d76 100755 --- a/tools/testing/selftests/bpf/test_lirc_mode2.sh +++ b/tools/testing/selftests/bpf/test_lirc_mode2.sh @@ -24,7 +24,7 @@ do fi done
-if [ -n $LIRCDEV ]; +if [[ -n $LIRCDEV ]]; then TYPE=lirc_mode2 ./test_lirc_mode2_user $LIRCDEV diff --git a/tools/testing/selftests/cpufreq/cpufreq.sh b/tools/testing/selftests/cpufreq/cpufreq.sh index b583a2fb4504..3143d8ecf5e0 100755 --- a/tools/testing/selftests/cpufreq/cpufreq.sh +++ b/tools/testing/selftests/cpufreq/cpufreq.sh @@ -222,7 +222,7 @@ do_suspend() return 1 fi
- if [ -n $filename ]; then + if [[ -n $filename ]]; then present=$(cat $SYSFS/power/state | grep $filename)
if [ -z "$present" ]; then diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh index d3866100e884..359113e4abb8 100755 --- a/tools/testing/selftests/efivarfs/efivarfs.sh +++ b/tools/testing/selftests/efivarfs/efivarfs.sh @@ -77,7 +77,7 @@ test_create_empty() test_create_read() { local file=$efivarfs_mount/$FUNCNAME-$test_guid - if [ -f $file]; then + if [[ -f $file ]]; then chattr -i $file rm -rf $file fi diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh index 17ca36403d04..4cc7a08007bb 100755 --- a/tools/testing/selftests/ntb/ntb_test.sh +++ b/tools/testing/selftests/ntb/ntb_test.sh @@ -555,7 +555,7 @@ function cleanup()
cleanup
-if ! [[ $$DONT_CLEANUP ]]; then +if ! [[ $DONT_CLEANUP ]]; then trap cleanup EXIT fi