hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7CGGT
CVE: NA
--------------------------------
Add anon/file types to the memory.reclaim interface so that reclaim can
be limited to a single type of pages. The LRU algorithm reclaims cold
pages and balances between file and anon pages, but it does not consider
the speed of the backing device. For example, when swap is backed by a
zram device, reclaiming anon pages may have less impact on performance.
So extend the memory.reclaim interface to reclaim only one type of pages.

Usage:
  "echo <size> type=anon > memory.reclaim"
  "echo <size> type=file > memory.reclaim"

The previous format without a type key is still supported.
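For illustration, a possible session (the /sys/fs/cgroup/test path is
hypothetical):

  # reclaim up to 512M of anon pages only (requires swap space)
  echo "512M type=anon" > /sys/fs/cgroup/test/memory.reclaim

  # reclaim up to 512M of page cache only
  echo "512M type=file" > /sys/fs/cgroup/test/memory.reclaim

  # previous format, no type key: reclaim from both anon and file LRUs
  echo "512M" > /sys/fs/cgroup/test/memory.reclaim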
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 Documentation/admin-guide/cgroup-v2.rst |  9 +++---
 include/linux/swap.h                    |  1 +
 mm/memcontrol.c                         | 38 +++++++++++++++++++++++--
 mm/vmscan.c                             |  9 ++++++
 4 files changed, 51 insertions(+), 6 deletions(-)
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index f120c53d8d16..394fb18c9e3d 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1196,15 +1196,16 @@ PAGE_SIZE multiple when read back.
 	target cgroup.
 
 	This file accepts a single key, the number of bytes to reclaim.
-	No nested keys are currently supported.
 
 	Example::
 
 	  echo "1G" > memory.reclaim
 
-	The interface can be later extended with nested keys to
-	configure the reclaim behavior. For example, specify the
-	type of memory to reclaim from (anon, file, ..).
+	This file also accepts nested keys: the number of bytes to
+	reclaim, followed by the type of memory to reclaim.
+
+	Example::
+	  echo "1G type=file" > memory.reclaim
 
 	Please note that the kernel can over or under reclaim from
 	the target cgroup. If less bytes are reclaimed than the
diff --git a/include/linux/swap.h b/include/linux/swap.h
index fc96ee27eb42..65626521ae2b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -379,6 +379,7 @@ extern int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
 
 #define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
 #define MEMCG_RECLAIM_PROACTIVE (1 << 2)
+#define MEMCG_RECLAIM_NOT_FILE (1 << 3)
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 						  unsigned long nr_pages,
 						  gfp_t gfp_mask,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 90ffeb1ae6f4..612acf6466e3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5264,6 +5264,35 @@ static int memcg_events_local_show(struct seq_file *m, void *v)
 	return 0;
 }
 
+static int reclaim_param_parse(char *buf, unsigned long *nr_pages,
+			       unsigned int *reclaim_options)
+{
+	char *endp;
+	u64 bytes;
+
+	if (!strcmp(buf, "")) {
+		*nr_pages = PAGE_COUNTER_MAX;
+		return 0;
+	}
+
+	bytes = memparse(buf, &endp);
+	if (*endp == ' ') {
+		buf = endp + 1;
+		buf = strim(buf);
+		if (!strcmp(buf, "type=anon"))
+			*reclaim_options |= MEMCG_RECLAIM_NOT_FILE;
+		else if (!strcmp(buf, "type=file"))
+			*reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
+		else
+			return -EINVAL;
+	} else if (*endp != '\0')
+		return -EINVAL;
+
+	*nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);
+
+	return 0;
+}
+
 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
 			      size_t nbytes, loff_t off)
 {
@@ -5273,8 +5302,9 @@ static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
 	unsigned int reclaim_options;
 	int err;
 
+	reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
 	buf = strstrip(buf);
-	err = page_counter_memparse(buf, "", &nr_to_reclaim);
+	err = reclaim_param_parse(buf, &nr_to_reclaim, &reclaim_options);
 	if (err)
 		return err;
 
@@ -5282,13 +5312,17 @@ static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
 	    mem_cgroup_is_root(memcg))
 		return -EINVAL;
 
-	reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
 	while (nr_reclaimed < nr_to_reclaim) {
 		unsigned long reclaimed;
 
 		if (signal_pending(current))
 			return -EINTR;
 
+		/* If reclaiming only anon pages, check swap space first. */
+		if ((reclaim_options & MEMCG_RECLAIM_NOT_FILE) &&
+		    (mem_cgroup_get_nr_swap_pages(memcg) <= 0))
+			return -EAGAIN;
+
 		/* This is the final attempt, drain percpu lru caches in the
 		 * hope of introducing more evictable pages for
 		 * try_to_free_mem_cgroup_pages().
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d0ea4e251096..6416737f5206 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -103,6 +103,9 @@ struct scan_control {
 	/* Can pages be swapped as part of reclaim? */
 	unsigned int may_swap:1;
 
+	/* Should file pages be skipped? */
+	unsigned int not_file:1;
+
 	/* Proactive reclaim invoked by userspace through memory.reclaim */
 	unsigned int proactive:1;
 
@@ -2464,6 +2467,11 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 	unsigned long ap, fp;
 	enum lru_list lru;
 
+	if (sc->not_file) {
+		scan_balance = SCAN_ANON;
+		goto out;
+	}
+
 	/* If we have no swap space, do not bother scanning anon pages. */
 	if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
 		scan_balance = SCAN_FILE;
@@ -3583,6 +3591,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 		.may_unmap = 1,
 		.may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP),
 		.proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE),
+		.not_file = !!(reclaim_options & MEMCG_RECLAIM_NOT_FILE),
 	};
 	/*
 	 * Traverse the ZONELIST_FALLBACK zonelist of the current node to put
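
As a quick sanity check of the typed keys (not part of this patch; the
cgroup path and sizes are hypothetical), the per-type counters in
memory.stat can be compared around a typed reclaim:

  grep -E '^(anon|file) ' /sys/fs/cgroup/test/memory.stat
  echo "256M type=file" > /sys/fs/cgroup/test/memory.reclaim
  grep -E '^(anon|file) ' /sys/fs/cgroup/test/memory.stat

Only the file counter should drop for type=file; with type=anon, the
write returns -EAGAIN when no swap space is available.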