From: liubo <liubo254@huawei.com>
euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4QVXW
CVE: NA
-------------------------------------------------
etmem, a memory vertical expansion technology, combines DRAM with new high-performance storage media to form multi-level memory storage. By grading the stored data, etmem migrates the data classified as cold from DRAM to the high-performance storage medium, so as to expand the effective memory capacity and reduce memory cost.
When the etmem memory expansion feature is running, the kernel's native swap function needs to be disabled in certain scenarios so that kernel swap does not interfere with etmem.
This patch provides that capability.
The /sys/kernel/mm/swap/ directory provides the kernel_swap_enable sysfs interface to enable or disable the kernel's native swap function.
The default value of /sys/kernel/mm/swap/kernel_swap_enable is true, that is, kernel swap is enabled by default.
Turn on kernel swap: echo true > /sys/kernel/mm/swap/kernel_swap_enable
Turn off kernel swap: echo false > /sys/kernel/mm/swap/kernel_swap_enable
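Below is a minimal user-space sketch (not part of this patch) of how a management tool such as etmem's user-space agent might drive this interface. Only the sysfs path and the accepted values ("true"/"1" and "false"/"0") come from this patch; the helper names are illustrative.

/* Sketch: toggle /sys/kernel/mm/swap/kernel_swap_enable from user space. */
#include <stdio.h>
#include <string.h>

#define KSWAP_CTL "/sys/kernel/mm/swap/kernel_swap_enable"

static int set_kernel_swap(int enable)
{
        FILE *f = fopen(KSWAP_CTL, "w");

        if (!f)
                return -1;
        /* The store handler accepts "true"/"1" and "false"/"0". */
        fputs(enable ? "true" : "false", f);
        return fclose(f);
}

static int kernel_swap_is_enabled(void)
{
        char buf[8] = "";
        FILE *f = fopen(KSWAP_CTL, "r");

        if (!f)
                return -1;
        if (!fgets(buf, sizeof(buf), f)) {
                fclose(f);
                return -1;
        }
        fclose(f);
        return !strncmp(buf, "true", 4);
}

int main(void)
{
        set_kernel_swap(0);     /* disable kernel swap while cold pages are managed externally */
        printf("kernel swap enabled: %d\n", kernel_swap_is_enabled());
        set_kernel_swap(1);     /* re-enable it afterwards */
        return 0;
}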
Signed-off-by: liubo <liubo254@huawei.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/swap.h |  1 +
 mm/swap_state.c      | 29 +++++++++++++++++++++++++++++
 mm/vmscan.c          | 18 ++++++++++++++++++
 3 files changed, 48 insertions(+)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index b7cfad35987a2..23549741336a4 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -476,6 +476,7 @@ extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
 						struct vm_fault *vmf);
 extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
 						struct vm_fault *vmf);
+extern bool kernel_swap_enabled(void);
 
 /* linux/mm/swapfile.c */
 extern atomic_long_t nr_swap_pages;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 2137e2d571965..1527ac72928b6 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -40,6 +40,7 @@ static const struct address_space_operations swap_aops = {
 struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
 static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
 static bool enable_vma_readahead __read_mostly = true;
+static bool enable_kernel_swap __read_mostly = true;
 
 #define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
 #define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
@@ -326,6 +327,11 @@ static inline bool swap_use_vma_readahead(void)
 	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
 }
 
+bool kernel_swap_enabled(void)
+{
+	return READ_ONCE(enable_kernel_swap);
+}
+
 /*
  * Lookup a swap entry in the swap cache. A found page will be returned
  * unlocked and with its refcount incremented - we rely on the kernel
@@ -828,8 +834,31 @@ static struct kobj_attribute vma_ra_enabled_attr =
 	__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
 	       vma_ra_enabled_store);
 
+static ssize_t kernel_swap_enable_show(struct kobject *kobj,
+				       struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%s\n", enable_kernel_swap ? "true" : "false");
+}
+static ssize_t kernel_swap_enable_store(struct kobject *kobj,
+					struct kobj_attribute *attr,
+					const char *buf, size_t count)
+{
+	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
+		WRITE_ONCE(enable_kernel_swap, true);
+	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
+		WRITE_ONCE(enable_kernel_swap, false);
+	else
+		return -EINVAL;
+
+	return count;
+}
+static struct kobj_attribute kernel_swap_enable_attr =
+	__ATTR(kernel_swap_enable, 0644, kernel_swap_enable_show,
+	       kernel_swap_enable_store);
+
 static struct attribute *swap_attrs[] = {
 	&vma_ra_enabled_attr.attr,
+	&kernel_swap_enable_attr.attr,
 	NULL,
 };
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e8befe70d2800..2676d6cf2ccac 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3248,6 +3248,16 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
 	return false;
 }
 
+/*
+ * Check if original kernel swap is enabled;
+ * if not, turn off kernel swap but leave page cache reclaim on.
+ */
+static inline void kernel_swap_check(struct scan_control *sc)
+{
+	if (sc != NULL && !kernel_swap_enabled())
+		sc->may_swap = 0;
+}
+
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 				gfp_t gfp_mask, nodemask_t *nodemask)
 {
@@ -3264,6 +3274,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.may_swap = 1,
 	};
 
+	kernel_swap_check(&sc);
 	/*
 	 * scan_control uses s8 fields for order, priority, and reclaim_idx.
 	 * Confirm they are large enough for max values.
@@ -3548,6 +3559,8 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 
 	count_vm_event(PAGEOUTRUN);
 
+	kernel_swap_check(&sc);
+
 #ifdef CONFIG_SHRINK_PAGECACHE
 	if (vm_cache_limit_mbytes && page_cache_over_limit())
 		shrink_page_cache(GFP_KERNEL);
@@ -3963,6 +3976,8 @@ static unsigned long __shrink_page_cache(gfp_t mask)
 
 	struct zonelist *zonelist = node_zonelist(numa_node_id(), mask);
 
+	kernel_swap_check(&sc);
+
 	return do_try_to_free_pages(zonelist, &sc);
 }
 
@@ -4282,6 +4297,9 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
 	cond_resched();
 	fs_reclaim_acquire(sc.gfp_mask);
+
+	kernel_swap_check(&sc);
+
 	/*
 	 * We need to be able to allocate from the reserves for RECLAIM_UNMAP
 	 * and we also need to be able to write out pages for RECLAIM_WRITE