From: Chen Wandun <chenwandun@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA
--------------------------------
This reverts commit 925368d878b5c446d9f871796ca27bc0d29102fb.

The page cache fallback statistic will be replaced by another method.
Signed-off-by: Chen Wandun <chenwandun@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/mem_reliable.h |  3 ---
 mm/filemap.c                 |  6 +-----
 mm/mem_reliable.c            | 29 -----------------------------
 3 files changed, 1 insertion(+), 37 deletions(-)
diff --git a/include/linux/mem_reliable.h b/include/linux/mem_reliable.h
index 2d3577ce71134..4c15db9c19443 100644
--- a/include/linux/mem_reliable.h
+++ b/include/linux/mem_reliable.h
@@ -21,11 +21,9 @@ extern bool reliable_allow_fallback;
 extern bool shmem_reliable;
 extern struct percpu_counter reliable_shmem_used_nr_page;
 extern bool pagecache_use_reliable_mem;
-extern atomic_long_t page_cache_fallback;
 DECLARE_PER_CPU(long, nr_reliable_buddy_pages);
 extern unsigned long nr_reliable_reserve_pages __read_mostly;
 extern long shmem_reliable_nr_page __read_mostly;
-extern void page_cache_fallback_inc(gfp_t gfp, struct page *page);
 
 extern void add_reliable_mem_size(long sz);
 extern void mem_reliable_init(bool has_unmirrored_mem,
@@ -166,7 +164,6 @@ static inline bool shmem_reliable_is_enabled(void) { return false; }
 static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
 {
 }
-static inline void page_cache_fallback_inc(gfp_t gfp, struct page *page) {}
 
 static inline bool pagecache_reliable_is_enabled(void) { return false; }
 static inline bool mem_reliable_status(void) { return false; }
diff --git a/mm/filemap.c b/mm/filemap.c
index 2ac6ddf630d80..a89d70097e686 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1050,13 +1050,9 @@ struct page *__page_cache_alloc(gfp_t gfp)
 			page = __alloc_pages_node(n, gfp, 0);
 		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 
-		page_cache_fallback_inc(gfp, page);
 		return page;
 	}
-	page = alloc_pages(gfp, 0);
-	page_cache_fallback_inc(gfp, page);
-
-	return page;
+	return alloc_pages(gfp, 0);
 }
 EXPORT_SYMBOL(__page_cache_alloc);
 #endif
diff --git a/mm/mem_reliable.c b/mm/mem_reliable.c
index f27c79bbb0f4c..dcbe3f58f6927 100644
--- a/mm/mem_reliable.c
+++ b/mm/mem_reliable.c
@@ -35,7 +35,6 @@ unsigned long nr_reliable_reserve_pages = MEM_RELIABLE_RESERVE_MIN / PAGE_SIZE;
 long shmem_reliable_nr_page = LONG_MAX;
 
 bool pagecache_use_reliable_mem __read_mostly = true;
-atomic_long_t page_cache_fallback = ATOMIC_LONG_INIT(0);
 DEFINE_PER_CPU(long, pagecache_reliable_pages);
 
 static unsigned long zero;
@@ -57,19 +56,6 @@ bool page_reliable(struct page *page)
 	return mem_reliable_is_enabled() && page_zonenum(page) < ZONE_MOVABLE;
 }
 
-static bool is_fallback_page(gfp_t gfp, struct page *page)
-{
-	bool ret = false;
-
-	if (!page)
-		return ret;
-
-	if ((gfp & ___GFP_RELIABILITY) && !page_reliable(page))
-		ret = true;
-
-	return ret;
-}
-
 static bool reliable_and_lru_check(enum lru_list lru, struct page *page)
 {
 	if (!page || !page_reliable(page))
@@ -89,21 +75,6 @@ void page_cache_reliable_lru_add(enum lru_list lru, struct page *page, int val)
 	this_cpu_add(pagecache_reliable_pages, val);
 }
 
-void page_cache_fallback_inc(gfp_t gfp, struct page *page)
-{
-	long num;
-
-	if (!pagecache_reliable_is_enabled())
-		return;
-
-	if (!is_fallback_page(gfp, page))
-		return;
-
-	num = atomic_long_inc_return(&page_cache_fallback);
-	if (num < 0)
-		atomic_long_set(&page_cache_fallback, 0);
-}
-
 static int reliable_mem_notifier(struct notifier_block *nb,
 				 unsigned long action, void *arg)
 {
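
For context on what is being reverted, the standalone userspace sketch below (not kernel code; the fake_page struct and fallback_inc() helper are invented for illustration) mimics the counting pattern of the removed is_fallback_page()/page_cache_fallback_inc() pair: bump a global atomic whenever an allocation that asked for reliable memory ends up served from a non-reliable zone, and reset the counter to zero if it ever wraps negative.

/* Illustrative sketch only; names here do not exist in the kernel. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long page_cache_fallback;		/* global fallback counter */

struct fake_page {
	bool requested_reliable;	/* caller asked for reliable memory */
	bool is_reliable;		/* page actually came from a reliable zone */
};

/* Count a page cache allocation that fell back to non-reliable memory. */
static void fallback_inc(const struct fake_page *page)
{
	long num;

	if (!page || !page->requested_reliable || page->is_reliable)
		return;					/* not a fallback allocation */

	num = atomic_fetch_add(&page_cache_fallback, 1) + 1;
	if (num < 0)					/* counter wrapped: restart from zero */
		atomic_store(&page_cache_fallback, 0);
}

int main(void)
{
	struct fake_page p = { .requested_reliable = true, .is_reliable = false };

	fallback_inc(&p);
	printf("fallbacks: %ld\n", atomic_load(&page_cache_fallback));
	return 0;
}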