hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8Q3P9
--------------------------------
The KFENCE pool requires the linear map to be mapped at page granularity, and on arm64 this must be done very early. To support late initialisation of KFENCE on arm64, all linear-map mappings are turned into page-level mappings, which incurs extra memory consumption. To save page-table memory, arm64 can map only the pages in the KFENCE pool itself at page granularity. Thus, the kfence pool cannot be allocated by the buddy system.
For flexibility, setting "kfence.sample_interval" to -1 makes the KFENCE pool memory be allocated from early memory while leaving KFENCE disabled by default. After system startup (re-enabling), you can set "kfence.sample_interval" to a non-zero value to enable KFENCE, and set it to 0 or -1 to turn KFENCE off again. Note that disabling KFENCE does not free the memory associated with it.
Note: Regardless of whether CONFIG_KFENCE_MUST_EARLY_INIT is enabled, setting kfence.sample_interval to -1 behaves the same as setting it to 0 on non-ARM64 architectures.
Signed-off-by: Ze Zuo zuoze1@huawei.com --- arch/arm64/include/asm/kfence.h | 3 +++ arch/arm64/mm/mmu.c | 5 ++++ include/linux/kfence.h | 4 +++ lib/Kconfig.kfence | 16 ++++++++++++ mm/kfence/core.c | 45 +++++++++++++++++++++++---------- 5 files changed, 60 insertions(+), 13 deletions(-)
diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h index a81937fae9f6..36052893433f 100644 --- a/arch/arm64/include/asm/kfence.h +++ b/arch/arm64/include/asm/kfence.h @@ -23,6 +23,9 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) extern bool kfence_early_init; static inline bool arm64_kfence_can_set_direct_map(void) { + if (IS_ENABLED(CONFIG_KFENCE_MUST_EARLY_INIT)) + return false; + return !kfence_early_init; } #else /* CONFIG_KFENCE */ diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 47781bec6171..58d228de4808 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -520,6 +520,11 @@ static int __init parse_kfence_early_init(char *arg)
if (get_option(&arg, &val)) kfence_early_init = !!val; + +#if IS_ENABLED(CONFIG_KFENCE_MUST_EARLY_INIT) + kfence_must_early_init = (val == -1) ? true : false; +#endif + return 0; } early_param("kfence.sample_interval", parse_kfence_early_init); diff --git a/include/linux/kfence.h b/include/linux/kfence.h index d228e0a4676d..62dad350b906 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -19,6 +19,10 @@
extern unsigned long kfence_sample_interval;
+#if IS_ENABLED(CONFIG_KFENCE_MUST_EARLY_INIT) +extern bool __ro_after_init kfence_must_early_init; +#endif + #ifdef CONFIG_KFENCE_DYNAMIC_OBJECTS extern int kfence_num_objects; #define KFENCE_NR_OBJECTS kfence_num_objects diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence index 999be97173f9..f40df4b11ed3 100644 --- a/lib/Kconfig.kfence +++ b/lib/Kconfig.kfence @@ -69,6 +69,22 @@ config KFENCE_DYNAMIC_OBJECTS
Say N if you are unsure.
+config KFENCE_MUST_EARLY_INIT + bool "Require kfence_pool to be pre-allocated on arm64." + depends on ARM64 + help + To support KFENCE late init, arm64 will convert block mapping to + page-level mappings, which leads to performance degradation and + increased memory consumption. + + If this config is enabled, only KFENCE memory early init for arm64 + is supported, extending sample_interval to implement late enable. When + "kfence.sample_interval" is set to -1 or 0, KFENCE will not be enabled. + Only when "kfence.sample_interval" is set to -1, it can be enabled by + setting it to a non-zero value. + + Say N if you are unsure. + config KFENCE_STATIC_KEYS bool "Use static keys to set up allocations" if EXPERT depends on JUMP_LABEL diff --git a/mm/kfence/core.c b/mm/kfence/core.c index d39ebe647670..def02ef3625e 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -50,6 +50,11 @@
static bool kfence_enabled __read_mostly; static bool disabled_by_warn __read_mostly; +#if IS_ENABLED(CONFIG_KFENCE_MUST_EARLY_INIT) +bool __ro_after_init kfence_must_early_init; +#else +#define kfence_must_early_init 0 +#endif
unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL; EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */ @@ -62,19 +67,28 @@ EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */ static int kfence_enable_late(void); static int param_set_sample_interval(const char *val, const struct kernel_param *kp) { - unsigned long num; - int ret = kstrtoul(val, 0, &num); + long num; + int ret = kstrtol(val, 0, &num);
if (ret < 0) return ret;
+ if (num < -1) + return -ERANGE; + + /* + * For architecture that don't require early allocation, always support + * re-enabling. So only need to set num to 0 if num < 0. + */ + num = max_t(long, 0, num); + /* Using 0 to indicate KFENCE is disabled. */ if (!num && READ_ONCE(kfence_enabled)) { pr_info("disabled\n"); WRITE_ONCE(kfence_enabled, false); }
- *((unsigned long *)kp->arg) = num; + *((unsigned long *)kp->arg) = (unsigned long)num;
if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING) return disabled_by_warn ? -EINVAL : kfence_enable_late(); @@ -861,7 +875,7 @@ static int kfence_debugfs_init(void) { struct dentry *kfence_dir;
- if (!READ_ONCE(kfence_enabled)) + if (!READ_ONCE(kfence_enabled) && !kfence_must_early_init) return 0;
kfence_dir = debugfs_create_dir("kfence", NULL); @@ -946,7 +960,7 @@ static void toggle_allocation_gate(struct work_struct *work)
void __init kfence_alloc_pool_and_metadata(void) { - if (!kfence_sample_interval) + if (!kfence_sample_interval && !kfence_must_early_init) return;
if (kfence_dynamic_init()) @@ -987,12 +1001,13 @@ static void kfence_init_enable(void) if (kfence_check_on_panic) atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);
- WRITE_ONCE(kfence_enabled, true); - queue_delayed_work(system_unbound_wq, &kfence_timer, 0); - - pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE, - KFENCE_NR_OBJECTS, (void *)__kfence_pool, - (void *)(__kfence_pool + KFENCE_POOL_SIZE)); + if (!kfence_must_early_init) { + WRITE_ONCE(kfence_enabled, true); + queue_delayed_work(system_unbound_wq, &kfence_timer, 0); + pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE, + KFENCE_NR_OBJECTS, (void *)__kfence_pool, + (void *)(__kfence_pool + KFENCE_POOL_SIZE)); + } }
void __init kfence_init(void) @@ -1000,7 +1015,7 @@ void __init kfence_init(void) stack_hash_seed = get_random_u32();
/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */ - if (!kfence_sample_interval) + if (!kfence_sample_interval && !kfence_must_early_init) return;
if (!kfence_init_pool_early()) { @@ -1089,8 +1104,12 @@ static int kfence_init_late(void)
static int kfence_enable_late(void) { - if (!__kfence_pool) + if (!__kfence_pool) { + if (IS_ENABLED(CONFIG_KFENCE_MUST_EARLY_INIT)) + return 0; + return kfence_init_late(); + }
WRITE_ONCE(kfence_enabled, true); queue_delayed_work(system_unbound_wq, &kfence_timer, 0);