From: Jason Yan <yanaijie@huawei.com>
maillist inclusion
category: feature
feature: PowerPC64 kaslr support
bugzilla: 109306
CVE: NA
Reference: https://patchwork.ozlabs.org/project/linuxppc-dev/patch/20200330022023.3691-...
-------------------------------------------------
Some code refactoring in kaslr_legal_offset() and kaslr_early_init(). No functional change. This is a preparation for KASLR on fsl_booke64.
Signed-off-by: Jason Yan <yanaijie@huawei.com>
Cc: Scott Wood <oss@buserror.net>
Cc: Diana Craciun <diana.craciun@nxp.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Cui GaoSheng <cuigaosheng1@huawei.com>
Signed-off-by: GUO Zihua <guozihua@huawei.com>
---
 arch/powerpc/mm/nohash/kaslr_booke.c | 34 ++++++++++++++++++----------------
 1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
index 2fb3edafe9ab..08b5b90653ce 100644
--- a/arch/powerpc/mm/nohash/kaslr_booke.c
+++ b/arch/powerpc/mm/nohash/kaslr_booke.c
@@ -24,6 +24,7 @@ struct regions {
 	unsigned long pa_start;
 	unsigned long pa_end;
 	unsigned long kernel_size;
+	unsigned long linear_sz;
 	unsigned long dtb_start;
 	unsigned long dtb_end;
 	unsigned long initrd_start;
@@ -254,11 +255,23 @@ static __init void get_cell_sizes(const void *fdt, int node, int *addr_cells,
 		*size_cells = fdt32_to_cpu(*prop);
 }
 
-static unsigned long __init kaslr_legal_offset(void *dt_ptr, unsigned long index,
-					       unsigned long offset)
+static unsigned long __init kaslr_legal_offset(void *dt_ptr, unsigned long random)
 {
 	unsigned long koffset = 0;
 	unsigned long start;
+	unsigned long index;
+	unsigned long offset;
+
+	/*
+	 * Decide which 64M we want to start
+	 * Only use the low 8 bits of the random seed
+	 */
+	index = random & 0xFF;
+	index %= regions.linear_sz / SZ_64M;
+
+	/* Decide offset inside 64M */
+	offset = random % (SZ_64M - regions.kernel_size);
+	offset = round_down(offset, SZ_16K);
 
 	while ((long)index >= 0) {
 		offset = memstart_addr + index * SZ_64M + offset;
@@ -283,10 +296,9 @@ static inline __init bool kaslr_disabled(void)
 static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size,
 						  unsigned long kernel_sz)
 {
-	unsigned long offset, random;
+	unsigned long random;
 	unsigned long ram, linear_sz;
 	u64 seed;
-	unsigned long index;
 
 	kaslr_get_cmdline(dt_ptr);
 	if (kaslr_disabled())
@@ -327,22 +339,12 @@ static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size
 	regions.dtb_start = __pa(dt_ptr);
 	regions.dtb_end = __pa(dt_ptr) + fdt_totalsize(dt_ptr);
 	regions.kernel_size = kernel_sz;
+	regions.linear_sz = linear_sz;
 
 	get_initrd_range(dt_ptr);
 	get_crash_kernel(dt_ptr, ram);
 
-	/*
-	 * Decide which 64M we want to start
-	 * Only use the low 8 bits of the random seed
-	 */
-	index = random & 0xFF;
-	index %= linear_sz / SZ_64M;
-
-	/* Decide offset inside 64M */
-	offset = random % (SZ_64M - kernel_sz);
-	offset = round_down(offset, SZ_16K);
-
-	return kaslr_legal_offset(dt_ptr, index, offset);
+	return kaslr_legal_offset(dt_ptr, random);
 }
 
 /*
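
For reference, the randomization arithmetic that this patch moves into
kaslr_legal_offset() can be read in isolation: the low 8 bits of the seed
select a 64M granule of the linear mapping, and the seed also selects a
16K-aligned offset inside that granule that still leaves room for the kernel
image. The following is only a minimal standalone userspace sketch of that
arithmetic, not code from the patch; the linear mapping size, kernel size and
seed value are made-up examples, and SZ_64M/SZ_16K/round_down are redefined
locally since the kernel headers are not used here.

/* Standalone illustration only -- not kernel code. */
#include <stdio.h>

#define SZ_16K 0x4000UL
#define SZ_64M 0x4000000UL

int main(void)
{
	unsigned long linear_sz   = 16 * SZ_64M;  /* assumed 1GB linear mapping */
	unsigned long kernel_size = 0x1400000UL;  /* assumed 20MB kernel image  */
	unsigned long random      = 0xdeadbeefUL; /* stand-in for the seed      */
	unsigned long index, offset;

	/* Decide which 64M granule to start in; only the low 8 bits are used. */
	index = random & 0xFF;
	index %= linear_sz / SZ_64M;

	/* Decide the offset inside that granule, leaving room for the kernel
	 * and rounding down to 16K as the kernel code does. */
	offset = random % (SZ_64M - kernel_size);
	offset &= ~(SZ_16K - 1);                  /* open-coded round_down()   */

	printf("index=%lu offset=0x%lx\n", index, offset);
	return 0;
}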