author		Linus Torvalds <torvalds@linux-foundation.org>	2017-08-23 12:05:46 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-08-23 12:05:46 -0700
commit		2acf097f16abba684012cca670a61d94178bd1ab (patch)
tree		d09ac6e6a6082596ae5eed0754258f626557a6f3 /arch
parent		a67ca1e9bd07c05548bf7c2d6f065ca9db11e9bf (diff)
parent		a067d94d37ed590fd17684d18c3edf52110d305a (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
 "Late arm64 fixes. They fix very early boot failures with KASLR where
  the early mapping of the kernel is incorrect, so the failure mode
  looks like a hang with no output.

  There's also a signal-handling fix for the case where a uaccess
  routine faults with a fatal signal pending, which could be used to
  create unkillable user tasks using userfaultfd, and finally a state
  leak fix for the floating point registers across a call to exec().

  We're still seeing some random issues crop up (inode memory
  corruption and spinlock recursion) but we've not managed to reproduce
  things reliably enough to debug or bisect them yet.

  Summary:

   - Fix very early boot failures with KASLR enabled

   - Fix fatal signal handling on userspace access from kernel

   - Fix leakage of floating point register state across exec()"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: kaslr: Adjust the offset to avoid Image across alignment boundary
  arm64: kaslr: ignore modulo offset when validating virtual displacement
  arm64: mm: abort uaccess retries upon fatal signal
  arm64: fpsimd: Prevent registers leaking across exec
Diffstat (limited to 'arch')
-rw-r--r--   arch/arm64/kernel/fpsimd.c    2
-rw-r--r--   arch/arm64/kernel/head.S      1
-rw-r--r--   arch/arm64/kernel/kaslr.c    20
-rw-r--r--   arch/arm64/mm/fault.c         5
4 files changed, 17 insertions, 11 deletions
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 06da8ea16bbe5..c7b4995868e14 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -161,9 +161,11 @@ void fpsimd_flush_thread(void)
 {
 	if (!system_supports_fpsimd())
 		return;
+	preempt_disable();
 	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
 	fpsimd_flush_task_state(current);
 	set_thread_flag(TIF_FOREIGN_FPSTATE);
+	preempt_enable();
 }
 
 /*
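For readability, here is a sketch of fpsimd_flush_thread() with the hunk above applied (reconstructed from the diff, not copied from the tree). The new preempt_disable()/preempt_enable() pair appears intended to stop a context switch in the middle of the flush from saving the still-live (old) FPSIMD registers back over the state that was just zeroed, which is how register contents could otherwise leak into the newly exec()'d program.

void fpsimd_flush_thread(void)
{
	if (!system_supports_fpsimd())
		return;
	preempt_disable();
	/* Zero the saved state the exec'd image will start from. */
	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
	fpsimd_flush_task_state(current);
	/* Make the next return to userspace reload from the (now zeroed) saved state. */
	set_thread_flag(TIF_FOREIGN_FPSTATE);
	preempt_enable();
}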
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 973df7de7bf8d..adb0910b88f5f 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -354,7 +354,6 @@ __primary_switched:
 	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
 	b.ne	0f
 	mov	x0, x21				// pass FDT address in x0
-	mov	x1, x23				// pass modulo offset in x1
 	bl	kaslr_early_init		// parse FDT for KASLR options
 	cbz	x0, 0f				// KASLR disabled? just proceed
 	orr	x23, x23, x0			// record KASLR offset
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index a9710efb8c017..47080c49cc7e7 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -75,7 +75,7 @@ extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
  * containing function pointers) to be reinitialized, and zero-initialized
  * .bss variables will be reset to 0.
  */
-u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
+u64 __init kaslr_early_init(u64 dt_phys)
 {
 	void *fdt;
 	u64 seed, offset, mask, module_range;
@@ -131,15 +131,17 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
 	/*
 	 * The kernel Image should not extend across a 1GB/32MB/512MB alignment
 	 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
-	 * happens, increase the KASLR offset by the size of the kernel image
-	 * rounded up by SWAPPER_BLOCK_SIZE.
+	 * happens, round down the KASLR offset by (1 << SWAPPER_TABLE_SHIFT).
+	 *
+	 * NOTE: The references to _text and _end below will already take the
+	 *       modulo offset (the physical displacement modulo 2 MB) into
+	 *       account, given that the physical placement is controlled by
+	 *       the loader, and will not change as a result of the virtual
+	 *       mapping we choose.
 	 */
-	if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
-	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
-		u64 kimg_sz = _end - _text;
-		offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
-				& mask;
-	}
+	if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
+	    (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
+		offset = round_down(offset, 1 << SWAPPER_TABLE_SHIFT);
 
 	if (IS_ENABLED(CONFIG_KASAN))
 		/*
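The new fallback appears to rely on the fact that adding an offset which is a multiple of (1 << SWAPPER_TABLE_SHIFT) cannot move the Image across a swapper table boundary that its link-time placement did not already cross. Below is a small standalone program illustrating the check and the round_down() fallback. It is illustrative only: the addresses, the 24MB Image size, and the 4KB-granule assumption of SWAPPER_TABLE_SHIFT == 30 (1GB blocks, per the comment in the hunk above) are made up for the example, and the real kaslr_early_init() additionally masks the offset and randomizes the module region.

#include <stdint.h>
#include <stdio.h>

#define SWAPPER_TABLE_SHIFT	30	/* 1GB blocks: the 4KB-granule case from the comment above */
#define ROUND_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t text   = 0xffff000008080000ULL;	/* hypothetical _text */
	uint64_t end    = text + (24ULL << 20);		/* hypothetical 24MB Image */
	uint64_t offset = 0x137000000ULL;		/* hypothetical 2MB-aligned random offset */

	if (((text + offset) >> SWAPPER_TABLE_SHIFT) !=
	    ((end + offset) >> SWAPPER_TABLE_SHIFT)) {
		/* Same fallback as the hunk above: a 1GB-aligned offset keeps the
		 * Image on whichever side of each 1GB boundary it was linked on. */
		printf("offset %#llx splits the Image across a 1GB boundary\n",
		       (unsigned long long)offset);
		offset = ROUND_DOWN(offset, 1ULL << SWAPPER_TABLE_SHIFT);
	}

	printf("final offset: %#llx\n", (unsigned long long)offset);
	return 0;
}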
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 2509e4fe69922..1f22a41565a38 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -435,8 +435,11 @@ retry:
 		 * the mmap_sem because it would already be released
 		 * in __lock_page_or_retry in mm/filemap.c.
 		 */
-		if (fatal_signal_pending(current))
+		if (fatal_signal_pending(current)) {
+			if (!user_mode(regs))
+				goto no_context;
 			return 0;
+		}
 
 		/*
 		 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of