author      Laura Abbott <labbott@redhat.com>           2017-01-13 22:51:45 +0100
committer   Russell King <rmk+kernel@armlinux.org.uk>   2017-02-28 11:05:28 +0000
commit      985626564eedc470ce2866e53938303368ad41b7 (patch)
tree        8b702fe72ff1dfa91d98a53f42b582030ea672b9 /arch
parent      374d446d25d6271ee615952a3b7f123ba4983c35 (diff)
download    linux-985626564eedc470ce2866e53938303368ad41b7.tar.gz
            linux-985626564eedc470ce2866e53938303368ad41b7.tar.xz
ARM: 8637/1: Adjust memory boundaries after reservations
adjust_lowmem_bounds() is responsible for setting up the lowmem/highmem boundary. This needs to be set up before memblock reservations can occur. While memblock reservations are being made, memory can also be removed from the system. The lowmem/highmem boundary and the end of memory may be affected by this, but they are currently not recalculated. On some systems this may be harmless; on others it may result in incorrect ranges being passed to the main memory allocator. Correct this by recalculating the lowmem/highmem boundary after all reservations have been made.

Tested-by: Magnus Lilja <lilja.magnus@gmail.com>
Signed-off-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
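As a rough illustration of what the patch guards against, the standalone C sketch below (userspace only; the bank layout, the VMALLOC_LIMIT value and the helper names are invented for this example and are not taken from the patch) computes a lowmem boundary from a list of memory banks, then drops a bank the way an early reservation or firmware quirk might, and shows why the boundary has to be recomputed afterwards:

/*
 * Standalone illustration only, not kernel code. The bank layout,
 * VMALLOC_LIMIT value and helper names are invented for this example.
 */
#include <stdio.h>
#include <stdint.h>

#define VMALLOC_LIMIT 0x30000000ULL     /* pretend lowmem may reach 768MB */

struct region { uint64_t base, size; };

/* Two banks of RAM; early reservations may end up removing the second. */
static struct region mem[2] = {
        { 0x00000000, 0x20000000 },     /* 512MB */
        { 0x20000000, 0x10000000 },     /* 256MB */
};
static int nr_regions = 2;

/* Recompute the lowmem boundary from scratch, capped at the vmalloc limit. */
static uint64_t compute_lowmem_limit(void)
{
        uint64_t limit = 0;
        int i;

        for (i = 0; i < nr_regions; i++) {
                uint64_t end = mem[i].base + mem[i].size;

                if (mem[i].base < VMALLOC_LIMIT && end > limit)
                        limit = end < VMALLOC_LIMIT ? end : VMALLOC_LIMIT;
        }
        return limit;
}

int main(void)
{
        uint64_t lowmem = compute_lowmem_limit();

        printf("before reservations: lowmem ends at %#llx\n",
               (unsigned long long)lowmem);

        /* A reservation/firmware quirk removes the second bank entirely. */
        nr_regions = 1;

        /* Without this second pass, the stale boundary would be handed on. */
        lowmem = compute_lowmem_limit();
        printf("after reservations:  lowmem ends at %#llx\n",
               (unsigned long long)lowmem);
        return 0;
}

The first pass reports a boundary covering both banks; if the boundary were not recomputed after the bank is removed, that stale value would be what the rest of boot sees, which is the situation the patch corrects.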
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/kernel/setup.c  6
-rw-r--r--  arch/arm/mm/mmu.c        9
2 files changed, 12 insertions(+), 3 deletions(-)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 8a8051cf57d1..f4e54503afa9 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1093,8 +1093,14 @@ void __init setup_arch(char **cmdline_p)
         setup_dma_zone(mdesc);
         xen_early_init();
         efi_init();
+        /*
+         * Make sure the calculation for lowmem/highmem is set appropriately
+         * before reserving/allocating any memory
+         */
         adjust_lowmem_bounds();
         arm_memblock_init(mdesc);
+        /* Memory may have been removed so recalculate the bounds. */
+        adjust_lowmem_bounds();
 
         early_ioremap_reset();
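The mmu.c half of the patch, in the hunks that follow, is what makes that second call meaningful: the scan now accumulates into a local lowmem_limit and writes arm_lowmem_limit exactly once at the end, so each call starts from zero instead of only ever raising the value left behind by the previous call. A minimal userspace sketch of the two patterns, with invented names and values (none of this is kernel code):

#include <stdio.h>
#include <stdint.h>

struct region { uint64_t base, size; };

static uint64_t global_limit;   /* stand-in for arm_lowmem_limit */

/* Pre-patch shape: updating the global in place means it can only grow. */
static void recalc_into_global(const struct region *mem, int n, uint64_t cap)
{
        int i;

        for (i = 0; i < n; i++) {
                uint64_t end = mem[i].base + mem[i].size;

                if (mem[i].base < cap && end > global_limit)
                        global_limit = end < cap ? end : cap;
        }
}

/* Patched shape: accumulate locally, commit once, so a rerun starts fresh. */
static void recalc_via_local(const struct region *mem, int n, uint64_t cap)
{
        uint64_t limit = 0;     /* plays the role of lowmem_limit */
        int i;

        for (i = 0; i < n; i++) {
                uint64_t end = mem[i].base + mem[i].size;

                if (mem[i].base < cap && end > limit)
                        limit = end < cap ? end : cap;
        }
        global_limit = limit;
}

int main(void)
{
        struct region before[2] = { { 0x0, 0x20000000 }, { 0x20000000, 0x10000000 } };
        struct region after[1]  = { { 0x0, 0x20000000 } };      /* second bank gone */
        uint64_t cap = 0x30000000;

        recalc_into_global(before, 2, cap);
        recalc_into_global(after, 1, cap);      /* stale: keeps the old, larger value */
        printf("global in place:   %#llx\n", (unsigned long long)global_limit);

        global_limit = 0;
        recalc_via_local(before, 2, cap);
        recalc_via_local(after, 1, cap);        /* correct: reflects the removed bank */
        printf("local then commit: %#llx\n", (unsigned long long)global_limit);
        return 0;
}

Run back to back over a shrinking bank list, the in-place variant keeps reporting the larger, stale boundary while the local-then-commit variant drops to the reduced one, which is the behaviour the second adjust_lowmem_bounds() call in setup_arch() relies on.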
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b8f70a3bb7f8..5cbfd9f86412 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1157,6 +1157,7 @@ void __init adjust_lowmem_bounds(void)
         phys_addr_t memblock_limit = 0;
         u64 vmalloc_limit;
         struct memblock_region *reg;
+        phys_addr_t lowmem_limit = 0;
 
         /*
          * Let's use our own (unoptimized) equivalent of __pa() that is
@@ -1172,14 +1173,14 @@ void __init adjust_lowmem_bounds(void)
                 phys_addr_t block_end = reg->base + reg->size;
 
                 if (reg->base < vmalloc_limit) {
-                        if (block_end > arm_lowmem_limit)
+                        if (block_end > lowmem_limit)
                                 /*
                                  * Compare as u64 to ensure vmalloc_limit does
                                  * not get truncated. block_end should always
                                  * fit in phys_addr_t so there should be no
                                  * issue with assignment.
                                  */
-                                arm_lowmem_limit = min_t(u64,
+                                lowmem_limit = min_t(u64,
                                                          vmalloc_limit,
                                                          block_end);
@@ -1200,12 +1201,14 @@ void __init adjust_lowmem_bounds(void)
                                 if (!IS_ALIGNED(block_start, PMD_SIZE))
                                         memblock_limit = block_start;
                                 else if (!IS_ALIGNED(block_end, PMD_SIZE))
-                                        memblock_limit = arm_lowmem_limit;
+                                        memblock_limit = lowmem_limit;
                         }
                 }
         }
 
+        arm_lowmem_limit = lowmem_limit;
+
         high_memory = __va(arm_lowmem_limit - 1) + 1;
 
         /*