author      Linus Torvalds <torvalds@linux-foundation.org>  2018-12-26 18:08:18 -0800
committer   Linus Torvalds <torvalds@linux-foundation.org>  2018-12-26 18:08:18 -0800
commit      e57d9f638af9673f38d9f09de66fa0a28303127d (patch)
tree        1948825bba57b4563ef0a5e7dd7e90634441b66e /include/asm-generic
parent      d6e867a6ae13bc02cd01c535764e5b051d26cf28 (diff)
parent      6848ac7ca39a226ede5df7af0efcc4ef0611e99c (diff)
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Update and clean up x86 fault handling, by Andy Lutomirski.

   - Drop usage of __flush_tlb_all() in kernel_physical_mapping_init()
     and related fallout, by Dan Williams.

   - CPA cleanups and reorganization by Peter Zijlstra: simplify the
     flow and remove a few warts.

   - Other misc cleanups"

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (29 commits)
  x86/mm/dump_pagetables: Use DEFINE_SHOW_ATTRIBUTE()
  x86/mm/cpa: Rename @addrinarray to @numpages
  x86/mm/cpa: Better use CLFLUSHOPT
  x86/mm/cpa: Fold cpa_flush_range() and cpa_flush_array() into a single cpa_flush() function
  x86/mm/cpa: Make cpa_data::numpages invariant
  x86/mm/cpa: Optimize cpa_flush_array() TLB invalidation
  x86/mm/cpa: Simplify the code after making cpa->vaddr invariant
  x86/mm/cpa: Make cpa_data::vaddr invariant
  x86/mm/cpa: Add __cpa_addr() helper
  x86/mm/cpa: Add ARRAY and PAGES_ARRAY selftests
  x86/mm: Drop usage of __flush_tlb_all() in kernel_physical_mapping_init()
  x86/mm: Validate kernel_physical_mapping_init() PTE population
  generic/pgtable: Introduce set_pte_safe()
  generic/pgtable: Introduce {p4d,pgd}_same()
  generic/pgtable: Make {pmd, pud}_same() unconditionally available
  x86/fault: Clean up the page fault oops decoder a bit
  x86/fault: Decode page fault OOPSes better
  x86/vsyscall/64: Use X86_PF constants in the simulated #PF error code
  x86/oops: Show the correct CS value in show_regs()
  x86/fault: Don't try to recover from an implicit supervisor access
  ...
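For context on the generic/pgtable changes pulled in below: set_pte_safe() and its siblings let a caller skip the TLB flush when it is known ahead of time that only non-present entries are being set (or that an entry is rewritten to the same value), while warning once if that assumption is ever violated. What follows is a minimal, hypothetical sketch of that usage pattern; the helper populate_kernel_pte() and its arguments are invented for illustration and are not part of this merge.

/*
 * Illustrative sketch only -- not from this merge. The helper name
 * populate_kernel_pte() and its signature are made up for this example.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/pgtable.h>

/*
 * Install a single mapping into a slot that is known to be non-present
 * (or that already holds the same value). Because no live translation
 * can change here, the TLB flush that would normally follow the "set"
 * can be elided; set_pte_safe() additionally WARNs once if a present,
 * different entry is ever overwritten through this path.
 */
static void __init populate_kernel_pte(pte_t *ptep, phys_addr_t paddr,
				       pgprot_t prot)
{
	set_pte_safe(ptep, pfn_pte(paddr >> PAGE_SHIFT, prot));
}

The same pattern extends one level up via set_pmd_safe(), set_pud_safe(), set_p4d_safe() and set_pgd_safe(), which is why the series also makes pmd_same()/pud_same() unconditionally available and adds p4d_same()/pgd_same().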
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/5level-fixup.h          1
-rw-r--r--  include/asm-generic/pgtable-nop4d-hack.h    1
-rw-r--r--  include/asm-generic/pgtable-nop4d.h         1
-rw-r--r--  include/asm-generic/pgtable-nopud.h         1
-rw-r--r--  include/asm-generic/pgtable.h              56
5 files changed, 51 insertions, 9 deletions
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index 73474bb52344d..bb6cb347018c0 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -26,6 +26,7 @@
#define p4d_clear(p4d) pgd_clear(p4d)
#define p4d_val(p4d) pgd_val(p4d)
#define p4d_populate(mm, p4d, pud) pgd_populate(mm, p4d, pud)
+#define p4d_populate_safe(mm, p4d, pud) pgd_populate(mm, p4d, pud)
#define p4d_page(p4d) pgd_page(p4d)
#define p4d_page_vaddr(p4d) pgd_page_vaddr(p4d)
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
index 1d6dd38c0e5ea..829bdb0d6327d 100644
--- a/include/asm-generic/pgtable-nop4d-hack.h
+++ b/include/asm-generic/pgtable-nop4d-hack.h
@@ -31,6 +31,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
#define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
#define pgd_populate(mm, pgd, pud) do { } while (0)
+#define pgd_populate_safe(mm, pgd, pud) do { } while (0)
/*
* (puds are folded into pgds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index 04cb913797bc0..aebab905e6cd0 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -26,6 +26,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
#define p4d_ERROR(p4d) (pgd_ERROR((p4d).pgd))
#define pgd_populate(mm, pgd, p4d) do { } while (0)
+#define pgd_populate_safe(mm, pgd, p4d) do { } while (0)
/*
* (p4ds are folded into pgds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index 9bef475db6fef..c77a1d301155c 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -35,6 +35,7 @@ static inline void p4d_clear(p4d_t *p4d) { }
#define pud_ERROR(pud) (p4d_ERROR((pud).p4d))
#define p4d_populate(mm, p4d, pud) do { } while (0)
+#define p4d_populate_safe(mm, p4d, pud) do { } while (0)
/*
* (puds are folded into p4ds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 359fb935ded6a..a9cac82e9a7ae 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -375,7 +375,6 @@ static inline int pte_unused(pte_t pte)
#endif
#ifndef __HAVE_ARCH_PMD_SAME
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
return pmd_val(pmd_a) == pmd_val(pmd_b);
@@ -385,21 +384,60 @@ static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
return pud_val(pud_a) == pud_val(pud_b);
}
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+#endif
+
+#ifndef __HAVE_ARCH_P4D_SAME
+static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
{
- BUILD_BUG();
- return 0;
+ return p4d_val(p4d_a) == p4d_val(p4d_b);
}
+#endif
-static inline int pud_same(pud_t pud_a, pud_t pud_b)
+#ifndef __HAVE_ARCH_PGD_SAME
+static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
{
- BUILD_BUG();
- return 0;
+ return pgd_val(pgd_a) == pgd_val(pgd_b);
}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+/*
+ * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
+ * TLB flush will be required as a result of the "set". For example, use
+ * in scenarios where it is known ahead of time that the routine is
+ * setting non-present entries, or re-setting an existing entry to the
+ * same value. Otherwise, use the typical "set" helpers and flush the
+ * TLB.
+ */
+#define set_pte_safe(ptep, pte) \
+({ \
+ WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
+ set_pte(ptep, pte); \
+})
+
+#define set_pmd_safe(pmdp, pmd) \
+({ \
+ WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
+ set_pmd(pmdp, pmd); \
+})
+
+#define set_pud_safe(pudp, pud) \
+({ \
+ WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
+ set_pud(pudp, pud); \
+})
+
+#define set_p4d_safe(p4dp, p4d) \
+({ \
+ WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
+ set_p4d(p4dp, p4d); \
+})
+
+#define set_pgd_safe(pgdp, pgd) \
+({ \
+ WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
+ set_pgd(pgdp, pgd); \
+})
+
#ifndef __HAVE_ARCH_DO_SWAP_PAGE
/*
* Some architectures support metadata associated with a page. When a