author     Dan Williams <dan.j.williams@intel.com>   2018-12-04 13:37:16 -0800
committer  Ingo Molnar <mingo@kernel.org>            2018-12-05 09:03:06 +0100
commit     4369deaa2f022ef92da45a0e7eec8a4a52e8e8a4 (patch)
tree       6732d7f5dd26688e96eaf9d93fd406cadf54e8c8 /include/asm-generic
parent     0cebbb60f759a709dabb3c87b9704f9844878850 (diff)
generic/pgtable: Introduce set_pte_safe()
Commit f77084d96355 ("x86/mm/pat: Disable preemption around __flush_tlb_all()") introduced a warning to capture cases where __flush_tlb_all() is called without preemption disabled. It triggers a false positive warning in the memory hotplug path.

On investigation it was found that the __flush_tlb_all() calls are not necessary. However, they are only "not necessary" in practice provided the PTEs are being initially populated from the !present state.

Introduce set_pte_safe() as a sanity check that the PTE is being updated in a way that does not require a TLB flush. Forgive the macro; the availability of the various set_pte() levels is hit and miss across architectures.

[ mingo: Minor readability edits. ]

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Suggested-by: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/279dadae-9148-465c-7ec6-3f37e026c6c9@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
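To make the check's semantics concrete: the warning fires only when an already-present entry is changed to a different value, which is exactly the case that would require a TLB flush. Below is a minimal, self-contained userspace sketch that mimics the structure of set_pte_safe() with stubbed types; fake_pte_t, fake_pte_present() and friends are illustrative stand-ins, not kernel API.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Stand-in for a page table entry; the real pte_t is arch-specific. */
typedef struct { uint64_t val; } fake_pte_t;

#define FAKE_PTE_PRESENT (1ULL << 0)

static bool fake_pte_present(fake_pte_t pte)
{
	return pte.val & FAKE_PTE_PRESENT;
}

static bool fake_pte_same(fake_pte_t a, fake_pte_t b)
{
	return a.val == b.val;
}

/* Mirrors the shape of set_pte_safe(): warn if the update would have
 * required a TLB flush (present -> different value), then do the set. */
#define fake_set_pte_safe(ptep, pte)					\
({									\
	if (fake_pte_present(*(ptep)) && !fake_pte_same(*(ptep), (pte))) \
		fprintf(stderr, "WARN: unsafe pte update\n");		\
	*(ptep) = (pte);						\
})

int main(void)
{
	fake_pte_t entry = { 0 };			/* !present */
	fake_pte_t first = { 0x1000 | FAKE_PTE_PRESENT };
	fake_pte_t other = { 0x2000 | FAKE_PTE_PRESENT };

	fake_set_pte_safe(&entry, first);  /* !present -> present: silent */
	fake_set_pte_safe(&entry, first);  /* same value again: silent    */
	fake_set_pte_safe(&entry, other);  /* present -> different: warns */
	return 0;
}

Built with gcc or clang (the ({ ... }) statement expression is a GNU extension, as in the kernel macros below), only the third call prints the warning.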
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/pgtable.h | 38 ++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+), 0 deletions(-)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index dae7f98babede..a9cac82e9a7ae 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -400,6 +400,44 @@ static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
 }
 #endif
 
+/*
+ * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
+ * TLB flush will be required as a result of the "set". For example, use
+ * in scenarios where it is known ahead of time that the routine is
+ * setting non-present entries, or re-setting an existing entry to the
+ * same value. Otherwise, use the typical "set" helpers and flush the
+ * TLB.
+ */
+#define set_pte_safe(ptep, pte) \
+({ \
+ WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
+ set_pte(ptep, pte); \
+})
+
+#define set_pmd_safe(pmdp, pmd) \
+({ \
+ WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
+ set_pmd(pmdp, pmd); \
+})
+
+#define set_pud_safe(pudp, pud) \
+({ \
+ WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
+ set_pud(pudp, pud); \
+})
+
+#define set_p4d_safe(p4dp, p4d) \
+({ \
+ WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
+ set_p4d(p4dp, p4d); \
+})
+
+#define set_pgd_safe(pgdp, pgd) \
+({ \
+ WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
+ set_pgd(pgdp, pgd); \
+})
+
 #ifndef __HAVE_ARCH_DO_SWAP_PAGE
 /*
  * Some architectures support metadata associated with a page. When a