author     Tom Lendacky <thomas.lendacky@amd.com>    2019-07-10 19:01:19 +0000
committer  Christoph Hellwig <hch@lst.de>            2019-07-16 22:15:46 +0200
commit     9087c37584fb7d8315877bb55f85e4268cc0b4f4
tree       11f256332491057cf19db8aa447d44465bec79da
parent     9637d517347e80ee2fe1c5d8ce45ba1b88d8b5cd
dma-direct: Force unencrypted DMA under SME for certain DMA masks
If a device doesn't support DMA to a physical address that includes the
encryption bit (currently bit 47, so 48-bit DMA), then the DMA must occur
to unencrypted memory. SWIOTLB is used to satisfy that requirement if an
IOMMU is not active (enabled or configured in passthrough mode).

However, commit fafadcd16595 ("swiotlb: don't dip into swiotlb pool for
coherent allocations") modified the coherent allocation support in SWIOTLB
to use the DMA direct coherent allocation support. When an IOMMU is not
active, this resulted in dma_alloc_coherent() failing for devices that
didn't support DMA addresses that included the encryption bit.

Addressing this requires changes to the force_dma_unencrypted() function in
kernel/dma/direct.c. Since the function is now non-trivial and SME/SEV
specific, update the DMA direct support to add an arch override for the
force_dma_unencrypted() function. The arch override is selected when
CONFIG_AMD_MEM_ENCRYPT is set. The arch override function resides in the
arch/x86/mm/mem_encrypt.c file and forces unencrypted DMA when either SEV
is active or SME is active and the device does not support DMA to physical
addresses that include the encryption bit.

Fixes: fafadcd16595 ("swiotlb: don't dip into swiotlb pool for coherent allocations")
Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
[hch: moved the force_dma_unencrypted declaration to dma-mapping.h,
 fold the s390 fix from Halil Pasic]
Signed-off-by: Christoph Hellwig <hch@lst.de>
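For context, the arch-side override described above is not part of the
'kernel'-limited diff shown below. A rough sketch of what such an override
in arch/x86/mm/mem_encrypt.c could look like, assuming the existing x86
mem-encrypt facilities sev_active(), sme_active() and sme_me_mask, is:

	/*
	 * Sketch only: the actual arch change is outside this diffstat.
	 * Built when the arch selects ARCH_HAS_FORCE_DMA_UNENCRYPTED.
	 */
	#include <linux/kernel.h>
	#include <linux/bitops.h>
	#include <linux/dma-mapping.h>
	#include <linux/mem_encrypt.h>

	bool force_dma_unencrypted(struct device *dev)
	{
		/* For SEV, all DMA must be to unencrypted addresses. */
		if (sev_active())
			return true;

		/*
		 * For SME, DMA must be to unencrypted addresses if the
		 * device cannot reach physical addresses that include the
		 * encryption bit, i.e. its DMA mask is smaller than the
		 * position of the C-bit in sme_me_mask.
		 */
		if (sme_active()) {
			u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
			u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
							dev->bus_dma_mask);

			if (dma_dev_mask <= dma_enc_mask)
				return true;
		}

		return false;
	}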
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/dma/Kconfig	3
-rw-r--r--	kernel/dma/direct.c	16
2 files changed, 7 insertions, 12 deletions
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 70f8f8d9200ea..9decbba255fc8 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -48,6 +48,9 @@ config ARCH_HAS_DMA_COHERENT_TO_PFN
config ARCH_HAS_DMA_MMAP_PGPROT
bool
+config ARCH_HAS_FORCE_DMA_UNENCRYPTED
+ bool
+
config DMA_NONCOHERENT_CACHE_SYNC
bool
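The new symbol only has an effect once an architecture selects it. The
arch/x86 side is outside this 'kernel'-limited diff, but per the commit
message the selection is tied to CONFIG_AMD_MEM_ENCRYPT, so it would
plausibly be a Kconfig fragment along these lines:

	# Hypothetical arch/x86/Kconfig fragment (not shown in this diff)
	config X86
		select ARCH_HAS_FORCE_DMA_UNENCRYPTED	if AMD_MEM_ENCRYPT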
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index b90e1aede7434..d7cec866d16b5 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -23,14 +23,6 @@
#define ARCH_ZONE_DMA_BITS 24
#endif
-/*
- * For AMD SEV all DMA must be to unencrypted addresses.
- */
-static inline bool force_dma_unencrypted(void)
-{
- return sev_active();
-}
-
static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
{
if (!dev->dma_mask) {
@@ -46,7 +38,7 @@ static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
phys_addr_t phys)
{
- if (force_dma_unencrypted())
+ if (force_dma_unencrypted(dev))
return __phys_to_dma(dev, phys);
return phys_to_dma(dev, phys);
}
@@ -67,7 +59,7 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
dma_mask = dev->bus_dma_mask;
- if (force_dma_unencrypted())
+ if (force_dma_unencrypted(dev))
*phys_mask = __dma_to_phys(dev, dma_mask);
else
*phys_mask = dma_to_phys(dev, dma_mask);
@@ -159,7 +151,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
}
ret = page_address(page);
- if (force_dma_unencrypted()) {
+ if (force_dma_unencrypted(dev)) {
set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
*dma_handle = __phys_to_dma(dev, page_to_phys(page));
} else {
@@ -192,7 +184,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
return;
}
- if (force_dma_unencrypted())
+ if (force_dma_unencrypted(dev))
set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
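The callers in the hunks above now pass a struct device to
force_dma_unencrypted(); per the bracketed note in the commit message, the
declaration was moved to dma-mapping.h. A plausible sketch of that header
piece, with a static inline fallback for architectures that do not select
ARCH_HAS_FORCE_DMA_UNENCRYPTED, is:

	/* Sketch of the declaration noted as moved to dma-mapping.h. */
	#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
	bool force_dma_unencrypted(struct device *dev);
	#else
	static inline bool force_dma_unencrypted(struct device *dev)
	{
		return false;
	}
	#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */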