Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig      15
-rw-r--r--  drivers/dma/Makefile      4
-rw-r--r--  drivers/dma/apbh_dma.c  519
-rw-r--r--  drivers/dma/debug.c     201
-rw-r--r--  drivers/dma/debug.h      56
-rw-r--r--  drivers/dma/map.c        47
-rw-r--r--  drivers/dma/of_fixups.c  40
7 files changed, 367 insertions, 515 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 4802bf522d..e7516466d9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -3,8 +3,21 @@ menu "DMA support"
config MXS_APBH_DMA
tristate "MXS APBH DMA ENGINE"
- depends on ARCH_IMX23 || ARCH_IMX28 || ARCH_IMX6
+ depends on ARCH_IMX23 || ARCH_IMX28 || ARCH_IMX6 || ARCH_IMX7
select STMP_DEVICE
help
Experimental!
+
+config OF_DMA_COHERENCY
+ bool "Respect device tree DMA coherency settings" if COMPILE_TEST
+ depends on HAS_DMA && OFDEVICE
+ help
+	  On most supported platforms, either all DMA is coherent or none
+	  of it is. Platforms with DMA masters of mixed coherency, or whose
+	  coherency differs from the architecture default, select this
+	  option to parse DMA coherency out of the DT. This allows barebox
+	  to choose the correct cache maintenance operations at runtime and
+	  causes barebox to fix up its own DMA coherency setting into the
+	  kernel DT if it differs.
+
endmenu
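
The new OF_DMA_COHERENCY option feeds the dev_is_dma_coherent() check used
by the reworked map.c further down: once coherency is parsed from the DT,
the streaming DMA API picks the right cache maintenance automatically. A
minimal caller sketch, using only functions that appear in this series
(dev, buf and len are placeholders):

    /* On a coherent device this is a no-op apart from the address
     * translation; on a non-coherent one it writes back the cache. */
    dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

    /* ... let the device read from addr ... */

    dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);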
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 39829cab50..77bd8abba5 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_MXS_APBH_DMA) += apbh_dma.o
obj-$(CONFIG_HAS_DMA) += map.o
+obj-$(CONFIG_DMA_API_DEBUG) += debug.o
+obj-$(CONFIG_MXS_APBH_DMA) += apbh_dma.o
+obj-$(CONFIG_OF_DMA_COHERENCY) += of_fixups.o
diff --git a/drivers/dma/apbh_dma.c b/drivers/dma/apbh_dma.c
index 83bd783d34..2f19033aaf 100644
--- a/drivers/dma/apbh_dma.c
+++ b/drivers/dma/apbh_dma.c
@@ -24,41 +24,12 @@
#include <init.h>
#include <io.h>
-
-#define HW_APBHX_CTRL0 0x000
-#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
-#define BM_APBH_CTRL0_APB_BURST_EN (1 << 28)
-#define BP_APBH_CTRL0_CLKGATE_CHANNEL 8
-#define BP_APBH_CTRL0_RESET_CHANNEL 16
-#define HW_APBHX_CTRL1 0x010
-#define BP_APBHX_CTRL1_CH_CMDCMPLT_IRQ_EN 16
-#define HW_APBHX_CTRL2 0x020
-#define HW_APBHX_CHANNEL_CTRL 0x030
-#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16
-#define BP_APBHX_VERSION_MAJOR 24
-#define HW_APBHX_CHn_NXTCMDAR_MX23(n) (0x050 + (n) * 0x70)
-#define HW_APBHX_CHn_NXTCMDAR_MX28(n) (0x110 + (n) * 0x70)
-#define HW_APBHX_CHn_SEMA_MX23(n) (0x080 + (n) * 0x70)
-#define HW_APBHX_CHn_SEMA_MX28(n) (0x140 + (n) * 0x70)
-#define BM_APBHX_CHn_SEMA_PHORE (0xff << 16)
-#define BP_APBHX_CHn_SEMA_PHORE 16
-
-static struct mxs_dma_chan mxs_dma_channels[MXS_MAX_DMA_CHANNELS];
-
-enum mxs_dma_id {
- UNKNOWN_DMA_ID,
- IMX23_DMA,
- IMX28_DMA,
-};
-
struct apbh_dma {
void __iomem *regs;
struct clk *clk;
enum mxs_dma_id id;
};
-#define apbh_dma_is_imx23(aphb) ((apbh)->id == IMX23_DMA)
-
static struct apbh_dma *apbh_dma;
/*
@@ -66,185 +37,9 @@ static struct apbh_dma *apbh_dma;
*/
static int mxs_dma_validate_chan(int channel)
{
- struct mxs_dma_chan *pchan;
-
if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
return -EINVAL;
- pchan = mxs_dma_channels + channel;
- if (!(pchan->flags & MXS_DMA_FLAGS_ALLOCATED))
- return -EINVAL;
-
- return 0;
-}
-
-/*
- * Return the address of the command within a descriptor.
- */
-static unsigned int mxs_dma_cmd_address(struct mxs_dma_desc *desc)
-{
- return desc->address + offsetof(struct mxs_dma_desc, cmd);
-}
-
-/*
- * Read a DMA channel's hardware semaphore.
- *
- * As used by the MXS platform's DMA software, the DMA channel's hardware
- * semaphore reflects the number of DMA commands the hardware will process, but
- * has not yet finished. This is a volatile value read directly from hardware,
- * so it must be viewed as immediately stale.
- *
- * If the channel is not marked busy, or has finished processing all its
- * commands, this value should be zero.
- *
- * See mxs_dma_append() for details on how DMA command blocks must be configured
- * to maintain the expected behavior of the semaphore's value.
- */
-static int mxs_dma_read_semaphore(int channel)
-{
- struct apbh_dma *apbh = apbh_dma;
- uint32_t tmp;
- int ret;
-
- ret = mxs_dma_validate_chan(channel);
- if (ret)
- return ret;
-
- if (apbh_dma_is_imx23(apbh))
- tmp = readl(apbh->regs + HW_APBHX_CHn_SEMA_MX23(channel));
- else
- tmp = readl(apbh->regs + HW_APBHX_CHn_SEMA_MX28(channel));
-
- tmp &= BM_APBHX_CHn_SEMA_PHORE;
- tmp >>= BP_APBHX_CHn_SEMA_PHORE;
-
- return tmp;
-}
-
-/*
- * Enable a DMA channel.
- *
- * If the given channel has any DMA descriptors on its active list, this
- * function causes the DMA hardware to begin processing them.
- *
- * This function marks the DMA channel as "busy," whether or not there are any
- * descriptors to process.
- */
-static int mxs_dma_enable(int channel)
-{
- struct apbh_dma *apbh = apbh_dma;
- unsigned int sem;
- struct mxs_dma_chan *pchan;
- struct mxs_dma_desc *pdesc;
- int channel_bit, ret;
-
- ret = mxs_dma_validate_chan(channel);
- if (ret)
- return ret;
-
- pchan = mxs_dma_channels + channel;
-
- if (pchan->pending_num == 0) {
- pchan->flags |= MXS_DMA_FLAGS_BUSY;
- return 0;
- }
-
- pdesc = list_first_entry(&pchan->active, struct mxs_dma_desc, node);
- if (pdesc == NULL)
- return -EFAULT;
-
- if (pchan->flags & MXS_DMA_FLAGS_BUSY) {
- if (!(pdesc->cmd.data & MXS_DMA_DESC_CHAIN))
- return 0;
-
- sem = mxs_dma_read_semaphore(channel);
- if (sem == 0)
- return 0;
-
- if (sem == 1) {
- pdesc = list_entry(pdesc->node.next,
- struct mxs_dma_desc, node);
- if (apbh_dma_is_imx23(apbh))
- writel(mxs_dma_cmd_address(pdesc),
- apbh->regs + HW_APBHX_CHn_NXTCMDAR_MX23(channel));
- else
- writel(mxs_dma_cmd_address(pdesc),
- apbh->regs + HW_APBHX_CHn_NXTCMDAR_MX28(channel));
- }
-
- if (apbh_dma_is_imx23(apbh))
- writel(pchan->pending_num,
- apbh->regs + HW_APBHX_CHn_SEMA_MX23(channel));
- else
- writel(pchan->pending_num,
- apbh->regs + HW_APBHX_CHn_SEMA_MX28(channel));
-
- pchan->active_num += pchan->pending_num;
- pchan->pending_num = 0;
- } else {
- pchan->active_num += pchan->pending_num;
- pchan->pending_num = 0;
- if (apbh_dma_is_imx23(apbh)) {
- writel(mxs_dma_cmd_address(pdesc),
- apbh->regs + HW_APBHX_CHn_NXTCMDAR_MX23(channel));
- writel(pchan->active_num,
- apbh->regs + HW_APBHX_CHn_SEMA_MX23(channel));
- channel_bit = channel + BP_APBH_CTRL0_CLKGATE_CHANNEL;
- } else {
- writel(mxs_dma_cmd_address(pdesc),
- apbh->regs + HW_APBHX_CHn_NXTCMDAR_MX28(channel));
- writel(pchan->active_num,
- apbh->regs + HW_APBHX_CHn_SEMA_MX28(channel));
- channel_bit = channel;
- }
- writel(1 << channel_bit, apbh->regs + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
- }
-
- pchan->flags |= MXS_DMA_FLAGS_BUSY;
- return 0;
-}
-
-/*
- * Disable a DMA channel.
- *
- * This function shuts down a DMA channel and marks it as "not busy." Any
- * descriptors on the active list are immediately moved to the head of the
- * "done" list, whether or not they have actually been processed by the
- * hardware. The "ready" flags of these descriptors are NOT cleared, so they
- * still appear to be active.
- *
- * This function immediately shuts down a DMA channel's hardware, aborting any
- * I/O that may be in progress, potentially leaving I/O hardware in an undefined
- * state. It is unwise to call this function if there is ANY chance the hardware
- * is still processing a command.
- */
-static int mxs_dma_disable(int channel)
-{
- struct mxs_dma_chan *pchan;
- struct apbh_dma *apbh = apbh_dma;
- int channel_bit, ret;
-
- ret = mxs_dma_validate_chan(channel);
- if (ret)
- return ret;
-
- pchan = mxs_dma_channels + channel;
-
- if (!(pchan->flags & MXS_DMA_FLAGS_BUSY))
- return -EINVAL;
-
- if (apbh_dma_is_imx23(apbh))
- channel_bit = channel + BP_APBH_CTRL0_CLKGATE_CHANNEL;
- else
- channel_bit = channel + 0;
-
- writel(1 << channel_bit, apbh->regs + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
-
- pchan->flags &= ~MXS_DMA_FLAGS_BUSY;
- pchan->active_num = 0;
- pchan->pending_num = 0;
- list_splice_init(&pchan->active, &pchan->done);
-
return 0;
}
@@ -254,11 +49,6 @@ static int mxs_dma_disable(int channel)
static int mxs_dma_reset(int channel)
{
struct apbh_dma *apbh = apbh_dma;
- int ret;
-
- ret = mxs_dma_validate_chan(channel);
- if (ret)
- return ret;
if (apbh_dma_is_imx23(apbh))
writel(1 << (channel + BP_APBH_CTRL0_RESET_CHANNEL),
@@ -271,30 +61,6 @@ static int mxs_dma_reset(int channel)
}
/*
- * Enable or disable DMA interrupt.
- *
- * This function enables the given DMA channel to interrupt the CPU.
- */
-static int mxs_dma_enable_irq(int channel, int enable)
-{
- struct apbh_dma *apbh = apbh_dma;
- int ret;
-
- ret = mxs_dma_validate_chan(channel);
- if (ret)
- return ret;
-
- if (enable)
- writel(1 << (channel + BP_APBHX_CTRL1_CH_CMDCMPLT_IRQ_EN),
- apbh->regs + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);
- else
- writel(1 << (channel + BP_APBHX_CTRL1_CH_CMDCMPLT_IRQ_EN),
- apbh->regs + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
-
- return 0;
-}
-
-/*
* Clear DMA interrupt.
*
* The software that is using the DMA channel must register to receive its
@@ -303,11 +69,6 @@ static int mxs_dma_enable_irq(int channel, int enable)
static int mxs_dma_ack_irq(int channel)
{
struct apbh_dma *apbh = apbh_dma;
- int ret;
-
- ret = mxs_dma_validate_chan(channel);
- if (ret)
- return ret;
writel(1 << channel, apbh->regs + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
writel(1 << channel, apbh->regs + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);
@@ -316,229 +77,11 @@ static int mxs_dma_ack_irq(int channel)
}
/*
- * Request to reserve a DMA channel
- */
-static int mxs_dma_request(int channel)
-{
- struct mxs_dma_chan *pchan;
-
- if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
- return -EINVAL;
-
- pchan = mxs_dma_channels + channel;
- if ((pchan->flags & MXS_DMA_FLAGS_VALID) != MXS_DMA_FLAGS_VALID)
- return -ENODEV;
-
- if (pchan->flags & MXS_DMA_FLAGS_ALLOCATED)
- return -EBUSY;
-
- pchan->flags |= MXS_DMA_FLAGS_ALLOCATED;
- pchan->active_num = 0;
- pchan->pending_num = 0;
-
- INIT_LIST_HEAD(&pchan->active);
- INIT_LIST_HEAD(&pchan->done);
-
- return 0;
-}
-
-/*
- * Release a DMA channel.
- *
- * This function releases a DMA channel from its current owner.
- *
- * The channel will NOT be released if it's marked "busy" (see
- * mxs_dma_enable()).
- */
-static int mxs_dma_release(int channel)
-{
- struct mxs_dma_chan *pchan;
- int ret;
-
- ret = mxs_dma_validate_chan(channel);
- if (ret)
- return ret;
-
- pchan = mxs_dma_channels + channel;
-
- if (pchan->flags & MXS_DMA_FLAGS_BUSY)
- return -EBUSY;
-
- pchan->dev = 0;
- pchan->active_num = 0;
- pchan->pending_num = 0;
- pchan->flags &= ~MXS_DMA_FLAGS_ALLOCATED;
-
- return 0;
-}
-
-/*
- * Allocate DMA descriptor
- */
-struct mxs_dma_desc *mxs_dma_desc_alloc(void)
-{
- struct mxs_dma_desc *pdesc;
- dma_addr_t dma_address;
-
- pdesc = dma_alloc_coherent(sizeof(struct mxs_dma_desc),
- &dma_address);
-
- if (pdesc == NULL)
- return NULL;
-
- pdesc->address = dma_address;
-
- return pdesc;
-};
-
-/*
- * Free DMA descriptor
- */
-void mxs_dma_desc_free(struct mxs_dma_desc *pdesc)
-{
- if (pdesc == NULL)
- return;
-
- free(pdesc);
-}
-
-/*
- * Add a DMA descriptor to a channel.
- *
- * If the descriptor list for this channel is not empty, this function sets the
- * CHAIN bit and the NEXTCMD_ADDR fields in the last descriptor's DMA command so
- * it will chain to the new descriptor's command.
- *
- * Then, this function marks the new descriptor as "ready," adds it to the end
- * of the active descriptor list, and increments the count of pending
- * descriptors.
- *
- * The MXS platform DMA software imposes some rules on DMA commands to maintain
- * important invariants. These rules are NOT checked, but they must be carefully
- * applied by software that uses MXS DMA channels.
- *
- * Invariant:
- * The DMA channel's hardware semaphore must reflect the number of DMA
- * commands the hardware will process, but has not yet finished.
- *
- * Explanation:
- * A DMA channel begins processing commands when its hardware semaphore is
- * written with a value greater than zero, and it stops processing commands
- * when the semaphore returns to zero.
- *
- * When a channel finishes a DMA command, it will decrement its semaphore if
- * the DECREMENT_SEMAPHORE bit is set in that command's flags bits.
- *
- * In principle, it's not necessary for the DECREMENT_SEMAPHORE bit to be set,
- * unless it suits the purposes of the software. For example, one could
- * construct a series of five DMA commands, with the DECREMENT_SEMAPHORE
- * bit set only in the last one. Then, setting the DMA channel's hardware
- * semaphore to one would cause the entire series of five commands to be
- * processed. However, this example would violate the invariant given above.
- *
- * Rule:
- * ALL DMA commands MUST have the DECREMENT_SEMAPHORE bit set so that the DMA
- * channel's hardware semaphore will be decremented EVERY time a command is
- * processed.
- */
-int mxs_dma_desc_append(int channel, struct mxs_dma_desc *pdesc)
-{
- struct mxs_dma_chan *pchan;
- struct mxs_dma_desc *last;
- int ret;
-
- ret = mxs_dma_validate_chan(channel);
- if (ret)
- return ret;
-
- pchan = mxs_dma_channels + channel;
-
- pdesc->cmd.next = mxs_dma_cmd_address(pdesc);
- pdesc->flags |= MXS_DMA_DESC_FIRST | MXS_DMA_DESC_LAST;
-
- if (!list_empty(&pchan->active)) {
- last = list_entry(pchan->active.prev, struct mxs_dma_desc,
- node);
-
- pdesc->flags &= ~MXS_DMA_DESC_FIRST;
- last->flags &= ~MXS_DMA_DESC_LAST;
-
- last->cmd.next = mxs_dma_cmd_address(pdesc);
- last->cmd.data |= MXS_DMA_DESC_CHAIN;
- }
- pdesc->flags |= MXS_DMA_DESC_READY;
- if (pdesc->flags & MXS_DMA_DESC_FIRST)
- pchan->pending_num++;
- list_add_tail(&pdesc->node, &pchan->active);
-
- return ret;
-}
-
-/*
- * Clean up processed DMA descriptors.
- *
- * This function removes processed DMA descriptors from the "active" list. Pass
- * in a non-NULL list head to get the descriptors moved to your list. Pass NULL
- * to get the descriptors moved to the channel's "done" list. Descriptors on
- * the "done" list can be retrieved with mxs_dma_get_finished().
- *
- * This function marks the DMA channel as "not busy" if no unprocessed
- * descriptors remain on the "active" list.
- */
-static int mxs_dma_finish(int channel, struct list_head *head)
-{
- int sem;
- struct mxs_dma_chan *pchan;
- struct list_head *p, *q;
- struct mxs_dma_desc *pdesc;
- int ret;
-
- ret = mxs_dma_validate_chan(channel);
- if (ret)
- return ret;
-
- pchan = mxs_dma_channels + channel;
-
- sem = mxs_dma_read_semaphore(channel);
- if (sem < 0)
- return sem;
-
- if (sem == pchan->active_num)
- return 0;
-
- list_for_each_safe(p, q, &pchan->active) {
- if ((pchan->active_num) <= sem)
- break;
-
- pdesc = list_entry(p, struct mxs_dma_desc, node);
- pdesc->flags &= ~MXS_DMA_DESC_READY;
-
- if (head)
- list_move_tail(p, head);
- else
- list_move_tail(p, &pchan->done);
-
- if (pdesc->flags & MXS_DMA_DESC_LAST)
- pchan->active_num--;
- }
-
- if (sem == 0)
- pchan->flags &= ~MXS_DMA_FLAGS_BUSY;
-
- return 0;
-}
-
-/*
* Wait for DMA channel to complete
*/
static int mxs_dma_wait_complete(uint32_t timeout, unsigned int chan)
{
struct apbh_dma *apbh = apbh_dma;
- int ret;
-
- ret = mxs_dma_validate_chan(chan);
- if (ret)
- return ret;
while (--timeout) {
if (readl(apbh->regs + HW_APBHX_CTRL1) & (1 << chan))
@@ -546,38 +89,47 @@ static int mxs_dma_wait_complete(uint32_t timeout, unsigned int chan)
udelay(1);
}
- if (timeout == 0) {
- ret = -ETIMEDOUT;
- mxs_dma_reset(chan);
- }
+ if (!timeout)
+ return -ETIMEDOUT;
- return ret;
+ return 0;
}
/*
* Execute the DMA channel
*/
-int mxs_dma_go(int chan)
+int mxs_dma_go(int chan, struct mxs_dma_cmd *cmd, int ncmds)
{
+ struct apbh_dma *apbh = apbh_dma;
uint32_t timeout = 10000;
- int ret;
+ int i, ret, channel_bit;
- LIST_HEAD(tmp_desc_list);
+ ret = mxs_dma_validate_chan(chan);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ncmds - 1; i++) {
+ cmd[i].next = (unsigned long)(&cmd[i + 1]);
+ cmd[i].data |= MXS_DMA_DESC_CHAIN;
+ }
- mxs_dma_enable_irq(chan, 1);
- mxs_dma_enable(chan);
+ if (apbh_dma_is_imx23(apbh)) {
+ writel(cmd, apbh->regs + HW_APBHX_CHn_NXTCMDAR_MX23(chan));
+ writel(1, apbh->regs + HW_APBHX_CHn_SEMA_MX23(chan));
+ channel_bit = chan + BP_APBH_CTRL0_CLKGATE_CHANNEL;
+ } else {
+ writel(cmd, apbh->regs + HW_APBHX_CHn_NXTCMDAR_MX28(chan));
+ writel(1, apbh->regs + HW_APBHX_CHn_SEMA_MX28(chan));
+ channel_bit = chan;
+ }
+ writel(1 << channel_bit, apbh->regs + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
/* Wait for DMA to finish. */
ret = mxs_dma_wait_complete(timeout, chan);
- /* Clear out the descriptors we just ran. */
- mxs_dma_finish(chan, &tmp_desc_list);
-
/* Shut the DMA channel down. */
mxs_dma_ack_irq(chan);
mxs_dma_reset(chan);
- mxs_dma_enable_irq(chan, 0);
- mxs_dma_disable(chan);
return ret;
}
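
With this rework, callers no longer allocate per-channel descriptor lists:
mxs_dma_go() takes a plain array of commands, chains all but the last entry
itself, kicks the channel and waits for completion. A hedged caller sketch;
only .next and .data are visible in this diff, so the remaining field and
flag names (.address, MXS_DMA_DESC_COMMAND_*, MXS_DMA_DESC_WAIT4END) are
assumptions modeled on the usual MXS command layout, and channel/buf_dma
are placeholders:

    struct mxs_dma_cmd cmd[2] = {};
    int ret;

    /* First command: a DMA read into buf_dma (assumed names). */
    cmd[0].data = MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_WAIT4END;
    cmd[0].address = buf_dma;

    /* Last command: no transfer, just terminates the chain.
     * mxs_dma_go() points cmd[0].next at cmd[1] and ORs
     * MXS_DMA_DESC_CHAIN into every entry except this one. */
    cmd[1].data = MXS_DMA_DESC_COMMAND_NO_DMAXFER;

    ret = mxs_dma_go(channel, cmd, ARRAY_SIZE(cmd));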
@@ -585,11 +137,10 @@ int mxs_dma_go(int chan)
/*
* Initialize the DMA hardware
*/
-static int apbh_dma_probe(struct device_d *dev)
+static int apbh_dma_probe(struct device *dev)
{
struct resource *iores;
struct apbh_dma *apbh;
- struct mxs_dma_chan *pchan;
enum mxs_dma_id id;
int ret, channel;
@@ -627,28 +178,11 @@ static int apbh_dma_probe(struct device_d *dev)
apbh->regs + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
for (channel = 0; channel < MXS_MAX_DMA_CHANNELS; channel++) {
- pchan = mxs_dma_channels + channel;
- pchan->flags = MXS_DMA_FLAGS_VALID;
-
- ret = mxs_dma_request(channel);
-
- if (ret) {
- printf("MXS DMA: Can't acquire DMA channel %i\n",
- channel);
-
- goto err;
- }
-
mxs_dma_reset(channel);
mxs_dma_ack_irq(channel);
}
return 0;
-
-err:
- while (--channel >= 0)
- mxs_dma_release(channel);
- return ret;
}
static struct platform_device_id apbh_ids[] = {
@@ -674,8 +208,9 @@ static __maybe_unused struct of_device_id apbh_dt_ids[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, apbh_dt_ids);
-static struct driver_d apbh_dma_driver = {
+static struct driver apbh_dma_driver = {
.name = "dma-apbh",
.id_table = apbh_ids,
.of_compatible = DRV_OF_COMPAT(apbh_dt_ids),
diff --git a/drivers/dma/debug.c b/drivers/dma/debug.c
new file mode 100644
index 0000000000..e524dc4127
--- /dev/null
+++ b/drivers/dma/debug.c
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <dma.h>
+#include <linux/list.h>
+#include "debug.h"
+
+static LIST_HEAD(dma_mappings);
+
+struct dma_debug_entry {
+ struct list_head list;
+ struct device *dev;
+ dma_addr_t dev_addr;
+ size_t size;
+ int direction;
+ bool dev_owned;
+};
+
+static const char *dir2name[] = {
+ [DMA_BIDIRECTIONAL] = "bidirectional",
+ [DMA_TO_DEVICE] = "to-device",
+ [DMA_FROM_DEVICE] = "from-device",
+ [DMA_NONE] = "none",
+};
+
+#define dma_dev_printf(level, args...) do { \
+ if (level > LOGLEVEL) \
+ break; \
+ dev_printf((level), args); \
+ if ((level) <= MSG_WARNING) \
+ dump_stack(); \
+} while (0)
+
+#define dma_dev_warn(args...) dma_dev_printf(MSG_WARNING, args)
+
+static void dma_printf(int level, struct dma_debug_entry *entry,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list va;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ dma_dev_printf(level, entry->dev, "%s mapping 0x%llx+0x%zx: %pV\n",
+ dir2name[(entry)->direction], (u64)(entry)->dev_addr,
+ (entry)->size, &vaf);
+
+ va_end(va);
+}
+
+#define dma_warn(args...) dma_printf(MSG_WARNING, args)
+#define dma_debug(args...) dma_printf(MSG_DEBUG, args)
+
+static inline int region_contains(struct dma_debug_entry *entry,
+ dma_addr_t buf_start, size_t buf_size)
+{
+ dma_addr_t dev_addr_end = entry->dev_addr + entry->size - 1;
+ dma_addr_t buf_end = buf_start + buf_size - 1;
+
+ /* Is the buffer completely within the mapping? */
+ if (entry->dev_addr <= buf_start && dev_addr_end >= buf_end)
+ return 1;
+
+ /* Does the buffer partially overlap the mapping? */
+ if (entry->dev_addr <= buf_end && dev_addr_end >= buf_start)
+ return -1;
+
+ return 0;
+}
+
+static struct dma_debug_entry *
+dma_debug_entry_find(struct device *dev, dma_addr_t dev_addr, size_t size)
+{
+ struct dma_debug_entry *entry;
+
+ /*
+ * DMA functions should be called with a device argument to support
+ * non-1:1 device mappings.
+ */
+ if (!dev)
+ dma_dev_warn(NULL, "unportable NULL device passed with buffer 0x%llx+0x%zx!\n",
+ (u64)dev_addr, size);
+
+ list_for_each_entry(entry, &dma_mappings, list) {
+ if (dev != entry->dev)
+ continue;
+
+ switch (region_contains(entry, dev_addr, size)) {
+ case 1:
+ return entry;
+ case -1:
+ /* The same device shouldn't have two mappings for the same address */
+ dma_warn(entry, "unexpected partial overlap looking for 0x%llx+0x%zx!\n",
+ (u64)dev_addr, size);
+ fallthrough;
+ case 0:
+ continue;
+ }
+ }
+
+ return NULL;
+}
+
+void debug_dma_map(struct device *dev, void *addr,
+ size_t size,
+ int direction, dma_addr_t dev_addr)
+{
+ struct dma_debug_entry *entry;
+
+ entry = dma_debug_entry_find(dev, dev_addr, size);
+ if (entry) {
+ /* The same device shouldn't have two mappings for the same address */
+ dma_warn(entry, "duplicate mapping\n");
+ return;
+ }
+
+ entry = xmalloc(sizeof(*entry));
+
+ entry->dev = dev;
+ entry->dev_addr = dev_addr;
+ entry->size = size;
+ entry->direction = direction;
+ entry->dev_owned = true;
+
+ list_add(&entry->list, &dma_mappings);
+
+ dma_debug(entry, "allocated\n");
+}
+
+void debug_dma_unmap(struct device *dev, dma_addr_t addr,
+ size_t size, int direction)
+{
+ struct dma_debug_entry *entry;
+
+ entry = dma_debug_entry_find(dev, addr, size);
+ if (!entry) {
+ /* Potential double free */
+ dma_dev_warn(dev, "Unmapping non-mapped %s buffer 0x%llx+0x%zx!\n",
+ dir2name[direction], (u64)addr, size);
+ return;
+ }
+
+ /* Mismatched size or direction may result in memory corruption */
+ if (entry->size != size)
+ dma_warn(entry, "mismatch unmapping 0x%zx bytes\n", size);
+ if (entry->direction != direction)
+ dma_warn(entry, "mismatch unmapping %s\n",
+ dir2name[direction]);
+
+ dma_debug(entry, "deallocating\n");
+ list_del(&entry->list);
+ free(entry);
+}
+
+void debug_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ int direction)
+{
+ struct dma_debug_entry *entry;
+
+ entry = dma_debug_entry_find(dev, dma_handle, size);
+ if (!entry) {
+ dma_dev_warn(dev, "sync for CPU of never-mapped %s buffer 0x%llx+0x%zx!\n",
+ dir2name[direction], (u64)dma_handle, size);
+ return;
+ }
+
+ if (!entry->dev_owned)
+ dma_dev_warn(dev, "unexpected sync for CPU of already CPU-mapped %s buffer 0x%llx+0x%zx!\n",
+ dir2name[direction], (u64)dma_handle, size);
+
+ entry->dev_owned = false;
+}
+
+void debug_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ struct dma_debug_entry *entry;
+
+ /*
+ * If dma_map_single was omitted, CPU cache may contain dirty cache lines
+ * for a buffer used for DMA. These lines may be evicted and written back
+ * after device DMA and before consumption by CPU, resulting in memory
+ * corruption
+ */
+ entry = dma_debug_entry_find(dev, dma_handle, size);
+ if (!entry) {
+ dma_dev_warn(dev, "Syncing for device of never-mapped %s buffer 0x%llx+0x%zx!\n",
+ dir2name[direction], (u64)dma_handle, size);
+ return;
+ }
+
+ if (entry->dev_owned)
+ dma_dev_warn(dev, "unexpected sync for device of already device-mapped %s buffer 0x%llx+0x%zx!\n",
+ dir2name[direction], (u64)dma_handle, size);
+
+ entry->dev_owned = true;
+}
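
debug.c turns the streaming API into a checked interface: every map records
an entry on dma_mappings, and each unmap or sync must match one. A sketch of
the kind of misuse it reports, assuming CONFIG_DMA_API_DEBUG is enabled and
dev/buf come from some driver:

    dma_addr_t addr = dma_map_single(dev, buf, 4096, DMA_FROM_DEVICE);

    /* Size differs from the recorded mapping: warns
     * "mismatch unmapping 0x100 bytes", dumps a stack trace,
     * then drops the entry. */
    dma_unmap_single(dev, addr, 0x100, DMA_FROM_DEVICE);

    /* No entry left to match: warns
     * "Unmapping non-mapped from-device buffer ...". */
    dma_unmap_single(dev, addr, 4096, DMA_FROM_DEVICE);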
diff --git a/drivers/dma/debug.h b/drivers/dma/debug.h
new file mode 100644
index 0000000000..020bb5c196
--- /dev/null
+++ b/drivers/dma/debug.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2008 Advanced Micro Devices, Inc.
+ *
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ */
+
+#ifndef _KERNEL_DMA_DEBUG_H
+#define _KERNEL_DMA_DEBUG_H
+
+#include <linux/types.h>
+
+struct device;
+
+#ifdef CONFIG_DMA_API_DEBUG
+extern void debug_dma_map(struct device *dev, void *addr,
+ size_t size,
+ int direction, dma_addr_t dma_addr);
+
+extern void debug_dma_unmap(struct device *dev, dma_addr_t addr,
+ size_t size, int direction);
+
+extern void debug_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ int direction);
+
+extern void debug_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size, int direction);
+
+#else /* CONFIG_DMA_API_DEBUG */
+static inline void debug_dma_map(struct device *dev, void *addr,
+ size_t size,
+ int direction, dma_addr_t dma_addr)
+{
+}
+
+static inline void debug_dma_unmap(struct device *dev, dma_addr_t addr,
+ size_t size, int direction)
+{
+}
+
+static inline void debug_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+}
+
+static inline void debug_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+}
+
+#endif /* CONFIG_DMA_API_DEBUG */
+#endif /* _KERNEL_DMA_DEBUG_H */
diff --git a/drivers/dma/map.c b/drivers/dma/map.c
index a3e1b3b5b5..ab86a8c7b1 100644
--- a/drivers/dma/map.c
+++ b/drivers/dma/map.c
@@ -1,42 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* SPDX-FileCopyrightText: 2012 Marc Kleine-Budde <mkl@pengutronix.de> */
-
#include <dma.h>
+#include "debug.h"
-static inline dma_addr_t cpu_to_dma(struct device_d *dev, unsigned long cpu_addr)
+void dma_sync_single_for_cpu(struct device *dev, dma_addr_t address,
+ size_t size, enum dma_data_direction dir)
{
- dma_addr_t dma_addr = cpu_addr;
+ void *ptr = dma_to_cpu(dev, address);
- if (dev)
- dma_addr -= dev->dma_offset;
+ debug_dma_sync_single_for_cpu(dev, address, size, dir);
- return dma_addr;
+ if (!dev_is_dma_coherent(dev))
+ arch_sync_dma_for_cpu(ptr, size, dir);
}
-static inline unsigned long dma_to_cpu(struct device_d *dev, dma_addr_t addr)
+void dma_sync_single_for_device(struct device *dev, dma_addr_t address,
+ size_t size, enum dma_data_direction dir)
{
- unsigned long cpu_addr = addr;
+ void *ptr = dma_to_cpu(dev, address);
- if (dev)
- cpu_addr += dev->dma_offset;
+ debug_dma_sync_single_for_device(dev, address, size, dir);
- return cpu_addr;
+ if (!dev_is_dma_coherent(dev))
+ arch_sync_dma_for_device(ptr, size, dir);
}
-dma_addr_t dma_map_single(struct device_d *dev, void *ptr, size_t size,
- enum dma_data_direction dir)
+dma_addr_t dma_map_single(struct device *dev, void *ptr,
+ size_t size, enum dma_data_direction dir)
{
- unsigned long addr = (unsigned long)ptr;
+ dma_addr_t dma_addr = cpu_to_dma(dev, ptr);
- dma_sync_single_for_device(addr, size, dir);
+ debug_dma_map(dev, ptr, size, dir, dma_addr);
- return cpu_to_dma(dev, addr);
+ if (!dev_is_dma_coherent(dev))
+ arch_sync_dma_for_device(ptr, size, dir);
+
+ return dma_addr;
}
-void dma_unmap_single(struct device_d *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir)
+void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir)
{
- unsigned long addr = dma_to_cpu(dev, dma_addr);
+ if (!dev_is_dma_coherent(dev))
+ dma_sync_single_for_cpu(dev, dma_addr, size, dir);
- dma_sync_single_for_cpu(addr, size, dir);
+ debug_dma_unmap(dev, dma_addr, size, dir);
}
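
The reworked map.c makes the CPU/device buffer ownership handoff explicit
and, via dev_is_dma_coherent(), skips cache maintenance entirely on coherent
devices. A typical streaming round trip built only from the functions in
this file (dev, buf and len are placeholders):

    /* Hand the buffer to the device; writes back dirty
     * cache lines on non-coherent systems. */
    dma_addr_t addr = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);

    /* CPU inspects the buffer mid-transfer: take ownership back ... */
    dma_sync_single_for_cpu(dev, addr, len, DMA_BIDIRECTIONAL);
    /* ... read or modify buf here ... */

    /* ... and give it back to the device before restarting DMA. */
    dma_sync_single_for_device(dev, addr, len, DMA_BIDIRECTIONAL);

    /* Final handback to the CPU. */
    dma_unmap_single(dev, addr, len, DMA_BIDIRECTIONAL);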
diff --git a/drivers/dma/of_fixups.c b/drivers/dma/of_fixups.c
new file mode 100644
index 0000000000..668313bbfb
--- /dev/null
+++ b/drivers/dma/of_fixups.c
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <of.h>
+#include <of_address.h>
+#include <driver.h>
+
+static int of_dma_coherent_fixup(struct device_node *root, void *data)
+{
+ struct device_node *soc;
+ enum dev_dma_coherence coherency = (enum dev_dma_coherence)(uintptr_t)data;
+
+ soc = of_find_node_by_path_from(root, "/soc");
+ if (!soc)
+ return -ENOENT;
+
+ of_property_write_bool(soc, "dma-noncoherent", coherency == DEV_DMA_NON_COHERENT);
+ of_property_write_bool(soc, "dma-coherent", coherency == DEV_DMA_COHERENT);
+
+ return 0;
+}
+
+static int of_dma_coherent_fixup_register(void)
+{
+ struct device_node *soc;
+ enum dev_dma_coherence soc_dma_coherency;
+
+ soc = of_find_node_by_path("/soc");
+ if (!soc)
+ return -ENOENT;
+
+ if (of_property_read_bool(soc, "dma-coherent"))
+ soc_dma_coherency = DEV_DMA_COHERENT;
+ else if (of_property_read_bool(soc, "dma-noncoherent"))
+ soc_dma_coherency = DEV_DMA_NON_COHERENT;
+ else
+ soc_dma_coherency = DEV_DMA_COHERENCE_DEFAULT;
+
+ return of_register_fixup(of_dma_coherent_fixup, (void *)(uintptr_t)soc_dma_coherency);
+}
+coredevice_initcall(of_dma_coherent_fixup_register);
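
The registered fixup runs when barebox boots a kernel and stamps barebox's
own view of /soc coherency into the kernel DT. A rough illustration of its
effect, calling the (normally static) fixup by hand on a detached tree; the
root node here is hypothetical:

    /* Pretend barebox determined it runs non-coherently. */
    of_dma_coherent_fixup(root, (void *)(uintptr_t)DEV_DMA_NON_COHERENT);

    soc = of_find_node_by_path_from(root, "/soc");
    of_property_read_bool(soc, "dma-noncoherent"); /* now true  */
    of_property_read_bool(soc, "dma-coherent");    /* now false */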