author    Linus Torvalds <torvalds@linux-foundation.org>  2015-11-10 10:05:17 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-11-10 10:05:17 -0800
commit    041c79514af9080c75197078283134f538f46b44 (patch)
tree      d5e465d5967d84adb37d735fddec48ee0509b93c
parent    7d884710bb3635f94dac152ae226ca54a585a223 (diff)
parent    34635b1accb99b3c3ad3b35a210be198701aac7e (diff)
Merge tag 'dmaengine-4.4-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
 "This time we have a very typical update which is mostly fixes and
  updates to drivers and no new drivers.

   - the biggest change is coming from Peter for edma cleanup which
     even caused some last minute regression, things seem settled now
   - idma64 and dw updates
   - ioatdma updates
   - module autoload fixes for various drivers
   - scatter gather support for hdmac"

* tag 'dmaengine-4.4-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (77 commits)
  dmaengine: edma: Add dummy driver skeleton for edma3-tptc
  Revert "ARM: DTS: am33xx: Use the new DT bindings for the eDMA3"
  Revert "ARM: DTS: am437x: Use the new DT bindings for the eDMA3"
  dmaengine: dw: some Intel devices has no memcpy support
  dmaengine: dw: platform: provide platform data for Intel
  dmaengine: dw: don't override platform data with autocfg
  dmaengine: hdmac: Add scatter-gathered memset support
  dmaengine: hdmac: factorise memset descriptor allocation
  dmaengine: virt-dma: Fix kernel-doc annotations
  ARM: DTS: am437x: Use the new DT bindings for the eDMA3
  ARM: DTS: am33xx: Use the new DT bindings for the eDMA3
  dmaengine: edma: New device tree binding
  dmaengine: Kconfig: edma: Select TI_DMA_CROSSBAR in case of ARCH_OMAP
  dmaengine: ti-dma-crossbar: Add support for crossbar on AM33xx/AM43xx
  dmaengine: edma: Merge the of parsing functions
  dmaengine: edma: Do not allocate memory for edma_rsv_info in case of DT boot
  dmaengine: edma: Refactor the dma device and channel struct initialization
  dmaengine: edma: Get qDMA channel information from HW also
  dmaengine: edma: Merge map_dmach_to_queue into assign_channel_eventq
  dmaengine: edma: Correct PaRAM access function names (_parm_ to _param_)
  ...
-rw-r--r--  Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt |   15
-rw-r--r--  Documentation/devicetree/bindings/dma/ti-edma.txt         |  117
-rw-r--r--  arch/arm/Kconfig                                          |    1
-rw-r--r--  arch/arm/common/Kconfig                                   |    3
-rw-r--r--  arch/arm/common/Makefile                                  |    1
-rw-r--r--  arch/arm/common/edma.c                                    | 1876
-rw-r--r--  arch/arm/mach-davinci/devices-da8xx.c                     |  122
-rw-r--r--  arch/arm/mach-davinci/dm355.c                             |   40
-rw-r--r--  arch/arm/mach-davinci/dm365.c                             |   25
-rw-r--r--  arch/arm/mach-davinci/dm644x.c                            |   40
-rw-r--r--  arch/arm/mach-davinci/dm646x.c                            |   44
-rw-r--r--  arch/arm/mach-omap2/Kconfig                               |    1
-rw-r--r--  arch/avr32/mach-at32ap/at32ap700x.c                       |    9
-rw-r--r--  drivers/dma/Kconfig                                       |    4
-rw-r--r--  drivers/dma/Makefile                                      |    2
-rw-r--r--  drivers/dma/acpi-dma.c                                    |   11
-rw-r--r--  drivers/dma/at_hdmac.c                                    |  168
-rw-r--r--  drivers/dma/at_hdmac_regs.h                               |    2
-rw-r--r--  drivers/dma/at_xdmac.c                                    |  106
-rw-r--r--  drivers/dma/dmaengine.c                                   |    6
-rw-r--r--  drivers/dma/dw/core.c                                     |   75
-rw-r--r--  drivers/dma/dw/pci.c                                      |   20
-rw-r--r--  drivers/dma/dw/platform.c                                 |   17
-rw-r--r--  drivers/dma/edma.c                                        | 1858
-rw-r--r--  drivers/dma/fsldma.c                                      |    1
-rw-r--r--  drivers/dma/idma64.c                                      |   22
-rw-r--r--  drivers/dma/idma64.h                                      |   14
-rw-r--r--  drivers/dma/imx-sdma.c                                    |    5
-rw-r--r--  drivers/dma/ioat/dma.c                                    |    3
-rw-r--r--  drivers/dma/ioat/dma.h                                    |    6
-rw-r--r--  drivers/dma/ioat/init.c                                   |  114
-rw-r--r--  drivers/dma/ioat/prep.c                                   |   34
-rw-r--r--  drivers/dma/moxart-dma.c                                  |    1
-rw-r--r--  drivers/dma/mpc512x_dma.c                                 |    1
-rw-r--r--  drivers/dma/omap-dma.c                                    |    6
-rw-r--r--  drivers/dma/sirf-dma.c                                    |    1
-rw-r--r--  drivers/dma/ste_dma40.c                                   |    2
-rw-r--r--  drivers/dma/sun6i-dma.c                                   |    1
-rw-r--r--  drivers/dma/ti-dma-crossbar.c                             |  251
-rw-r--r--  drivers/dma/virt-dma.h                                    |   18
-rw-r--r--  drivers/dma/xgene-dma.c                                   |   63
-rw-r--r--  drivers/dma/xilinx/xilinx_vdma.c                          |    1
-rw-r--r--  drivers/dma/zx296702_dma.c                                |    2
-rw-r--r--  include/linux/of_dma.h                                    |    2
-rw-r--r--  include/linux/platform_data/dma-dw.h                      |    2
-rw-r--r--  include/linux/platform_data/edma.h                        |  104
46 files changed, 2533 insertions, 2684 deletions
diff --git a/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt b/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt
index 63a48928f3a8..b152a75dceae 100644
--- a/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt
+++ b/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt
@@ -2,9 +2,10 @@ Texas Instruments DMA Crossbar (DMA request router)
Required properties:
- compatible: "ti,dra7-dma-crossbar" for DRA7xx DMA crossbar
+ "ti,am335x-edma-crossbar" for AM335x and AM437x
- reg: Memory map for accessing module
-- #dma-cells: Should be set to <1>.
- Clients should use the crossbar request number (input)
+- #dma-cells: Should be set to match the DMA controller's #dma-cells
+ for ti,dra7-dma-crossbar and <3> for ti,am335x-edma-crossbar.
- dma-requests: Number of DMA requests the crossbar can receive
- dma-masters: phandle pointing to the DMA controller
@@ -14,6 +15,15 @@ The DMA controller node needs to have the following properties:
Optional properties:
- ti,dma-safe-map: Safe routing value for unused request lines
+Notes:
+When requesting a channel via ti,dra7-dma-crossbar, the DMA client must request
+the DMA event number as the crossbar ID (input to the DMA crossbar).
+
+For ti,am335x-edma-crossbar, the meaning of the dmas parameters for clients:
+dmas = <&edma_xbar 12 0 1>; where <12> is the DMA request number, <0> is the TC
+the event should be assigned to, and <1> is the mux selection in the crossbar.
+When mux 0 is used, the DMA channel can be requested directly from the edma node.
+
Example:
/* DMA controller */
@@ -47,6 +57,7 @@ uart1: serial@4806a000 {
ti,hwmods = "uart1";
clock-frequency = <48000000>;
status = "disabled";
+ /* Requesting crossbar inputs 49 and 50 */
dmas = <&sdma_xbar 49>, <&sdma_xbar 50>;
dma-names = "tx", "rx";
};
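For reference, a client driver reaches the channels declared above through the
generic dmaengine API; a minimal sketch, where the uart_probe_dma() helper and
its error handling are illustrative rather than part of this binding:

    #include <linux/device.h>
    #include <linux/dmaengine.h>

    /* Request the "tx"/"rx" channels from the uart1 node above; the
     * crossbar driver routes crossbar inputs 49 and 50 to free DMA
     * channels behind the scenes. */
    static int uart_probe_dma(struct device *dev,
                              struct dma_chan **tx, struct dma_chan **rx)
    {
            *tx = dma_request_slave_channel(dev, "tx");
            *rx = dma_request_slave_channel(dev, "rx");
            if (!*tx || !*rx) {
                    if (*tx)
                            dma_release_channel(*tx);
                    if (*rx)
                            dma_release_channel(*rx);
                    return -ENODEV;
            }
            return 0;
    }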
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
index 5ba525a10035..d3d0a4fb1c73 100644
--- a/Documentation/devicetree/bindings/dma/ti-edma.txt
+++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
@@ -1,4 +1,119 @@
-TI EDMA
+Texas Instruments eDMA
+
+The eDMA3 consists of two components: the Channel Controller (CC) and one or
+more Transfer Controllers (TC). The CC is the main entry point for DMA users
+since it is responsible for DMA channel handling, while the TCs are
+responsible for executing the actual DMA transfers.
+
+------------------------------------------------------------------------------
+eDMA3 Channel Controller
+
+Required properties:
+- compatible: "ti,edma3-tpcc" for the channel controller(s)
+- #dma-cells: Should be set to <2>. The first number is the DMA request
+ number and the second is the TC the channel is serviced on.
+- reg: Memory map of eDMA CC
+- reg-names: "edma3_cc"
+- interrupts: Interrupt lines for CCINT, MPERR and CCERRINT.
+- interrupt-names: "edma3_ccint", "edma3_mperr" and "edma3_ccerrint"
+- ti,tptcs: List of TPTCs associated with the eDMA in the following form:
+ <&tptc_phandle TC_priority_number>. The highest priority is 0.
+
+Optional properties:
+- ti,hwmods: Name of the hwmods associated with the eDMA CC
+- ti,edma-memcpy-channels: List of channels allocated to be used for memcpy; in
+		other words, these channels will be SW-triggered channels. The
+		list must contain 16-bit numbers, see the example.
+- ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by
+		the driver; they are allocated for use by, for example, the
+		DSP. See the example.
+
+------------------------------------------------------------------------------
+eDMA3 Transfer Controller
+
+Required properties:
+- compatible: "ti,edma3-tptc" for the transfer controller(s)
+- reg: Memory map of eDMA TC
+- interrupts: Interrupt number for TCerrint.
+
+Optional properties:
+- ti,hwmods: Name of the hwmods associated with the given eDMA TC
+- interrupt-names: "edma3_tcerrint"
+
+------------------------------------------------------------------------------
+Example:
+
+edma: edma@49000000 {
+ compatible = "ti,edma3-tpcc";
+ ti,hwmods = "tpcc";
+ reg = <0x49000000 0x10000>;
+ reg-names = "edma3_cc";
+ interrupts = <12 13 14>;
+ interrupt-names = "edma3_ccint", "edma3_mperr", "edma3_ccerrint";
+ dma-requests = <64>;
+ #dma-cells = <2>;
+
+ ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>;
+
+ /* Channels 20 and 21 are allocated for memcpy */
+ ti,edma-memcpy-channels = /bits/ 16 <20 21>;
+ /* The following PaRAM slots are reserved: 35-45 and 100-110 */
+ ti,edma-reserved-slot-ranges = /bits/ 16 <35 10>,
+ /bits/ 16 <100 10>;
+};
+
+edma_tptc0: tptc@49800000 {
+ compatible = "ti,edma3-tptc";
+ ti,hwmods = "tptc0";
+ reg = <0x49800000 0x100000>;
+ interrupts = <112>;
+ interrupt-names = "edma3_tcerrint";
+};
+
+edma_tptc1: tptc@49900000 {
+ compatible = "ti,edma3-tptc";
+ ti,hwmods = "tptc1";
+ reg = <0x49900000 0x100000>;
+ interrupts = <113>;
+ interrupt-names = "edma3_tcerrint";
+};
+
+edma_tptc2: tptc@49a00000 {
+ compatible = "ti,edma3-tptc";
+ ti,hwmods = "tptc2";
+ reg = <0x49a00000 0x100000>;
+ interrupts = <114>;
+ interrupt-names = "edma3_tcerrint";
+};
+
+sham: sham@53100000 {
+ compatible = "ti,omap4-sham";
+ ti,hwmods = "sham";
+ reg = <0x53100000 0x200>;
+ interrupts = <109>;
+ /* DMA channel 36 executed on eDMA TC0 - low priority queue */
+ dmas = <&edma 36 0>;
+ dma-names = "rx";
+};
+
+mcasp0: mcasp@48038000 {
+ compatible = "ti,am33xx-mcasp-audio";
+ ti,hwmods = "mcasp0";
+ reg = <0x48038000 0x2000>,
+ <0x46000000 0x400000>;
+ reg-names = "mpu", "dat";
+ interrupts = <80>, <81>;
+ interrupt-names = "tx", "rx";
+ status = "disabled";
+ /* DMA channels 8 and 9 executed on eDMA TC2 - high priority queue */
+ dmas = <&edma 8 2>,
+ <&edma 9 2>;
+ dma-names = "tx", "rx";
+};
+
+------------------------------------------------------------------------------
+DEPRECATED binding; new DTS files must use the ti,edma3-tpcc/ti,edma3-tptc
+binding.
Required properties:
- compatible : "ti,edma3"
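Consumers of the new ti,edma3-tpcc binding likewise use the standard dmaengine
slave API; a minimal sketch of pushing one buffer through the mcasp0 "tx"
channel from the example above (the FIFO address, buffer and helper name are
placeholders):

    #include <linux/dmaengine.h>
    #include <linux/dma-mapping.h>

    /* Illustrative: submit one buffer to the McASP data port using the
     * "tx" channel (eDMA channel 8, serviced by TC2) declared above. */
    static int mcasp_tx_one(struct dma_chan *chan, dma_addr_t buf,
                            size_t len, dma_addr_t fifo)
    {
            struct dma_slave_config cfg = {
                    .direction      = DMA_MEM_TO_DEV,
                    .dst_addr       = fifo,
                    .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                    .dst_maxburst   = 1,
            };
            struct dma_async_tx_descriptor *desc;

            if (dmaengine_slave_config(chan, &cfg))
                    return -EINVAL;

            desc = dmaengine_prep_slave_single(chan, buf, len,
                                               DMA_MEM_TO_DEV,
                                               DMA_PREP_INTERRUPT);
            if (!desc)
                    return -EIO;

            dmaengine_submit(desc);
            dma_async_issue_pending(chan);
            return 0;
    }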
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index f1ed1109f488..9246bd7cc3cf 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -737,7 +737,6 @@ config ARCH_DAVINCI
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_CHIP
select HAVE_IDE
- select TI_PRIV_EDMA
select USE_OF
select ZONE_DMA
help
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index c3a4e9ceba34..9353184d730d 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -17,6 +17,3 @@ config SHARP_PARAM
config SHARP_SCOOP
bool
-
-config TI_PRIV_EDMA
- bool
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 6ee5959a813b..27f23b15b1ea 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -15,6 +15,5 @@ obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
CFLAGS_REMOVE_mcpm_entry.o = -pg
AFLAGS_mcpm_head.o := -march=armv7-a
AFLAGS_vlock.o := -march=armv7-a
-obj-$(CONFIG_TI_PRIV_EDMA) += edma.o
obj-$(CONFIG_BL_SWITCHER) += bL_switcher.o
obj-$(CONFIG_BL_SWITCHER_DUMMY_IF) += bL_switcher_dummy_if.o
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
deleted file mode 100644
index 873dbfcc7dc9..000000000000
--- a/arch/arm/common/edma.c
+++ /dev/null
@@ -1,1876 +0,0 @@
-/*
- * EDMA3 support for DaVinci
- *
- * Copyright (C) 2006-2009 Texas Instruments.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/edma.h>
-#include <linux/dma-mapping.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_dma.h>
-#include <linux/of_irq.h>
-#include <linux/pm_runtime.h>
-
-#include <linux/platform_data/edma.h>
-
-/* Offsets matching "struct edmacc_param" */
-#define PARM_OPT 0x00
-#define PARM_SRC 0x04
-#define PARM_A_B_CNT 0x08
-#define PARM_DST 0x0c
-#define PARM_SRC_DST_BIDX 0x10
-#define PARM_LINK_BCNTRLD 0x14
-#define PARM_SRC_DST_CIDX 0x18
-#define PARM_CCNT 0x1c
-
-#define PARM_SIZE 0x20
-
-/* Offsets for EDMA CC global channel registers and their shadows */
-#define SH_ER 0x00 /* 64 bits */
-#define SH_ECR 0x08 /* 64 bits */
-#define SH_ESR 0x10 /* 64 bits */
-#define SH_CER 0x18 /* 64 bits */
-#define SH_EER 0x20 /* 64 bits */
-#define SH_EECR 0x28 /* 64 bits */
-#define SH_EESR 0x30 /* 64 bits */
-#define SH_SER 0x38 /* 64 bits */
-#define SH_SECR 0x40 /* 64 bits */
-#define SH_IER 0x50 /* 64 bits */
-#define SH_IECR 0x58 /* 64 bits */
-#define SH_IESR 0x60 /* 64 bits */
-#define SH_IPR 0x68 /* 64 bits */
-#define SH_ICR 0x70 /* 64 bits */
-#define SH_IEVAL 0x78
-#define SH_QER 0x80
-#define SH_QEER 0x84
-#define SH_QEECR 0x88
-#define SH_QEESR 0x8c
-#define SH_QSER 0x90
-#define SH_QSECR 0x94
-#define SH_SIZE 0x200
-
-/* Offsets for EDMA CC global registers */
-#define EDMA_REV 0x0000
-#define EDMA_CCCFG 0x0004
-#define EDMA_QCHMAP 0x0200 /* 8 registers */
-#define EDMA_DMAQNUM 0x0240 /* 8 registers (4 on OMAP-L1xx) */
-#define EDMA_QDMAQNUM 0x0260
-#define EDMA_QUETCMAP 0x0280
-#define EDMA_QUEPRI 0x0284
-#define EDMA_EMR 0x0300 /* 64 bits */
-#define EDMA_EMCR 0x0308 /* 64 bits */
-#define EDMA_QEMR 0x0310
-#define EDMA_QEMCR 0x0314
-#define EDMA_CCERR 0x0318
-#define EDMA_CCERRCLR 0x031c
-#define EDMA_EEVAL 0x0320
-#define EDMA_DRAE 0x0340 /* 4 x 64 bits*/
-#define EDMA_QRAE 0x0380 /* 4 registers */
-#define EDMA_QUEEVTENTRY 0x0400 /* 2 x 16 registers */
-#define EDMA_QSTAT 0x0600 /* 2 registers */
-#define EDMA_QWMTHRA 0x0620
-#define EDMA_QWMTHRB 0x0624
-#define EDMA_CCSTAT 0x0640
-
-#define EDMA_M 0x1000 /* global channel registers */
-#define EDMA_ECR 0x1008
-#define EDMA_ECRH 0x100C
-#define EDMA_SHADOW0 0x2000 /* 4 regions shadowing global channels */
-#define EDMA_PARM 0x4000 /* 128 param entries */
-
-#define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5))
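-/*
- * PaRAM slot n starts at EDMA_PARM + n * PARM_SIZE: the shift by 5 in
- * PARM_OFFSET() is the multiply by PARM_SIZE (0x20 bytes per slot).
- */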
-
-#define EDMA_DCHMAP 0x0100 /* 64 registers */
-
-/* CCCFG register */
-#define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */
-#define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */
-#define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */
-#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */
-#define CHMAP_EXIST BIT(24)
-
-#define EDMA_MAX_DMACH 64
-#define EDMA_MAX_PARAMENTRY 512
-
-/*****************************************************************************/
-
-static void __iomem *edmacc_regs_base[EDMA_MAX_CC];
-
-static inline unsigned int edma_read(unsigned ctlr, int offset)
-{
- return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
-}
-
-static inline void edma_write(unsigned ctlr, int offset, int val)
-{
- __raw_writel(val, edmacc_regs_base[ctlr] + offset);
-}
-static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
- unsigned or)
-{
- unsigned val = edma_read(ctlr, offset);
- val &= and;
- val |= or;
- edma_write(ctlr, offset, val);
-}
-static inline void edma_and(unsigned ctlr, int offset, unsigned and)
-{
- unsigned val = edma_read(ctlr, offset);
- val &= and;
- edma_write(ctlr, offset, val);
-}
-static inline void edma_or(unsigned ctlr, int offset, unsigned or)
-{
- unsigned val = edma_read(ctlr, offset);
- val |= or;
- edma_write(ctlr, offset, val);
-}
-static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
-{
- return edma_read(ctlr, offset + (i << 2));
-}
-static inline void edma_write_array(unsigned ctlr, int offset, int i,
- unsigned val)
-{
- edma_write(ctlr, offset + (i << 2), val);
-}
-static inline void edma_modify_array(unsigned ctlr, int offset, int i,
- unsigned and, unsigned or)
-{
- edma_modify(ctlr, offset + (i << 2), and, or);
-}
-static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
-{
- edma_or(ctlr, offset + (i << 2), or);
-}
-static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
- unsigned or)
-{
- edma_or(ctlr, offset + ((i*2 + j) << 2), or);
-}
-static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
- unsigned val)
-{
- edma_write(ctlr, offset + ((i*2 + j) << 2), val);
-}
-static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
-{
- return edma_read(ctlr, EDMA_SHADOW0 + offset);
-}
-static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
- int i)
-{
- return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
-}
-static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
-{
- edma_write(ctlr, EDMA_SHADOW0 + offset, val);
-}
-static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
- unsigned val)
-{
- edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
-}
-static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
- int param_no)
-{
- return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
-}
-static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
- unsigned val)
-{
- edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
-}
-static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
- unsigned and, unsigned or)
-{
- edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
-}
-static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
- unsigned and)
-{
- edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
-}
-static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
- unsigned or)
-{
- edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
-}
-
-static inline void set_bits(int offset, int len, unsigned long *p)
-{
- for (; len > 0; len--)
- set_bit(offset + (len - 1), p);
-}
-
-static inline void clear_bits(int offset, int len, unsigned long *p)
-{
- for (; len > 0; len--)
- clear_bit(offset + (len - 1), p);
-}
-
-/*****************************************************************************/
-
-/* actual number of DMA channels and slots on this silicon */
-struct edma {
- /* how many dma resources of each type */
- unsigned num_channels;
- unsigned num_region;
- unsigned num_slots;
- unsigned num_tc;
- enum dma_event_q default_queue;
-
- /* list of channels with no event trigger; terminated by "-1" */
- const s8 *noevent;
-
- struct edma_soc_info *info;
-
- /* The edma_inuse bit for each PaRAM slot is clear unless the
- * channel is in use ... by ARM or DSP, for QDMA, or whatever.
- */
- DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);
-
- /* The edma_unused bit for each channel is set unless the
-  * channel is being used on this platform; this is established
-  * by a bit of SoC-specific initialization code.
- */
- DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);
-
- unsigned irq_res_start;
- unsigned irq_res_end;
-
- struct dma_interrupt_data {
- void (*callback)(unsigned channel, unsigned short ch_status,
- void *data);
- void *data;
- } intr_data[EDMA_MAX_DMACH];
-};
-
-static struct edma *edma_cc[EDMA_MAX_CC];
-static int arch_num_cc;
-
-/* dummy param set used to (re)initialize parameter RAM slots */
-static const struct edmacc_param dummy_paramset = {
- .link_bcntrld = 0xffff,
- .ccnt = 1,
-};
-
-static const struct of_device_id edma_of_ids[] = {
- { .compatible = "ti,edma3", },
- {}
-};
-
-/*****************************************************************************/
-
-static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
- enum dma_event_q queue_no)
-{
- int bit = (ch_no & 0x7) * 4;
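- /* Each DMAQNUM register packs eight 4-bit queue-number fields;
-  * 'bit' is the field offset for ch_no within DMAQNUM[ch_no >> 3],
-  * which is modified below.
-  */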
-
- /* default to low priority queue */
- if (queue_no == EVENTQ_DEFAULT)
- queue_no = edma_cc[ctlr]->default_queue;
-
- queue_no &= 7;
- edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
- ~(0x7 << bit), queue_no << bit);
-}
-
-static void assign_priority_to_queue(unsigned ctlr, int queue_no,
- int priority)
-{
- int bit = queue_no * 4;
- edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
- ((priority & 0x7) << bit));
-}
-
-/**
- * map_dmach_param - Maps channel number to param entry number
- *
- * This maps the DMA channel number to the param entry number. In
- * other words, using the DMA channel mapping registers, a param entry
- * can be mapped to any channel.
- *
- * Callers are responsible for ensuring the channel mapping logic is
- * included in that particular EDMA variant (e.g. dm646x).
- *
- */
-static void map_dmach_param(unsigned ctlr)
-{
- int i;
- for (i = 0; i < EDMA_MAX_DMACH; i++)
- edma_write_array(ctlr, EDMA_DCHMAP , i , (i << 5));
-}
-
-static inline void
-setup_dma_interrupt(unsigned lch,
- void (*callback)(unsigned channel, u16 ch_status, void *data),
- void *data)
-{
- unsigned ctlr;
-
- ctlr = EDMA_CTLR(lch);
- lch = EDMA_CHAN_SLOT(lch);
-
- if (!callback)
- edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
- BIT(lch & 0x1f));
-
- edma_cc[ctlr]->intr_data[lch].callback = callback;
- edma_cc[ctlr]->intr_data[lch].data = data;
-
- if (callback) {
- edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
- BIT(lch & 0x1f));
- edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
- BIT(lch & 0x1f));
- }
-}
-
-static int irq2ctlr(int irq)
-{
- if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
- return 0;
- else if (irq >= edma_cc[1]->irq_res_start &&
- irq <= edma_cc[1]->irq_res_end)
- return 1;
-
- return -1;
-}
-
-/******************************************************************************
- *
- * DMA interrupt handler
- *
- *****************************************************************************/
-static irqreturn_t dma_irq_handler(int irq, void *data)
-{
- int ctlr;
- u32 sh_ier;
- u32 sh_ipr;
- u32 bank;
-
- ctlr = irq2ctlr(irq);
- if (ctlr < 0)
- return IRQ_NONE;
-
- dev_dbg(data, "dma_irq_handler\n");
-
- sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 0);
- if (!sh_ipr) {
- sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 1);
- if (!sh_ipr)
- return IRQ_NONE;
- sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 1);
- bank = 1;
- } else {
- sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 0);
- bank = 0;
- }
-
- do {
- u32 slot;
- u32 channel;
-
- dev_dbg(data, "IPR%d %08x\n", bank, sh_ipr);
-
- slot = __ffs(sh_ipr);
- sh_ipr &= ~(BIT(slot));
-
- if (sh_ier & BIT(slot)) {
- channel = (bank << 5) | slot;
- /* Clear the corresponding IPR bits */
- edma_shadow0_write_array(ctlr, SH_ICR, bank,
- BIT(slot));
- if (edma_cc[ctlr]->intr_data[channel].callback)
- edma_cc[ctlr]->intr_data[channel].callback(
- channel, EDMA_DMA_COMPLETE,
- edma_cc[ctlr]->intr_data[channel].data);
- }
- } while (sh_ipr);
-
- edma_shadow0_write(ctlr, SH_IEVAL, 1);
- return IRQ_HANDLED;
-}
-
-/******************************************************************************
- *
- * DMA error interrupt handler
- *
- *****************************************************************************/
-static irqreturn_t dma_ccerr_handler(int irq, void *data)
-{
- int i;
- int ctlr;
- unsigned int cnt = 0;
-
- ctlr = irq2ctlr(irq);
- if (ctlr < 0)
- return IRQ_NONE;
-
- dev_dbg(data, "dma_ccerr_handler\n");
-
- if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
- (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
- (edma_read(ctlr, EDMA_QEMR) == 0) &&
- (edma_read(ctlr, EDMA_CCERR) == 0))
- return IRQ_NONE;
-
- while (1) {
- int j = -1;
- if (edma_read_array(ctlr, EDMA_EMR, 0))
- j = 0;
- else if (edma_read_array(ctlr, EDMA_EMR, 1))
- j = 1;
- if (j >= 0) {
- dev_dbg(data, "EMR%d %08x\n", j,
- edma_read_array(ctlr, EDMA_EMR, j));
- for (i = 0; i < 32; i++) {
- int k = (j << 5) + i;
- if (edma_read_array(ctlr, EDMA_EMR, j) &
- BIT(i)) {
- /* Clear the corresponding EMR bits */
- edma_write_array(ctlr, EDMA_EMCR, j,
- BIT(i));
- /* Clear any SER */
- edma_shadow0_write_array(ctlr, SH_SECR,
- j, BIT(i));
- if (edma_cc[ctlr]->intr_data[k].
- callback) {
- edma_cc[ctlr]->intr_data[k].
- callback(k,
- EDMA_DMA_CC_ERROR,
- edma_cc[ctlr]->intr_data
- [k].data);
- }
- }
- }
- } else if (edma_read(ctlr, EDMA_QEMR)) {
- dev_dbg(data, "QEMR %02x\n",
- edma_read(ctlr, EDMA_QEMR));
- for (i = 0; i < 8; i++) {
- if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
- /* Clear the corresponding IPR bits */
- edma_write(ctlr, EDMA_QEMCR, BIT(i));
- edma_shadow0_write(ctlr, SH_QSECR,
- BIT(i));
-
- /* NOTE: not reported!! */
- }
- }
- } else if (edma_read(ctlr, EDMA_CCERR)) {
- dev_dbg(data, "CCERR %08x\n",
- edma_read(ctlr, EDMA_CCERR));
- /* FIXME: CCERR.BIT(16) ignored! much better
- * to just write CCERRCLR with CCERR value...
- */
- for (i = 0; i < 8; i++) {
- if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) {
- /* Clear the corresponding IPR bits */
- edma_write(ctlr, EDMA_CCERRCLR, BIT(i));
-
- /* NOTE: not reported!! */
- }
- }
- }
- if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
- (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
- (edma_read(ctlr, EDMA_QEMR) == 0) &&
- (edma_read(ctlr, EDMA_CCERR) == 0))
- break;
- cnt++;
- if (cnt > 10)
- break;
- }
- edma_write(ctlr, EDMA_EEVAL, 1);
- return IRQ_HANDLED;
-}
-
-static int reserve_contiguous_slots(int ctlr, unsigned int id,
- unsigned int num_slots,
- unsigned int start_slot)
-{
- int i, j;
- unsigned int count = num_slots;
- int stop_slot = start_slot;
- DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);
-
- for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
- j = EDMA_CHAN_SLOT(i);
- if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
- /* Record our current beginning slot */
- if (count == num_slots)
- stop_slot = i;
-
- count--;
- set_bit(j, tmp_inuse);
-
- if (count == 0)
- break;
- } else {
- clear_bit(j, tmp_inuse);
-
- if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
- stop_slot = i;
- break;
- } else {
- count = num_slots;
- }
- }
- }
-
- /*
- * We have to clear any bits that we set
- * if we run out of parameter RAM slots, i.e. we do find a set
- * of contiguous parameter RAM slots but do not find the exact number
- * requested, as we may reach the total number of parameter RAM slots
- */
- if (i == edma_cc[ctlr]->num_slots)
- stop_slot = i;
-
- j = start_slot;
- for_each_set_bit_from(j, tmp_inuse, stop_slot)
- clear_bit(j, edma_cc[ctlr]->edma_inuse);
-
- if (count)
- return -EBUSY;
-
- for (j = i - num_slots + 1; j <= i; ++j)
- memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
- &dummy_paramset, PARM_SIZE);
-
- return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
-}
-
-static int prepare_unused_channel_list(struct device *dev, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- int i, count, ctlr;
- struct of_phandle_args dma_spec;
-
- if (dev->of_node) {
- count = of_property_count_strings(dev->of_node, "dma-names");
- if (count < 0)
- return 0;
- for (i = 0; i < count; i++) {
- if (of_parse_phandle_with_args(dev->of_node, "dmas",
- "#dma-cells", i,
- &dma_spec))
- continue;
-
- if (!of_match_node(edma_of_ids, dma_spec.np)) {
- of_node_put(dma_spec.np);
- continue;
- }
-
- clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
- edma_cc[0]->edma_unused);
- of_node_put(dma_spec.np);
- }
- return 0;
- }
-
- /* For non-OF case */
- for (i = 0; i < pdev->num_resources; i++) {
- if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
- (int)pdev->resource[i].start >= 0) {
- ctlr = EDMA_CTLR(pdev->resource[i].start);
- clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
- edma_cc[ctlr]->edma_unused);
- }
- }
-
- return 0;
-}
-
-/*-----------------------------------------------------------------------*/
-
-static bool unused_chan_list_done;
-
-/* Resource alloc/free: dma channels, parameter RAM slots */
-
-/**
- * edma_alloc_channel - allocate DMA channel and paired parameter RAM
- * @channel: specific channel to allocate; negative for "any unmapped channel"
- * @callback: optional; to be issued on DMA completion or errors
- * @data: passed to callback
- * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
- * Controller (TC) executes requests using this channel. Use
- * EVENTQ_DEFAULT unless you really need a high priority queue.
- *
- * This allocates a DMA channel and its associated parameter RAM slot.
- * The parameter RAM is initialized to hold a dummy transfer.
- *
- * Normal use is to pass a specific channel number as @channel, to make
- * use of hardware events mapped to that channel. When the channel will
- * be used only for software triggering or event chaining, channels not
- * mapped to hardware events (or mapped to unused events) are preferable.
- *
- * DMA transfers start from a channel using edma_start(), or by
- * chaining. When the transfer described in that channel's parameter RAM
- * slot completes, that slot's data may be reloaded through a link.
- *
- * DMA errors are only reported to the @callback associated with the
- * channel driving that transfer, but transfer completion callbacks can
- * be sent to another channel under control of the TCC field in
- * the option word of the transfer's parameter RAM set. Drivers must not
- * use DMA transfer completion callbacks for channels they did not allocate.
- * (The same applies to TCC codes used in transfer chaining.)
- *
- * Returns the number of the channel, else negative errno.
- */
-int edma_alloc_channel(int channel,
- void (*callback)(unsigned channel, u16 ch_status, void *data),
- void *data,
- enum dma_event_q eventq_no)
-{
- unsigned i, done = 0, ctlr = 0;
- int ret = 0;
-
- if (!unused_chan_list_done) {
- /*
- * Scan all the platform devices to find out the EDMA channels
- * used and clear them in the unused list, making the rest
- * available for ARM usage.
- */
- ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
- prepare_unused_channel_list);
- if (ret < 0)
- return ret;
-
- unused_chan_list_done = true;
- }
-
- if (channel >= 0) {
- ctlr = EDMA_CTLR(channel);
- channel = EDMA_CHAN_SLOT(channel);
- }
-
- if (channel < 0) {
- for (i = 0; i < arch_num_cc; i++) {
- channel = 0;
- for (;;) {
- channel = find_next_bit(edma_cc[i]->edma_unused,
- edma_cc[i]->num_channels,
- channel);
- if (channel == edma_cc[i]->num_channels)
- break;
- if (!test_and_set_bit(channel,
- edma_cc[i]->edma_inuse)) {
- done = 1;
- ctlr = i;
- break;
- }
- channel++;
- }
- if (done)
- break;
- }
- if (!done)
- return -ENOMEM;
- } else if (channel >= edma_cc[ctlr]->num_channels) {
- return -EINVAL;
- } else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
- return -EBUSY;
- }
-
- /* ensure access through shadow region 0 */
- edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
-
- /* ensure no events are pending */
- edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
- memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
- &dummy_paramset, PARM_SIZE);
-
- if (callback)
- setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
- callback, data);
-
- map_dmach_queue(ctlr, channel, eventq_no);
-
- return EDMA_CTLR_CHAN(ctlr, channel);
-}
-EXPORT_SYMBOL(edma_alloc_channel);
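-
-/*
- * Illustrative usage of the allocation flow described above; the callback
- * and the completion are placeholder names, and a negative channel number
- * requests "any unmapped channel":
- *
- *	static void my_dma_cb(unsigned ch, u16 status, void *data)
- *	{
- *		if (status == EDMA_DMA_COMPLETE)
- *			complete(data);
- *	}
- *
- *	chan = edma_alloc_channel(-1, my_dma_cb, &done, EVENTQ_DEFAULT);
- */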
-
-
-/**
- * edma_free_channel - deallocate DMA channel
- * @channel: dma channel returned from edma_alloc_channel()
- *
- * This deallocates the DMA channel and associated parameter RAM slot
- * allocated by edma_alloc_channel().
- *
- * Callers are responsible for ensuring the channel is inactive, and
- * will not be reactivated by linking, chaining, or software calls to
- * edma_start().
- */
-void edma_free_channel(unsigned channel)
-{
- unsigned ctlr;
-
- ctlr = EDMA_CTLR(channel);
- channel = EDMA_CHAN_SLOT(channel);
-
- if (channel >= edma_cc[ctlr]->num_channels)
- return;
-
- setup_dma_interrupt(channel, NULL, NULL);
- /* REVISIT should probably take out of shadow region 0 */
-
- memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
- &dummy_paramset, PARM_SIZE);
- clear_bit(channel, edma_cc[ctlr]->edma_inuse);
-}
-EXPORT_SYMBOL(edma_free_channel);
-
-/**
- * edma_alloc_slot - allocate DMA parameter RAM
- * @slot: specific slot to allocate; negative for "any unused slot"
- *
- * This allocates a parameter RAM slot, initializing it to hold a
- * dummy transfer. Slots allocated using this routine have not been
- * mapped to a hardware DMA channel, and will normally be used by
- * linking to them from a slot associated with a DMA channel.
- *
- * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
- * slots may be allocated on behalf of DSP firmware.
- *
- * Returns the number of the slot, else negative errno.
- */
-int edma_alloc_slot(unsigned ctlr, int slot)
-{
- if (!edma_cc[ctlr])
- return -EINVAL;
-
- if (slot >= 0)
- slot = EDMA_CHAN_SLOT(slot);
-
- if (slot < 0) {
- slot = edma_cc[ctlr]->num_channels;
- for (;;) {
- slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
- edma_cc[ctlr]->num_slots, slot);
- if (slot == edma_cc[ctlr]->num_slots)
- return -ENOMEM;
- if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
- break;
- }
- } else if (slot < edma_cc[ctlr]->num_channels ||
- slot >= edma_cc[ctlr]->num_slots) {
- return -EINVAL;
- } else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
- return -EBUSY;
- }
-
- memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
- &dummy_paramset, PARM_SIZE);
-
- return EDMA_CTLR_CHAN(ctlr, slot);
-}
-EXPORT_SYMBOL(edma_alloc_slot);
-
-/**
- * edma_free_slot - deallocate DMA parameter RAM
- * @slot: parameter RAM slot returned from edma_alloc_slot()
- *
- * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
- * Callers are responsible for ensuring the slot is inactive, and will
- * not be activated.
- */
-void edma_free_slot(unsigned slot)
-{
- unsigned ctlr;
-
- ctlr = EDMA_CTLR(slot);
- slot = EDMA_CHAN_SLOT(slot);
-
- if (slot < edma_cc[ctlr]->num_channels ||
- slot >= edma_cc[ctlr]->num_slots)
- return;
-
- memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
- &dummy_paramset, PARM_SIZE);
- clear_bit(slot, edma_cc[ctlr]->edma_inuse);
-}
-EXPORT_SYMBOL(edma_free_slot);
-
-
-/**
- * edma_alloc_cont_slots - allocate contiguous parameter RAM slots
- * The API will return the starting point of a set of
- * contiguous parameter RAM slots that have been requested
- *
- * @id: can only be EDMA_CONT_PARAMS_ANY or EDMA_CONT_PARAMS_FIXED_EXACT
- * or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
- * @count: number of contiguous Parameter RAM slots
- * @slot: the start value of the Parameter RAM slot that should be passed if id
- * is EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
- *
- * If id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of
- * contiguous Parameter RAM slots from parameter RAM 64 in the case of
- * DaVinci SOCs and 32 in the case of DA8xx SOCs.
- *
- * If id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a
- * set of contiguous parameter RAM slots from the "slot" that is passed as an
- * argument to the API.
- *
- * If id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially starts
- * looking for a set of contiguous parameter RAM slots from the "slot"
- * that is passed as an argument to the API. On failure the API will try to
- * find a set of contiguous Parameter RAM slots from the remaining Parameter
- * RAM slots
- */
-int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
-{
- /*
- * The start slot requested should be greater than
- * the number of channels and less than the total number
- * of slots
- */
- if ((id != EDMA_CONT_PARAMS_ANY) &&
- (slot < edma_cc[ctlr]->num_channels ||
- slot >= edma_cc[ctlr]->num_slots))
- return -EINVAL;
-
- /*
- * The number of parameter RAM slots requested cannot be less than 1
- * and cannot be more than the number of slots minus the number of
- * channels
- */
- if (count < 1 || count >
- (edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels))
- return -EINVAL;
-
- switch (id) {
- case EDMA_CONT_PARAMS_ANY:
- return reserve_contiguous_slots(ctlr, id, count,
- edma_cc[ctlr]->num_channels);
- case EDMA_CONT_PARAMS_FIXED_EXACT:
- case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
- return reserve_contiguous_slots(ctlr, id, count, slot);
- default:
- return -EINVAL;
- }
-
-}
-EXPORT_SYMBOL(edma_alloc_cont_slots);
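-
-/*
- * Illustrative call: reserve 8 contiguous slots on CC0, preferably starting
- * at slot 64, falling back to any later range on failure:
- *
- *	slot = edma_alloc_cont_slots(0, EDMA_CONT_PARAMS_FIXED_NOT_EXACT,
- *				     64, 8);
- */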
-
-/**
- * edma_free_cont_slots - deallocate DMA parameter RAM slots
- * @slot: first parameter RAM of a set of parameter RAM slots to be freed
- * @count: the number of contiguous parameter RAM slots to be freed
- *
- * This deallocates the parameter RAM slots allocated by
- * edma_alloc_cont_slots.
- * Callers/applications need to keep track of sets of contiguous
- * parameter RAM slots that have been allocated using the edma_alloc_cont_slots
- * API.
- * Callers are responsible for ensuring the slots are inactive, and will
- * not be activated.
- */
-int edma_free_cont_slots(unsigned slot, int count)
-{
- unsigned ctlr, slot_to_free;
- int i;
-
- ctlr = EDMA_CTLR(slot);
- slot = EDMA_CHAN_SLOT(slot);
-
- if (slot < edma_cc[ctlr]->num_channels ||
- slot >= edma_cc[ctlr]->num_slots ||
- count < 1)
- return -EINVAL;
-
- for (i = slot; i < slot + count; ++i) {
- ctlr = EDMA_CTLR(i);
- slot_to_free = EDMA_CHAN_SLOT(i);
-
- memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
- &dummy_paramset, PARM_SIZE);
- clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(edma_free_cont_slots);
-
-/*-----------------------------------------------------------------------*/
-
-/* Parameter RAM operations (i) -- read/write partial slots */
-
-/**
- * edma_set_src - set initial DMA source address in parameter RAM slot
- * @slot: parameter RAM slot being configured
- * @src_port: physical address of source (memory, controller FIFO, etc)
- * @mode: INCR, except in very rare cases
- * @width: ignored unless @mode is FIFO, else specifies the
- *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
- *
- * Note that the source address is modified during the DMA transfer
- * according to edma_set_src_index().
- */
-void edma_set_src(unsigned slot, dma_addr_t src_port,
- enum address_mode mode, enum fifo_width width)
-{
- unsigned ctlr;
-
- ctlr = EDMA_CTLR(slot);
- slot = EDMA_CHAN_SLOT(slot);
-
- if (slot < edma_cc[ctlr]->num_slots) {
- unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
-
- if (mode) {
- /* set SAM and program FWID */
- i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
- } else {
- /* clear SAM */
- i &= ~SAM;
- }
- edma_parm_write(ctlr, PARM_OPT, slot, i);
-
- /* set the source port address
- in source register of param structure */
- edma_parm_write(ctlr, PARM_SRC, slot, src_port);
- }
-}
-EXPORT_SYMBOL(edma_set_src);
-
-/**
- * edma_set_dest - set initial DMA destination address in parameter RAM slot
- * @slot: parameter RAM slot being configured
- * @dest_port: physical address of destination (memory, controller FIFO, etc)
- * @mode: INCR, except in very rare cases
- * @width: ignored unless @mode is FIFO, else specifies the
- *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
- *
- * Note that the destination address is modified during the DMA transfer
- * according to edma_set_dest_index().
- */
-void edma_set_dest(unsigned slot, dma_addr_t dest_port,
- enum address_mode mode, enum fifo_width width)
-{
- unsigned ctlr;
-
- ctlr = EDMA_CTLR(slot);
- slot = EDMA_CHAN_SLOT(slot);
-
- if (slot < edma_cc[ctlr]->num_slots) {
- unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
-
- if (mode) {
- /* set DAM and program FWID */
- i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
- } else {
- /* clear DAM */
- i &= ~DAM;
- }
- edma_parm_write(ctlr, PARM_OPT, slot, i);
- /* set the destination port address
- in dest register of param structure */
- edma_parm_write(ctlr, PARM_DST, slot, dest_port);
- }
-}
-EXPORT_SYMBOL(edma_set_dest);
-
-/**
- * edma_get_position - returns the current transfer point
- * @slot: parameter RAM slot being examined
- * @dst: true selects the dest position, false the source
- *
- * Returns the position of the current active slot
- */
-dma_addr_t edma_get_position(unsigned slot, bool dst)
-{
- u32 offs, ctlr = EDMA_CTLR(slot);
-
- slot = EDMA_CHAN_SLOT(slot);
-
- offs = PARM_OFFSET(slot);
- offs += dst ? PARM_DST : PARM_SRC;
-
- return edma_read(ctlr, offs);
-}
-
-/**
- * edma_set_src_index - configure DMA source address indexing
- * @slot: parameter RAM slot being configured
- * @src_bidx: byte offset between source arrays in a frame
- * @src_cidx: byte offset between source frames in a block
- *
- * Offsets are specified to support either contiguous or discontiguous
- * memory transfers, or repeated access to a hardware register, as needed.
- * When accessing hardware registers, both offsets are normally zero.
- */
-void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
-{
- unsigned ctlr;
-
- ctlr = EDMA_CTLR(slot);
- slot = EDMA_CHAN_SLOT(slot);
-
- if (slot < edma_cc[ctlr]->num_slots) {
- edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
- 0xffff0000, src_bidx);
- edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
- 0xffff0000, src_cidx);
- }
-}
-EXPORT_SYMBOL(edma_set_src_index);
-
-/**
- * edma_set_dest_index - configure DMA destination address indexing
- * @slot: parameter RAM slot being configured
- * @dest_bidx: byte offset between destination arrays in a frame
- * @dest_cidx: byte offset between destination frames in a block
- *
- * Offsets are specified to support either contiguous or discontiguous
- * memory transfers, or repeated access to a hardware register, as needed.
- * When accessing hardware registers, both offsets are normally zero.
- */
-void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
-{
- unsigned ctlr;
-
- ctlr = EDMA_CTLR(slot);
- slot = EDMA_CHAN_SLOT(slot);
-
- if (slot < edma_cc[ctlr]->num_slots) {
- edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
- 0x0000ffff, dest_bidx << 16);
- edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
- 0x0000ffff, dest_cidx << 16);
- }
-}
-EXPORT_SYMBOL(edma_set_dest_index);
-
-/**
- * edma_set_transfer_params - configure DMA transfer parameters
- * @slot: parameter RAM slot being configured
- * @acnt: how many bytes per array (at least one)
- * @bcnt: how many arrays per frame (at least one)
- * @ccnt: how many frames per block (at least one)
- * @bcnt_rld: used only for A-Synchronized transfers; this specifies
- * the value to reload into bcnt when it decrements to zero
- * @sync_mode: ASYNC or ABSYNC
- *
- * See the EDMA3 documentation to understand how to configure and link
- * transfers using the fields in PaRAM slots. If you are not doing it
- * all at once with edma_write_slot(), you will use this routine
- * plus two calls each for source and destination, setting the initial
- * address and saying how to index that address.
- *
- * An example of an A-Synchronized transfer is a serial link using a
- * single word shift register. In that case, @acnt would be equal to
- * that word size; the serial controller issues a DMA synchronization
- * event to transfer each word, and memory access by the DMA transfer
- * controller will be word-at-a-time.
- *
- * An example of an AB-Synchronized transfer is a device using a FIFO.
- * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
- * The controller with the FIFO issues DMA synchronization events when
- * the FIFO threshold is reached, and the DMA transfer controller will
- * transfer one frame to (or from) the FIFO. It will probably use
- * efficient burst modes to access memory.
- */
-void edma_set_transfer_params(unsigned slot,
- u16 acnt, u16 bcnt, u16 ccnt,
- u16 bcnt_rld, enum sync_dimension sync_mode)
-{
- unsigned ctlr;
-
- ctlr = EDMA_CTLR(slot);
- slot = EDMA_CHAN_SLOT(slot);
-
- if (slot < edma_cc[ctlr]->num_slots) {
- edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
- 0x0000ffff, bcnt_rld << 16);
- if (sync_mode == ASYNC)
- edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
- else
- edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
- /* Set the acount, bcount, ccount registers */
- edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
- edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
- }
-}
-EXPORT_SYMBOL(edma_set_transfer_params);
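-
-/*
- * Worked example for the AB-synchronized FIFO case described above: a
- * 4-byte wide, 16-word deep FIFO moving a 4096 byte block uses acnt = 4,
- * bcnt = 16 and ccnt = 4096 / (4 * 16) = 64 frames (bcnt_rld is only
- * consulted for ASYNC transfers):
- *
- *	edma_set_transfer_params(slot, 4, 16, 64, 0, ABSYNC);
- */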
-
-/**
- * edma_link - link one parameter RAM slot to another
- * @from: parameter RAM slot originating the link
- * @to: parameter RAM slot which is the link target
- *
- * The originating slot should not be part of any active DMA transfer.
- */
-void edma_link(unsigned from, unsigned to)
-{
- unsigned ctlr_from, ctlr_to;
-
- ctlr_from = EDMA_CTLR(from);
- from = EDMA_CHAN_SLOT(from);
- ctlr_to = EDMA_CTLR(to);
- to = EDMA_CHAN_SLOT(to);
-
- if (from >= edma_cc[ctlr_from]->num_slots)
- return;
- if (to >= edma_cc[ctlr_to]->num_slots)
- return;
- edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
- PARM_OFFSET(to));
-}
-EXPORT_SYMBOL(edma_link);
-
-/**
- * edma_unlink - cut link from one parameter RAM slot
- * @from: parameter RAM slot originating the link
- *
- * The originating slot should not be part of any active DMA transfer.
- * Its link is set to 0xffff.
- */
-void edma_unlink(unsigned from)
-{
- unsigned ctlr;
-
- ctlr = EDMA_CTLR(from);
- from = EDMA_CHAN_SLOT(from);
-
- if (from >= edma_cc[ctlr]->num_slots)
- return;
- edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
-}
-EXPORT_SYMBOL(edma_unlink);
-
-/*-----------------------------------------------------------------------*/
-
-/* Parameter RAM operations (ii) -- read/write whole parameter sets */
-
-/**
- * edma_write_slot - write parameter RAM data for slot
- * @slot: number of parameter RAM slot being modified
- * @param: data to be written into parameter RAM slot
- *
- * Use this to assign all parameters of a transfer at once. This
- * allows more efficient setup of transfers than issuing multiple
- * calls to set up those parameters in small pieces, and provides
- * complete control over all transfer options.
- */
-void edma_write_slot(unsigned slot, const struct edmacc_param *param)
-{
- unsigned ctlr;
-
- ctlr = EDMA_CTLR(slot);
- slot = EDMA_CHAN_SLOT(slot);
-
- if (slot >= edma_cc[ctlr]->num_slots)
- return;
- memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
- PARM_SIZE);
-}
-EXPORT_SYMBOL(edma_write_slot);
-
-/**
- * edma_read_slot - read parameter RAM data from slot
- * @slot: number of parameter RAM slot being copied
- * @param: where to store copy of parameter RAM data
- *
- * Use this to read data from a parameter RAM slot, perhaps to
- * save them as a template for later reuse.
- */
-void edma_read_slot(unsigned slot, struct edmacc_param *param)
-{
- unsigned ctlr;
-
- ctlr = EDMA_CTLR(slot);
- slot = EDMA_CHAN_SLOT(slot);
-
- if (slot >= edma_cc[ctlr]->num_slots)
- return;
- memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
- PARM_SIZE);
-}
-EXPORT_SYMBOL(edma_read_slot);
-
-/*-----------------------------------------------------------------------*/
-
-/* Various EDMA channel control operations */
-
-/**
- * edma_pause - pause dma on a channel
- * @channel: on which edma_start() has been called
- *
- * This temporarily disables EDMA hardware events on the specified channel,
- * preventing them from triggering new transfers on its behalf
- */
-void edma_pause(unsigned channel)
-{
- unsigned ctlr;
-
- ctlr = EDMA_CTLR(channel);
- channel = EDMA_CHAN_SLOT(channel);
-
- if (channel < edma_cc[ctlr]->num_channels) {
- unsigned int mask = BIT(channel & 0x1f);
-
- edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
- }
-}
-EXPORT_SYMBOL(edma_pause);
-
-/**
- * edma_resume - resumes dma on a paused channel
- * @channel: on which edma_pause() has been called
- *
- * This re-enables EDMA hardware events on the specified channel.
- */
-void edma_resume(unsigned channel)
-{
- unsigned ctlr;
-
- ctlr = EDMA_CTLR(channel);
- channel = EDMA_CHAN_SLOT(channel);
-
- if (channel < edma_cc[ctlr]->num_channels) {
- unsigned int mask = BIT(channel & 0x1f);
-
- edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
- }
-}
-EXPORT_SYMBOL(edma_resume);
-
-int edma_trigger_channel(unsigned channel)
-{
- unsigned ctlr;
- unsigned int mask;
-
- ctlr = EDMA_CTLR(channel);
- channel = EDMA_CHAN_SLOT(channel);
- mask = BIT(channel & 0x1f);
-
- edma_shadow0_write_array(ctlr, SH_ESR, (channel >> 5), mask);
-
- pr_debug("EDMA: ESR%d %08x\n", (channel >> 5),
- edma_shadow0_read_array(ctlr, SH_ESR, (channel >> 5)));
- return 0;
-}
-EXPORT_SYMBOL(edma_trigger_channel);
-
-/**
- * edma_start - start dma on a channel
- * @channel: channel being activated
- *
- * Channels with event associations will be triggered by their hardware
- * events, and channels without such associations will be triggered by
- * software. (At this writing there is no interface for using software
- * triggers except with channels that don't support hardware triggers.)
- *
- * Returns zero on success, else negative errno.
- */
-int edma_start(unsigned channel)
-{
- unsigned ctlr;
-
- ctlr = EDMA_CTLR(channel);
- channel = EDMA_CHAN_SLOT(channel);
-
- if (channel < edma_cc[ctlr]->num_channels) {
- int j = channel >> 5;
- unsigned int mask = BIT(channel & 0x1f);
-
- /* EDMA channels without event association */
- if (test_bit(channel, edma_cc[ctlr]->edma_unused)) {
- pr_debug("EDMA: ESR%d %08x\n", j,
- edma_shadow0_read_array(ctlr, SH_ESR, j));
- edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
- return 0;
- }
-
- /* EDMA channel with event association */
- pr_debug("EDMA: ER%d %08x\n", j,
- edma_shadow0_read_array(ctlr, SH_ER, j));
- /* Clear any pending event or error */
- edma_write_array(ctlr, EDMA_ECR, j, mask);
- edma_write_array(ctlr, EDMA_EMCR, j, mask);
- /* Clear any SER */
- edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
- edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
- pr_debug("EDMA: EER%d %08x\n", j,
- edma_shadow0_read_array(ctlr, SH_EER, j));
- return 0;
- }
-
- return -EINVAL;
-}
-EXPORT_SYMBOL(edma_start);
-
-/**
- * edma_stop - stops dma on the channel passed
- * @channel: channel being deactivated
- *
- * When @channel is a channel, any active transfer is paused and
- * all pending hardware events are cleared. The current transfer
- * may not be resumed, and the channel's Parameter RAM should be
- * reinitialized before being reused.
- */
-void edma_stop(unsigned channel)
-{
- unsigned ctlr;
-
- ctlr = EDMA_CTLR(channel);
- channel = EDMA_CHAN_SLOT(channel);
-
- if (channel < edma_cc[ctlr]->num_channels) {
- int j = channel >> 5;
- unsigned int mask = BIT(channel & 0x1f);
-
- edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
- edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
- edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
- edma_write_array(ctlr, EDMA_EMCR, j, mask);
-
- /* clear possibly pending completion interrupt */
- edma_shadow0_write_array(ctlr, SH_ICR, j, mask);
-
- pr_debug("EDMA: EER%d %08x\n", j,
- edma_shadow0_read_array(ctlr, SH_EER, j));
-
- /* REVISIT: consider guarding against inappropriate event
- * chaining by overwriting with dummy_paramset.
- */
- }
-}
-EXPORT_SYMBOL(edma_stop);
-
-/******************************************************************************
- *
- * It cleans the PaRAM entry and brings EDMA back to its initial state if the
- * media has been removed before EDMA has finished. It is useful for
- * removable media.
- * Arguments:
- *	channel - channel number
- *
- * Return: none
- *
- * FIXME this should not be needed ... edma_stop() should suffice.
- *
- *****************************************************************************/
-
-void edma_clean_channel(unsigned channel)
-{
- unsigned ctlr;
-
- ctlr = EDMA_CTLR(channel);
- channel = EDMA_CHAN_SLOT(channel);
-
- if (channel < edma_cc[ctlr]->num_channels) {
- int j = (channel >> 5);
- unsigned int mask = BIT(channel & 0x1f);
-
- pr_debug("EDMA: EMR%d %08x\n", j,
- edma_read_array(ctlr, EDMA_EMR, j));
- edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
- /* Clear the corresponding EMR bits */
- edma_write_array(ctlr, EDMA_EMCR, j, mask);
- /* Clear any SER */
- edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
- edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
- }
-}
-EXPORT_SYMBOL(edma_clean_channel);
-
-/*
- * edma_clear_event - clear an outstanding event on the DMA channel
- * Arguments:
- * channel - channel number
- */
-void edma_clear_event(unsigned channel)
-{
- unsigned ctlr;
-
- ctlr = EDMA_CTLR(channel);
- channel = EDMA_CHAN_SLOT(channel);
-
- if (channel >= edma_cc[ctlr]->num_channels)
- return;
- if (channel < 32)
- edma_write(ctlr, EDMA_ECR, BIT(channel));
- else
- edma_write(ctlr, EDMA_ECRH, BIT(channel - 32));
-}
-EXPORT_SYMBOL(edma_clear_event);
-
-/*
- * edma_assign_channel_eventq - move given channel to desired eventq
- * Arguments:
- * channel - channel number
- * eventq_no - queue to move the channel
- *
- * Can be used to move a channel to a selected event queue.
- */
-void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no)
-{
- unsigned ctlr;
-
- ctlr = EDMA_CTLR(channel);
- channel = EDMA_CHAN_SLOT(channel);
-
- if (channel >= edma_cc[ctlr]->num_channels)
- return;
-
- /* default to low priority queue */
- if (eventq_no == EVENTQ_DEFAULT)
- eventq_no = edma_cc[ctlr]->default_queue;
- if (eventq_no >= edma_cc[ctlr]->num_tc)
- return;
-
- map_dmach_queue(ctlr, channel, eventq_no);
-}
-EXPORT_SYMBOL(edma_assign_channel_eventq);
-
-static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
- struct edma *edma_cc, int cc_id)
-{
- int i;
- u32 value, cccfg;
- s8 (*queue_priority_map)[2];
-
- /* Decode the eDMA3 configuration from CCCFG register */
- cccfg = edma_read(cc_id, EDMA_CCCFG);
-
- value = GET_NUM_REGN(cccfg);
- edma_cc->num_region = BIT(value);
-
- value = GET_NUM_DMACH(cccfg);
- edma_cc->num_channels = BIT(value + 1);
-
- value = GET_NUM_PAENTRY(cccfg);
- edma_cc->num_slots = BIT(value + 4);
-
- value = GET_NUM_EVQUE(cccfg);
- edma_cc->num_tc = value + 1;
-
- dev_dbg(dev, "eDMA3 CC%d HW configuration (cccfg: 0x%08x):\n", cc_id,
- cccfg);
- dev_dbg(dev, "num_region: %u\n", edma_cc->num_region);
- dev_dbg(dev, "num_channel: %u\n", edma_cc->num_channels);
- dev_dbg(dev, "num_slot: %u\n", edma_cc->num_slots);
- dev_dbg(dev, "num_tc: %u\n", edma_cc->num_tc);
-
- /* Nothing needs to be done if queue priority is provided */
- if (pdata->queue_priority_mapping)
- return 0;
-
- /*
- * Configure TC/queue priority as follows:
- * Q0 - priority 0
- * Q1 - priority 1
- * Q2 - priority 2
- * ...
- * The meaning of priority numbers: 0 highest priority, 7 lowest
- * priority. So Q0 is the highest priority queue and the last queue has
- * the lowest priority.
- */
- queue_priority_map = devm_kzalloc(dev,
- (edma_cc->num_tc + 1) * sizeof(s8),
- GFP_KERNEL);
- if (!queue_priority_map)
- return -ENOMEM;
-
- for (i = 0; i < edma_cc->num_tc; i++) {
- queue_priority_map[i][0] = i;
- queue_priority_map[i][1] = i;
- }
- queue_priority_map[i][0] = -1;
- queue_priority_map[i][1] = -1;
-
- pdata->queue_priority_mapping = queue_priority_map;
- /* Default queue has the lowest priority */
- pdata->default_queue = i - 1;
-
- return 0;
-}
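-
-/*
- * Example decode of an illustrative CCCFG value, 0x235005:
- * NUM_DMACH = 5   -> 2^(5+1) = 64 channels,
- * NUM_PAENTRY = 5 -> 2^(5+4) = 512 PaRAM slots,
- * NUM_EVQUE = 3   -> 3+1 = 4 event queues/TCs,
- * NUM_REGN = 2    -> 2^2 = 4 shadow regions.
- */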
-
-#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES)
-
-static int edma_xbar_event_map(struct device *dev, struct device_node *node,
- struct edma_soc_info *pdata, size_t sz)
-{
- const char pname[] = "ti,edma-xbar-event-map";
- struct resource res;
- void __iomem *xbar;
- s16 (*xbar_chans)[2];
- size_t nelm = sz / sizeof(s16);
- u32 shift, offset, mux;
- int ret, i;
-
- xbar_chans = devm_kzalloc(dev, (nelm + 2) * sizeof(s16), GFP_KERNEL);
- if (!xbar_chans)
- return -ENOMEM;
-
- ret = of_address_to_resource(node, 1, &res);
- if (ret)
- return -ENOMEM;
-
- xbar = devm_ioremap(dev, res.start, resource_size(&res));
- if (!xbar)
- return -ENOMEM;
-
- ret = of_property_read_u16_array(node, pname, (u16 *)xbar_chans, nelm);
- if (ret)
- return -EIO;
-
- /* Invalidate last entry for the other user of this mess */
- nelm >>= 1;
- xbar_chans[nelm][0] = xbar_chans[nelm][1] = -1;
-
- for (i = 0; i < nelm; i++) {
- shift = (xbar_chans[i][1] & 0x03) << 3;
- offset = xbar_chans[i][1] & 0xfffffffc;
- mux = readl(xbar + offset);
- mux &= ~(0xff << shift);
- mux |= xbar_chans[i][0] << shift;
- writel(mux, (xbar + offset));
- }
-
- pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
- return 0;
-}
-
-static int edma_of_parse_dt(struct device *dev,
- struct device_node *node,
- struct edma_soc_info *pdata)
-{
- int ret = 0;
- struct property *prop;
- size_t sz;
- struct edma_rsv_info *rsv_info;
-
- rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL);
- if (!rsv_info)
- return -ENOMEM;
- pdata->rsv = rsv_info;
-
- prop = of_find_property(node, "ti,edma-xbar-event-map", &sz);
- if (prop)
- ret = edma_xbar_event_map(dev, node, pdata, sz);
-
- return ret;
-}
-
-static struct of_dma_filter_info edma_filter_info = {
- .filter_fn = edma_filter_fn,
-};
-
-static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
- struct device_node *node)
-{
- struct edma_soc_info *info;
- int ret;
-
- info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
- if (!info)
- return ERR_PTR(-ENOMEM);
-
- ret = edma_of_parse_dt(dev, node, info);
- if (ret)
- return ERR_PTR(ret);
-
- dma_cap_set(DMA_SLAVE, edma_filter_info.dma_cap);
- dma_cap_set(DMA_CYCLIC, edma_filter_info.dma_cap);
- of_dma_controller_register(dev->of_node, of_dma_simple_xlate,
- &edma_filter_info);
-
- return info;
-}
-#else
-static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
- struct device_node *node)
-{
- return ERR_PTR(-ENOSYS);
-}
-#endif
-
-static int edma_probe(struct platform_device *pdev)
-{
- struct edma_soc_info **info = pdev->dev.platform_data;
- struct edma_soc_info *ninfo[EDMA_MAX_CC] = {NULL};
- s8 (*queue_priority_mapping)[2];
- int i, j, off, ln, found = 0;
- int status = -1;
- const s16 (*rsv_chans)[2];
- const s16 (*rsv_slots)[2];
- const s16 (*xbar_chans)[2];
- int irq[EDMA_MAX_CC] = {0, 0};
- int err_irq[EDMA_MAX_CC] = {0, 0};
- struct resource *r[EDMA_MAX_CC] = {NULL};
- struct resource res[EDMA_MAX_CC];
- char res_name[10];
- struct device_node *node = pdev->dev.of_node;
- struct device *dev = &pdev->dev;
- int ret;
- struct platform_device_info edma_dev_info = {
- .name = "edma-dma-engine",
- .dma_mask = DMA_BIT_MASK(32),
- .parent = &pdev->dev,
- };
-
- if (node) {
- /* Check if this is a second instance registered */
- if (arch_num_cc) {
- dev_err(dev, "only one EDMA instance is supported via DT\n");
- return -ENODEV;
- }
-
- ninfo[0] = edma_setup_info_from_dt(dev, node);
- if (IS_ERR(ninfo[0])) {
- dev_err(dev, "failed to get DT data\n");
- return PTR_ERR(ninfo[0]);
- }
-
- info = ninfo;
- }
-
- if (!info)
- return -ENODEV;
-
- pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync() failed\n");
- return ret;
- }
-
- for (j = 0; j < EDMA_MAX_CC; j++) {
- if (!info[j]) {
- if (!found)
- return -ENODEV;
- break;
- }
- if (node) {
- ret = of_address_to_resource(node, j, &res[j]);
- if (!ret)
- r[j] = &res[j];
- } else {
- sprintf(res_name, "edma_cc%d", j);
- r[j] = platform_get_resource_byname(pdev,
- IORESOURCE_MEM,
- res_name);
- }
- if (!r[j]) {
- if (found)
- break;
- else
- return -ENODEV;
- } else {
- found = 1;
- }
-
- edmacc_regs_base[j] = devm_ioremap_resource(&pdev->dev, r[j]);
- if (IS_ERR(edmacc_regs_base[j]))
- return PTR_ERR(edmacc_regs_base[j]);
-
- edma_cc[j] = devm_kzalloc(&pdev->dev, sizeof(struct edma),
- GFP_KERNEL);
- if (!edma_cc[j])
- return -ENOMEM;
-
- /* Get eDMA3 configuration from IP */
- ret = edma_setup_from_hw(dev, info[j], edma_cc[j], j);
- if (ret)
- return ret;
-
- edma_cc[j]->default_queue = info[j]->default_queue;
-
- dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
- edmacc_regs_base[j]);
-
- for (i = 0; i < edma_cc[j]->num_slots; i++)
- memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
- &dummy_paramset, PARM_SIZE);
-
- /* Mark all channels as unused */
- memset(edma_cc[j]->edma_unused, 0xff,
- sizeof(edma_cc[j]->edma_unused));
-
- if (info[j]->rsv) {
-
- /* Clear the reserved channels in unused list */
- rsv_chans = info[j]->rsv->rsv_chans;
- if (rsv_chans) {
- for (i = 0; rsv_chans[i][0] != -1; i++) {
- off = rsv_chans[i][0];
- ln = rsv_chans[i][1];
- clear_bits(off, ln,
- edma_cc[j]->edma_unused);
- }
- }
-
- /* Set the reserved slots in inuse list */
- rsv_slots = info[j]->rsv->rsv_slots;
- if (rsv_slots) {
- for (i = 0; rsv_slots[i][0] != -1; i++) {
- off = rsv_slots[i][0];
- ln = rsv_slots[i][1];
- set_bits(off, ln,
- edma_cc[j]->edma_inuse);
- }
- }
- }
-
- /* Clear the xbar mapped channels in unused list */
- xbar_chans = info[j]->xbar_chans;
- if (xbar_chans) {
- for (i = 0; xbar_chans[i][1] != -1; i++) {
- off = xbar_chans[i][1];
- clear_bits(off, 1,
- edma_cc[j]->edma_unused);
- }
- }
-
- if (node) {
- irq[j] = irq_of_parse_and_map(node, 0);
- err_irq[j] = irq_of_parse_and_map(node, 2);
- } else {
- char irq_name[10];
-
- sprintf(irq_name, "edma%d", j);
- irq[j] = platform_get_irq_byname(pdev, irq_name);
-
- sprintf(irq_name, "edma%d_err", j);
- err_irq[j] = platform_get_irq_byname(pdev, irq_name);
- }
- edma_cc[j]->irq_res_start = irq[j];
- edma_cc[j]->irq_res_end = err_irq[j];
-
- status = devm_request_irq(dev, irq[j], dma_irq_handler, 0,
- "edma", dev);
- if (status < 0) {
- dev_dbg(&pdev->dev,
- "devm_request_irq %d failed --> %d\n",
- irq[j], status);
- return status;
- }
-
- status = devm_request_irq(dev, err_irq[j], dma_ccerr_handler, 0,
- "edma_error", dev);
- if (status < 0) {
- dev_dbg(&pdev->dev,
- "devm_request_irq %d failed --> %d\n",
- err_irq[j], status);
- return status;
- }
-
- for (i = 0; i < edma_cc[j]->num_channels; i++)
- map_dmach_queue(j, i, info[j]->default_queue);
-
- queue_priority_mapping = info[j]->queue_priority_mapping;
-
- /* Event queue priority mapping */
- for (i = 0; queue_priority_mapping[i][0] != -1; i++)
- assign_priority_to_queue(j,
- queue_priority_mapping[i][0],
- queue_priority_mapping[i][1]);
-
- /* Map the channel to param entry if channel mapping logic
- * exist
- */
- if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
- map_dmach_param(j);
-
- for (i = 0; i < edma_cc[j]->num_region; i++) {
- edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
- edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
- edma_write_array(j, EDMA_QRAE, i, 0x0);
- }
- edma_cc[j]->info = info[j];
- arch_num_cc++;
-
- edma_dev_info.id = j;
- platform_device_register_full(&edma_dev_info);
- }
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int edma_pm_resume(struct device *dev)
-{
- int i, j;
-
- for (j = 0; j < arch_num_cc; j++) {
- struct edma *cc = edma_cc[j];
-
- s8 (*queue_priority_mapping)[2];
-
- queue_priority_mapping = cc->info->queue_priority_mapping;
-
- /* Event queue priority mapping */
- for (i = 0; queue_priority_mapping[i][0] != -1; i++)
- assign_priority_to_queue(j,
- queue_priority_mapping[i][0],
- queue_priority_mapping[i][1]);
-
- /*
- * Map the channel to param entry if channel mapping logic
- * exist
- */
- if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
- map_dmach_param(j);
-
- for (i = 0; i < cc->num_channels; i++) {
- if (test_bit(i, cc->edma_inuse)) {
- /* ensure access through shadow region 0 */
- edma_or_array2(j, EDMA_DRAE, 0, i >> 5,
- BIT(i & 0x1f));
-
- setup_dma_interrupt(i,
- cc->intr_data[i].callback,
- cc->intr_data[i].data);
- }
- }
- }
-
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops edma_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, edma_pm_resume)
-};
-
-static struct platform_driver edma_driver = {
- .driver = {
- .name = "edma",
- .pm = &edma_pm_ops,
- .of_match_table = edma_of_ids,
- },
- .probe = edma_probe,
-};
-
-static int __init edma_init(void)
-{
- return platform_driver_probe(&edma_driver, edma_probe);
-}
-arch_initcall(edma_init);
-
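
The edma_setup_from_hw() helper deleted above (and re-introduced in the
rewritten drivers/dma/edma.c later in this patch) derives the controller
geometry from the CCCFG register. A standalone sketch of that decoding,
assuming only the field positions documented by the GET_NUM_* macros added
further down; decode_cccfg() is a hypothetical name:

#include <stdio.h>

/* Sketch: decode an eDMA3 CCCFG value the way edma_setup_from_hw() does.
 * Field positions: NUM_DMACH bits 0-2, NUM_PAENTRY bits 12-14,
 * NUM_EVQUE bits 16-18, NUM_REGN bits 20-21.
 */
static void decode_cccfg(unsigned int cccfg)
{
	unsigned int num_channels = 1u << ((cccfg & 0x7) + 1);
	unsigned int num_slots    = 1u << (((cccfg >> 12) & 0x7) + 4);
	unsigned int num_tc       = ((cccfg >> 16) & 0x7) + 1;
	unsigned int num_region   = 1u << ((cccfg >> 20) & 0x3);

	printf("channels=%u slots=%u tcs=%u regions=%u\n",
	       num_channels, num_slots, num_tc, num_region);
}

A NUM_DMACH field of 5, for instance, decodes to 2^(5+1) = 64 DMA channels;
the register stores counts as log2 values, which is why the driver applies
BIT() to the extracted fields.
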
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 29e08aac8294..28c90bc372bd 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -147,150 +147,118 @@ static s8 da850_queue_priority_mapping[][2] = {
{-1, -1}
};
-static struct edma_soc_info da830_edma_cc0_info = {
+static struct edma_soc_info da8xx_edma0_pdata = {
.queue_priority_mapping = da8xx_queue_priority_mapping,
.default_queue = EVENTQ_1,
};
-static struct edma_soc_info *da830_edma_info[EDMA_MAX_CC] = {
- &da830_edma_cc0_info,
+static struct edma_soc_info da850_edma1_pdata = {
+ .queue_priority_mapping = da850_queue_priority_mapping,
+ .default_queue = EVENTQ_0,
};
-static struct edma_soc_info da850_edma_cc_info[] = {
+static struct resource da8xx_edma0_resources[] = {
{
- .queue_priority_mapping = da8xx_queue_priority_mapping,
- .default_queue = EVENTQ_1,
- },
- {
- .queue_priority_mapping = da850_queue_priority_mapping,
- .default_queue = EVENTQ_0,
- },
-};
-
-static struct edma_soc_info *da850_edma_info[EDMA_MAX_CC] = {
- &da850_edma_cc_info[0],
- &da850_edma_cc_info[1],
-};
-
-static struct resource da830_edma_resources[] = {
- {
- .name = "edma_cc0",
+ .name = "edma3_cc",
.start = DA8XX_TPCC_BASE,
.end = DA8XX_TPCC_BASE + SZ_32K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma_tc0",
+ .name = "edma3_tc0",
.start = DA8XX_TPTC0_BASE,
.end = DA8XX_TPTC0_BASE + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma_tc1",
+ .name = "edma3_tc1",
.start = DA8XX_TPTC1_BASE,
.end = DA8XX_TPTC1_BASE + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma0",
+ .name = "edma3_ccint",
.start = IRQ_DA8XX_CCINT0,
.flags = IORESOURCE_IRQ,
},
{
- .name = "edma0_err",
+ .name = "edma3_ccerrint",
.start = IRQ_DA8XX_CCERRINT,
.flags = IORESOURCE_IRQ,
},
};
-static struct resource da850_edma_resources[] = {
- {
- .name = "edma_cc0",
- .start = DA8XX_TPCC_BASE,
- .end = DA8XX_TPCC_BASE + SZ_32K - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "edma_tc0",
- .start = DA8XX_TPTC0_BASE,
- .end = DA8XX_TPTC0_BASE + SZ_1K - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .name = "edma_tc1",
- .start = DA8XX_TPTC1_BASE,
- .end = DA8XX_TPTC1_BASE + SZ_1K - 1,
- .flags = IORESOURCE_MEM,
- },
+static struct resource da850_edma1_resources[] = {
{
- .name = "edma_cc1",
+ .name = "edma3_cc",
.start = DA850_TPCC1_BASE,
.end = DA850_TPCC1_BASE + SZ_32K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma_tc2",
+ .name = "edma3_tc0",
.start = DA850_TPTC2_BASE,
.end = DA850_TPTC2_BASE + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma0",
- .start = IRQ_DA8XX_CCINT0,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "edma0_err",
- .start = IRQ_DA8XX_CCERRINT,
- .flags = IORESOURCE_IRQ,
- },
- {
- .name = "edma1",
+ .name = "edma3_ccint",
.start = IRQ_DA850_CCINT1,
.flags = IORESOURCE_IRQ,
},
{
- .name = "edma1_err",
+ .name = "edma3_ccerrint",
.start = IRQ_DA850_CCERRINT1,
.flags = IORESOURCE_IRQ,
},
};
-static struct platform_device da830_edma_device = {
+static const struct platform_device_info da8xx_edma0_device __initconst = {
.name = "edma",
- .id = -1,
- .dev = {
- .platform_data = da830_edma_info,
- },
- .num_resources = ARRAY_SIZE(da830_edma_resources),
- .resource = da830_edma_resources,
+ .id = 0,
+ .dma_mask = DMA_BIT_MASK(32),
+ .res = da8xx_edma0_resources,
+ .num_res = ARRAY_SIZE(da8xx_edma0_resources),
+ .data = &da8xx_edma0_pdata,
+ .size_data = sizeof(da8xx_edma0_pdata),
};
-static struct platform_device da850_edma_device = {
+static const struct platform_device_info da850_edma1_device __initconst = {
.name = "edma",
- .id = -1,
- .dev = {
- .platform_data = da850_edma_info,
- },
- .num_resources = ARRAY_SIZE(da850_edma_resources),
- .resource = da850_edma_resources,
+ .id = 1,
+ .dma_mask = DMA_BIT_MASK(32),
+ .res = da850_edma1_resources,
+ .num_res = ARRAY_SIZE(da850_edma1_resources),
+ .data = &da850_edma1_pdata,
+ .size_data = sizeof(da850_edma1_pdata),
};
int __init da830_register_edma(struct edma_rsv_info *rsv)
{
- da830_edma_cc0_info.rsv = rsv;
+ struct platform_device *edma_pdev;
+
+ da8xx_edma0_pdata.rsv = rsv;
- return platform_device_register(&da830_edma_device);
+ edma_pdev = platform_device_register_full(&da8xx_edma0_device);
+ return IS_ERR(edma_pdev) ? PTR_ERR(edma_pdev) : 0;
}
int __init da850_register_edma(struct edma_rsv_info *rsv[2])
{
+ struct platform_device *edma_pdev;
+
if (rsv) {
- da850_edma_cc_info[0].rsv = rsv[0];
- da850_edma_cc_info[1].rsv = rsv[1];
+ da8xx_edma0_pdata.rsv = rsv[0];
+ da850_edma1_pdata.rsv = rsv[1];
}
- return platform_device_register(&da850_edma_device);
+ edma_pdev = platform_device_register_full(&da8xx_edma0_device);
+ if (IS_ERR(edma_pdev)) {
+ pr_warn("%s: Failed to register eDMA0\n", __func__);
+ return PTR_ERR(edma_pdev);
+ }
+ edma_pdev = platform_device_register_full(&da850_edma1_device);
+ return IS_ERR(edma_pdev) ? PTR_ERR(edma_pdev) : 0;
}
static struct resource da8xx_i2c_resources0[] = {
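
Every DaVinci board file in this patch switches from
platform_device_register() to platform_device_register_full(), which returns
an ERR_PTR()-encoded pointer rather than an int and copies both the resource
array and size_data bytes of platform data, so the descriptor itself can live
in __initconst memory. A minimal sketch of the calling convention used above;
register_example_edma() is a hypothetical wrapper:

/* Sketch: platform_device_register_full() must be checked with IS_ERR(),
 * never against NULL; on failure the encoded errno is propagated.
 */
static int __init register_example_edma(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_full(&da8xx_edma0_device);
	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}
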
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 567dc56fe8cd..609950b8c191 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -569,61 +569,58 @@ static u8 dm355_default_priorities[DAVINCI_N_AINTC_IRQ] = {
/*----------------------------------------------------------------------*/
-static s8
-queue_priority_mapping[][2] = {
+static s8 queue_priority_mapping[][2] = {
/* {event queue no, Priority} */
{0, 3},
{1, 7},
{-1, -1},
};
-static struct edma_soc_info edma_cc0_info = {
+static struct edma_soc_info dm355_edma_pdata = {
.queue_priority_mapping = queue_priority_mapping,
.default_queue = EVENTQ_1,
};
-static struct edma_soc_info *dm355_edma_info[EDMA_MAX_CC] = {
- &edma_cc0_info,
-};
-
static struct resource edma_resources[] = {
{
- .name = "edma_cc0",
+ .name = "edma3_cc",
.start = 0x01c00000,
.end = 0x01c00000 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma_tc0",
+ .name = "edma3_tc0",
.start = 0x01c10000,
.end = 0x01c10000 + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma_tc1",
+ .name = "edma3_tc1",
.start = 0x01c10400,
.end = 0x01c10400 + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma0",
+ .name = "edma3_ccint",
.start = IRQ_CCINT0,
.flags = IORESOURCE_IRQ,
},
{
- .name = "edma0_err",
+ .name = "edma3_ccerrint",
.start = IRQ_CCERRINT,
.flags = IORESOURCE_IRQ,
},
/* not using (or muxing) TC*_ERR */
};
-static struct platform_device dm355_edma_device = {
- .name = "edma",
- .id = 0,
- .dev.platform_data = dm355_edma_info,
- .num_resources = ARRAY_SIZE(edma_resources),
- .resource = edma_resources,
+static const struct platform_device_info dm355_edma_device __initconst = {
+ .name = "edma",
+ .id = 0,
+ .dma_mask = DMA_BIT_MASK(32),
+ .res = edma_resources,
+ .num_res = ARRAY_SIZE(edma_resources),
+ .data = &dm355_edma_pdata,
+ .size_data = sizeof(dm355_edma_pdata),
};
static struct resource dm355_asp1_resources[] = {
@@ -1062,13 +1059,18 @@ int __init dm355_init_video(struct vpfe_config *vpfe_cfg,
static int __init dm355_init_devices(void)
{
+ struct platform_device *edma_pdev;
int ret = 0;
if (!cpu_is_davinci_dm355())
return 0;
davinci_cfg_reg(DM355_INT_EDMA_CC);
- platform_device_register(&dm355_edma_device);
+ edma_pdev = platform_device_register_full(&dm355_edma_device);
+ if (IS_ERR(edma_pdev)) {
+ pr_warn("%s: Failed to register eDMA\n", __func__);
+ return PTR_ERR(edma_pdev);
+ }
ret = davinci_init_wdt();
if (ret)
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 6a890a8486d0..2068cbeaeb03 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -853,8 +853,7 @@ static u8 dm365_default_priorities[DAVINCI_N_AINTC_IRQ] = {
};
/* Four Transfer Controllers on DM365 */
-static s8
-dm365_queue_priority_mapping[][2] = {
+static s8 dm365_queue_priority_mapping[][2] = {
/* {event queue no, Priority} */
{0, 7},
{1, 7},
@@ -863,53 +862,49 @@ dm365_queue_priority_mapping[][2] = {
{-1, -1},
};
-static struct edma_soc_info edma_cc0_info = {
+static struct edma_soc_info dm365_edma_pdata = {
.queue_priority_mapping = dm365_queue_priority_mapping,
.default_queue = EVENTQ_3,
};
-static struct edma_soc_info *dm365_edma_info[EDMA_MAX_CC] = {
- &edma_cc0_info,
-};
-
static struct resource edma_resources[] = {
{
- .name = "edma_cc0",
+ .name = "edma3_cc",
.start = 0x01c00000,
.end = 0x01c00000 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma_tc0",
+ .name = "edma3_tc0",
.start = 0x01c10000,
.end = 0x01c10000 + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma_tc1",
+ .name = "edma3_tc1",
.start = 0x01c10400,
.end = 0x01c10400 + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma_tc2",
+ .name = "edma3_tc2",
.start = 0x01c10800,
.end = 0x01c10800 + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma_tc3",
+ .name = "edma3_tc3",
.start = 0x01c10c00,
.end = 0x01c10c00 + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma0",
+ .name = "edma3_ccint",
.start = IRQ_CCINT0,
.flags = IORESOURCE_IRQ,
},
{
- .name = "edma0_err",
+ .name = "edma3_ccerrint",
.start = IRQ_CCERRINT,
.flags = IORESOURCE_IRQ,
},
@@ -919,7 +914,7 @@ static struct resource edma_resources[] = {
static struct platform_device dm365_edma_device = {
.name = "edma",
.id = 0,
- .dev.platform_data = dm365_edma_info,
+ .dev.platform_data = &dm365_edma_pdata,
.num_resources = ARRAY_SIZE(edma_resources),
.resource = edma_resources,
};
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index dc52657909c4..d38f5049d56e 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -498,61 +498,58 @@ static u8 dm644x_default_priorities[DAVINCI_N_AINTC_IRQ] = {
/*----------------------------------------------------------------------*/
-static s8
-queue_priority_mapping[][2] = {
+static s8 queue_priority_mapping[][2] = {
/* {event queue no, Priority} */
{0, 3},
{1, 7},
{-1, -1},
};
-static struct edma_soc_info edma_cc0_info = {
+static struct edma_soc_info dm644x_edma_pdata = {
.queue_priority_mapping = queue_priority_mapping,
.default_queue = EVENTQ_1,
};
-static struct edma_soc_info *dm644x_edma_info[EDMA_MAX_CC] = {
- &edma_cc0_info,
-};
-
static struct resource edma_resources[] = {
{
- .name = "edma_cc0",
+ .name = "edma3_cc",
.start = 0x01c00000,
.end = 0x01c00000 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma_tc0",
+ .name = "edma3_tc0",
.start = 0x01c10000,
.end = 0x01c10000 + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma_tc1",
+ .name = "edma3_tc1",
.start = 0x01c10400,
.end = 0x01c10400 + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma0",
+ .name = "edma3_ccint",
.start = IRQ_CCINT0,
.flags = IORESOURCE_IRQ,
},
{
- .name = "edma0_err",
+ .name = "edma3_ccerrint",
.start = IRQ_CCERRINT,
.flags = IORESOURCE_IRQ,
},
/* not using TC*_ERR */
};
-static struct platform_device dm644x_edma_device = {
- .name = "edma",
- .id = 0,
- .dev.platform_data = dm644x_edma_info,
- .num_resources = ARRAY_SIZE(edma_resources),
- .resource = edma_resources,
+static const struct platform_device_info dm644x_edma_device __initconst = {
+ .name = "edma",
+ .id = 0,
+ .dma_mask = DMA_BIT_MASK(32),
+ .res = edma_resources,
+ .num_res = ARRAY_SIZE(edma_resources),
+ .data = &dm644x_edma_pdata,
+ .size_data = sizeof(dm644x_edma_pdata),
};
/* DM6446 EVM uses ASP0; line-out is a pair of RCA jacks */
@@ -950,12 +947,17 @@ int __init dm644x_init_video(struct vpfe_config *vpfe_cfg,
static int __init dm644x_init_devices(void)
{
+ struct platform_device *edma_pdev;
int ret = 0;
if (!cpu_is_davinci_dm644x())
return 0;
- platform_device_register(&dm644x_edma_device);
+ edma_pdev = platform_device_register_full(&dm644x_edma_device);
+ if (IS_ERR(edma_pdev)) {
+ pr_warn("%s: Failed to register eDMA\n", __func__);
+ return PTR_ERR(edma_pdev);
+ }
platform_device_register(&dm644x_mdio_device);
platform_device_register(&dm644x_emac_device);
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 3f842bb266d6..70eb42725eec 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -531,8 +531,7 @@ static u8 dm646x_default_priorities[DAVINCI_N_AINTC_IRQ] = {
/*----------------------------------------------------------------------*/
/* Four Transfer Controllers on DM646x */
-static s8
-dm646x_queue_priority_mapping[][2] = {
+static s8 dm646x_queue_priority_mapping[][2] = {
/* {event queue no, Priority} */
{0, 4},
{1, 0},
@@ -541,65 +540,63 @@ dm646x_queue_priority_mapping[][2] = {
{-1, -1},
};
-static struct edma_soc_info edma_cc0_info = {
+static struct edma_soc_info dm646x_edma_pdata = {
.queue_priority_mapping = dm646x_queue_priority_mapping,
.default_queue = EVENTQ_1,
};
-static struct edma_soc_info *dm646x_edma_info[EDMA_MAX_CC] = {
- &edma_cc0_info,
-};
-
static struct resource edma_resources[] = {
{
- .name = "edma_cc0",
+ .name = "edma3_cc",
.start = 0x01c00000,
.end = 0x01c00000 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma_tc0",
+ .name = "edma3_tc0",
.start = 0x01c10000,
.end = 0x01c10000 + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma_tc1",
+ .name = "edma3_tc1",
.start = 0x01c10400,
.end = 0x01c10400 + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma_tc2",
+ .name = "edma3_tc2",
.start = 0x01c10800,
.end = 0x01c10800 + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma_tc3",
+ .name = "edma3_tc3",
.start = 0x01c10c00,
.end = 0x01c10c00 + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
- .name = "edma0",
+ .name = "edma3_ccint",
.start = IRQ_CCINT0,
.flags = IORESOURCE_IRQ,
},
{
- .name = "edma0_err",
+ .name = "edma3_ccerrint",
.start = IRQ_CCERRINT,
.flags = IORESOURCE_IRQ,
},
/* not using TC*_ERR */
};
-static struct platform_device dm646x_edma_device = {
- .name = "edma",
- .id = 0,
- .dev.platform_data = dm646x_edma_info,
- .num_resources = ARRAY_SIZE(edma_resources),
- .resource = edma_resources,
+static const struct platform_device_info dm646x_edma_device __initconst = {
+ .name = "edma",
+ .id = 0,
+ .dma_mask = DMA_BIT_MASK(32),
+ .res = edma_resources,
+ .num_res = ARRAY_SIZE(edma_resources),
+ .data = &dm646x_edma_pdata,
+ .size_data = sizeof(dm646x_edma_pdata),
};
static struct resource dm646x_mcasp0_resources[] = {
@@ -936,9 +933,12 @@ void dm646x_setup_vpif(struct vpif_display_config *display_config,
int __init dm646x_init_edma(struct edma_rsv_info *rsv)
{
- edma_cc0_info.rsv = rsv;
+ struct platform_device *edma_pdev;
+
+ dm646x_edma_pdata.rsv = rsv;
- return platform_device_register(&dm646x_edma_device);
+ edma_pdev = platform_device_register_full(&dm646x_edma_device);
+ return IS_ERR(edma_pdev) ? PTR_ERR(edma_pdev) : 0;
}
void __init dm646x_init(void)
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 33d1460a5639..ddf912406ce8 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -96,7 +96,6 @@ config ARCH_OMAP2PLUS
select OMAP_GPMC
select PINCTRL
select SOC_BUS
- select TI_PRIV_EDMA
select OMAP_IRQCHIP
help
Systems based on OMAP2, OMAP3, OMAP4 or OMAP5
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 1d8b147282cf..b4cb3bd89d8a 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -603,18 +603,11 @@ static void __init genclk_init_parent(struct clk *clk)
clk->parent = parent;
}
-static struct dw_dma_platform_data dw_dmac0_data = {
- .nr_channels = 3,
- .block_size = 4095U,
- .nr_masters = 2,
- .data_width = { 2, 2 },
-};
-
static struct resource dw_dmac0_resource[] = {
PBMEM(0xff200000),
IRQ(2),
};
-DEFINE_DEV_DATA(dw_dmac, 0);
+DEFINE_DEV(dw_dmac, 0);
DEV_CLK(hclk, dw_dmac0, hsb, 10);
/* --------------------------------------------------------------------
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index b4584757dae0..e6cd1a32025a 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -229,7 +229,7 @@ config IMX_SDMA
Support the i.MX SDMA engine. This engine is integrated into
Freescale i.MX25/31/35/51/53/6 chips.
-config IDMA64
+config INTEL_IDMA64
tristate "Intel integrated DMA 64-bit support"
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
@@ -486,7 +486,7 @@ config TI_EDMA
depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
- select TI_PRIV_EDMA
+ select TI_DMA_CROSSBAR if ARCH_OMAP
default n
help
Enable support for the TI EDMA controller. This DMA
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 7711a7180726..ef9c099bd2b6 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_HSU_DMA) += hsu/
obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
-obj-$(CONFIG_IDMA64) += idma64.o
+obj-$(CONFIG_INTEL_IDMA64) += idma64.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 981a38fc4cb8..16d0daa058a5 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -161,10 +161,8 @@ int acpi_dma_controller_register(struct device *dev,
return -EINVAL;
/* Check if the device was enumerated by ACPI */
- if (!ACPI_HANDLE(dev))
- return -EINVAL;
-
- if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev))
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
return -EINVAL;
adma = kzalloc(sizeof(*adma), GFP_KERNEL);
@@ -359,10 +357,11 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
int found;
/* Check if the device was enumerated by ACPI */
- if (!dev || !ACPI_HANDLE(dev))
+ if (!dev)
return ERR_PTR(-ENODEV);
- if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev))
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
return ERR_PTR(-ENODEV);
memset(&pdata, 0, sizeof(pdata));
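
Both hunks above replace the two-step ACPI_HANDLE() check plus
acpi_bus_get_device() lookup with a single ACPI_COMPANION() call, which
yields the struct acpi_device directly, or NULL when the device was not
enumerated by ACPI. A minimal sketch; example_check_acpi() is a hypothetical
name:

#include <linux/acpi.h>
#include <linux/device.h>

/* Sketch: ACPI_COMPANION() returns the device's ACPI companion, or NULL
 * if the device was not enumerated through ACPI.
 */
static int example_check_acpi(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);

	return adev ? 0 : -EINVAL;
}
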
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 58d406230d89..4e55239c7a30 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -458,10 +458,10 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
dma_cookie_complete(txd);
/* If the transfer was a memset, free our temporary buffer */
- if (desc->memset) {
+ if (desc->memset_buffer) {
dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
desc->memset_paddr);
- desc->memset = false;
+ desc->memset_buffer = false;
}
/* move children to free_list */
@@ -881,6 +881,46 @@ err_desc_get:
return NULL;
}
+static struct at_desc *atc_create_memset_desc(struct dma_chan *chan,
+ dma_addr_t psrc,
+ dma_addr_t pdst,
+ size_t len)
+{
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ struct at_desc *desc;
+ size_t xfer_count;
+
+ u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2);
+ u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
+ ATC_SRC_ADDR_MODE_FIXED |
+ ATC_DST_ADDR_MODE_INCR |
+ ATC_FC_MEM2MEM;
+
+ xfer_count = len >> 2;
+ if (xfer_count > ATC_BTSIZE_MAX) {
+ dev_err(chan2dev(chan), "%s: buffer is too big\n",
+ __func__);
+ return NULL;
+ }
+
+ desc = atc_desc_get(atchan);
+ if (!desc) {
+ dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
+ __func__);
+ return NULL;
+ }
+
+ desc->lli.saddr = psrc;
+ desc->lli.daddr = pdst;
+ desc->lli.ctrla = ctrla | xfer_count;
+ desc->lli.ctrlb = ctrlb;
+
+ desc->txd.cookie = 0;
+ desc->len = len;
+
+ return desc;
+}
+
/**
 * atc_prep_dma_memset - prepare a memset operation
* @chan: the channel to prepare operation on
@@ -893,12 +933,10 @@ static struct dma_async_tx_descriptor *
atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
size_t len, unsigned long flags)
{
- struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device);
- struct at_desc *desc = NULL;
- size_t xfer_count;
- u32 ctrla;
- u32 ctrlb;
+ struct at_desc *desc;
+ void __iomem *vaddr;
+ dma_addr_t paddr;
dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__,
dest, value, len, flags);
@@ -914,61 +952,117 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
return NULL;
}
- xfer_count = len >> 2;
- if (xfer_count > ATC_BTSIZE_MAX) {
- dev_err(chan2dev(chan), "%s: buffer is too big\n",
+ vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
+ if (!vaddr) {
+ dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
__func__);
return NULL;
}
+	*(u32 *)vaddr = value;
- ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
- | ATC_SRC_ADDR_MODE_FIXED
- | ATC_DST_ADDR_MODE_INCR
- | ATC_FC_MEM2MEM;
+ desc = atc_create_memset_desc(chan, paddr, dest, len);
+ if (!desc) {
+ dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n",
+ __func__);
+ goto err_free_buffer;
+ }
- ctrla = ATC_SRC_WIDTH(2) |
- ATC_DST_WIDTH(2);
+ desc->memset_paddr = paddr;
+ desc->memset_vaddr = vaddr;
+ desc->memset_buffer = true;
- desc = atc_desc_get(atchan);
- if (!desc) {
- dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
+ desc->txd.cookie = -EBUSY;
+ desc->total_len = len;
+
+ /* set end-of-link on the descriptor */
+ set_desc_eol(desc);
+
+ desc->txd.flags = flags;
+
+ return &desc->txd;
+
+err_free_buffer:
+ dma_pool_free(atdma->memset_pool, vaddr, paddr);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+atc_prep_dma_memset_sg(struct dma_chan *chan,
+ struct scatterlist *sgl,
+ unsigned int sg_len, int value,
+ unsigned long flags)
+{
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ struct at_dma *atdma = to_at_dma(chan->device);
+ struct at_desc *desc = NULL, *first = NULL, *prev = NULL;
+ struct scatterlist *sg;
+ void __iomem *vaddr;
+ dma_addr_t paddr;
+ size_t total_len = 0;
+ int i;
+
+ dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%zx f0x%lx\n", __func__,
+ value, sg_len, flags);
+
+ if (unlikely(!sgl || !sg_len)) {
+ dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n",
__func__);
return NULL;
}
- desc->memset_vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC,
- &desc->memset_paddr);
- if (!desc->memset_vaddr) {
+ vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
+ if (!vaddr) {
dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
__func__);
- goto err_put_desc;
+ return NULL;
}
+	*(u32 *)vaddr = value;
- *desc->memset_vaddr = value;
- desc->memset = true;
+ for_each_sg(sgl, sg, sg_len, i) {
+ dma_addr_t dest = sg_dma_address(sg);
+ size_t len = sg_dma_len(sg);
- desc->lli.saddr = desc->memset_paddr;
- desc->lli.daddr = dest;
- desc->lli.ctrla = ctrla | xfer_count;
- desc->lli.ctrlb = ctrlb;
+ dev_vdbg(chan2dev(chan), "%s: d0x%08x, l0x%zx\n",
+ __func__, dest, len);
- desc->txd.cookie = -EBUSY;
- desc->len = len;
- desc->total_len = len;
+ if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
+ dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
+ __func__);
+ goto err_put_desc;
+ }
+
+ desc = atc_create_memset_desc(chan, paddr, dest, len);
+ if (!desc)
+ goto err_put_desc;
+
+ atc_desc_chain(&first, &prev, desc);
+
+ total_len += len;
+ }
+
+ /*
+	 * Set the buffer pointers only on the last descriptor to
+	 * avoid freeing the buffer while the transfer is still running
+ */
+ desc->memset_paddr = paddr;
+ desc->memset_vaddr = vaddr;
+ desc->memset_buffer = true;
+
+ first->txd.cookie = -EBUSY;
+ first->total_len = total_len;
/* set end-of-link on the descriptor */
set_desc_eol(desc);
- desc->txd.flags = flags;
+ first->txd.flags = flags;
- return &desc->txd;
+ return &first->txd;
err_put_desc:
- atc_desc_put(atchan, desc);
+ atc_desc_put(atchan, first);
return NULL;
}
-
/**
* atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
* @chan: DMA channel
@@ -1851,6 +1945,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
+ dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
@@ -1972,6 +2067,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
+ atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
}
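
Clients can discover the new scatter-gathered memset operation through the
DMA_MEMSET_SG capability bit set above. A hedged sketch of the check a client
would perform before building a request against device_prep_dma_memset_sg();
example_can_memset_sg() is a hypothetical helper:

#include <linux/dmaengine.h>

/* Sketch: test a channel's device for scatter-gathered memset support. */
static bool example_can_memset_sg(struct dma_chan *chan)
{
	return dma_has_cap(DMA_MEMSET_SG, chan->device->cap_mask);
}
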
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index c3bebbe899ac..d1cfc8c876f9 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -202,7 +202,7 @@ struct at_desc {
size_t src_hole;
/* Memset temporary buffer */
- bool memset;
+ bool memset_buffer;
dma_addr_t memset_paddr;
int *memset_vaddr;
};
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index dd24375b76dd..b5e132d4bae5 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -938,13 +938,19 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
{
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac_desc *prev = NULL, *first = NULL;
- struct data_chunk *chunk, *prev_chunk = NULL;
dma_addr_t dst_addr, src_addr;
- size_t dst_skip, src_skip, len = 0;
- size_t prev_dst_icg = 0, prev_src_icg = 0;
+ size_t src_skip = 0, dst_skip = 0, len = 0;
+ struct data_chunk *chunk;
int i;
- if (!xt || (xt->numf != 1) || (xt->dir != DMA_MEM_TO_MEM))
+ if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM))
+ return NULL;
+
+ /*
+ * TODO: Handle the case where we have to repeat a chain of
+ * descriptors...
+ */
+ if ((xt->numf > 1) && (xt->frame_size > 1))
return NULL;
dev_dbg(chan2dev(chan), "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
@@ -954,66 +960,60 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
src_addr = xt->src_start;
dst_addr = xt->dst_start;
- for (i = 0; i < xt->frame_size; i++) {
- struct at_xdmac_desc *desc;
- size_t src_icg, dst_icg;
+ if (xt->numf > 1) {
+ first = at_xdmac_interleaved_queue_desc(chan, atchan,
+ NULL,
+ src_addr, dst_addr,
+ xt, xt->sgl);
+ for (i = 0; i < xt->numf; i++)
+ at_xdmac_increment_block_count(chan, first);
- chunk = xt->sgl + i;
+ dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
+ __func__, first, first);
+ list_add_tail(&first->desc_node, &first->descs_list);
+ } else {
+ for (i = 0; i < xt->frame_size; i++) {
+ size_t src_icg = 0, dst_icg = 0;
+ struct at_xdmac_desc *desc;
- dst_icg = dmaengine_get_dst_icg(xt, chunk);
- src_icg = dmaengine_get_src_icg(xt, chunk);
+ chunk = xt->sgl + i;
- src_skip = chunk->size + src_icg;
- dst_skip = chunk->size + dst_icg;
+ dst_icg = dmaengine_get_dst_icg(xt, chunk);
+ src_icg = dmaengine_get_src_icg(xt, chunk);
- dev_dbg(chan2dev(chan),
- "%s: chunk size=%d, src icg=%d, dst icg=%d\n",
- __func__, chunk->size, src_icg, dst_icg);
+ src_skip = chunk->size + src_icg;
+ dst_skip = chunk->size + dst_icg;
- /*
- * Handle the case where we just have the same
- * transfer to setup, we can just increase the
- * block number and reuse the same descriptor.
- */
- if (prev_chunk && prev &&
- (prev_chunk->size == chunk->size) &&
- (prev_src_icg == src_icg) &&
- (prev_dst_icg == dst_icg)) {
dev_dbg(chan2dev(chan),
- "%s: same configuration that the previous chunk, merging the descriptors...\n",
- __func__);
- at_xdmac_increment_block_count(chan, prev);
- continue;
- }
-
- desc = at_xdmac_interleaved_queue_desc(chan, atchan,
- prev,
- src_addr, dst_addr,
- xt, chunk);
- if (!desc) {
- list_splice_init(&first->descs_list,
- &atchan->free_descs_list);
- return NULL;
- }
+ "%s: chunk size=%d, src icg=%d, dst icg=%d\n",
+ __func__, chunk->size, src_icg, dst_icg);
+
+ desc = at_xdmac_interleaved_queue_desc(chan, atchan,
+ prev,
+ src_addr, dst_addr,
+ xt, chunk);
+ if (!desc) {
+ list_splice_init(&first->descs_list,
+ &atchan->free_descs_list);
+ return NULL;
+ }
- if (!first)
- first = desc;
+ if (!first)
+ first = desc;
- dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
- __func__, desc, first);
- list_add_tail(&desc->desc_node, &first->descs_list);
+ dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
+ __func__, desc, first);
+ list_add_tail(&desc->desc_node, &first->descs_list);
- if (xt->src_sgl)
- src_addr += src_skip;
+ if (xt->src_sgl)
+ src_addr += src_skip;
- if (xt->dst_sgl)
- dst_addr += dst_skip;
+ if (xt->dst_sgl)
+ dst_addr += dst_skip;
- len += chunk->size;
- prev_chunk = chunk;
- prev_dst_icg = dst_icg;
- prev_src_icg = src_icg;
- prev = desc;
+ len += chunk->size;
+ prev = desc;
+ }
}
first->tx_dma_desc.cookie = -EBUSY;
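
The rewritten at_xdmac_prep_interleaved() now accepts xt->numf > 1 provided
the frame consists of a single chunk; it then reuses one descriptor and bumps
its block count once per frame instead of chaining descriptors. A hedged
client-side sketch of such a template through the generic dmaengine API;
example_prep_repeat() and all sizes are illustrative:

#include <linux/dmaengine.h>
#include <linux/slab.h>

/* Sketch: a single-chunk, multi-frame interleaved template -- the case
 * the driver now handles by incrementing the block count.
 */
static struct dma_async_tx_descriptor *
example_prep_repeat(struct dma_chan *chan, dma_addr_t src, dma_addr_t dst)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;

	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->dir = DMA_MEM_TO_MEM;
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->numf = 8;		/* eight identical frames */
	xt->frame_size = 1;	/* exactly one chunk per frame */
	xt->sgl[0].size = 4096;
	xt->sgl[0].icg = 0;

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);	/* providers consume the template during prep */
	return tx;
}
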
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 09479d4be4db..3ecec1445adf 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1074,11 +1074,9 @@ static void dmaengine_destroy_unmap_pool(void)
for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
struct dmaengine_unmap_pool *p = &unmap_pool[i];
- if (p->pool)
- mempool_destroy(p->pool);
+ mempool_destroy(p->pool);
p->pool = NULL;
- if (p->cache)
- kmem_cache_destroy(p->cache);
+ kmem_cache_destroy(p->cache);
p->cache = NULL;
}
}
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index bedce038c6e2..7067b6ddc1db 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -163,7 +163,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
/*----------------------------------------------------------------------*/
-static inline unsigned int dwc_fast_fls(unsigned long long v)
+static inline unsigned int dwc_fast_ffs(unsigned long long v)
{
/*
* We can be a lot more clever here, but this should take care
@@ -712,7 +712,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
dw->data_width[dwc->dst_master]);
src_width = dst_width = min_t(unsigned int, data_width,
- dwc_fast_fls(src | dest | len));
+ dwc_fast_ffs(src | dest | len));
ctllo = DWC_DEFAULT_CTLLO(chan)
| DWC_CTLL_DST_WIDTH(dst_width)
@@ -791,7 +791,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
switch (direction) {
case DMA_MEM_TO_DEV:
- reg_width = __fls(sconfig->dst_addr_width);
+ reg_width = __ffs(sconfig->dst_addr_width);
reg = sconfig->dst_addr;
ctllo = (DWC_DEFAULT_CTLLO(chan)
| DWC_CTLL_DST_WIDTH(reg_width)
@@ -811,7 +811,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
len = sg_dma_len(sg);
mem_width = min_t(unsigned int,
- data_width, dwc_fast_fls(mem | len));
+ data_width, dwc_fast_ffs(mem | len));
slave_sg_todev_fill_desc:
desc = dwc_desc_get(dwc);
@@ -848,7 +848,7 @@ slave_sg_todev_fill_desc:
}
break;
case DMA_DEV_TO_MEM:
- reg_width = __fls(sconfig->src_addr_width);
+ reg_width = __ffs(sconfig->src_addr_width);
reg = sconfig->src_addr;
ctllo = (DWC_DEFAULT_CTLLO(chan)
| DWC_CTLL_SRC_WIDTH(reg_width)
@@ -868,7 +868,7 @@ slave_sg_todev_fill_desc:
len = sg_dma_len(sg);
mem_width = min_t(unsigned int,
- data_width, dwc_fast_fls(mem | len));
+ data_width, dwc_fast_ffs(mem | len));
slave_sg_fromdev_fill_desc:
desc = dwc_desc_get(dwc);
@@ -1499,9 +1499,8 @@ EXPORT_SYMBOL(dw_dma_cyclic_free);
int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
{
struct dw_dma *dw;
- bool autocfg;
+ bool autocfg = false;
unsigned int dw_params;
- unsigned int nr_channels;
unsigned int max_blk_size = 0;
int err;
int i;
@@ -1515,33 +1514,42 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
pm_runtime_get_sync(chip->dev);
- dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
- autocfg = dw_params >> DW_PARAMS_EN & 0x1;
+ if (!pdata) {
+ dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
+ dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);
- dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);
+ autocfg = dw_params >> DW_PARAMS_EN & 1;
+ if (!autocfg) {
+ err = -EINVAL;
+ goto err_pdata;
+ }
- if (!pdata && autocfg) {
pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
err = -ENOMEM;
goto err_pdata;
}
+ /* Get hardware configuration parameters */
+ pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
+ pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
+ for (i = 0; i < pdata->nr_masters; i++) {
+ pdata->data_width[i] =
+ (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
+ }
+ max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
+
/* Fill platform data with the default values */
pdata->is_private = true;
+ pdata->is_memcpy = true;
pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
- } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
+ } else if (pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
err = -EINVAL;
goto err_pdata;
}
- if (autocfg)
- nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
- else
- nr_channels = pdata->nr_channels;
-
- dw->chan = devm_kcalloc(chip->dev, nr_channels, sizeof(*dw->chan),
+ dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
GFP_KERNEL);
if (!dw->chan) {
err = -ENOMEM;
@@ -1549,22 +1557,12 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
}
/* Get hardware configuration parameters */
- if (autocfg) {
- max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
-
- dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
- for (i = 0; i < dw->nr_masters; i++) {
- dw->data_width[i] =
- (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
- }
- } else {
- dw->nr_masters = pdata->nr_masters;
- for (i = 0; i < dw->nr_masters; i++)
- dw->data_width[i] = pdata->data_width[i];
- }
+ dw->nr_masters = pdata->nr_masters;
+ for (i = 0; i < dw->nr_masters; i++)
+ dw->data_width[i] = pdata->data_width[i];
/* Calculate all channel mask before DMA setup */
- dw->all_chan_mask = (1 << nr_channels) - 1;
+ dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
/* Force dma off, just in case */
dw_dma_off(dw);
@@ -1589,7 +1587,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
goto err_pdata;
INIT_LIST_HEAD(&dw->dma.channels);
- for (i = 0; i < nr_channels; i++) {
+ for (i = 0; i < pdata->nr_channels; i++) {
struct dw_dma_chan *dwc = &dw->chan[i];
dwc->chan.device = &dw->dma;
@@ -1602,7 +1600,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* 7 is highest priority & 0 is lowest. */
if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
- dwc->priority = nr_channels - i - 1;
+ dwc->priority = pdata->nr_channels - i - 1;
else
dwc->priority = i;
@@ -1656,10 +1654,13 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
- dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
+ /* Set capabilities */
dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
if (pdata->is_private)
dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
+ if (pdata->is_memcpy)
+ dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
+
dw->dma.dev = chip->dev;
dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
dw->dma.device_free_chan_resources = dwc_free_chan_resources;
@@ -1687,7 +1688,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
goto err_dma_register;
dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
- nr_channels);
+ pdata->nr_channels);
pm_runtime_put_sync_suspend(chip->dev);
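
The dwc_fast_fls() -> dwc_fast_ffs() rename and the matching __fls() ->
__ffs() switch fix a misnomer: the helper computes the widest transfer
allowed by the alignment of src | dest | len, i.e. the index of the lowest
set bit, which is ffs semantics. For the power-of-two addr_width values,
__ffs() is simply log2 of the byte count. A small sketch under that
power-of-two assumption; example_reg_width() is a hypothetical name:

#include <linux/bitops.h>
#include <linux/dmaengine.h>

/* Sketch: convert a power-of-two bus width in bytes to the log2 value
 * programmed into the controller: 1 -> 0, 2 -> 1, 4 -> 2.
 */
static unsigned int example_reg_width(enum dma_slave_buswidth width)
{
	return __ffs(width);
}
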
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index b144706b3d85..4c30fdd092b3 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -15,12 +15,6 @@
#include "internal.h"
-static struct dw_dma_platform_data dw_pci_pdata = {
- .is_private = 1,
- .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
- .chan_priority = CHAN_PRIORITY_ASCENDING,
-};
-
static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
struct dw_dma_chip *chip;
@@ -101,19 +95,19 @@ static const struct dev_pm_ops dw_pci_dev_pm_ops = {
static const struct pci_device_id dw_pci_id_table[] = {
/* Medfield */
- { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_pdata },
- { PCI_VDEVICE(INTEL, 0x0830), (kernel_ulong_t)&dw_pci_pdata },
+ { PCI_VDEVICE(INTEL, 0x0827) },
+ { PCI_VDEVICE(INTEL, 0x0830) },
/* BayTrail */
- { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_pdata },
- { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_pdata },
+ { PCI_VDEVICE(INTEL, 0x0f06) },
+ { PCI_VDEVICE(INTEL, 0x0f40) },
/* Braswell */
- { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_pdata },
- { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_pdata },
+ { PCI_VDEVICE(INTEL, 0x2286) },
+ { PCI_VDEVICE(INTEL, 0x22c0) },
/* Haswell */
- { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_pdata },
+ { PCI_VDEVICE(INTEL, 0x9c60) },
{ }
};
MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index b2c3ae071429..68a4815750b5 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -155,6 +155,7 @@ static int dw_probe(struct platform_device *pdev)
struct dw_dma_chip *chip;
struct device *dev = &pdev->dev;
struct resource *mem;
+ const struct acpi_device_id *id;
struct dw_dma_platform_data *pdata;
int err;
@@ -178,6 +179,11 @@ static int dw_probe(struct platform_device *pdev)
pdata = dev_get_platdata(dev);
if (!pdata)
pdata = dw_dma_parse_dt(pdev);
+ if (!pdata && has_acpi_companion(dev)) {
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (id)
+ pdata = (struct dw_dma_platform_data *)id->driver_data;
+ }
chip->dev = dev;
@@ -246,8 +252,17 @@ MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#endif
#ifdef CONFIG_ACPI
+static struct dw_dma_platform_data dw_dma_acpi_pdata = {
+ .nr_channels = 8,
+ .is_private = true,
+ .chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
+ .chan_priority = CHAN_PRIORITY_ASCENDING,
+ .block_size = 4095,
+ .nr_masters = 2,
+};
+
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
- { "INTL9C60", 0 },
+ { "INTL9C60", (kernel_ulong_t)&dw_dma_acpi_pdata },
{ }
};
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
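
With the default platform data now stashed in the ACPI match table's
driver_data, probe code recovers it via acpi_match_device(), as dw_probe()
does above. A minimal sketch; example_acpi_pdata() is a hypothetical name:

#include <linux/acpi.h>

/* Sketch: fetch per-device defaults from the matching ACPI table entry;
 * NULL means no ACPI match or no attached data.
 */
static struct dw_dma_platform_data *example_acpi_pdata(struct device *dev)
{
	const struct acpi_device_id *id;

	id = acpi_match_device(dev->driver->acpi_match_table, dev);
	return id ? (struct dw_dma_platform_data *)id->driver_data : NULL;
}
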
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 3e5d4f193005..6b03e4e84e6b 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -25,28 +25,93 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_data/edma.h>
#include "dmaengine.h"
#include "virt-dma.h"
-/*
- * This will go away when the private EDMA API is folded
- * into this driver and the platform device(s) are
- * instantiated in the arch code. We can only get away
- * with this simplification because DA8XX may not be built
- * in the same kernel image with other DaVinci parts. This
- * avoids having to sprinkle dmaengine driver platform devices
- * and data throughout all the existing board files.
- */
-#ifdef CONFIG_ARCH_DAVINCI_DA8XX
-#define EDMA_CTLRS 2
-#define EDMA_CHANS 32
-#else
-#define EDMA_CTLRS 1
-#define EDMA_CHANS 64
-#endif /* CONFIG_ARCH_DAVINCI_DA8XX */
+/* Offsets matching "struct edmacc_param" */
+#define PARM_OPT 0x00
+#define PARM_SRC 0x04
+#define PARM_A_B_CNT 0x08
+#define PARM_DST 0x0c
+#define PARM_SRC_DST_BIDX 0x10
+#define PARM_LINK_BCNTRLD 0x14
+#define PARM_SRC_DST_CIDX 0x18
+#define PARM_CCNT 0x1c
+
+#define PARM_SIZE 0x20
+
+/* Offsets for EDMA CC global channel registers and their shadows */
+#define SH_ER 0x00 /* 64 bits */
+#define SH_ECR 0x08 /* 64 bits */
+#define SH_ESR 0x10 /* 64 bits */
+#define SH_CER 0x18 /* 64 bits */
+#define SH_EER 0x20 /* 64 bits */
+#define SH_EECR 0x28 /* 64 bits */
+#define SH_EESR 0x30 /* 64 bits */
+#define SH_SER 0x38 /* 64 bits */
+#define SH_SECR 0x40 /* 64 bits */
+#define SH_IER 0x50 /* 64 bits */
+#define SH_IECR 0x58 /* 64 bits */
+#define SH_IESR 0x60 /* 64 bits */
+#define SH_IPR 0x68 /* 64 bits */
+#define SH_ICR 0x70 /* 64 bits */
+#define SH_IEVAL 0x78
+#define SH_QER 0x80
+#define SH_QEER 0x84
+#define SH_QEECR 0x88
+#define SH_QEESR 0x8c
+#define SH_QSER 0x90
+#define SH_QSECR 0x94
+#define SH_SIZE 0x200
+
+/* Offsets for EDMA CC global registers */
+#define EDMA_REV 0x0000
+#define EDMA_CCCFG 0x0004
+#define EDMA_QCHMAP 0x0200 /* 8 registers */
+#define EDMA_DMAQNUM 0x0240 /* 8 registers (4 on OMAP-L1xx) */
+#define EDMA_QDMAQNUM 0x0260
+#define EDMA_QUETCMAP 0x0280
+#define EDMA_QUEPRI 0x0284
+#define EDMA_EMR 0x0300 /* 64 bits */
+#define EDMA_EMCR 0x0308 /* 64 bits */
+#define EDMA_QEMR 0x0310
+#define EDMA_QEMCR 0x0314
+#define EDMA_CCERR 0x0318
+#define EDMA_CCERRCLR 0x031c
+#define EDMA_EEVAL 0x0320
+#define EDMA_DRAE		0x0340	/* 4 x 64 bits */
+#define EDMA_QRAE 0x0380 /* 4 registers */
+#define EDMA_QUEEVTENTRY 0x0400 /* 2 x 16 registers */
+#define EDMA_QSTAT 0x0600 /* 2 registers */
+#define EDMA_QWMTHRA 0x0620
+#define EDMA_QWMTHRB 0x0624
+#define EDMA_CCSTAT 0x0640
+
+#define EDMA_M 0x1000 /* global channel registers */
+#define EDMA_ECR 0x1008
+#define EDMA_ECRH 0x100C
+#define EDMA_SHADOW0 0x2000 /* 4 shadow regions */
+#define EDMA_PARM 0x4000 /* PaRAM entries */
+
+#define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5))
+
+#define EDMA_DCHMAP 0x0100 /* 64 registers */
+
+/* CCCFG register */
+#define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */
+#define GET_NUM_QDMACH(x)	((x & 0x70) >> 4)	/* bits 4-6 */
+#define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */
+#define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */
+#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */
+#define CHMAP_EXIST BIT(24)
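
Shift-and-mask macros like the ones above are easy to get wrong because >>
binds tighter than & in C, so every mask needs its own parentheses before the
shift. A standalone userspace check of the arithmetic, duplicating the
definitions above:

#include <assert.h>

#define EDMA_PARM	0x4000
#define PARM_SIZE	0x20
#define PARM_OFFSET(n)	(EDMA_PARM + ((n) << 5))
#define GET_NUM_QDMACH(x)	((x & 0x70) >> 4)	/* bits 4-6 */

int main(void)
{
	/* each PaRAM slot is PARM_SIZE bytes, so slot n sits at n << 5 */
	assert(PARM_OFFSET(3) == EDMA_PARM + 3 * PARM_SIZE);
	/* without the inner parentheses this would read x & (0x70 >> 4),
	 * i.e. x & 0x7, and pick up the wrong field */
	assert(GET_NUM_QDMACH(0x50) == 5);
	return 0;
}
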
/*
* Max of 20 segments per channel to conserve PaRAM slots
@@ -59,6 +124,37 @@
#define EDMA_MAX_SLOTS MAX_NR_SG
#define EDMA_DESCRIPTORS 16
+#define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */
+#define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */
+#define EDMA_CONT_PARAMS_ANY 1001
+#define EDMA_CONT_PARAMS_FIXED_EXACT 1002
+#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
+
+/* PaRAM slots are laid out like this */
+struct edmacc_param {
+ u32 opt;
+ u32 src;
+ u32 a_b_cnt;
+ u32 dst;
+ u32 src_dst_bidx;
+ u32 link_bcntrld;
+ u32 src_dst_cidx;
+ u32 ccnt;
+} __packed;
+
+/* fields in edmacc_param.opt */
+#define SAM BIT(0)
+#define DAM BIT(1)
+#define SYNCDIM BIT(2)
+#define STATIC BIT(3)
+#define EDMA_FWID (0x07 << 8)
+#define TCCMODE BIT(11)
+#define EDMA_TCC(t) ((t) << 12)
+#define TCINTEN BIT(20)
+#define ITCINTEN BIT(21)
+#define TCCHEN BIT(22)
+#define ITCCHEN BIT(23)
+
struct edma_pset {
u32 len;
dma_addr_t addr;
@@ -105,26 +201,524 @@ struct edma_desc {
struct edma_cc;
+struct edma_tc {
+ struct device_node *node;
+ u16 id;
+};
+
struct edma_chan {
struct virt_dma_chan vchan;
struct list_head node;
struct edma_desc *edesc;
struct edma_cc *ecc;
+ struct edma_tc *tc;
int ch_num;
bool alloced;
+ bool hw_triggered;
int slot[EDMA_MAX_SLOTS];
int missed;
struct dma_slave_config cfg;
};
struct edma_cc {
- int ctlr;
+ struct device *dev;
+ struct edma_soc_info *info;
+ void __iomem *base;
+ int id;
+ bool legacy_mode;
+
+ /* eDMA3 resource information */
+ unsigned num_channels;
+ unsigned num_qchannels;
+ unsigned num_region;
+ unsigned num_slots;
+ unsigned num_tc;
+ bool chmap_exist;
+ enum dma_event_q default_queue;
+
+ /*
+	 * The slot_inuse bit for each PaRAM slot is clear unless the slot
+	 * is in use by Linux or has been allocated for use by the DSP.
+ */
+ unsigned long *slot_inuse;
+
struct dma_device dma_slave;
- struct edma_chan slave_chans[EDMA_CHANS];
- int num_slave_chans;
+ struct dma_device *dma_memcpy;
+ struct edma_chan *slave_chans;
+ struct edma_tc *tc_list;
int dummy_slot;
};
+/* dummy param set used to (re)initialize parameter RAM slots */
+static const struct edmacc_param dummy_paramset = {
+ .link_bcntrld = 0xffff,
+ .ccnt = 1,
+};
+
+#define EDMA_BINDING_LEGACY 0
+#define EDMA_BINDING_TPCC 1
+static const struct of_device_id edma_of_ids[] = {
+ {
+ .compatible = "ti,edma3",
+ .data = (void *)EDMA_BINDING_LEGACY,
+ },
+ {
+ .compatible = "ti,edma3-tpcc",
+ .data = (void *)EDMA_BINDING_TPCC,
+ },
+ {}
+};
+
+static const struct of_device_id edma_tptc_of_ids[] = {
+ { .compatible = "ti,edma3-tptc", },
+ {}
+};
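
The two compatible strings carry distinct .data cookies so the probe path can
tell the legacy "ti,edma3" binding from the new "ti,edma3-tpcc" one. A hedged
sketch of the usual retrieval via of_match_device(); example_is_legacy() is a
hypothetical helper, and the real selection logic lives later in the file,
beyond this excerpt:

#include <linux/of_device.h>

/* Sketch: recover the binding type recorded in the of_device_id table. */
static bool example_is_legacy(struct device *dev)
{
	const struct of_device_id *match = of_match_device(edma_of_ids, dev);

	return match && match->data == (void *)EDMA_BINDING_LEGACY;
}
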
+
+static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
+{
+ return (unsigned int)__raw_readl(ecc->base + offset);
+}
+
+static inline void edma_write(struct edma_cc *ecc, int offset, int val)
+{
+ __raw_writel(val, ecc->base + offset);
+}
+
+static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
+ unsigned or)
+{
+ unsigned val = edma_read(ecc, offset);
+
+ val &= and;
+ val |= or;
+ edma_write(ecc, offset, val);
+}
+
+static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
+{
+ unsigned val = edma_read(ecc, offset);
+
+ val &= and;
+ edma_write(ecc, offset, val);
+}
+
+static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
+{
+ unsigned val = edma_read(ecc, offset);
+
+ val |= or;
+ edma_write(ecc, offset, val);
+}
+
+static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
+ int i)
+{
+ return edma_read(ecc, offset + (i << 2));
+}
+
+static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
+ unsigned val)
+{
+ edma_write(ecc, offset + (i << 2), val);
+}
+
+static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
+ unsigned and, unsigned or)
+{
+ edma_modify(ecc, offset + (i << 2), and, or);
+}
+
+static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
+ unsigned or)
+{
+ edma_or(ecc, offset + (i << 2), or);
+}
+
+static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
+ unsigned or)
+{
+ edma_or(ecc, offset + ((i * 2 + j) << 2), or);
+}
+
+static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
+ int j, unsigned val)
+{
+ edma_write(ecc, offset + ((i * 2 + j) << 2), val);
+}
+
+static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
+{
+ return edma_read(ecc, EDMA_SHADOW0 + offset);
+}
+
+static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
+ int offset, int i)
+{
+ return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
+}
+
+static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
+ unsigned val)
+{
+ edma_write(ecc, EDMA_SHADOW0 + offset, val);
+}
+
+static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
+ int i, unsigned val)
+{
+ edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
+}
+
+static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
+ int param_no)
+{
+ return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
+}
+
+static inline void edma_param_write(struct edma_cc *ecc, int offset,
+ int param_no, unsigned val)
+{
+ edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
+}
+
+static inline void edma_param_modify(struct edma_cc *ecc, int offset,
+ int param_no, unsigned and, unsigned or)
+{
+ edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
+}
+
+static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
+ unsigned and)
+{
+ edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
+}
+
+static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
+ unsigned or)
+{
+ edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
+}
+
+static inline void set_bits(int offset, int len, unsigned long *p)
+{
+ for (; len > 0; len--)
+ set_bit(offset + (len - 1), p);
+}
+
+static inline void clear_bits(int offset, int len, unsigned long *p)
+{
+ for (; len > 0; len--)
+ clear_bit(offset + (len - 1), p);
+}
+
+static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
+ int priority)
+{
+ int bit = queue_no * 4;
+
+ edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
+}
+
+static void edma_set_chmap(struct edma_chan *echan, int slot)
+{
+ struct edma_cc *ecc = echan->ecc;
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+
+ if (ecc->chmap_exist) {
+ slot = EDMA_CHAN_SLOT(slot);
+ edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
+ }
+}
+
+static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
+{
+ struct edma_cc *ecc = echan->ecc;
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+
+ if (enable) {
+ edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
+ BIT(channel & 0x1f));
+ edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
+ BIT(channel & 0x1f));
+ } else {
+ edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
+ BIT(channel & 0x1f));
+ }
+}
+
+/*
+ * paRAM slot management functions
+ */
+static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
+ const struct edmacc_param *param)
+{
+ slot = EDMA_CHAN_SLOT(slot);
+ if (slot >= ecc->num_slots)
+ return;
+ memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
+}
+
+static void edma_read_slot(struct edma_cc *ecc, unsigned slot,
+ struct edmacc_param *param)
+{
+ slot = EDMA_CHAN_SLOT(slot);
+ if (slot >= ecc->num_slots)
+ return;
+ memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
+}
+
+/**
+ * edma_alloc_slot - allocate DMA parameter RAM
+ * @ecc: pointer to edma_cc struct
+ * @slot: specific slot to allocate; negative for "any unused slot"
+ *
+ * This allocates a parameter RAM slot, initializing it to hold a
+ * dummy transfer. Slots allocated using this routine have not been
+ * mapped to a hardware DMA channel, and will normally be used by
+ * linking to them from a slot associated with a DMA channel.
+ *
+ * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
+ * slots may be allocated on behalf of DSP firmware.
+ *
+ * Returns the number of the slot, else negative errno.
+ */
+static int edma_alloc_slot(struct edma_cc *ecc, int slot)
+{
+ if (slot > 0) {
+ slot = EDMA_CHAN_SLOT(slot);
+ /* Requesting entry paRAM slot for a HW triggered channel. */
+ if (ecc->chmap_exist && slot < ecc->num_channels)
+ slot = EDMA_SLOT_ANY;
+ }
+
+ if (slot < 0) {
+ if (ecc->chmap_exist)
+ slot = 0;
+ else
+ slot = ecc->num_channels;
+ for (;;) {
+ slot = find_next_zero_bit(ecc->slot_inuse,
+ ecc->num_slots,
+ slot);
+ if (slot == ecc->num_slots)
+ return -ENOMEM;
+ if (!test_and_set_bit(slot, ecc->slot_inuse))
+ break;
+ }
+ } else if (slot >= ecc->num_slots) {
+ return -EINVAL;
+ } else if (test_and_set_bit(slot, ecc->slot_inuse)) {
+ return -EBUSY;
+ }
+
+ edma_write_slot(ecc, slot, &dummy_paramset);
+
+ return EDMA_CTLR_CHAN(ecc->id, slot);
+}
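+
+/*
+ * Illustrative slot lifecycle with the helpers above (sketch only; pset
+ * here stands for a previously prepared struct edma_pset):
+ *
+ *	int slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
+ *
+ *	if (slot >= 0) {
+ *		edma_write_slot(ecc, slot, &pset->param);
+ *		...
+ *		edma_free_slot(ecc, slot);
+ *	}
+ */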
+
+static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
+{
+ slot = EDMA_CHAN_SLOT(slot);
+ if (slot >= ecc->num_slots)
+ return;
+
+ edma_write_slot(ecc, slot, &dummy_paramset);
+ clear_bit(slot, ecc->slot_inuse);
+}
+
+/**
+ * edma_link - link one parameter RAM slot to another
+ * @ecc: pointer to edma_cc struct
+ * @from: parameter RAM slot originating the link
+ * @to: parameter RAM slot which is the link target
+ *
+ * The originating slot should not be part of any active DMA transfer.
+ */
+static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
+{
+ if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
+ dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");
+
+ from = EDMA_CHAN_SLOT(from);
+ to = EDMA_CHAN_SLOT(to);
+ if (from >= ecc->num_slots || to >= ecc->num_slots)
+ return;
+
+ edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
+ PARM_OFFSET(to));
+}
+
+/**
+ * edma_get_position - returns the current transfer point
+ * @ecc: pointer to edma_cc struct
+ * @slot: parameter RAM slot being examined
+ * @dst: true selects the dest position, false the source
+ *
+ * Returns the position of the current active slot
+ */
+static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
+ bool dst)
+{
+ u32 offs;
+
+ slot = EDMA_CHAN_SLOT(slot);
+ offs = PARM_OFFSET(slot);
+ offs += dst ? PARM_DST : PARM_SRC;
+
+ return edma_read(ecc, offs);
+}
+
+/*
+ * Channels with event associations will be triggered by their hardware
+ * events, and channels without such associations will be triggered by
+ * software. (At this writing there is no interface for using software
+ * triggers except with channels that don't support hardware triggers.)
+ */
+static void edma_start(struct edma_chan *echan)
+{
+ struct edma_cc *ecc = echan->ecc;
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ int j = (channel >> 5);
+ unsigned int mask = BIT(channel & 0x1f);
+
+ if (!echan->hw_triggered) {
+ /* EDMA channels without event association */
+ dev_dbg(ecc->dev, "ESR%d %08x\n", j,
+ edma_shadow0_read_array(ecc, SH_ESR, j));
+ edma_shadow0_write_array(ecc, SH_ESR, j, mask);
+ } else {
+ /* EDMA channel with event association */
+ dev_dbg(ecc->dev, "ER%d %08x\n", j,
+ edma_shadow0_read_array(ecc, SH_ER, j));
+ /* Clear any pending event or error */
+ edma_write_array(ecc, EDMA_ECR, j, mask);
+ edma_write_array(ecc, EDMA_EMCR, j, mask);
+ /* Clear any SER */
+ edma_shadow0_write_array(ecc, SH_SECR, j, mask);
+ edma_shadow0_write_array(ecc, SH_EESR, j, mask);
+ dev_dbg(ecc->dev, "EER%d %08x\n", j,
+ edma_shadow0_read_array(ecc, SH_EER, j));
+ }
+}
+
+static void edma_stop(struct edma_chan *echan)
+{
+ struct edma_cc *ecc = echan->ecc;
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ int j = (channel >> 5);
+ unsigned int mask = BIT(channel & 0x1f);
+
+ edma_shadow0_write_array(ecc, SH_EECR, j, mask);
+ edma_shadow0_write_array(ecc, SH_ECR, j, mask);
+ edma_shadow0_write_array(ecc, SH_SECR, j, mask);
+ edma_write_array(ecc, EDMA_EMCR, j, mask);
+
+ /* clear possibly pending completion interrupt */
+ edma_shadow0_write_array(ecc, SH_ICR, j, mask);
+
+ dev_dbg(ecc->dev, "EER%d %08x\n", j,
+ edma_shadow0_read_array(ecc, SH_EER, j));
+
+ /* REVISIT: consider guarding against inappropriate event
+ * chaining by overwriting with dummy_paramset.
+ */
+}
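+
+/*
+ * Register pairs used above, for reference: EESR/EECR enable and disable
+ * hardware event triggering, ESR sets a manual (software) trigger, ECR
+ * clears a pending event, and SECR/EMCR clear latched secondary and
+ * missed-event status so a stopped channel does not re-fire.
+ */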
+
+/*
+ * Temporarily disable EDMA hardware events on the specified channel,
+ * preventing them from triggering new transfers
+ */
+static void edma_pause(struct edma_chan *echan)
+{
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ unsigned int mask = BIT(channel & 0x1f);
+
+ edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
+}
+
+/* Re-enable EDMA hardware events on the specified channel. */
+static void edma_resume(struct edma_chan *echan)
+{
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ unsigned int mask = BIT(channel & 0x1f);
+
+ edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
+}
+
+static void edma_trigger_channel(struct edma_chan *echan)
+{
+ struct edma_cc *ecc = echan->ecc;
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ unsigned int mask = BIT(channel & 0x1f);
+
+ edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);
+
+ dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
+ edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
+}
+
+static void edma_clean_channel(struct edma_chan *echan)
+{
+ struct edma_cc *ecc = echan->ecc;
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ int j = (channel >> 5);
+ unsigned int mask = BIT(channel & 0x1f);
+
+ dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
+ edma_shadow0_write_array(ecc, SH_ECR, j, mask);
+ /* Clear the corresponding EMR bits */
+ edma_write_array(ecc, EDMA_EMCR, j, mask);
+ /* Clear any SER */
+ edma_shadow0_write_array(ecc, SH_SECR, j, mask);
+ edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
+}
+
+/* Move channel to a specific event queue */
+static void edma_assign_channel_eventq(struct edma_chan *echan,
+ enum dma_event_q eventq_no)
+{
+ struct edma_cc *ecc = echan->ecc;
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ int bit = (channel & 0x7) * 4;
+
+ /* default to low priority queue */
+ if (eventq_no == EVENTQ_DEFAULT)
+ eventq_no = ecc->default_queue;
+ if (eventq_no >= ecc->num_tc)
+ return;
+
+ eventq_no &= 7;
+ edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
+ eventq_no << bit);
+}
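+
+/*
+ * DMAQNUM packs eight channels per 32-bit register, four bits each; for
+ * example channel 13 lands in DMAQNUM1 (13 >> 3) with its queue number
+ * in the field at bit 20 ((13 & 0x7) * 4).
+ */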
+
+static int edma_alloc_channel(struct edma_chan *echan,
+ enum dma_event_q eventq_no)
+{
+ struct edma_cc *ecc = echan->ecc;
+ int channel = EDMA_CHAN_SLOT(echan->ch_num);
+
+ /* ensure access through shadow region 0 */
+ edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
+
+ /* ensure no events are pending */
+ edma_stop(echan);
+
+ edma_setup_interrupt(echan, true);
+
+ edma_assign_channel_eventq(echan, eventq_no);
+
+ return 0;
+}
+
+static void edma_free_channel(struct edma_chan *echan)
+{
+ /* ensure no events are pending */
+ edma_stop(echan);
+ /* REVISIT should probably take out of shadow region 0 */
+ edma_setup_interrupt(echan, false);
+}
+
static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
return container_of(d, struct edma_cc, dma_slave);
@@ -135,8 +729,7 @@ static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
return container_of(c, struct edma_chan, vchan.chan);
}
-static inline struct edma_desc
-*to_edma_desc(struct dma_async_tx_descriptor *tx)
+static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
{
return container_of(tx, struct edma_desc, vdesc.tx);
}
@@ -149,20 +742,17 @@ static void edma_desc_free(struct virt_dma_desc *vdesc)
/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
+ struct edma_cc *ecc = echan->ecc;
struct virt_dma_desc *vdesc;
struct edma_desc *edesc;
struct device *dev = echan->vchan.chan.device->dev;
int i, j, left, nslots;
- /* If either we processed all psets or we're still not started */
- if (!echan->edesc ||
- echan->edesc->pset_nr == echan->edesc->processed) {
- /* Get next vdesc */
+ if (!echan->edesc) {
+ /* Setup is needed for the first transfer */
vdesc = vchan_next_desc(&echan->vchan);
- if (!vdesc) {
- echan->edesc = NULL;
+ if (!vdesc)
return;
- }
list_del(&vdesc->node);
echan->edesc = to_edma_desc(&vdesc->tx);
}
@@ -177,32 +767,32 @@ static void edma_execute(struct edma_chan *echan)
/* Write descriptor PaRAM set(s) */
for (i = 0; i < nslots; i++) {
j = i + edesc->processed;
- edma_write_slot(echan->slot[i], &edesc->pset[j].param);
+ edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
edesc->sg_len += edesc->pset[j].len;
- dev_vdbg(echan->vchan.chan.device->dev,
- "\n pset[%d]:\n"
- " chnum\t%d\n"
- " slot\t%d\n"
- " opt\t%08x\n"
- " src\t%08x\n"
- " dst\t%08x\n"
- " abcnt\t%08x\n"
- " ccnt\t%08x\n"
- " bidx\t%08x\n"
- " cidx\t%08x\n"
- " lkrld\t%08x\n",
- j, echan->ch_num, echan->slot[i],
- edesc->pset[j].param.opt,
- edesc->pset[j].param.src,
- edesc->pset[j].param.dst,
- edesc->pset[j].param.a_b_cnt,
- edesc->pset[j].param.ccnt,
- edesc->pset[j].param.src_dst_bidx,
- edesc->pset[j].param.src_dst_cidx,
- edesc->pset[j].param.link_bcntrld);
+ dev_vdbg(dev,
+ "\n pset[%d]:\n"
+ " chnum\t%d\n"
+ " slot\t%d\n"
+ " opt\t%08x\n"
+ " src\t%08x\n"
+ " dst\t%08x\n"
+ " abcnt\t%08x\n"
+ " ccnt\t%08x\n"
+ " bidx\t%08x\n"
+ " cidx\t%08x\n"
+ " lkrld\t%08x\n",
+ j, echan->ch_num, echan->slot[i],
+ edesc->pset[j].param.opt,
+ edesc->pset[j].param.src,
+ edesc->pset[j].param.dst,
+ edesc->pset[j].param.a_b_cnt,
+ edesc->pset[j].param.ccnt,
+ edesc->pset[j].param.src_dst_bidx,
+ edesc->pset[j].param.src_dst_cidx,
+ edesc->pset[j].param.link_bcntrld);
/* Link to the previous slot if not the last set */
if (i != (nslots - 1))
- edma_link(echan->slot[i], echan->slot[i+1]);
+ edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
}
edesc->processed += nslots;
@@ -214,34 +804,32 @@ static void edma_execute(struct edma_chan *echan)
*/
if (edesc->processed == edesc->pset_nr) {
if (edesc->cyclic)
- edma_link(echan->slot[nslots-1], echan->slot[1]);
+ edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
else
- edma_link(echan->slot[nslots-1],
+ edma_link(ecc, echan->slot[nslots - 1],
echan->ecc->dummy_slot);
}
- if (edesc->processed <= MAX_NR_SG) {
+ if (echan->missed) {
+ /*
+ * This happens due to setup times between intermediate
+ * transfers in long SG lists which have to be broken up into
+ * transfers of MAX_NR_SG
+ */
+ dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
+ edma_clean_channel(echan);
+ edma_stop(echan);
+ edma_start(echan);
+ edma_trigger_channel(echan);
+ echan->missed = 0;
+ } else if (edesc->processed <= MAX_NR_SG) {
dev_dbg(dev, "first transfer starting on channel %d\n",
echan->ch_num);
- edma_start(echan->ch_num);
+ edma_start(echan);
} else {
dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
echan->ch_num, edesc->processed);
- edma_resume(echan->ch_num);
- }
-
- /*
- * This happens due to setup times between intermediate transfers
- * in long SG lists which have to be broken up into transfers of
- * MAX_NR_SG
- */
- if (echan->missed) {
- dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
- edma_clean_channel(echan->ch_num);
- edma_stop(echan->ch_num);
- edma_start(echan->ch_num);
- edma_trigger_channel(echan->ch_num);
- echan->missed = 0;
+ edma_resume(echan);
}
}
@@ -259,20 +847,16 @@ static int edma_terminate_all(struct dma_chan *chan)
* echan->edesc is NULL and exit.)
*/
if (echan->edesc) {
- int cyclic = echan->edesc->cyclic;
-
+ edma_stop(echan);
+ /* Move the cyclic channel back to default queue */
+ if (!echan->tc && echan->edesc->cyclic)
+ edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
/*
* free the running request descriptor
* since it is not in any of the vdesc lists
*/
edma_desc_free(&echan->edesc->vdesc);
-
echan->edesc = NULL;
- edma_stop(echan->ch_num);
- /* Move the cyclic channel back to default queue */
- if (cyclic)
- edma_assign_channel_eventq(echan->ch_num,
- EVENTQ_DEFAULT);
}
vchan_get_all_descriptors(&echan->vchan, &head);
@@ -303,7 +887,7 @@ static int edma_dma_pause(struct dma_chan *chan)
if (!echan->edesc)
return -EINVAL;
- edma_pause(echan->ch_num);
+ edma_pause(echan);
return 0;
}
@@ -311,7 +895,7 @@ static int edma_dma_resume(struct dma_chan *chan)
{
struct edma_chan *echan = to_edma_chan(chan);
- edma_resume(echan->ch_num);
+ edma_resume(echan);
return 0;
}
@@ -327,19 +911,17 @@ static int edma_dma_resume(struct dma_chan *chan)
* @direction: Direction of the transfer
*/
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
- dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
- enum dma_slave_buswidth dev_width, unsigned int dma_length,
- enum dma_transfer_direction direction)
+ dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
+ unsigned int acnt, unsigned int dma_length,
+ enum dma_transfer_direction direction)
{
struct edma_chan *echan = to_edma_chan(chan);
struct device *dev = chan->device->dev;
struct edmacc_param *param = &epset->param;
- int acnt, bcnt, ccnt, cidx;
+ int bcnt, ccnt, cidx;
int src_bidx, dst_bidx, src_cidx, dst_cidx;
int absync;
- acnt = dev_width;
-
/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
if (!burst)
burst = 1;
@@ -475,8 +1057,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
return NULL;
}
- edesc = kzalloc(sizeof(*edesc) + sg_len *
- sizeof(edesc->pset[0]), GFP_ATOMIC);
+ edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
+ GFP_ATOMIC);
if (!edesc) {
dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
return NULL;
@@ -493,8 +1075,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
for (i = 0; i < nslots; i++) {
if (echan->slot[i] < 0) {
echan->slot[i] =
- edma_alloc_slot(EDMA_CTLR(echan->ch_num),
- EDMA_SLOT_ANY);
+ edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
if (echan->slot[i] < 0) {
kfree(edesc);
dev_err(dev, "%s: Failed to allocate slot\n",
@@ -541,36 +1122,98 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long tx_flags)
{
- int ret;
+ int ret, nslots;
struct edma_desc *edesc;
struct device *dev = chan->device->dev;
struct edma_chan *echan = to_edma_chan(chan);
+ unsigned int width, pset_len;
if (unlikely(!echan || !len))
return NULL;
- edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
+ if (len < SZ_64K) {
+ /*
+ * Transfer size less than 64K can be handled with one paRAM
+ * slot and with one burst.
+ * ACNT = length
+ */
+ width = len;
+ pset_len = len;
+ nslots = 1;
+ } else {
+ /*
+	 * Transfer sizes of 64K or more are handled with a maximum of
+	 * two paRAM slots.
+	 * slot1: (full_length / 32767) bursts of 32767 bytes each.
+	 *        ACNT = 32767, length1: (full_length / 32767) * 32767
+	 * slot2: the remaining amount of data after slot1.
+	 *        ACNT = full_length - length1, length2 = ACNT
+	 *
+	 * When full_length is a multiple of 32767 one slot can be
+	 * used to complete the transfer.
+ */
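+		/*
+		 * Worked example (illustrative): len = 100000 gives
+		 * width = 32767 and pset_len = rounddown(100000, 32767) =
+		 * 98301, i.e. slot1 moves three 32767-byte bursts and the
+		 * remaining 1699 bytes are left for slot2.
+		 */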
+ width = SZ_32K - 1;
+ pset_len = rounddown(len, width);
+	/* One slot is enough for lengths that are a multiple of (SZ_32K - 1) */
+ if (unlikely(pset_len == len))
+ nslots = 1;
+ else
+ nslots = 2;
+ }
+
+ edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
+ GFP_ATOMIC);
if (!edesc) {
dev_dbg(dev, "Failed to allocate a descriptor\n");
return NULL;
}
- edesc->pset_nr = 1;
+ edesc->pset_nr = nslots;
+ edesc->residue = edesc->residue_stat = len;
+ edesc->direction = DMA_MEM_TO_MEM;
+ edesc->echan = echan;
ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
- DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM);
- if (ret < 0)
+ width, pset_len, DMA_MEM_TO_MEM);
+ if (ret < 0) {
+ kfree(edesc);
return NULL;
+ }
edesc->absync = ret;
- /*
- * Enable intermediate transfer chaining to re-trigger channel
- * on completion of every TR, and enable transfer-completion
- * interrupt on completion of the whole transfer.
- */
edesc->pset[0].param.opt |= ITCCHEN;
- edesc->pset[0].param.opt |= TCINTEN;
+ if (nslots == 1) {
+ /* Enable transfer complete interrupt */
+ edesc->pset[0].param.opt |= TCINTEN;
+ } else {
+ /* Enable transfer complete chaining for the first slot */
+ edesc->pset[0].param.opt |= TCCHEN;
+
+ if (echan->slot[1] < 0) {
+ echan->slot[1] = edma_alloc_slot(echan->ecc,
+ EDMA_SLOT_ANY);
+ if (echan->slot[1] < 0) {
+ kfree(edesc);
+ dev_err(dev, "%s: Failed to allocate slot\n",
+ __func__);
+ return NULL;
+ }
+ }
+ dest += pset_len;
+ src += pset_len;
+ pset_len = width = len % (SZ_32K - 1);
+
+ ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
+ width, pset_len, DMA_MEM_TO_MEM);
+ if (ret < 0) {
+ kfree(edesc);
+ return NULL;
+ }
+
+ edesc->pset[1].param.opt |= ITCCHEN;
+ edesc->pset[1].param.opt |= TCINTEN;
+ }
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
@@ -629,8 +1272,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
if (nslots > MAX_NR_SG)
return NULL;
- edesc = kzalloc(sizeof(*edesc) + nslots *
- sizeof(edesc->pset[0]), GFP_ATOMIC);
+ edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
+ GFP_ATOMIC);
if (!edesc) {
dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
return NULL;
@@ -649,8 +1292,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
/* Allocate a PaRAM slot, if needed */
if (echan->slot[i] < 0) {
echan->slot[i] =
- edma_alloc_slot(EDMA_CTLR(echan->ch_num),
- EDMA_SLOT_ANY);
+ edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
if (echan->slot[i] < 0) {
kfree(edesc);
dev_err(dev, "%s: Failed to allocate slot\n",
@@ -711,128 +1353,281 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
}
	/* Place the cyclic channel in the highest priority queue */
- edma_assign_channel_eventq(echan->ch_num, EVENTQ_0);
+ if (!echan->tc)
+ edma_assign_channel_eventq(echan, EVENTQ_0);
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
-static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
+static void edma_completion_handler(struct edma_chan *echan)
{
- struct edma_chan *echan = data;
struct device *dev = echan->vchan.chan.device->dev;
- struct edma_desc *edesc;
- struct edmacc_param p;
+ struct edma_desc *edesc = echan->edesc;
- edesc = echan->edesc;
+ if (!edesc)
+ return;
- /* Pause the channel for non-cyclic */
- if (!edesc || (edesc && !edesc->cyclic))
- edma_pause(echan->ch_num);
-
- switch (ch_status) {
- case EDMA_DMA_COMPLETE:
- spin_lock(&echan->vchan.lock);
-
- if (edesc) {
- if (edesc->cyclic) {
- vchan_cyclic_callback(&edesc->vdesc);
- } else if (edesc->processed == edesc->pset_nr) {
- dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
- edesc->residue = 0;
- edma_stop(echan->ch_num);
- vchan_cookie_complete(&edesc->vdesc);
- edma_execute(echan);
- } else {
- dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
-
- /* Update statistics for tx_status */
- edesc->residue -= edesc->sg_len;
- edesc->residue_stat = edesc->residue;
- edesc->processed_stat = edesc->processed;
-
- edma_execute(echan);
- }
+ spin_lock(&echan->vchan.lock);
+ if (edesc->cyclic) {
+ vchan_cyclic_callback(&edesc->vdesc);
+ spin_unlock(&echan->vchan.lock);
+ return;
+ } else if (edesc->processed == edesc->pset_nr) {
+ edesc->residue = 0;
+ edma_stop(echan);
+ vchan_cookie_complete(&edesc->vdesc);
+ echan->edesc = NULL;
+
+ dev_dbg(dev, "Transfer completed on channel %d\n",
+ echan->ch_num);
+ } else {
+ dev_dbg(dev, "Sub transfer completed on channel %d\n",
+ echan->ch_num);
+
+ edma_pause(echan);
+
+ /* Update statistics for tx_status */
+ edesc->residue -= edesc->sg_len;
+ edesc->residue_stat = edesc->residue;
+ edesc->processed_stat = edesc->processed;
+ }
+ edma_execute(echan);
+
+ spin_unlock(&echan->vchan.lock);
+}
+
+/* eDMA interrupt handler */
+static irqreturn_t dma_irq_handler(int irq, void *data)
+{
+ struct edma_cc *ecc = data;
+ int ctlr;
+ u32 sh_ier;
+ u32 sh_ipr;
+ u32 bank;
+
+ ctlr = ecc->id;
+ if (ctlr < 0)
+ return IRQ_NONE;
+
+ dev_vdbg(ecc->dev, "dma_irq_handler\n");
+
+ sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
+ if (!sh_ipr) {
+ sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
+ if (!sh_ipr)
+ return IRQ_NONE;
+ sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
+ bank = 1;
+ } else {
+ sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
+ bank = 0;
+ }
+
+ do {
+ u32 slot;
+ u32 channel;
+
+ slot = __ffs(sh_ipr);
+ sh_ipr &= ~(BIT(slot));
+
+ if (sh_ier & BIT(slot)) {
+ channel = (bank << 5) | slot;
+ /* Clear the corresponding IPR bits */
+ edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
+ edma_completion_handler(&ecc->slave_chans[channel]);
}
+ } while (sh_ipr);
- spin_unlock(&echan->vchan.lock);
+ edma_shadow0_write(ecc, SH_IEVAL, 1);
+ return IRQ_HANDLED;
+}
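+
+/*
+ * Decoding note: IPR0/IPR1 together cover 64 channels, so a pending bit
+ * slot in bank 1 maps to channel (1 << 5) | slot; e.g. IPR1 bit 3 is
+ * channel 35.
+ */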
+
+static void edma_error_handler(struct edma_chan *echan)
+{
+ struct edma_cc *ecc = echan->ecc;
+ struct device *dev = echan->vchan.chan.device->dev;
+ struct edmacc_param p;
- break;
- case EDMA_DMA_CC_ERROR:
- spin_lock(&echan->vchan.lock);
+ if (!echan->edesc)
+ return;
- edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);
+ spin_lock(&echan->vchan.lock);
+ edma_read_slot(ecc, echan->slot[0], &p);
+	/*
+	 * Issue later based on the missed flag; this is guaranteed to
+	 * happen because either:
+	 * (1) we finished transmitting an intermediate slot and
+	 * edma_execute is coming up, or
+	 * (2) we finished the current transfer and issue will
+	 * call edma_execute.
+	 *
+	 * Important note: issuing can be dangerous here and
+	 * lead to some nasty recursion when we are in a NULL
+	 * slot. So we avoid doing so and set the missed flag.
+	 */
+ if (p.a_b_cnt == 0 && p.ccnt == 0) {
+ dev_dbg(dev, "Error on null slot, setting miss\n");
+ echan->missed = 1;
+ } else {
/*
- * Issue later based on missed flag which will be sure
- * to happen as:
- * (1) we finished transmitting an intermediate slot and
- * edma_execute is coming up.
- * (2) or we finished current transfer and issue will
- * call edma_execute.
- *
- * Important note: issuing can be dangerous here and
- * lead to some nasty recursion when we are in a NULL
- * slot. So we avoid doing so and set the missed flag.
+ * The slot is already programmed but the event got
+		 * missed, so it's safe to issue it here.
*/
- if (p.a_b_cnt == 0 && p.ccnt == 0) {
- dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n");
- echan->missed = 1;
- } else {
- /*
- * The slot is already programmed but the event got
- * missed, so its safe to issue it here.
- */
- dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n");
- edma_clean_channel(echan->ch_num);
- edma_stop(echan->ch_num);
- edma_start(echan->ch_num);
- edma_trigger_channel(echan->ch_num);
+ dev_dbg(dev, "Missed event, TRIGGERING\n");
+ edma_clean_channel(echan);
+ edma_stop(echan);
+ edma_start(echan);
+ edma_trigger_channel(echan);
+ }
+ spin_unlock(&echan->vchan.lock);
+}
+
+static inline bool edma_error_pending(struct edma_cc *ecc)
+{
+ if (edma_read_array(ecc, EDMA_EMR, 0) ||
+ edma_read_array(ecc, EDMA_EMR, 1) ||
+ edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR))
+ return true;
+
+ return false;
+}
+
+/* eDMA error interrupt handler */
+static irqreturn_t dma_ccerr_handler(int irq, void *data)
+{
+ struct edma_cc *ecc = data;
+ int i, j;
+ int ctlr;
+ unsigned int cnt = 0;
+ unsigned int val;
+
+ ctlr = ecc->id;
+ if (ctlr < 0)
+ return IRQ_NONE;
+
+ dev_vdbg(ecc->dev, "dma_ccerr_handler\n");
+
+ if (!edma_error_pending(ecc))
+ return IRQ_NONE;
+
+ while (1) {
+ /* Event missed register(s) */
+ for (j = 0; j < 2; j++) {
+ unsigned long emr;
+
+ val = edma_read_array(ecc, EDMA_EMR, j);
+ if (!val)
+ continue;
+
+ dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
+ emr = val;
+ for (i = find_next_bit(&emr, 32, 0); i < 32;
+ i = find_next_bit(&emr, 32, i + 1)) {
+ int k = (j << 5) + i;
+
+ /* Clear the corresponding EMR bits */
+ edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
+ /* Clear any SER */
+ edma_shadow0_write_array(ecc, SH_SECR, j,
+ BIT(i));
+ edma_error_handler(&ecc->slave_chans[k]);
+ }
}
- spin_unlock(&echan->vchan.lock);
+ val = edma_read(ecc, EDMA_QEMR);
+ if (val) {
+ dev_dbg(ecc->dev, "QEMR 0x%02x\n", val);
+ /* Not reported, just clear the interrupt reason. */
+ edma_write(ecc, EDMA_QEMCR, val);
+ edma_shadow0_write(ecc, SH_QSECR, val);
+ }
+
+ val = edma_read(ecc, EDMA_CCERR);
+ if (val) {
+ dev_warn(ecc->dev, "CCERR 0x%08x\n", val);
+ /* Not reported, just clear the interrupt reason. */
+ edma_write(ecc, EDMA_CCERRCLR, val);
+ }
- break;
- default:
- break;
+ if (!edma_error_pending(ecc))
+ break;
+ cnt++;
+ if (cnt > 10)
+ break;
}
+ edma_write(ecc, EDMA_EEVAL, 1);
+ return IRQ_HANDLED;
+}
+
+static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
+{
+ struct platform_device *tc_pdev;
+ int ret;
+
+ if (!tc)
+ return;
+
+ tc_pdev = of_find_device_by_node(tc->node);
+ if (!tc_pdev) {
+ pr_err("%s: TPTC device is not found\n", __func__);
+ return;
+ }
+ if (!pm_runtime_enabled(&tc_pdev->dev))
+ pm_runtime_enable(&tc_pdev->dev);
+
+ if (enable)
+ ret = pm_runtime_get_sync(&tc_pdev->dev);
+ else
+ ret = pm_runtime_put_sync(&tc_pdev->dev);
+
+ if (ret < 0)
+ pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
+ enable ? "get" : "put", dev_name(&tc_pdev->dev));
}
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
struct edma_chan *echan = to_edma_chan(chan);
- struct device *dev = chan->device->dev;
+ struct edma_cc *ecc = echan->ecc;
+ struct device *dev = ecc->dev;
+ enum dma_event_q eventq_no = EVENTQ_DEFAULT;
int ret;
- int a_ch_num;
- LIST_HEAD(descs);
- a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
- echan, EVENTQ_DEFAULT);
-
- if (a_ch_num < 0) {
- ret = -ENODEV;
- goto err_no_chan;
+ if (echan->tc) {
+ eventq_no = echan->tc->id;
+ } else if (ecc->tc_list) {
+ /* memcpy channel */
+ echan->tc = &ecc->tc_list[ecc->info->default_queue];
+ eventq_no = echan->tc->id;
}
- if (a_ch_num != echan->ch_num) {
- dev_err(dev, "failed to allocate requested channel %u:%u\n",
- EDMA_CTLR(echan->ch_num),
+ ret = edma_alloc_channel(echan, eventq_no);
+ if (ret)
+ return ret;
+
+ echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num);
+ if (echan->slot[0] < 0) {
+ dev_err(dev, "Entry slot allocation failed for channel %u\n",
EDMA_CHAN_SLOT(echan->ch_num));
- ret = -ENODEV;
- goto err_wrong_chan;
+		ret = echan->slot[0];
+		goto err_slot;
}
+ /* Set up channel -> slot mapping for the entry slot */
+ edma_set_chmap(echan, echan->slot[0]);
echan->alloced = true;
- echan->slot[0] = echan->ch_num;
- dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
- EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
+ dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n",
+ EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
+ echan->hw_triggered ? "HW" : "SW");
+
+ edma_tc_set_pm_state(echan->tc, true);
return 0;
-err_wrong_chan:
- edma_free_channel(a_ch_num);
-err_no_chan:
+err_slot:
+ edma_free_channel(echan);
return ret;
}
@@ -840,29 +1635,37 @@ err_no_chan:
static void edma_free_chan_resources(struct dma_chan *chan)
{
struct edma_chan *echan = to_edma_chan(chan);
- struct device *dev = chan->device->dev;
+ struct device *dev = echan->ecc->dev;
int i;
/* Terminate transfers */
- edma_stop(echan->ch_num);
+ edma_stop(echan);
vchan_free_chan_resources(&echan->vchan);
/* Free EDMA PaRAM slots */
- for (i = 1; i < EDMA_MAX_SLOTS; i++) {
+ for (i = 0; i < EDMA_MAX_SLOTS; i++) {
if (echan->slot[i] >= 0) {
- edma_free_slot(echan->slot[i]);
+ edma_free_slot(echan->ecc, echan->slot[i]);
echan->slot[i] = -1;
}
}
+ /* Set entry slot to the dummy slot */
+ edma_set_chmap(echan, echan->ecc->dummy_slot);
+
/* Free EDMA channel */
if (echan->alloced) {
- edma_free_channel(echan->ch_num);
+ edma_free_channel(echan);
echan->alloced = false;
}
- dev_dbg(dev, "freeing channel for %u\n", echan->ch_num);
+ edma_tc_set_pm_state(echan->tc, false);
+ echan->tc = NULL;
+ echan->hw_triggered = false;
+
+ dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n",
+ EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id);
}
/* Send pending descriptor to hardware */
@@ -888,7 +1691,7 @@ static u32 edma_residue(struct edma_desc *edesc)
	 * We always read the dst/src position from the first PaRAM
* pset. That's the one which is active now.
*/
- pos = edma_get_position(edesc->echan->slot[0], dst);
+ pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst);
/*
* Cyclic is simple. Just subtract pset[0].addr from pos.
@@ -949,19 +1752,101 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
return ret;
}
-static void __init edma_chan_init(struct edma_cc *ecc,
- struct dma_device *dma,
- struct edma_chan *echans)
+static bool edma_is_memcpy_channel(int ch_num, s16 *memcpy_channels)
{
+ s16 *memcpy_ch = memcpy_channels;
+
+ if (!memcpy_channels)
+ return false;
+ while (*memcpy_ch != -1) {
+ if (*memcpy_ch == ch_num)
+ return true;
+ memcpy_ch++;
+ }
+ return false;
+}
+
+#define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
+{
+ struct dma_device *s_ddev = &ecc->dma_slave;
+ struct dma_device *m_ddev = NULL;
+ s16 *memcpy_channels = ecc->info->memcpy_channels;
int i, j;
- for (i = 0; i < EDMA_CHANS; i++) {
- struct edma_chan *echan = &echans[i];
- echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
+ dma_cap_zero(s_ddev->cap_mask);
+ dma_cap_set(DMA_SLAVE, s_ddev->cap_mask);
+ dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask);
+ if (ecc->legacy_mode && !memcpy_channels) {
+ dev_warn(ecc->dev,
+ "Legacy memcpy is enabled, things might not work\n");
+
+ dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
+ s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+ s_ddev->directions = BIT(DMA_MEM_TO_MEM);
+ }
+
+ s_ddev->device_prep_slave_sg = edma_prep_slave_sg;
+ s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
+ s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
+ s_ddev->device_free_chan_resources = edma_free_chan_resources;
+ s_ddev->device_issue_pending = edma_issue_pending;
+ s_ddev->device_tx_status = edma_tx_status;
+ s_ddev->device_config = edma_slave_config;
+ s_ddev->device_pause = edma_dma_pause;
+ s_ddev->device_resume = edma_dma_resume;
+ s_ddev->device_terminate_all = edma_terminate_all;
+
+ s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
+ s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
+ s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV));
+ s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ s_ddev->dev = ecc->dev;
+ INIT_LIST_HEAD(&s_ddev->channels);
+
+ if (memcpy_channels) {
+ m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL);
+ ecc->dma_memcpy = m_ddev;
+
+ dma_cap_zero(m_ddev->cap_mask);
+ dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
+
+ m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+ m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
+ m_ddev->device_free_chan_resources = edma_free_chan_resources;
+ m_ddev->device_issue_pending = edma_issue_pending;
+ m_ddev->device_tx_status = edma_tx_status;
+ m_ddev->device_config = edma_slave_config;
+ m_ddev->device_pause = edma_dma_pause;
+ m_ddev->device_resume = edma_dma_resume;
+ m_ddev->device_terminate_all = edma_terminate_all;
+
+ m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
+ m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
+ m_ddev->directions = BIT(DMA_MEM_TO_MEM);
+ m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ m_ddev->dev = ecc->dev;
+ INIT_LIST_HEAD(&m_ddev->channels);
+ } else if (!ecc->legacy_mode) {
+ dev_info(ecc->dev, "memcpy is disabled\n");
+ }
+
+ for (i = 0; i < ecc->num_channels; i++) {
+ struct edma_chan *echan = &ecc->slave_chans[i];
+ echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
echan->ecc = ecc;
echan->vchan.desc_free = edma_desc_free;
- vchan_init(&echan->vchan, dma);
+ if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels))
+ vchan_init(&echan->vchan, m_ddev);
+ else
+ vchan_init(&echan->vchan, s_ddev);
INIT_LIST_HEAD(&echan->node);
for (j = 0; j < EDMA_MAX_SLOTS; j++)
@@ -969,85 +1854,474 @@ static void __init edma_chan_init(struct edma_cc *ecc,
}
}
-#define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
- BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
- BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
- BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
-
-static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
- struct device *dev)
+static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
+ struct edma_cc *ecc)
{
- dma->device_prep_slave_sg = edma_prep_slave_sg;
- dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
- dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
- dma->device_alloc_chan_resources = edma_alloc_chan_resources;
- dma->device_free_chan_resources = edma_free_chan_resources;
- dma->device_issue_pending = edma_issue_pending;
- dma->device_tx_status = edma_tx_status;
- dma->device_config = edma_slave_config;
- dma->device_pause = edma_dma_pause;
- dma->device_resume = edma_dma_resume;
- dma->device_terminate_all = edma_terminate_all;
+ int i;
+ u32 value, cccfg;
+ s8 (*queue_priority_map)[2];
+
+ /* Decode the eDMA3 configuration from CCCFG register */
+ cccfg = edma_read(ecc, EDMA_CCCFG);
+
+ value = GET_NUM_REGN(cccfg);
+ ecc->num_region = BIT(value);
+
+ value = GET_NUM_DMACH(cccfg);
+ ecc->num_channels = BIT(value + 1);
- dma->src_addr_widths = EDMA_DMA_BUSWIDTHS;
- dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
- dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
- dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ value = GET_NUM_QDMACH(cccfg);
+ ecc->num_qchannels = value * 2;
- dma->dev = dev;
+ value = GET_NUM_PAENTRY(cccfg);
+ ecc->num_slots = BIT(value + 4);
+
+ value = GET_NUM_EVQUE(cccfg);
+ ecc->num_tc = value + 1;
+
+ ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;
+
+ dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
+ dev_dbg(dev, "num_region: %u\n", ecc->num_region);
+ dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
+ dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
+ dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
+ dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
+ dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");
+
+	/* Nothing needs to be done if queue priority is provided */
+ if (pdata->queue_priority_mapping)
+ return 0;
/*
- * code using dma memcpy must make sure alignment of
- * length is at dma->copy_align boundary.
+ * Configure TC/queue priority as follows:
+ * Q0 - priority 0
+ * Q1 - priority 1
+ * Q2 - priority 2
+ * ...
+ * The meaning of priority numbers: 0 highest priority, 7 lowest
+ * priority. So Q0 is the highest priority queue and the last queue has
+ * the lowest priority.
*/
- dma->copy_align = DMAENGINE_ALIGN_4_BYTES;
+ queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
+ GFP_KERNEL);
+ if (!queue_priority_map)
+ return -ENOMEM;
+
+ for (i = 0; i < ecc->num_tc; i++) {
+ queue_priority_map[i][0] = i;
+ queue_priority_map[i][1] = i;
+ }
+ queue_priority_map[i][0] = -1;
+ queue_priority_map[i][1] = -1;
+
+ pdata->queue_priority_mapping = queue_priority_map;
+ /* Default queue has the lowest priority */
+ pdata->default_queue = i - 1;
+
+ return 0;
+}
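+
+/*
+ * Example decode (illustrative): a CCCFG with NUM_DMACH = 4, NUM_PAENTRY = 3
+ * and NUM_EVQUE = 3 yields BIT(4 + 1) = 32 channels, BIT(3 + 4) = 128 PaRAM
+ * slots and 3 + 1 = 4 event queues / transfer controllers.
+ */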
+
+#if IS_ENABLED(CONFIG_OF)
+static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
+ size_t sz)
+{
+ const char pname[] = "ti,edma-xbar-event-map";
+ struct resource res;
+ void __iomem *xbar;
+ s16 (*xbar_chans)[2];
+ size_t nelm = sz / sizeof(s16);
+ u32 shift, offset, mux;
+ int ret, i;
+
+ xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
+ if (!xbar_chans)
+ return -ENOMEM;
+
+ ret = of_address_to_resource(dev->of_node, 1, &res);
+ if (ret)
+ return -ENOMEM;
+
+ xbar = devm_ioremap(dev, res.start, resource_size(&res));
+ if (!xbar)
+ return -ENOMEM;
+
+ ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,
+ nelm);
+ if (ret)
+ return -EIO;
+
+ /* Invalidate last entry for the other user of this mess */
+ nelm >>= 1;
+ xbar_chans[nelm][0] = -1;
+ xbar_chans[nelm][1] = -1;
+
+ for (i = 0; i < nelm; i++) {
+ shift = (xbar_chans[i][1] & 0x03) << 3;
+ offset = xbar_chans[i][1] & 0xfffffffc;
+ mux = readl(xbar + offset);
+ mux &= ~(0xff << shift);
+ mux |= xbar_chans[i][0] << shift;
+ writel(mux, (xbar + offset));
+ }
- INIT_LIST_HEAD(&dma->channels);
+ pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
+ return 0;
+}
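+
+/*
+ * The crossbar packs four 8-bit event mux fields per 32-bit register:
+ * the register is selected by (reg & ~3) and the byte lane by
+ * ((reg & 3) * 8), so e.g. crossbar output 6 is programmed at offset 4,
+ * bits 23:16.
+ */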
+
+static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
+ bool legacy_mode)
+{
+ struct edma_soc_info *info;
+ struct property *prop;
+ size_t sz;
+ int ret;
+
+ info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ if (legacy_mode) {
+ prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map",
+ &sz);
+ if (prop) {
+ ret = edma_xbar_event_map(dev, info, sz);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ return info;
+ }
+
+ /* Get the list of channels allocated to be used for memcpy */
+ prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
+ if (prop) {
+ const char pname[] = "ti,edma-memcpy-channels";
+ size_t nelm = sz / sizeof(s16);
+ s16 *memcpy_ch;
+
+ memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s16),
+ GFP_KERNEL);
+ if (!memcpy_ch)
+ return ERR_PTR(-ENOMEM);
+
+ ret = of_property_read_u16_array(dev->of_node, pname,
+ (u16 *)memcpy_ch, nelm);
+ if (ret)
+ return ERR_PTR(ret);
+
+ memcpy_ch[nelm] = -1;
+ info->memcpy_channels = memcpy_ch;
+ }
+
+ prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges",
+ &sz);
+ if (prop) {
+ const char pname[] = "ti,edma-reserved-slot-ranges";
+ s16 (*rsv_slots)[2];
+ size_t nelm = sz / sizeof(*rsv_slots);
+ struct edma_rsv_info *rsv_info;
+
+ if (!nelm)
+ return info;
+
+ rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
+ if (!rsv_info)
+ return ERR_PTR(-ENOMEM);
+
+ rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
+ GFP_KERNEL);
+ if (!rsv_slots)
+ return ERR_PTR(-ENOMEM);
+
+ ret = of_property_read_u16_array(dev->of_node, pname,
+ (u16 *)rsv_slots, nelm * 2);
+ if (ret)
+ return ERR_PTR(ret);
+
+ rsv_slots[nelm][0] = -1;
+ rsv_slots[nelm][1] = -1;
+ info->rsv = rsv_info;
+ info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;
+ }
+
+ return info;
+}
+
+static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct edma_cc *ecc = ofdma->of_dma_data;
+ struct dma_chan *chan = NULL;
+ struct edma_chan *echan;
+ int i;
+
+ if (!ecc || dma_spec->args_count < 1)
+ return NULL;
+
+ for (i = 0; i < ecc->num_channels; i++) {
+ echan = &ecc->slave_chans[i];
+ if (echan->ch_num == dma_spec->args[0]) {
+ chan = &echan->vchan.chan;
+ break;
+ }
+ }
+
+ if (!chan)
+ return NULL;
+
+ if (echan->ecc->legacy_mode && dma_spec->args_count == 1)
+ goto out;
+
+ if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 &&
+ dma_spec->args[1] < echan->ecc->num_tc) {
+ echan->tc = &echan->ecc->tc_list[dma_spec->args[1]];
+ goto out;
+ }
+
+ return NULL;
+out:
+ /* The channel is going to be used as HW synchronized */
+ echan->hw_triggered = true;
+ return dma_get_slave_channel(chan);
+}
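+
+/*
+ * Consumer binding sketch (assumed, matching the argument handling above):
+ * one cell selects the channel; with the non-legacy binding a second cell
+ * may pin the transfer controller, e.g.:
+ *
+ *	dmas = <&edma 16 1>;	// channel 16, serviced by TC1
+ */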
+#else
+static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
+ bool legacy_mode)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ return NULL;
}
+#endif
static int edma_probe(struct platform_device *pdev)
{
- struct edma_cc *ecc;
+ struct edma_soc_info *info = pdev->dev.platform_data;
+ s8 (*queue_priority_mapping)[2];
+ int i, off, ln;
+ const s16 (*rsv_slots)[2];
+ const s16 (*xbar_chans)[2];
+ int irq;
+ char *irq_name;
+ struct resource *mem;
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct edma_cc *ecc;
+ bool legacy_mode = true;
int ret;
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (node) {
+ const struct of_device_id *match;
+
+ match = of_match_node(edma_of_ids, node);
+ if (match && (u32)match->data == EDMA_BINDING_TPCC)
+ legacy_mode = false;
+
+ info = edma_setup_info_from_dt(dev, legacy_mode);
+ if (IS_ERR(info)) {
+ dev_err(dev, "failed to get DT data\n");
+ return PTR_ERR(info);
+ }
+ }
+
+ if (!info)
+ return -ENODEV;
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync() failed\n");
+ return ret;
+ }
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
return ret;
- ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
+ ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
if (!ecc) {
- dev_err(&pdev->dev, "Can't allocate controller\n");
+ dev_err(dev, "Can't allocate controller\n");
return -ENOMEM;
}
- ecc->ctlr = pdev->id;
- ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
+ ecc->dev = dev;
+ ecc->id = pdev->id;
+ ecc->legacy_mode = legacy_mode;
+ /* When booting with DT the pdev->id is -1 */
+ if (ecc->id < 0)
+ ecc->id = 0;
+
+ mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
+ if (!mem) {
+ dev_dbg(dev, "mem resource not found, using index 0\n");
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(dev, "no mem resource?\n");
+ return -ENODEV;
+ }
+ }
+ ecc->base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(ecc->base))
+ return PTR_ERR(ecc->base);
+
+ platform_set_drvdata(pdev, ecc);
+
+ /* Get eDMA3 configuration from IP */
+ ret = edma_setup_from_hw(dev, info, ecc);
+ if (ret)
+ return ret;
+
+ /* Allocate memory based on the information we got from the IP */
+ ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
+ sizeof(*ecc->slave_chans), GFP_KERNEL);
+ if (!ecc->slave_chans)
+ return -ENOMEM;
+
+ ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!ecc->slot_inuse)
+ return -ENOMEM;
+
+ ecc->default_queue = info->default_queue;
+
+ for (i = 0; i < ecc->num_slots; i++)
+ edma_write_slot(ecc, i, &dummy_paramset);
+
+ if (info->rsv) {
+ /* Set the reserved slots in inuse list */
+ rsv_slots = info->rsv->rsv_slots;
+ if (rsv_slots) {
+ for (i = 0; rsv_slots[i][0] != -1; i++) {
+ off = rsv_slots[i][0];
+ ln = rsv_slots[i][1];
+ set_bits(off, ln, ecc->slot_inuse);
+ }
+ }
+ }
+
+ /* Clear the xbar mapped channels in unused list */
+ xbar_chans = info->xbar_chans;
+ if (xbar_chans) {
+ for (i = 0; xbar_chans[i][1] != -1; i++) {
+ off = xbar_chans[i][1];
+ }
+ }
+
+ irq = platform_get_irq_byname(pdev, "edma3_ccint");
+ if (irq < 0 && node)
+ irq = irq_of_parse_and_map(node, 0);
+
+ if (irq >= 0) {
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
+ dev_name(dev));
+ ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
+ ecc);
+ if (ret) {
+ dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
+ return ret;
+ }
+ }
+
+ irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
+ if (irq < 0 && node)
+ irq = irq_of_parse_and_map(node, 2);
+
+ if (irq >= 0) {
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
+ dev_name(dev));
+ ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
+ ecc);
+ if (ret) {
+ dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
+ return ret;
+ }
+ }
+
+ ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
if (ecc->dummy_slot < 0) {
- dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
+ dev_err(dev, "Can't allocate PaRAM dummy slot\n");
return ecc->dummy_slot;
}
- dma_cap_zero(ecc->dma_slave.cap_mask);
- dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
- dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
- dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);
+ queue_priority_mapping = info->queue_priority_mapping;
+
+ if (!ecc->legacy_mode) {
+ int lowest_priority = 0;
+ struct of_phandle_args tc_args;
+
+ ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
+ sizeof(*ecc->tc_list), GFP_KERNEL);
+ if (!ecc->tc_list)
+ return -ENOMEM;
+
+ for (i = 0;; i++) {
+ ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
+ 1, i, &tc_args);
+ if (ret || i == ecc->num_tc)
+ break;
+
+ ecc->tc_list[i].node = tc_args.np;
+ ecc->tc_list[i].id = i;
+ queue_priority_mapping[i][1] = tc_args.args[0];
+ if (queue_priority_mapping[i][1] > lowest_priority) {
+ lowest_priority = queue_priority_mapping[i][1];
+ info->default_queue = i;
+ }
+ }
+ }
+
+ /* Event queue priority mapping */
+ for (i = 0; queue_priority_mapping[i][0] != -1; i++)
+ edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
+ queue_priority_mapping[i][1]);
+
+ for (i = 0; i < ecc->num_region; i++) {
+ edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
+ edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
+ edma_write_array(ecc, EDMA_QRAE, i, 0x0);
+ }
+ ecc->info = info;
- edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);
+ /* Init the dma device and channels */
+ edma_dma_init(ecc, legacy_mode);
- edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);
+ for (i = 0; i < ecc->num_channels; i++) {
+ /* Assign all channels to the default queue */
+ edma_assign_channel_eventq(&ecc->slave_chans[i],
+ info->default_queue);
+ /* Set entry slot to the dummy slot */
+ edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
+ }
ret = dma_async_device_register(&ecc->dma_slave);
- if (ret)
+ if (ret) {
+ dev_err(dev, "slave ddev registration failed (%d)\n", ret);
goto err_reg1;
+ }
- platform_set_drvdata(pdev, ecc);
+ if (ecc->dma_memcpy) {
+ ret = dma_async_device_register(ecc->dma_memcpy);
+ if (ret) {
+ dev_err(dev, "memcpy ddev registration failed (%d)\n",
+ ret);
+ dma_async_device_unregister(&ecc->dma_slave);
+ goto err_reg1;
+ }
+ }
+
+ if (node)
+ of_dma_controller_register(node, of_edma_xlate, ecc);
- dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");
+ dev_info(dev, "TI EDMA DMA engine driver\n");
return 0;
err_reg1:
- edma_free_slot(ecc->dummy_slot);
+ edma_free_slot(ecc, ecc->dummy_slot);
return ret;
}
@@ -1056,33 +2330,112 @@ static int edma_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct edma_cc *ecc = dev_get_drvdata(dev);
+ if (dev->of_node)
+ of_dma_controller_free(dev->of_node);
dma_async_device_unregister(&ecc->dma_slave);
- edma_free_slot(ecc->dummy_slot);
+ if (ecc->dma_memcpy)
+ dma_async_device_unregister(ecc->dma_memcpy);
+ edma_free_slot(ecc, ecc->dummy_slot);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int edma_pm_suspend(struct device *dev)
+{
+ struct edma_cc *ecc = dev_get_drvdata(dev);
+ struct edma_chan *echan = ecc->slave_chans;
+ int i;
+
+ for (i = 0; i < ecc->num_channels; i++) {
+ if (echan[i].alloced) {
+ edma_setup_interrupt(&echan[i], false);
+ edma_tc_set_pm_state(echan[i].tc, false);
+ }
+ }
+
+ return 0;
+}
+
+static int edma_pm_resume(struct device *dev)
+{
+ struct edma_cc *ecc = dev_get_drvdata(dev);
+ struct edma_chan *echan = ecc->slave_chans;
+ int i;
+ s8 (*queue_priority_mapping)[2];
+
+ queue_priority_mapping = ecc->info->queue_priority_mapping;
+
+ /* Event queue priority mapping */
+ for (i = 0; queue_priority_mapping[i][0] != -1; i++)
+ edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
+ queue_priority_mapping[i][1]);
+
+ for (i = 0; i < ecc->num_channels; i++) {
+ if (echan[i].alloced) {
+ /* ensure access through shadow region 0 */
+ edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
+ BIT(i & 0x1f));
+
+ edma_setup_interrupt(&echan[i], true);
+
+ /* Set up channel -> slot mapping for the entry slot */
+ edma_set_chmap(&echan[i], echan[i].slot[0]);
+
+ edma_tc_set_pm_state(echan[i].tc, true);
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops edma_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume)
+};
+
static struct platform_driver edma_driver = {
.probe = edma_probe,
.remove = edma_remove,
.driver = {
- .name = "edma-dma-engine",
+ .name = "edma",
+ .pm = &edma_pm_ops,
+ .of_match_table = edma_of_ids,
+ },
+};
+
+static struct platform_driver edma_tptc_driver = {
+ .driver = {
+ .name = "edma3-tptc",
+ .of_match_table = edma_tptc_of_ids,
},
};
bool edma_filter_fn(struct dma_chan *chan, void *param)
{
+ bool match = false;
+
if (chan->device->dev->driver == &edma_driver.driver) {
struct edma_chan *echan = to_edma_chan(chan);
unsigned ch_req = *(unsigned *)param;
- return ch_req == echan->ch_num;
+ if (ch_req == echan->ch_num) {
+ /* The channel is going to be used as HW synchronized */
+ echan->hw_triggered = true;
+ match = true;
+ }
}
- return false;
+ return match;
}
EXPORT_SYMBOL(edma_filter_fn);
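+
+/*
+ * Usage sketch (hypothetical caller) for this legacy filter interface:
+ *
+ *	dma_cap_mask_t mask;
+ *	struct dma_chan *chan;
+ *	unsigned int ch = EDMA_CTLR_CHAN(0, 20);
+ *
+ *	dma_cap_zero(mask);
+ *	dma_cap_set(DMA_SLAVE, mask);
+ *	chan = dma_request_channel(mask, edma_filter_fn, &ch);
+ */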
static int edma_init(void)
{
+ int ret;
+
+ ret = platform_driver_register(&edma_tptc_driver);
+ if (ret)
+ return ret;
+
return platform_driver_register(&edma_driver);
}
subsys_initcall(edma_init);
@@ -1090,6 +2443,7 @@ subsys_initcall(edma_init);
static void __exit edma_exit(void)
{
platform_driver_unregister(&edma_driver);
+ platform_driver_unregister(&edma_tptc_driver);
}
module_exit(edma_exit);
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 300f821f1890..2209f75fdf05 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1512,6 +1512,7 @@ static const struct of_device_id fsldma_of_ids[] = {
{ .compatible = "fsl,elo-dma", },
{}
};
+MODULE_DEVICE_TABLE(of, fsldma_of_ids);
static struct platform_driver fsldma_of_driver = {
.driver = {
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 48d6d9e94f67..7d56b47e4fcf 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -65,9 +65,6 @@ static void idma64_chan_init(struct idma64 *idma64, struct idma64_chan *idma64c)
u32 cfghi = IDMA64C_CFGH_SRC_PER(1) | IDMA64C_CFGH_DST_PER(0);
u32 cfglo = 0;
- /* Enforce FIFO drain when channel is suspended */
- cfglo |= IDMA64C_CFGL_CH_DRAIN;
-
/* Set default burst alignment */
cfglo |= IDMA64C_CFGL_DST_BURST_ALIGN | IDMA64C_CFGL_SRC_BURST_ALIGN;
@@ -257,15 +254,15 @@ static u64 idma64_hw_desc_fill(struct idma64_hw_desc *hw,
dar = config->dst_addr;
ctllo |= IDMA64C_CTLL_DST_FIX | IDMA64C_CTLL_SRC_INC |
IDMA64C_CTLL_FC_M2P;
- src_width = min_t(u32, 2, __fls(sar | hw->len));
- dst_width = __fls(config->dst_addr_width);
+ src_width = __ffs(sar | hw->len | 4);
+ dst_width = __ffs(config->dst_addr_width);
} else { /* DMA_DEV_TO_MEM */
sar = config->src_addr;
dar = hw->phys;
ctllo |= IDMA64C_CTLL_DST_INC | IDMA64C_CTLL_SRC_FIX |
IDMA64C_CTLL_FC_P2M;
- src_width = __fls(config->src_addr_width);
- dst_width = min_t(u32, 2, __fls(dar | hw->len));
+ src_width = __ffs(config->src_addr_width);
+ dst_width = __ffs(dar | hw->len | 4);
}
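+	/*
+	 * Width selection note: __ffs(addr | len | 4) picks the largest
+	 * power-of-two width (capped at 4 bytes by the constant 4) that
+	 * both the address and the length are aligned to; e.g. addr =
+	 * 0x1002, len = 0x10 gives __ffs(0x1016) = 1, i.e. 2-byte width.
+	 */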
lli->sar = sar;
@@ -428,12 +425,17 @@ static int idma64_slave_config(struct dma_chan *chan,
return 0;
}
-static void idma64_chan_deactivate(struct idma64_chan *idma64c)
+static void idma64_chan_deactivate(struct idma64_chan *idma64c, bool drain)
{
unsigned short count = 100;
u32 cfglo;
cfglo = channel_readl(idma64c, CFG_LO);
+ if (drain)
+ cfglo |= IDMA64C_CFGL_CH_DRAIN;
+ else
+ cfglo &= ~IDMA64C_CFGL_CH_DRAIN;
+
channel_writel(idma64c, CFG_LO, cfglo | IDMA64C_CFGL_CH_SUSP);
do {
udelay(1);
@@ -456,7 +458,7 @@ static int idma64_pause(struct dma_chan *chan)
spin_lock_irqsave(&idma64c->vchan.lock, flags);
if (idma64c->desc && idma64c->desc->status == DMA_IN_PROGRESS) {
- idma64_chan_deactivate(idma64c);
+ idma64_chan_deactivate(idma64c, false);
idma64c->desc->status = DMA_PAUSED;
}
spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
@@ -486,7 +488,7 @@ static int idma64_terminate_all(struct dma_chan *chan)
LIST_HEAD(head);
spin_lock_irqsave(&idma64c->vchan.lock, flags);
- idma64_chan_deactivate(idma64c);
+ idma64_chan_deactivate(idma64c, true);
idma64_stop_transfer(idma64c);
if (idma64c->desc) {
idma64_vdesc_free(&idma64c->desc->vdesc);
diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h
index a4d99685a7c4..f6aeff0af8a5 100644
--- a/drivers/dma/idma64.h
+++ b/drivers/dma/idma64.h
@@ -16,6 +16,8 @@
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
#include "virt-dma.h"
/* Channel registers */
@@ -166,19 +168,13 @@ static inline void idma64c_writel(struct idma64_chan *idma64c, int offset,
static inline u64 idma64c_readq(struct idma64_chan *idma64c, int offset)
{
- u64 l, h;
-
- l = idma64c_readl(idma64c, offset);
- h = idma64c_readl(idma64c, offset + 4);
-
- return l | (h << 32);
+ return lo_hi_readq(idma64c->regs + offset);
}
static inline void idma64c_writeq(struct idma64_chan *idma64c, int offset,
u64 value)
{
- idma64c_writel(idma64c, offset, value);
- idma64c_writel(idma64c, offset + 4, value >> 32);
+ lo_hi_writeq(value, idma64c->regs + offset);
}
#define channel_readq(idma64c, reg) \
@@ -217,7 +213,7 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value)
idma64_writel(idma64, IDMA64_##reg, (value))
/**
- * struct idma64_chip - representation of DesignWare DMA controller hardware
+ * struct idma64_chip - representation of iDMA 64-bit controller hardware
* @dev: struct device of the DMA controller
* @irq: irq line
* @regs: memory mapped I/O space
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 9d375bc7590a..7058d58ba588 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1478,7 +1478,7 @@ static int __init sdma_event_remap(struct sdma_engine *sdma)
event_remap = of_find_property(np, propname, NULL);
num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
if (!num_map) {
- dev_warn(sdma->dev, "no event needs to be remapped\n");
+ dev_dbg(sdma->dev, "no event needs to be remapped\n");
goto out;
} else if (num_map % EVENT_REMAP_CELLS) {
dev_err(sdma->dev, "the property %s must modulo %d\n",
@@ -1826,8 +1826,6 @@ static int sdma_probe(struct platform_device *pdev)
of_node_put(spba_bus);
}
- dev_info(sdma->dev, "initialized\n");
-
return 0;
err_register:
@@ -1852,7 +1850,6 @@ static int sdma_remove(struct platform_device *pdev)
}
platform_set_drvdata(pdev, NULL);
- dev_info(&pdev->dev, "Removed...\n");
return 0;
}
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index f66b7e640610..1d5df2ef148b 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -197,7 +197,8 @@ static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
spin_lock_bh(&ioat_chan->prep_lock);
- __ioat_start_null_desc(ioat_chan);
+ if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+ __ioat_start_null_desc(ioat_chan);
spin_unlock_bh(&ioat_chan->prep_lock);
}
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 1bc084986646..8f4e607d5817 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -82,8 +82,9 @@ struct ioatdma_device {
struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
struct dma_device dma_dev;
u8 version;
- struct msix_entry msix_entries[4];
- struct ioatdma_chan *idx[4];
+#define IOAT_MAX_CHANS 4
+ struct msix_entry msix_entries[IOAT_MAX_CHANS];
+ struct ioatdma_chan *idx[IOAT_MAX_CHANS];
struct dca_provider *dca;
enum ioat_irq_mode irq_mode;
u32 cap;
@@ -95,6 +96,7 @@ struct ioatdma_chan {
dma_addr_t last_completion;
spinlock_t cleanup_lock;
unsigned long state;
+ #define IOAT_CHAN_DOWN 0
#define IOAT_COMPLETION_ACK 1
#define IOAT_RESET_PENDING 2
#define IOAT_KOBJ_INIT_FAIL 3
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 1c3c9b0abf4e..4ef0c5e07912 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -27,6 +27,7 @@
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dca.h>
+#include <linux/aer.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"
@@ -1186,13 +1187,116 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
return 0;
}
+static void ioat_shutdown(struct pci_dev *pdev)
+{
+ struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
+ struct ioatdma_chan *ioat_chan;
+ int i;
+
+ if (!ioat_dma)
+ return;
+
+ for (i = 0; i < IOAT_MAX_CHANS; i++) {
+ ioat_chan = ioat_dma->idx[i];
+ if (!ioat_chan)
+ continue;
+
+ spin_lock_bh(&ioat_chan->prep_lock);
+ set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+ del_timer_sync(&ioat_chan->timer);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+ /* this should quiesce then reset */
+ ioat_reset_hw(ioat_chan);
+ }
+
+ ioat_disable_interrupts(ioat_dma);
+}
+
+void ioat_resume(struct ioatdma_device *ioat_dma)
+{
+ struct ioatdma_chan *ioat_chan;
+ u32 chanerr;
+ int i;
+
+ for (i = 0; i < IOAT_MAX_CHANS; i++) {
+ ioat_chan = ioat_dma->idx[i];
+ if (!ioat_chan)
+ continue;
+
+ spin_lock_bh(&ioat_chan->prep_lock);
+ clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+
+ chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+ writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+
+ /* no need to reset as shutdown already did that */
+ }
+}
+
#define DRV_NAME "ioatdma"
+static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev,
+ enum pci_channel_state error)
+{
+ dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error);
+
+ /* quiesce and block I/O */
+ ioat_shutdown(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
+{
+ pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
+ int err;
+
+ dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME);
+
+ if (pci_enable_device_mem(pdev) < 0) {
+ dev_err(&pdev->dev,
+ "Failed to enable PCIe device after reset.\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ pci_wake_from_d3(pdev, false);
+ }
+
+ err = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "AER uncorrect error status clear failed: %#x\n", err);
+ }
+
+ return result;
+}
+
+static void ioat_pcie_error_resume(struct pci_dev *pdev)
+{
+ struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s: AER handling resuming\n", DRV_NAME);
+
+ /* initialize and bring everything back */
+ ioat_resume(ioat_dma);
+}
+
+static const struct pci_error_handlers ioat_err_handler = {
+ .error_detected = ioat_pcie_error_detected,
+ .slot_reset = ioat_pcie_error_slot_reset,
+ .resume = ioat_pcie_error_resume,
+};
+
static struct pci_driver ioat_pci_driver = {
.name = DRV_NAME,
.id_table = ioat_pci_tbl,
.probe = ioat_pci_probe,
.remove = ioat_remove,
+ .shutdown = ioat_shutdown,
+ .err_handler = &ioat_err_handler,
};
static struct ioatdma_device *
@@ -1245,13 +1349,17 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, device);
device->version = readb(device->reg_base + IOAT_VER_OFFSET);
- if (device->version >= IOAT_VER_3_0)
+ if (device->version >= IOAT_VER_3_0) {
err = ioat3_dma_probe(device, ioat_dca_enabled);
- else
+
+ if (device->version >= IOAT_VER_3_3)
+ pci_enable_pcie_error_reporting(pdev);
+ } else
return -ENODEV;
if (err) {
dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
+ pci_disable_pcie_error_reporting(pdev);
return -ENODEV;
}
@@ -1271,6 +1379,8 @@ static void ioat_remove(struct pci_dev *pdev)
free_dca_provider(device->dca);
device->dca = NULL;
}
+
+ pci_disable_pcie_error_reporting(pdev);
ioat_dma_remove(device);
}
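
Editor's note: the new handlers give ioatdma the standard three-step AER recovery sequence: error_detected() quiesces and asks for a reset, slot_reset() re-initializes config space after the link reset, and resume() reopens the channels (the read-then-write-back of CHANERR in ioat_resume() is the usual write-one-to-clear idiom for acknowledging latched error bits). A stripped-down sketch of the same callback shape for a hypothetical PCI driver, not ioat code:

	#include <linux/pci.h>

	/* The PCI core invokes these in order on an uncorrectable error:
	 * error_detected() -> (link reset) -> slot_reset() -> resume().
	 */
	static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
						  enum pci_channel_state state)
	{
		/* stop all I/O to the device, then request a slot reset */
		return PCI_ERS_RESULT_NEED_RESET;
	}

	static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
	{
		/* the link was reset: re-enable and restore config space */
		if (pci_enable_device_mem(pdev) < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		pci_set_master(pdev);
		pci_restore_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	static void my_resume(struct pci_dev *pdev)
	{
		/* recovery finished: safe to restart normal operation */
	}

	static const struct pci_error_handlers my_err_handler = {
		.error_detected	= my_error_detected,
		.slot_reset	= my_slot_reset,
		.resume		= my_resume,
	};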
diff --git a/drivers/dma/ioat/prep.c b/drivers/dma/ioat/prep.c
index ad4fb41cd23b..6bb4a13a8fbd 100644
--- a/drivers/dma/ioat/prep.c
+++ b/drivers/dma/ioat/prep.c
@@ -121,6 +121,9 @@ ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
size_t total_len = len;
int num_descs, idx, i;
+ if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+ return NULL;
+
num_descs = ioat_xferlen_to_descs(ioat_chan, len);
if (likely(num_descs) &&
ioat_check_space_lock(ioat_chan, num_descs) == 0)
@@ -254,6 +257,11 @@ struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags)
{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+ if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+ return NULL;
+
return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}
@@ -262,6 +270,11 @@ ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
unsigned int src_cnt, size_t len,
enum sum_check_flags *result, unsigned long flags)
{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+ if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+ return NULL;
+
/* the cleanup routine only sets bits on validate failure, it
* does not clear bits on validate success... so clear it here
*/
@@ -574,6 +587,11 @@ ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
unsigned long flags)
{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+ if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+ return NULL;
+
/* specify valid address for disabled result */
if (flags & DMA_PREP_PQ_DISABLE_P)
dst[0] = dst[1];
@@ -614,6 +632,11 @@ ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
enum sum_check_flags *pqres, unsigned long flags)
{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+ if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+ return NULL;
+
/* specify valid address for disabled result */
if (flags & DMA_PREP_PQ_DISABLE_P)
pq[0] = pq[1];
@@ -638,6 +661,10 @@ ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
{
unsigned char scf[MAX_SCF];
dma_addr_t pq[2];
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+ if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+ return NULL;
if (src_cnt > MAX_SCF)
return NULL;
@@ -661,6 +688,10 @@ ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
{
unsigned char scf[MAX_SCF];
dma_addr_t pq[2];
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+
+ if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+ return NULL;
if (src_cnt > MAX_SCF)
return NULL;
@@ -689,6 +720,9 @@ ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
struct ioat_ring_ent *desc;
struct ioat_dma_descriptor *hw;
+ if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
+ return NULL;
+
if (ioat_check_space_lock(ioat_chan, 1) == 0)
desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
else
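
Editor's note: the flip side of all these early returns is that a quiesced channel now hands every prep request back as NULL, which well-behaved dmaengine clients must tolerate. A hedged client-side sketch (chan, dst, src and len are assumed set up by the caller; the raw device op is called directly, since the convenience wrappers vary across kernel versions):

	#include <linux/dmaengine.h>

	static int submit_copy(struct dma_chan *chan, dma_addr_t dst,
			       dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie;

		tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
							  DMA_PREP_INTERRUPT);
		if (!tx)	/* channel down, or no descriptor space */
			return -ENOMEM;

		cookie = dmaengine_submit(tx);
		if (dma_submit_error(cookie))
			return -EIO;

		dma_async_issue_pending(chan);
		return 0;
	}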
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index b4634109e010..631c4435e075 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -652,6 +652,7 @@ static const struct of_device_id moxart_dma_match[] = {
{ .compatible = "moxa,moxart-dma" },
{ }
};
+MODULE_DEVICE_TABLE(of, moxart_dma_match);
static struct platform_driver moxart_driver = {
.probe = moxart_probe,
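
Editor's note: these one-line MODULE_DEVICE_TABLE(of, ...) additions (repeated below for mpc512x, sirf, sun6i and xilinx_vdma) are the "module autoload fixes" from the tag text. Without the table export the module carries no OF modalias strings, so udev never auto-loads the driver when a matching device node appears. Sketch, with a hypothetical compatible string:

	#include <linux/mod_devicetable.h>
	#include <linux/module.h>

	static const struct of_device_id my_match[] = {
		{ .compatible = "acme,my-dma" },	/* hypothetical */
		{ /* sentinel */ }
	};
	/* emits "of:N*T*Cacme,my-dma"-style aliases into the module, which
	 * udev matches against the uevent MODALIAS to load the module
	 */
	MODULE_DEVICE_TABLE(of, my_match);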
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index e6281e7aa46e..aae76fb39adc 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -1073,6 +1073,7 @@ static const struct of_device_id mpc_dma_match[] = {
{ .compatible = "fsl,mpc8308-dma", },
{},
};
+MODULE_DEVICE_TABLE(of, mpc_dma_match);
static struct platform_driver mpc_dma_driver = {
.probe = mpc_dma_probe,
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 249445c8a4c6..1dfc71c90123 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -935,8 +935,12 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
else
d->ccr |= CCR_SYNC_ELEMENT;
- if (dir == DMA_DEV_TO_MEM)
+ if (dir == DMA_DEV_TO_MEM) {
d->ccr |= CCR_TRIGGER_SRC;
+ d->csdp |= CSDP_DST_PACKED;
+ } else {
+ d->csdp |= CSDP_SRC_PACKED;
+ }
d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 7d5598d874e1..22ea2419ee56 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -1149,6 +1149,7 @@ static const struct of_device_id sirfsoc_dma_match[] = {
{ .compatible = "sirf,atlas7-dmac-v2", .data = &sirfsoc_dmadata_a7v2,},
{},
};
+MODULE_DEVICE_TABLE(of, sirfsoc_dma_match);
static struct platform_driver sirfsoc_dma_driver = {
.probe = sirfsoc_dma_probe,
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 750d1b313684..dd3e7ba273ad 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2907,7 +2907,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
if (err) {
d40_err(base->dev,
- "Failed to regsiter memcpy only channels\n");
+ "Failed to register memcpy only channels\n");
goto failure2;
}
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 73e0be6e2100..2db12e493c53 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -908,6 +908,7 @@ static const struct of_device_id sun6i_dma_match[] = {
{ .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, sun6i_dma_match);
static int sun6i_dma_probe(struct platform_device *pdev)
{
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 5cce8c9d0026..a415edbe61b1 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -17,13 +17,184 @@
#include <linux/of_device.h>
#include <linux/of_dma.h>
-#define TI_XBAR_OUTPUTS 127
-#define TI_XBAR_INPUTS 256
+#define TI_XBAR_DRA7 0
+#define TI_XBAR_AM335X 1
+
+static const struct of_device_id ti_dma_xbar_match[] = {
+ {
+ .compatible = "ti,dra7-dma-crossbar",
+ .data = (void *)TI_XBAR_DRA7,
+ },
+ {
+ .compatible = "ti,am335x-edma-crossbar",
+ .data = (void *)TI_XBAR_AM335X,
+ },
+ {},
+};
+
+/* Crossbar on AM335x/AM437x family */
+#define TI_AM335X_XBAR_LINES 64
+
+struct ti_am335x_xbar_data {
+ void __iomem *iomem;
+
+ struct dma_router dmarouter;
+
+ u32 xbar_events; /* maximum number of events to select in xbar */
+ u32 dma_requests; /* number of DMA requests on eDMA */
+};
+
+struct ti_am335x_xbar_map {
+ u16 dma_line;
+ u16 mux_val;
+};
+
+static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val)
+{
+ writeb_relaxed(val & 0x1f, iomem + event);
+}
+
+static void ti_am335x_xbar_free(struct device *dev, void *route_data)
+{
+ struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev);
+ struct ti_am335x_xbar_map *map = route_data;
+
+ dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n",
+ map->mux_val, map->dma_line);
+
+ ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0);
+ kfree(map);
+}
+
+static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+ struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
+ struct ti_am335x_xbar_map *map;
+
+ if (dma_spec->args_count != 3)
+ return ERR_PTR(-EINVAL);
+
+ if (dma_spec->args[2] >= xbar->xbar_events) {
+ dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
+ dma_spec->args[2]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (dma_spec->args[0] >= xbar->dma_requests) {
+ dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
+ dma_spec->args[0]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* The of_node_put() will be done in the core for the node */
+ dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
+ if (!dma_spec->np) {
+ dev_err(&pdev->dev, "Can't get DMA master\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map) {
+ of_node_put(dma_spec->np);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ map->dma_line = (u16)dma_spec->args[0];
+ map->mux_val = (u16)dma_spec->args[2];
+
+ dma_spec->args[2] = 0;
+ dma_spec->args_count = 2;
+
+ dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n",
+ map->mux_val, map->dma_line);
+
+ ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);
+
+ return map;
+}
+
+static const struct of_device_id ti_am335x_master_match[] = {
+ { .compatible = "ti,edma3-tpcc", },
+ {},
+};
+
+static int ti_am335x_xbar_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ const struct of_device_id *match;
+ struct device_node *dma_node;
+ struct ti_am335x_xbar_data *xbar;
+ struct resource *res;
+ void __iomem *iomem;
+ int i, ret;
+
+ if (!node)
+ return -ENODEV;
+
+ xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
+ if (!xbar)
+ return -ENOMEM;
+
+ dma_node = of_parse_phandle(node, "dma-masters", 0);
+ if (!dma_node) {
+ dev_err(&pdev->dev, "Can't get DMA master node\n");
+ return -ENODEV;
+ }
+
+ match = of_match_node(ti_am335x_master_match, dma_node);
+ if (!match) {
+ dev_err(&pdev->dev, "DMA master is not supported\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(dma_node, "dma-requests",
+ &xbar->dma_requests)) {
+ dev_info(&pdev->dev,
+ "Missing XBAR output information, using %u.\n",
+ TI_AM335X_XBAR_LINES);
+ xbar->dma_requests = TI_AM335X_XBAR_LINES;
+ }
+ of_node_put(dma_node);
+
+ if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) {
+ dev_info(&pdev->dev,
+ "Missing XBAR input information, using %u.\n",
+ TI_AM335X_XBAR_LINES);
+ xbar->xbar_events = TI_AM335X_XBAR_LINES;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iomem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(iomem))
+ return PTR_ERR(iomem);
+
+ xbar->iomem = iomem;
+
+ xbar->dmarouter.dev = &pdev->dev;
+ xbar->dmarouter.route_free = ti_am335x_xbar_free;
+
+ platform_set_drvdata(pdev, xbar);
+
+ /* Reset the crossbar */
+ for (i = 0; i < xbar->dma_requests; i++)
+ ti_am335x_xbar_write(xbar->iomem, i, 0);
+
+ ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate,
+ &xbar->dmarouter);
+
+ return ret;
+}
+
+/* Crossbar on DRA7xx family */
+#define TI_DRA7_XBAR_OUTPUTS 127
+#define TI_DRA7_XBAR_INPUTS 256
#define TI_XBAR_EDMA_OFFSET 0
#define TI_XBAR_SDMA_OFFSET 1
-struct ti_dma_xbar_data {
+struct ti_dra7_xbar_data {
void __iomem *iomem;
struct dma_router dmarouter;
@@ -35,35 +206,35 @@ struct ti_dma_xbar_data {
u32 dma_offset;
};
-struct ti_dma_xbar_map {
+struct ti_dra7_xbar_map {
u16 xbar_in;
int xbar_out;
};
-static inline void ti_dma_xbar_write(void __iomem *iomem, int xbar, u16 val)
+static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val)
{
writew_relaxed(val, iomem + (xbar * 2));
}
-static void ti_dma_xbar_free(struct device *dev, void *route_data)
+static void ti_dra7_xbar_free(struct device *dev, void *route_data)
{
- struct ti_dma_xbar_data *xbar = dev_get_drvdata(dev);
- struct ti_dma_xbar_map *map = route_data;
+ struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev);
+ struct ti_dra7_xbar_map *map = route_data;
dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n",
map->xbar_in, map->xbar_out);
- ti_dma_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
+ ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
idr_remove(&xbar->map_idr, map->xbar_out);
kfree(map);
}
-static void *ti_dma_xbar_route_allocate(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma)
+static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
{
struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
- struct ti_dma_xbar_data *xbar = platform_get_drvdata(pdev);
- struct ti_dma_xbar_map *map;
+ struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev);
+ struct ti_dra7_xbar_map *map;
if (dma_spec->args[0] >= xbar->xbar_requests) {
dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
@@ -93,12 +264,12 @@ static void *ti_dma_xbar_route_allocate(struct of_phandle_args *dma_spec,
dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n",
map->xbar_in, map->xbar_out);
- ti_dma_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);
+ ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);
return map;
}
-static const struct of_device_id ti_dma_master_match[] = {
+static const struct of_device_id ti_dra7_master_match[] = {
{
.compatible = "ti,omap4430-sdma",
.data = (void *)TI_XBAR_SDMA_OFFSET,
@@ -110,12 +281,12 @@ static const struct of_device_id ti_dma_master_match[] = {
{},
};
-static int ti_dma_xbar_probe(struct platform_device *pdev)
+static int ti_dra7_xbar_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
const struct of_device_id *match;
struct device_node *dma_node;
- struct ti_dma_xbar_data *xbar;
+ struct ti_dra7_xbar_data *xbar;
struct resource *res;
u32 safe_val;
void __iomem *iomem;
@@ -136,7 +307,7 @@ static int ti_dma_xbar_probe(struct platform_device *pdev)
return -ENODEV;
}
- match = of_match_node(ti_dma_master_match, dma_node);
+ match = of_match_node(ti_dra7_master_match, dma_node);
if (!match) {
dev_err(&pdev->dev, "DMA master is not supported\n");
return -EINVAL;
@@ -146,16 +317,16 @@ static int ti_dma_xbar_probe(struct platform_device *pdev)
&xbar->dma_requests)) {
dev_info(&pdev->dev,
"Missing XBAR output information, using %u.\n",
- TI_XBAR_OUTPUTS);
- xbar->dma_requests = TI_XBAR_OUTPUTS;
+ TI_DRA7_XBAR_OUTPUTS);
+ xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS;
}
of_node_put(dma_node);
if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) {
dev_info(&pdev->dev,
"Missing XBAR input information, using %u.\n",
- TI_XBAR_INPUTS);
- xbar->xbar_requests = TI_XBAR_INPUTS;
+ TI_DRA7_XBAR_INPUTS);
+ xbar->xbar_requests = TI_DRA7_XBAR_INPUTS;
}
if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val))
@@ -169,30 +340,50 @@ static int ti_dma_xbar_probe(struct platform_device *pdev)
xbar->iomem = iomem;
xbar->dmarouter.dev = &pdev->dev;
- xbar->dmarouter.route_free = ti_dma_xbar_free;
+ xbar->dmarouter.route_free = ti_dra7_xbar_free;
xbar->dma_offset = (u32)match->data;
platform_set_drvdata(pdev, xbar);
/* Reset the crossbar */
for (i = 0; i < xbar->dma_requests; i++)
- ti_dma_xbar_write(xbar->iomem, i, xbar->safe_val);
+ ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val);
- ret = of_dma_router_register(node, ti_dma_xbar_route_allocate,
+ ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate,
&xbar->dmarouter);
if (ret) {
/* Restore the defaults for the crossbar */
for (i = 0; i < xbar->dma_requests; i++)
- ti_dma_xbar_write(xbar->iomem, i, i);
+ ti_dra7_xbar_write(xbar->iomem, i, i);
}
return ret;
}
-static const struct of_device_id ti_dma_xbar_match[] = {
- { .compatible = "ti,dra7-dma-crossbar" },
- {},
-};
+static int ti_dma_xbar_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ int ret;
+
+ match = of_match_node(ti_dma_xbar_match, pdev->dev.of_node);
+ if (unlikely(!match))
+ return -EINVAL;
+
+ switch ((u32)match->data) {
+ case TI_XBAR_DRA7:
+ ret = ti_dra7_xbar_probe(pdev);
+ break;
+ case TI_XBAR_AM335X:
+ ret = ti_am335x_xbar_probe(pdev);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported crossbar\n");
+ ret = -ENODEV;
+ break;
+ }
+
+ return ret;
+}
static struct platform_driver ti_dma_xbar_driver = {
.driver = {
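
Editor's note: to make ti_am335x_xbar_route_allocate() above concrete: a client asking for eDMA channel 8 via crossbar event 9 passes a three-cell spec; the router records the pair, programs one byte-wide mux register per channel, and rewrites the spec into the two-cell form the ti,edma3-tpcc master expects. An illustration with made-up numbers (not driver code):

	#include <linux/io.h>
	#include <linux/types.h>

	/* What one successful route allocation boils down to, for
	 * dma_spec->args in:  { 8, 0, 9 }  (channel, TC, crossbar event).
	 */
	static void example_route(void __iomem *xbar_base)
	{
		u16 dma_line = 8;	/* dma_spec->args[0] */
		u16 mux_val  = 9;	/* dma_spec->args[2] */

		/* one byte-wide mux register per eDMA channel, masked
		 * as in ti_am335x_xbar_write() above
		 */
		writeb_relaxed(mux_val & 0x1f, xbar_base + dma_line);

		/* the spec handed on to ti,edma3-tpcc is then { 8, 0 } */
	}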
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 181b95267866..2fa47745a41f 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -47,9 +47,9 @@ struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
/**
* vchan_tx_prep - prepare a descriptor
- * vc: virtual channel allocating this descriptor
- * vd: virtual descriptor to prepare
- * tx_flags: flags argument passed in to prepare function
+ * @vc: virtual channel allocating this descriptor
+ * @vd: virtual descriptor to prepare
+ * @tx_flags: flags argument passed in to prepare function
*/
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
struct virt_dma_desc *vd, unsigned long tx_flags)
@@ -65,7 +65,7 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
/**
* vchan_issue_pending - move submitted descriptors to issued list
- * vc: virtual channel to update
+ * @vc: virtual channel to update
*
* vc.lock must be held by caller
*/
@@ -77,7 +77,7 @@ static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
/**
* vchan_cookie_complete - report completion of a descriptor
- * vd: virtual descriptor to update
+ * @vd: virtual descriptor to update
*
* vc.lock must be held by caller
*/
@@ -97,7 +97,7 @@ static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
/**
* vchan_cyclic_callback - report the completion of a period
- * vd: virtual descriptor
+ * @vd: virtual descriptor
*/
static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
@@ -109,7 +109,7 @@ static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
/**
* vchan_next_desc - peek at the next descriptor to be processed
- * vc: virtual channel to obtain descriptor from
+ * @vc: virtual channel to obtain descriptor from
*
* vc.lock must be held by caller
*/
@@ -123,8 +123,8 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
/**
* vchan_get_all_descriptors - obtain all submitted and issued descriptors
- * vc: virtual channel to get descriptors from
- * head: list of descriptors found
+ * @vc: virtual channel to get descriptors from
+ * @head: list of descriptors found
*
* vc.lock must be held by caller
*
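
Editor's note: the virt-dma.h hunks are documentation-only. scripts/kernel-doc recognizes a parameter only when it is written as "@name:", so the bare "vc:"/"vd:" lines were silently dropped from the generated docs. The expected shape, for reference:

	/**
	 * my_func - one-line summary
	 * @arg: parameter description kernel-doc can attribute to @arg
	 *
	 * Body text; a bare "arg:" without the '@' would be ignored here.
	 */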
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 8d57b1b12e41..9dfa2b0fa5da 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -547,14 +547,12 @@ static struct xgene_dma_desc_sw *xgene_dma_alloc_descriptor(
struct xgene_dma_desc_sw *desc;
dma_addr_t phys;
- desc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &phys);
+ desc = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
if (!desc) {
chan_err(chan, "Failed to allocate LDs\n");
return NULL;
}
- memset(desc, 0, sizeof(*desc));
-
INIT_LIST_HEAD(&desc->tx_list);
desc->tx.phys = phys;
desc->tx.tx_submit = xgene_dma_tx_submit;
@@ -894,60 +892,6 @@ static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
chan->desc_pool = NULL;
}
-static struct dma_async_tx_descriptor *xgene_dma_prep_memcpy(
- struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
- size_t len, unsigned long flags)
-{
- struct xgene_dma_desc_sw *first = NULL, *new;
- struct xgene_dma_chan *chan;
- size_t copy;
-
- if (unlikely(!dchan || !len))
- return NULL;
-
- chan = to_dma_chan(dchan);
-
- do {
- /* Allocate the link descriptor from DMA pool */
- new = xgene_dma_alloc_descriptor(chan);
- if (!new)
- goto fail;
-
- /* Create the largest transaction possible */
- copy = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);
-
- /* Prepare DMA descriptor */
- xgene_dma_prep_cpy_desc(chan, new, dst, src, copy);
-
- if (!first)
- first = new;
-
- new->tx.cookie = 0;
- async_tx_ack(&new->tx);
-
- /* Update metadata */
- len -= copy;
- dst += copy;
- src += copy;
-
- /* Insert the link descriptor to the LD ring */
- list_add_tail(&new->node, &first->tx_list);
- } while (len);
-
- new->tx.flags = flags; /* client is in control of this ack */
- new->tx.cookie = -EBUSY;
- list_splice(&first->tx_list, &new->tx_list);
-
- return &new->tx;
-
-fail:
- if (!first)
- return NULL;
-
- xgene_dma_free_desc_list(chan, &first->tx_list);
- return NULL;
-}
-
static struct dma_async_tx_descriptor *xgene_dma_prep_sg(
struct dma_chan *dchan, struct scatterlist *dst_sg,
u32 dst_nents, struct scatterlist *src_sg,
@@ -1707,7 +1651,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
dma_cap_zero(dma_dev->cap_mask);
/* Set DMA device capability */
- dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
dma_cap_set(DMA_SG, dma_dev->cap_mask);
/* Basically here, the X-Gene SoC DMA engine channel 0 supports XOR
@@ -1734,7 +1677,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources;
dma_dev->device_issue_pending = xgene_dma_issue_pending;
dma_dev->device_tx_status = xgene_dma_tx_status;
- dma_dev->device_prep_dma_memcpy = xgene_dma_prep_memcpy;
dma_dev->device_prep_dma_sg = xgene_dma_prep_sg;
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
@@ -1787,8 +1729,7 @@ static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
/* DMA capability info */
dev_info(pdma->dev,
- "%s: CAPABILITY ( %s%s%s%s)\n", dma_chan_name(&chan->dma_chan),
- dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "MEMCPY " : "",
+ "%s: CAPABILITY ( %s%s%s)\n", dma_chan_name(&chan->dma_chan),
dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "",
dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");
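
Editor's note: two unrelated cleanups in the xgene-dma diff. The first hunk switches to dma_pool_zalloc(), new this cycle, which folds the alloc-then-clear idiom into one call; the remaining hunks drop the driver's DMA_MEMCPY support in favor of scatter-gather only. The zalloc change is behavior-preserving, roughly:

	/* Before (as removed in the first hunk above): */
	desc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &phys);
	if (desc)
		memset(desc, 0, sizeof(*desc));

	/* After: one call returns an already-zeroed descriptor */
	desc = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);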
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index d8434d465885..6f4b5017ca3b 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -1349,6 +1349,7 @@ static const struct of_device_id xilinx_vdma_of_ids[] = {
{ .compatible = "xlnx,axi-vdma-1.00.a",},
{}
};
+MODULE_DEVICE_TABLE(of, xilinx_vdma_of_ids);
static struct platform_driver xilinx_vdma_driver = {
.driver = {
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index c017fcd8e07c..245d759d5ffc 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -441,7 +441,7 @@ static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
kfree(ds);
return NULL;
}
- memset(ds->desc_hw, sizeof(struct zx_desc_hw) * num, 0);
+ memset(ds->desc_hw, 0, sizeof(struct zx_desc_hw) * num);
ds->desc_num = num;
return ds;
}
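
Editor's note: the zx296702 change fixes a transposed-argument bug. memset(ptr, fill, len) takes the fill byte second and the byte count third, so the old call requested a zero-length fill and left desc_hw uninitialized. For the record:

	#include <linux/string.h>

	/* memset(dst, c, n) fills n bytes with c; the old, swapped call
	 * memset(p, size, 0) fills 0 bytes, i.e. does nothing at all.
	 */
	memset(ds->desc_hw, 0, sizeof(struct zx_desc_hw) * num);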
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h
index 98ba7525929e..36112cdd665a 100644
--- a/include/linux/of_dma.h
+++ b/include/linux/of_dma.h
@@ -34,7 +34,7 @@ struct of_dma_filter_info {
dma_filter_fn filter_fn;
};
-#ifdef CONFIG_OF
+#ifdef CONFIG_DMA_OF
extern int of_dma_controller_register(struct device_node *np,
struct dma_chan *(*of_dma_xlate)
(struct of_phandle_args *, struct of_dma *),
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index 87ac14c584f2..03b6095d3b18 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -37,6 +37,7 @@ struct dw_dma_slave {
* @nr_channels: Number of channels supported by hardware (max 8)
* @is_private: The device channels should be marked as private and not for
* use by the general purpose DMA channel allocator.
+ * @is_memcpy: The device channels do support memory-to-memory transfers.
* @chan_allocation_order: Allocate channels starting from 0 or 7
* @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
* @block_size: Maximum block size supported by the controller
@@ -47,6 +48,7 @@ struct dw_dma_slave {
struct dw_dma_platform_data {
unsigned int nr_channels;
bool is_private;
+ bool is_memcpy;
#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
unsigned char chan_allocation_order;
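
Editor's note: a consumer of the new flag would set it in platform data; a hypothetical example (field values illustrative, using only fields and constants this header defines):

	#include <linux/platform_data/dma-dw.h>

	/* Hypothetical board file: advertise memcpy capability on a DW DMAC */
	static struct dw_dma_platform_data my_dw_pdata = {
		.nr_channels		= 8,
		.is_private		= true,
		.is_memcpy		= true,	/* enables DMA_MEMCPY in the dw core */
		.chan_allocation_order	= CHAN_ALLOCATION_ASCENDING,
	};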
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index bdb2710e2aab..e2878baeb90e 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -41,51 +41,6 @@
#ifndef EDMA_H_
#define EDMA_H_
-/* PaRAM slots are laid out like this */
-struct edmacc_param {
- u32 opt;
- u32 src;
- u32 a_b_cnt;
- u32 dst;
- u32 src_dst_bidx;
- u32 link_bcntrld;
- u32 src_dst_cidx;
- u32 ccnt;
-} __packed;
-
-/* fields in edmacc_param.opt */
-#define SAM BIT(0)
-#define DAM BIT(1)
-#define SYNCDIM BIT(2)
-#define STATIC BIT(3)
-#define EDMA_FWID (0x07 << 8)
-#define TCCMODE BIT(11)
-#define EDMA_TCC(t) ((t) << 12)
-#define TCINTEN BIT(20)
-#define ITCINTEN BIT(21)
-#define TCCHEN BIT(22)
-#define ITCCHEN BIT(23)
-
-/*ch_status paramater of callback function possible values*/
-#define EDMA_DMA_COMPLETE 1
-#define EDMA_DMA_CC_ERROR 2
-#define EDMA_DMA_TC1_ERROR 3
-#define EDMA_DMA_TC2_ERROR 4
-
-enum address_mode {
- INCR = 0,
- FIFO = 1
-};
-
-enum fifo_width {
- W8BIT = 0,
- W16BIT = 1,
- W32BIT = 2,
- W64BIT = 3,
- W128BIT = 4,
- W256BIT = 5
-};
-
enum dma_event_q {
EVENTQ_0 = 0,
EVENTQ_1 = 1,
@@ -94,64 +49,10 @@ enum dma_event_q {
EVENTQ_DEFAULT = -1
};
-enum sync_dimension {
- ASYNC = 0,
- ABSYNC = 1
-};
-
#define EDMA_CTLR_CHAN(ctlr, chan) (((ctlr) << 16) | (chan))
#define EDMA_CTLR(i) ((i) >> 16)
#define EDMA_CHAN_SLOT(i) ((i) & 0xffff)
-#define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */
-#define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */
-#define EDMA_CONT_PARAMS_ANY 1001
-#define EDMA_CONT_PARAMS_FIXED_EXACT 1002
-#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
-
-#define EDMA_MAX_CC 2
-
-/* alloc/free DMA channels and their dedicated parameter RAM slots */
-int edma_alloc_channel(int channel,
- void (*callback)(unsigned channel, u16 ch_status, void *data),
- void *data, enum dma_event_q);
-void edma_free_channel(unsigned channel);
-
-/* alloc/free parameter RAM slots */
-int edma_alloc_slot(unsigned ctlr, int slot);
-void edma_free_slot(unsigned slot);
-
-/* alloc/free a set of contiguous parameter RAM slots */
-int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count);
-int edma_free_cont_slots(unsigned slot, int count);
-
-/* calls that operate on part of a parameter RAM slot */
-void edma_set_src(unsigned slot, dma_addr_t src_port,
- enum address_mode mode, enum fifo_width);
-void edma_set_dest(unsigned slot, dma_addr_t dest_port,
- enum address_mode mode, enum fifo_width);
-dma_addr_t edma_get_position(unsigned slot, bool dst);
-void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx);
-void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx);
-void edma_set_transfer_params(unsigned slot, u16 acnt, u16 bcnt, u16 ccnt,
- u16 bcnt_rld, enum sync_dimension sync_mode);
-void edma_link(unsigned from, unsigned to);
-void edma_unlink(unsigned from);
-
-/* calls that operate on an entire parameter RAM slot */
-void edma_write_slot(unsigned slot, const struct edmacc_param *params);
-void edma_read_slot(unsigned slot, struct edmacc_param *params);
-
-/* channel control operations */
-int edma_start(unsigned channel);
-void edma_stop(unsigned channel);
-void edma_clean_channel(unsigned channel);
-void edma_clear_event(unsigned channel);
-void edma_pause(unsigned channel);
-void edma_resume(unsigned channel);
-
-void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no);
-
struct edma_rsv_info {
const s16 (*rsv_chans)[2];
@@ -170,10 +71,11 @@ struct edma_soc_info {
/* Resource reservation for other cores */
struct edma_rsv_info *rsv;
+ /* List of channels allocated for memcpy, terminated with -1 */
+ s16 *memcpy_channels;
+
s8 (*queue_priority_mapping)[2];
const s16 (*xbar_chans)[2];
};
-int edma_trigger_channel(unsigned);
-
#endif
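
Editor's note: board files that still pass edma_soc_info can populate the new field with a -1-terminated list; a hypothetical example (channel numbers illustrative):

	#include <linux/platform_data/edma.h>

	/* Hypothetical: dedicate two channels to memcpy */
	static s16 my_memcpy_channels[] = { 20, 21, -1 };

	static struct edma_soc_info my_edma_info = {
		/* ... other fields ... */
		.memcpy_channels = my_memcpy_channels,
	};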