Diffstat (limited to 'drivers/spi')
-rw-r--r--  drivers/spi/Kconfig            |   4
-rw-r--r--  drivers/spi/atmel-quadspi.c    |   6
-rw-r--r--  drivers/spi/spi-at91-usart.c   | 221
-rw-r--r--  drivers/spi/spi-bcm2835.c      | 328
-rw-r--r--  drivers/spi/spi-bcm2835aux.c   |   4
-rw-r--r--  drivers/spi/spi-ep93xx.c       |   1
-rw-r--r--  drivers/spi/spi-mt7621.c       |   7
-rw-r--r--  drivers/spi/spi-pxa2xx.c       |  15
-rw-r--r--  drivers/spi/spi-rockchip.c     |   5
-rw-r--r--  drivers/spi/spi-rspi.c         |  71
-rw-r--r--  drivers/spi/spi-stm32.c        |   5
-rw-r--r--  drivers/spi/spi-tegra114.c     | 175
-rw-r--r--  drivers/spi/spi-zynqmp-gqspi.c |   6
-rw-r--r--  drivers/spi/spi.c              |  68
-rw-r--r--  drivers/spi/spidev.c           |   3
15 files changed, 695 insertions(+), 224 deletions(-)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 0fba8f400c592..3ee152feee2bd 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -119,7 +119,7 @@ config SPI_AXI_SPI_ENGINE
config SPI_BCM2835
tristate "BCM2835 SPI controller"
depends on GPIOLIB
- depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
help
This selects a driver for the Broadcom BCM2835 SPI master.
@@ -130,7 +130,7 @@ config SPI_BCM2835
config SPI_BCM2835AUX
tristate "BCM2835 SPI auxiliary controller"
- depends on (ARCH_BCM2835 && GPIOLIB) || COMPILE_TEST
+ depends on ((ARCH_BCM2835 || ARCH_BRCMSTB) && GPIOLIB) || COMPILE_TEST
help
This selects a driver for the Broadcom BCM2835 SPI aux master.
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index e54109759d348..9f24d5f0b4312 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -570,7 +570,8 @@ static int atmel_qspi_remove(struct platform_device *pdev)
static int __maybe_unused atmel_qspi_suspend(struct device *dev)
{
- struct atmel_qspi *aq = dev_get_drvdata(dev);
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
clk_disable_unprepare(aq->qspick);
clk_disable_unprepare(aq->pclk);
@@ -580,7 +581,8 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev)
static int __maybe_unused atmel_qspi_resume(struct device *dev)
{
- struct atmel_qspi *aq = dev_get_drvdata(dev);
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
clk_prepare_enable(aq->pclk);
clk_prepare_enable(aq->qspick);
diff --git a/drivers/spi/spi-at91-usart.c b/drivers/spi/spi-at91-usart.c
index f763e14bdf124..a40bb2ef89dc3 100644
--- a/drivers/spi/spi-at91-usart.c
+++ b/drivers/spi/spi-at91-usart.c
@@ -8,9 +8,12 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -59,6 +62,8 @@
#define US_INIT \
(US_MR_SPI_MASTER | US_MR_CHRL | US_MR_CLKO | US_MR_WRDBT)
+#define US_DMA_MIN_BYTES 16
+#define US_DMA_TIMEOUT (msecs_to_jiffies(1000))
/* Register access macros */
#define at91_usart_spi_readl(port, reg) \
@@ -72,14 +77,19 @@
writeb_relaxed((value), (port)->regs + US_##reg)
struct at91_usart_spi {
+ struct platform_device *mpdev;
struct spi_transfer *current_transfer;
void __iomem *regs;
struct device *dev;
struct clk *clk;
+ struct completion xfer_completion;
+
/*used in interrupt to protect data reading*/
spinlock_t lock;
+ phys_addr_t phybase;
+
int irq;
unsigned int current_tx_remaining_bytes;
unsigned int current_rx_remaining_bytes;
@@ -88,8 +98,182 @@ struct at91_usart_spi {
u32 status;
bool xfer_failed;
+ bool use_dma;
};
+static void dma_callback(void *data)
+{
+ struct spi_controller *ctlr = data;
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+
+ at91_usart_spi_writel(aus, IER, US_IR_RXRDY);
+ aus->current_rx_remaining_bytes = 0;
+ complete(&aus->xfer_completion);
+}
+
+static bool at91_usart_spi_can_dma(struct spi_controller *ctrl,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);
+
+ return aus->use_dma && xfer->len >= US_DMA_MIN_BYTES;
+}
+
+static int at91_usart_spi_configure_dma(struct spi_controller *ctlr,
+ struct at91_usart_spi *aus)
+{
+ struct dma_slave_config slave_config;
+ struct device *dev = &aus->mpdev->dev;
+ phys_addr_t phybase = aus->phybase;
+ dma_cap_mask_t mask;
+ int err = 0;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ ctlr->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+ if (IS_ERR_OR_NULL(ctlr->dma_tx)) {
+ if (IS_ERR(ctlr->dma_tx)) {
+ err = PTR_ERR(ctlr->dma_tx);
+ goto at91_usart_spi_error_clear;
+ }
+
+ dev_dbg(dev,
+ "DMA TX channel not available, SPI unable to use DMA\n");
+ err = -EBUSY;
+ goto at91_usart_spi_error_clear;
+ }
+
+ ctlr->dma_rx = dma_request_slave_channel_reason(dev, "rx");
+ if (IS_ERR_OR_NULL(ctlr->dma_rx)) {
+ if (IS_ERR(ctlr->dma_rx)) {
+ err = PTR_ERR(ctlr->dma_rx);
+ goto at91_usart_spi_error;
+ }
+
+ dev_dbg(dev,
+ "DMA RX channel not available, SPI unable to use DMA\n");
+ err = -EBUSY;
+ goto at91_usart_spi_error;
+ }
+
+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.dst_addr = (dma_addr_t)phybase + US_THR;
+ slave_config.src_addr = (dma_addr_t)phybase + US_RHR;
+ slave_config.src_maxburst = 1;
+ slave_config.dst_maxburst = 1;
+ slave_config.device_fc = false;
+
+ slave_config.direction = DMA_DEV_TO_MEM;
+ if (dmaengine_slave_config(ctlr->dma_rx, &slave_config)) {
+ dev_err(&ctlr->dev,
+ "failed to configure rx dma channel\n");
+ err = -EINVAL;
+ goto at91_usart_spi_error;
+ }
+
+ slave_config.direction = DMA_MEM_TO_DEV;
+ if (dmaengine_slave_config(ctlr->dma_tx, &slave_config)) {
+ dev_err(&ctlr->dev,
+ "failed to configure tx dma channel\n");
+ err = -EINVAL;
+ goto at91_usart_spi_error;
+ }
+
+ aus->use_dma = true;
+ return 0;
+
+at91_usart_spi_error:
+ if (!IS_ERR_OR_NULL(ctlr->dma_tx))
+ dma_release_channel(ctlr->dma_tx);
+ if (!IS_ERR_OR_NULL(ctlr->dma_rx))
+ dma_release_channel(ctlr->dma_rx);
+ ctlr->dma_tx = NULL;
+ ctlr->dma_rx = NULL;
+
+at91_usart_spi_error_clear:
+ return err;
+}
+
+static void at91_usart_spi_release_dma(struct spi_controller *ctlr)
+{
+ if (ctlr->dma_rx)
+ dma_release_channel(ctlr->dma_rx);
+ if (ctlr->dma_tx)
+ dma_release_channel(ctlr->dma_tx);
+}
+
+static void at91_usart_spi_stop_dma(struct spi_controller *ctlr)
+{
+ if (ctlr->dma_rx)
+ dmaengine_terminate_all(ctlr->dma_rx);
+ if (ctlr->dma_tx)
+ dmaengine_terminate_all(ctlr->dma_tx);
+}
+
+static int at91_usart_spi_dma_transfer(struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct dma_chan *rxchan = ctlr->dma_rx;
+ struct dma_chan *txchan = ctlr->dma_tx;
+ struct dma_async_tx_descriptor *rxdesc;
+ struct dma_async_tx_descriptor *txdesc;
+ dma_cookie_t cookie;
+
+ /* Disable RX interrupt */
+ at91_usart_spi_writel(aus, IDR, US_IR_RXRDY);
+
+ rxdesc = dmaengine_prep_slave_sg(rxchan,
+ xfer->rx_sg.sgl,
+ xfer->rx_sg.nents,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ if (!rxdesc)
+ goto at91_usart_spi_err_dma;
+
+ txdesc = dmaengine_prep_slave_sg(txchan,
+ xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ if (!txdesc)
+ goto at91_usart_spi_err_dma;
+
+ rxdesc->callback = dma_callback;
+ rxdesc->callback_param = ctlr;
+
+ cookie = rxdesc->tx_submit(rxdesc);
+ if (dma_submit_error(cookie))
+ goto at91_usart_spi_err_dma;
+
+ cookie = txdesc->tx_submit(txdesc);
+ if (dma_submit_error(cookie))
+ goto at91_usart_spi_err_dma;
+
+ rxchan->device->device_issue_pending(rxchan);
+ txchan->device->device_issue_pending(txchan);
+
+ return 0;
+
+at91_usart_spi_err_dma:
+ /* Enable RX interrupt if something fails and fall back to PIO */
+ at91_usart_spi_writel(aus, IER, US_IR_RXRDY);
+ at91_usart_spi_stop_dma(ctlr);
+
+ return -ENOMEM;
+}
+
+static unsigned long at91_usart_spi_dma_timeout(struct at91_usart_spi *aus)
+{
+ return wait_for_completion_timeout(&aus->xfer_completion,
+ US_DMA_TIMEOUT);
+}
+
static inline u32 at91_usart_spi_tx_ready(struct at91_usart_spi *aus)
{
return aus->status & US_IR_TXRDY;
@@ -216,6 +400,8 @@ static int at91_usart_spi_transfer_one(struct spi_controller *ctlr,
struct spi_transfer *xfer)
{
struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ unsigned long dma_timeout = 0;
+ int ret = 0;
at91_usart_spi_set_xfer_speed(aus, xfer);
aus->xfer_failed = false;
@@ -225,8 +411,25 @@ static int at91_usart_spi_transfer_one(struct spi_controller *ctlr,
while ((aus->current_tx_remaining_bytes ||
aus->current_rx_remaining_bytes) && !aus->xfer_failed) {
- at91_usart_spi_read_status(aus);
- at91_usart_spi_tx(aus);
+ reinit_completion(&aus->xfer_completion);
+ if (at91_usart_spi_can_dma(ctlr, spi, xfer) &&
+ !ret) {
+ ret = at91_usart_spi_dma_transfer(ctlr, xfer);
+ if (ret)
+ continue;
+
+ dma_timeout = at91_usart_spi_dma_timeout(aus);
+
+ if (WARN_ON(dma_timeout == 0)) {
+ dev_err(&spi->dev, "DMA transfer timeout\n");
+ return -EIO;
+ }
+ aus->current_tx_remaining_bytes = 0;
+ } else {
+ at91_usart_spi_read_status(aus);
+ at91_usart_spi_tx(aus);
+ }
+
cpu_relax();
}
@@ -345,6 +548,7 @@ static int at91_usart_spi_probe(struct platform_device *pdev)
controller->transfer_one = at91_usart_spi_transfer_one;
controller->prepare_message = at91_usart_spi_prepare_message;
controller->unprepare_message = at91_usart_spi_unprepare_message;
+ controller->can_dma = at91_usart_spi_can_dma;
controller->cleanup = at91_usart_spi_cleanup;
controller->max_speed_hz = DIV_ROUND_UP(clk_get_rate(clk),
US_MIN_CLK_DIV);
@@ -376,7 +580,17 @@ static int at91_usart_spi_probe(struct platform_device *pdev)
aus->spi_clk = clk_get_rate(clk);
at91_usart_spi_init(aus);
+ aus->phybase = regs->start;
+
+ aus->mpdev = to_platform_device(pdev->dev.parent);
+
+ ret = at91_usart_spi_configure_dma(controller, aus);
+ if (ret)
+ goto at91_usart_fail_dma;
+
spin_lock_init(&aus->lock);
+ init_completion(&aus->xfer_completion);
+
ret = devm_spi_register_master(&pdev->dev, controller);
if (ret)
goto at91_usart_fail_register_master;
@@ -389,6 +603,8 @@ static int at91_usart_spi_probe(struct platform_device *pdev)
return 0;
at91_usart_fail_register_master:
+ at91_usart_spi_release_dma(controller);
+at91_usart_fail_dma:
clk_disable_unprepare(clk);
at91_usart_spi_probe_fail:
spi_master_put(controller);
@@ -453,6 +669,7 @@ static int at91_usart_spi_remove(struct platform_device *pdev)
struct spi_controller *ctlr = platform_get_drvdata(pdev);
struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ at91_usart_spi_release_dma(ctlr);
clk_disable_unprepare(aus->clk);
return 0;
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 8aa22713c483c..9c03da7c18ddb 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -22,6 +22,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
@@ -73,14 +74,18 @@
#define BCM2835_SPI_FIFO_SIZE 64
#define BCM2835_SPI_FIFO_SIZE_3_4 48
-#define BCM2835_SPI_POLLING_LIMIT_US 30
-#define BCM2835_SPI_POLLING_JIFFIES 2
#define BCM2835_SPI_DMA_MIN_LENGTH 96
#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
| SPI_NO_CS | SPI_3WIRE)
#define DRV_NAME "spi-bcm2835"
+/* define polling limits */
+unsigned int polling_limit_us = 30;
+module_param(polling_limit_us, uint, 0664);
+MODULE_PARM_DESC(polling_limit_us,
+ "time in us to run a transfer in polling mode\n");
+
/**
* struct bcm2835_spi - BCM2835 SPI controller
* @regs: base address of register map
@@ -97,6 +102,15 @@
* length is not a multiple of 4 (to overcome hardware limitation)
* @tx_spillover: whether @tx_prologue spills over to second TX sglist entry
* @dma_pending: whether a DMA transfer is in progress
+ * @debugfs_dir: the debugfs directory - needed to remove debugfs when
+ * unloading the module
+ * @count_transfer_polling: count of how often polling mode is used
+ * @count_transfer_irq: count of how often interrupt mode is used
+ * @count_transfer_irq_after_polling: count of how often we fall back to
+ * interrupt mode after starting in polling mode.
+ * These are counted as well in @count_transfer_polling and
+ * @count_transfer_irq
+ * @count_transfer_dma: count how often dma mode is used
*/
struct bcm2835_spi {
void __iomem *regs;
@@ -111,8 +125,55 @@ struct bcm2835_spi {
int rx_prologue;
unsigned int tx_spillover;
unsigned int dma_pending;
+
+ struct dentry *debugfs_dir;
+ u64 count_transfer_polling;
+ u64 count_transfer_irq;
+ u64 count_transfer_irq_after_polling;
+ u64 count_transfer_dma;
};
+#if defined(CONFIG_DEBUG_FS)
+static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
+ const char *dname)
+{
+ char name[64];
+ struct dentry *dir;
+
+ /* get full name */
+ snprintf(name, sizeof(name), "spi-bcm2835-%s", dname);
+
+ /* the base directory */
+ dir = debugfs_create_dir(name, NULL);
+ bs->debugfs_dir = dir;
+
+ /* the counters */
+ debugfs_create_u64("count_transfer_polling", 0444, dir,
+ &bs->count_transfer_polling);
+ debugfs_create_u64("count_transfer_irq", 0444, dir,
+ &bs->count_transfer_irq);
+ debugfs_create_u64("count_transfer_irq_after_polling", 0444, dir,
+ &bs->count_transfer_irq_after_polling);
+ debugfs_create_u64("count_transfer_dma", 0444, dir,
+ &bs->count_transfer_dma);
+}
+
+static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
+{
+ debugfs_remove_recursive(bs->debugfs_dir);
+ bs->debugfs_dir = NULL;
+}
+#else
+static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
+ const char *dname)
+{
+}
+
+static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
{
return readl(bs->regs + reg);
@@ -257,9 +318,9 @@ static inline void bcm2835_wr_fifo_blind(struct bcm2835_spi *bs, int count)
}
}
-static void bcm2835_spi_reset_hw(struct spi_master *master)
+static void bcm2835_spi_reset_hw(struct spi_controller *ctlr)
{
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
/* Disable SPI interrupts and transfer */
@@ -278,8 +339,8 @@ static void bcm2835_spi_reset_hw(struct spi_master *master)
static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
{
- struct spi_master *master = dev_id;
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct spi_controller *ctlr = dev_id;
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
/*
@@ -301,20 +362,23 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
if (!bs->rx_len) {
/* Transfer complete - reset SPI HW */
- bcm2835_spi_reset_hw(master);
+ bcm2835_spi_reset_hw(ctlr);
/* wake up the framework */
- complete(&master->xfer_completion);
+ complete(&ctlr->xfer_completion);
}
return IRQ_HANDLED;
}
-static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
+static int bcm2835_spi_transfer_one_irq(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *tfr,
u32 cs, bool fifo_empty)
{
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+
+ /* update usage statistics */
+ bs->count_transfer_irq++;
/*
* Enable HW block, but with interrupts still disabled.
@@ -337,7 +401,7 @@ static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
/**
* bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
- * @master: SPI master
+ * @ctlr: SPI master controller
* @tfr: SPI transfer
* @bs: BCM2835 SPI controller
* @cs: CS register
@@ -381,7 +445,7 @@ static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
* be transmitted in 32-bit width to ensure that the following DMA transfer can
* pick up the residue in the RX FIFO in ungarbled form.
*/
-static void bcm2835_spi_transfer_prologue(struct spi_master *master,
+static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
struct spi_transfer *tfr,
struct bcm2835_spi *bs,
u32 cs)
@@ -422,9 +486,9 @@ static void bcm2835_spi_transfer_prologue(struct spi_master *master,
bcm2835_wr_fifo_count(bs, bs->rx_prologue);
bcm2835_wait_tx_fifo_empty(bs);
bcm2835_rd_fifo_count(bs, bs->rx_prologue);
- bcm2835_spi_reset_hw(master);
+ bcm2835_spi_reset_hw(ctlr);
- dma_sync_single_for_device(master->dma_rx->device->dev,
+ dma_sync_single_for_device(ctlr->dma_rx->device->dev,
sg_dma_address(&tfr->rx_sg.sgl[0]),
bs->rx_prologue, DMA_FROM_DEVICE);
@@ -488,11 +552,11 @@ static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
static void bcm2835_spi_dma_done(void *data)
{
- struct spi_master *master = data;
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct spi_controller *ctlr = data;
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
/* reset fifo and HW */
- bcm2835_spi_reset_hw(master);
+ bcm2835_spi_reset_hw(ctlr);
/* and terminate tx-dma as we do not have an irq for it
* because when the rx dma will terminate and this callback
@@ -500,15 +564,15 @@ static void bcm2835_spi_dma_done(void *data)
* situation otherwise...
*/
if (cmpxchg(&bs->dma_pending, true, false)) {
- dmaengine_terminate_async(master->dma_tx);
+ dmaengine_terminate_async(ctlr->dma_tx);
bcm2835_spi_undo_prologue(bs);
}
/* and mark as completed */;
- complete(&master->xfer_completion);
+ complete(&ctlr->xfer_completion);
}
-static int bcm2835_spi_prepare_sg(struct spi_master *master,
+static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
struct spi_transfer *tfr,
bool is_tx)
{
@@ -523,14 +587,14 @@ static int bcm2835_spi_prepare_sg(struct spi_master *master,
if (is_tx) {
dir = DMA_MEM_TO_DEV;
- chan = master->dma_tx;
+ chan = ctlr->dma_tx;
nents = tfr->tx_sg.nents;
sgl = tfr->tx_sg.sgl;
flags = 0 /* no tx interrupt */;
} else {
dir = DMA_DEV_TO_MEM;
- chan = master->dma_rx;
+ chan = ctlr->dma_rx;
nents = tfr->rx_sg.nents;
sgl = tfr->rx_sg.sgl;
flags = DMA_PREP_INTERRUPT;
@@ -543,7 +607,7 @@ static int bcm2835_spi_prepare_sg(struct spi_master *master,
/* set callback for rx */
if (!is_tx) {
desc->callback = bcm2835_spi_dma_done;
- desc->callback_param = master;
+ desc->callback_param = ctlr;
}
/* submit it to DMA-engine */
@@ -552,27 +616,30 @@ static int bcm2835_spi_prepare_sg(struct spi_master *master,
return dma_submit_error(cookie);
}
-static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
+static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *tfr,
u32 cs)
{
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
int ret;
+ /* update usage statistics */
+ bs->count_transfer_dma++;
+
/*
* Transfer first few bytes without DMA if length of first TX or RX
* sglist entry is not a multiple of 4 bytes (hardware limitation).
*/
- bcm2835_spi_transfer_prologue(master, tfr, bs, cs);
+ bcm2835_spi_transfer_prologue(ctlr, tfr, bs, cs);
/* setup tx-DMA */
- ret = bcm2835_spi_prepare_sg(master, tfr, true);
+ ret = bcm2835_spi_prepare_sg(ctlr, tfr, true);
if (ret)
goto err_reset_hw;
/* start TX early */
- dma_async_issue_pending(master->dma_tx);
+ dma_async_issue_pending(ctlr->dma_tx);
/* mark as dma pending */
bs->dma_pending = 1;
@@ -588,27 +655,27 @@ static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
* mapping of the rx buffers still takes place
* this saves 10us or more.
*/
- ret = bcm2835_spi_prepare_sg(master, tfr, false);
+ ret = bcm2835_spi_prepare_sg(ctlr, tfr, false);
if (ret) {
/* need to reset on errors */
- dmaengine_terminate_sync(master->dma_tx);
+ dmaengine_terminate_sync(ctlr->dma_tx);
bs->dma_pending = false;
goto err_reset_hw;
}
/* start rx dma late */
- dma_async_issue_pending(master->dma_rx);
+ dma_async_issue_pending(ctlr->dma_rx);
/* wait for wakeup in framework */
return 1;
err_reset_hw:
- bcm2835_spi_reset_hw(master);
+ bcm2835_spi_reset_hw(ctlr);
bcm2835_spi_undo_prologue(bs);
return ret;
}
-static bool bcm2835_spi_can_dma(struct spi_master *master,
+static bool bcm2835_spi_can_dma(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *tfr)
{
@@ -620,21 +687,21 @@ static bool bcm2835_spi_can_dma(struct spi_master *master,
return true;
}
-static void bcm2835_dma_release(struct spi_master *master)
+static void bcm2835_dma_release(struct spi_controller *ctlr)
{
- if (master->dma_tx) {
- dmaengine_terminate_sync(master->dma_tx);
- dma_release_channel(master->dma_tx);
- master->dma_tx = NULL;
+ if (ctlr->dma_tx) {
+ dmaengine_terminate_sync(ctlr->dma_tx);
+ dma_release_channel(ctlr->dma_tx);
+ ctlr->dma_tx = NULL;
}
- if (master->dma_rx) {
- dmaengine_terminate_sync(master->dma_rx);
- dma_release_channel(master->dma_rx);
- master->dma_rx = NULL;
+ if (ctlr->dma_rx) {
+ dmaengine_terminate_sync(ctlr->dma_rx);
+ dma_release_channel(ctlr->dma_rx);
+ ctlr->dma_rx = NULL;
}
}
-static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
+static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev)
{
struct dma_slave_config slave_config;
const __be32 *addr;
@@ -642,7 +709,7 @@ static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
int ret;
/* base address in dma-space */
- addr = of_get_address(master->dev.of_node, 0, NULL, NULL);
+ addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
if (!addr) {
dev_err(dev, "could not get DMA-register address - not using dma mode\n");
goto err;
@@ -650,38 +717,36 @@ static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
dma_reg_base = be32_to_cpup(addr);
/* get tx/rx dma */
- master->dma_tx = dma_request_slave_channel(dev, "tx");
- if (!master->dma_tx) {
+ ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
+ if (!ctlr->dma_tx) {
dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
goto err;
}
- master->dma_rx = dma_request_slave_channel(dev, "rx");
- if (!master->dma_rx) {
+ ctlr->dma_rx = dma_request_slave_channel(dev, "rx");
+ if (!ctlr->dma_rx) {
dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
goto err_release;
}
/* configure DMAs */
- slave_config.direction = DMA_MEM_TO_DEV;
slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- ret = dmaengine_slave_config(master->dma_tx, &slave_config);
+ ret = dmaengine_slave_config(ctlr->dma_tx, &slave_config);
if (ret)
goto err_config;
- slave_config.direction = DMA_DEV_TO_MEM;
slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- ret = dmaengine_slave_config(master->dma_rx, &slave_config);
+ ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config);
if (ret)
goto err_config;
/* all went well, so set can_dma */
- master->can_dma = bcm2835_spi_can_dma;
+ ctlr->can_dma = bcm2835_spi_can_dma;
/* need to do TX AND RX DMA, so we need dummy buffers */
- master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+ ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
return;
@@ -689,20 +754,22 @@ err_config:
dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
ret);
err_release:
- bcm2835_dma_release(master);
+ bcm2835_dma_release(ctlr);
err:
return;
}
-static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
+static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *tfr,
- u32 cs,
- unsigned long long xfer_time_us)
+ u32 cs)
{
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
unsigned long timeout;
+ /* update usage statistics */
+ bs->count_transfer_polling++;
+
/* enable HW block without interrupts */
bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
@@ -712,8 +779,8 @@ static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
*/
bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
- /* set the timeout */
- timeout = jiffies + BCM2835_SPI_POLLING_JIFFIES;
+ /* set the timeout to at least 2 jiffies */
+ timeout = jiffies + 2 + HZ * polling_limit_us / 1000000;
/* loop until finished the transfer */
while (bs->rx_len) {
@@ -732,25 +799,28 @@ static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
jiffies - timeout,
bs->tx_len, bs->rx_len);
/* fall back to interrupt mode */
- return bcm2835_spi_transfer_one_irq(master, spi,
+
+ /* update usage statistics */
+ bs->count_transfer_irq_after_polling++;
+
+ return bcm2835_spi_transfer_one_irq(ctlr, spi,
tfr, cs, false);
}
}
/* Transfer complete - reset SPI HW */
- bcm2835_spi_reset_hw(master);
+ bcm2835_spi_reset_hw(ctlr);
/* and return without waiting for completion */
return 0;
}
-static int bcm2835_spi_transfer_one(struct spi_master *master,
+static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *tfr)
{
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
- unsigned long spi_hz, clk_hz, cdiv;
- unsigned long spi_used_hz;
- unsigned long long xfer_time_us;
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+ unsigned long spi_hz, clk_hz, cdiv, spi_used_hz;
+ unsigned long hz_per_byte, byte_limit;
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
/* set clock */
@@ -791,42 +861,49 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
bs->tx_len = tfr->len;
bs->rx_len = tfr->len;
- /* calculate the estimated time in us the transfer runs */
- xfer_time_us = (unsigned long long)tfr->len
- * 9 /* clocks/byte - SPI-HW waits 1 clock after each byte */
- * 1000000;
- do_div(xfer_time_us, spi_used_hz);
+ /* Calculate the estimated time in us the transfer runs. Note that
+ * there is 1 idle clock cycle after each byte transferred
+ * so we have 9 cycles/byte. This is used to find the number of Hz
+ * per byte per polling limit. E.g., we can transfer 1 byte in 30 us
+ * per 300,000 Hz of bus clock.
+ */
+ hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
+ byte_limit = hz_per_byte ? spi_used_hz / hz_per_byte : 1;
- /* for short requests run polling*/
- if (xfer_time_us <= BCM2835_SPI_POLLING_LIMIT_US)
- return bcm2835_spi_transfer_one_poll(master, spi, tfr,
- cs, xfer_time_us);
+ /* run in polling mode for short transfers */
+ if (tfr->len < byte_limit)
+ return bcm2835_spi_transfer_one_poll(ctlr, spi, tfr, cs);
- /* run in dma mode if conditions are right */
- if (master->can_dma && bcm2835_spi_can_dma(master, spi, tfr))
- return bcm2835_spi_transfer_one_dma(master, spi, tfr, cs);
+ /* run in dma mode if conditions are right
+ * Note that unlike poll or interrupt mode, DMA mode does not have
+ * this 1 idle clock cycle pattern but runs the spi clock without gaps
+ */
+ if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr))
+ return bcm2835_spi_transfer_one_dma(ctlr, spi, tfr, cs);
/* run in interrupt-mode */
- return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs, true);
+ return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, true);
}
-static int bcm2835_spi_prepare_message(struct spi_master *master,
+static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct spi_device *spi = msg->spi;
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
int ret;
- /*
- * DMA transfers are limited to 16 bit (0 to 65535 bytes) by the SPI HW
- * due to DLEN. Split up transfers (32-bit FIFO aligned) if the limit is
- * exceeded.
- */
- ret = spi_split_transfers_maxsize(master, msg, 65532,
- GFP_KERNEL | GFP_DMA);
- if (ret)
- return ret;
+ if (ctlr->can_dma) {
+ /*
+ * DMA transfers are limited to 16 bit (0 to 65535 bytes) by
+ * the SPI HW due to DLEN. Split up transfers (32-bit FIFO
+ * aligned) if the limit is exceeded.
+ */
+ ret = spi_split_transfers_maxsize(ctlr, msg, 65532,
+ GFP_KERNEL | GFP_DMA);
+ if (ret)
+ return ret;
+ }
cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);
@@ -840,19 +917,19 @@ static int bcm2835_spi_prepare_message(struct spi_master *master,
return 0;
}
-static void bcm2835_spi_handle_err(struct spi_master *master,
+static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
struct spi_message *msg)
{
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
/* if an error occurred and we have an active dma, then terminate */
if (cmpxchg(&bs->dma_pending, true, false)) {
- dmaengine_terminate_sync(master->dma_tx);
- dmaengine_terminate_sync(master->dma_rx);
+ dmaengine_terminate_sync(ctlr->dma_tx);
+ dmaengine_terminate_sync(ctlr->dma_rx);
bcm2835_spi_undo_prologue(bs);
}
/* and reset */
- bcm2835_spi_reset_hw(master);
+ bcm2835_spi_reset_hw(ctlr);
}
static int chip_match_name(struct gpio_chip *chip, void *data)
@@ -909,85 +986,88 @@ static int bcm2835_spi_setup(struct spi_device *spi)
static int bcm2835_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *ctlr;
struct bcm2835_spi *bs;
struct resource *res;
int err;
- master = spi_alloc_master(&pdev->dev, sizeof(*bs));
- if (!master) {
- dev_err(&pdev->dev, "spi_alloc_master() failed\n");
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*bs));
+ if (!ctlr)
return -ENOMEM;
- }
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, ctlr);
- master->mode_bits = BCM2835_SPI_MODE_BITS;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->num_chipselect = 3;
- master->setup = bcm2835_spi_setup;
- master->transfer_one = bcm2835_spi_transfer_one;
- master->handle_err = bcm2835_spi_handle_err;
- master->prepare_message = bcm2835_spi_prepare_message;
- master->dev.of_node = pdev->dev.of_node;
+ ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->num_chipselect = 3;
+ ctlr->setup = bcm2835_spi_setup;
+ ctlr->transfer_one = bcm2835_spi_transfer_one;
+ ctlr->handle_err = bcm2835_spi_handle_err;
+ ctlr->prepare_message = bcm2835_spi_prepare_message;
+ ctlr->dev.of_node = pdev->dev.of_node;
- bs = spi_master_get_devdata(master);
+ bs = spi_controller_get_devdata(ctlr);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
bs->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(bs->regs)) {
err = PTR_ERR(bs->regs);
- goto out_master_put;
+ goto out_controller_put;
}
bs->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(bs->clk)) {
err = PTR_ERR(bs->clk);
dev_err(&pdev->dev, "could not get clk: %d\n", err);
- goto out_master_put;
+ goto out_controller_put;
}
bs->irq = platform_get_irq(pdev, 0);
if (bs->irq <= 0) {
dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
err = bs->irq ? bs->irq : -ENODEV;
- goto out_master_put;
+ goto out_controller_put;
}
clk_prepare_enable(bs->clk);
- bcm2835_dma_init(master, &pdev->dev);
+ bcm2835_dma_init(ctlr, &pdev->dev);
/* initialise the hardware with the default polarities */
bcm2835_wr(bs, BCM2835_SPI_CS,
BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0,
- dev_name(&pdev->dev), master);
+ dev_name(&pdev->dev), ctlr);
if (err) {
dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
goto out_clk_disable;
}
- err = devm_spi_register_master(&pdev->dev, master);
+ err = devm_spi_register_controller(&pdev->dev, ctlr);
if (err) {
- dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
+ dev_err(&pdev->dev, "could not register SPI controller: %d\n",
+ err);
goto out_clk_disable;
}
+ bcm2835_debugfs_create(bs, dev_name(&pdev->dev));
+
return 0;
out_clk_disable:
clk_disable_unprepare(bs->clk);
-out_master_put:
- spi_master_put(master);
+out_controller_put:
+ spi_controller_put(ctlr);
return err;
}
static int bcm2835_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ struct spi_controller *ctlr = platform_get_drvdata(pdev);
+ struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+
+ bcm2835_debugfs_remove(bs);
/* Clear FIFOs, and disable the HW block */
bcm2835_wr(bs, BCM2835_SPI_CS,
@@ -995,7 +1075,7 @@ static int bcm2835_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(bs->clk);
- bcm2835_dma_release(master);
+ bcm2835_dma_release(ctlr);
return 0;
}
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index bbf87adb3ff8a..4523bacd583ff 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -505,10 +505,8 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
int err;
master = spi_alloc_master(&pdev->dev, sizeof(*bs));
- if (!master) {
- dev_err(&pdev->dev, "spi_alloc_master() failed\n");
+ if (!master)
return -ENOMEM;
- }
platform_set_drvdata(pdev, master);
master->mode_bits = (SPI_CPOL | SPI_CS_HIGH | SPI_NO_CS);
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 47e39251bad9d..81889389280bc 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -651,7 +651,6 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
struct resource *res;
int irq;
int error;
- int i;
info = dev_get_platdata(&pdev->dev);
if (!info) {
diff --git a/drivers/spi/spi-mt7621.c b/drivers/spi/spi-mt7621.c
index ae836114ee3d2..ff85982464d2e 100644
--- a/drivers/spi/spi-mt7621.c
+++ b/drivers/spi/spi-mt7621.c
@@ -57,8 +57,6 @@ struct mt7621_spi {
unsigned int speed;
struct clk *clk;
int pending_write;
-
- struct mt7621_spi_ops *ops;
};
static inline struct mt7621_spi *spidev_to_mt7621_spi(struct spi_device *spi)
@@ -306,7 +304,7 @@ static int mt7621_spi_setup(struct spi_device *spi)
if ((spi->max_speed_hz == 0) ||
(spi->max_speed_hz > (rs->sys_freq / 2)))
- spi->max_speed_hz = (rs->sys_freq / 2);
+ spi->max_speed_hz = rs->sys_freq / 2;
if (spi->max_speed_hz < (rs->sys_freq / 4097)) {
dev_err(&spi->dev, "setup: requested speed is too low %d Hz\n",
@@ -332,13 +330,11 @@ static int mt7621_spi_probe(struct platform_device *pdev)
struct resource *r;
int status = 0;
struct clk *clk;
- struct mt7621_spi_ops *ops;
int ret;
match = of_match_device(mt7621_spi_match, &pdev->dev);
if (!match)
return -EINVAL;
- ops = (struct mt7621_spi_ops *)match->data;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, r);
@@ -377,7 +373,6 @@ static int mt7621_spi_probe(struct platform_device *pdev)
rs->clk = clk;
rs->master = master;
rs->sys_freq = clk_get_rate(rs->clk);
- rs->ops = ops;
rs->pending_write = 0;
dev_info(&pdev->dev, "sys_freq: %u\n", rs->sys_freq);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index f5546eeaebe42..298a0bec29d10 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -884,10 +884,14 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
rate = min_t(int, ssp_clk, rate);
+ /*
+ * Calculate the divisor for the SCR (Serial Clock Rate), avoiding
+ * an SSP transmission rate greater than the device rate
+ */
if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
- return (ssp_clk / (2 * rate) - 1) & 0xff;
+ return (DIV_ROUND_UP(ssp_clk, 2 * rate) - 1) & 0xff;
else
- return (ssp_clk / rate - 1) & 0xfff;
+ return (DIV_ROUND_UP(ssp_clk, rate) - 1) & 0xfff;
}
static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
@@ -1494,12 +1498,7 @@ static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
{
- struct device *dev = param;
-
- if (dev != chan->device->dev->parent)
- return false;
-
- return true;
+ return param == chan->device->dev;
}
#endif /* CONFIG_PCI */
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 3912526ead664..62affd7dc7c1e 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
@@ -425,7 +426,7 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
.direction = DMA_MEM_TO_DEV,
.dst_addr = rs->dma_addr_tx,
.dst_addr_width = rs->n_bytes,
- .dst_maxburst = rs->fifo_len / 2,
+ .dst_maxburst = rs->fifo_len / 4,
};
dmaengine_slave_config(master->dma_tx, &txconf);
@@ -526,7 +527,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
else
writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
- writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_DMATDLR);
+ writel_relaxed(rs->fifo_len / 2, rs->regs + ROCKCHIP_SPI_DMATDLR);
writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 3be8fbe80b084..15f5723d9f952 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -739,27 +739,22 @@ static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
while (len > 0) {
n = qspi_set_send_trigger(rspi, len);
qspi_set_receive_trigger(rspi, len);
- if (n == QSPI_BUFFER_SIZE) {
- ret = rspi_wait_for_tx_empty(rspi);
- if (ret < 0) {
- dev_err(&rspi->ctlr->dev, "transmit timeout\n");
- return ret;
- }
- for (i = 0; i < n; i++)
- rspi_write_data(rspi, *tx++);
+ ret = rspi_wait_for_tx_empty(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->ctlr->dev, "transmit timeout\n");
+ return ret;
+ }
+ for (i = 0; i < n; i++)
+ rspi_write_data(rspi, *tx++);
- ret = rspi_wait_for_rx_full(rspi);
- if (ret < 0) {
- dev_err(&rspi->ctlr->dev, "receive timeout\n");
- return ret;
- }
- for (i = 0; i < n; i++)
- *rx++ = rspi_read_data(rspi);
- } else {
- ret = rspi_pio_transfer(rspi, tx, rx, n);
- if (ret < 0)
- return ret;
+ ret = rspi_wait_for_rx_full(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->ctlr->dev, "receive timeout\n");
+ return ret;
}
+ for (i = 0; i < n; i++)
+ *rx++ = rspi_read_data(rspi);
+
len -= n;
}
@@ -796,19 +791,14 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
while (n > 0) {
len = qspi_set_send_trigger(rspi, n);
- if (len == QSPI_BUFFER_SIZE) {
- ret = rspi_wait_for_tx_empty(rspi);
- if (ret < 0) {
- dev_err(&rspi->ctlr->dev, "transmit timeout\n");
- return ret;
- }
- for (i = 0; i < len; i++)
- rspi_write_data(rspi, *tx++);
- } else {
- ret = rspi_pio_transfer(rspi, tx, NULL, len);
- if (ret < 0)
- return ret;
+ ret = rspi_wait_for_tx_empty(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->ctlr->dev, "transmit timeout\n");
+ return ret;
}
+ for (i = 0; i < len; i++)
+ rspi_write_data(rspi, *tx++);
+
n -= len;
}
@@ -833,19 +823,14 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
while (n > 0) {
len = qspi_set_receive_trigger(rspi, n);
- if (len == QSPI_BUFFER_SIZE) {
- ret = rspi_wait_for_rx_full(rspi);
- if (ret < 0) {
- dev_err(&rspi->ctlr->dev, "receive timeout\n");
- return ret;
- }
- for (i = 0; i < len; i++)
- *rx++ = rspi_read_data(rspi);
- } else {
- ret = rspi_pio_transfer(rspi, NULL, rx, len);
- if (ret < 0)
- return ret;
+ ret = rspi_wait_for_rx_full(rspi);
+ if (ret < 0) {
+ dev_err(&rspi->ctlr->dev, "receive timeout\n");
+ return ret;
}
+ for (i = 0; i < len; i++)
+ *rx++ = rspi_read_data(rspi);
+
n -= len;
}
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 4186ed20d7969..b222ce8d083ef 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -1839,8 +1839,9 @@ static int stm32_spi_probe(struct platform_device *pdev)
spi->irq = platform_get_irq(pdev, 0);
if (spi->irq <= 0) {
- dev_err(&pdev->dev, "no irq: %d\n", spi->irq);
- ret = -ENOENT;
+ ret = spi->irq;
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
goto err_master_put;
}
ret = devm_request_threaded_irq(&pdev->dev, spi->irq,
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index b8c6393e21901..15f9368fc0f8b 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -95,8 +95,10 @@
(reg = (((val) & 0x1) << ((cs) * 8 + 5)) | \
((reg) & ~(1 << ((cs) * 8 + 5))))
#define SPI_SET_CYCLES_BETWEEN_PACKETS(reg, cs, val) \
- (reg = (((val) & 0xF) << ((cs) * 8)) | \
- ((reg) & ~(0xF << ((cs) * 8))))
+ (reg = (((val) & 0x1F) << ((cs) * 8)) | \
+ ((reg) & ~(0x1F << ((cs) * 8))))
+#define MAX_SETUP_HOLD_CYCLES 16
+#define MAX_INACTIVE_CYCLES 32
#define SPI_TRANS_STATUS 0x010
#define SPI_BLK_CNT(val) (((val) >> 0) & 0xFFFF)
@@ -167,6 +169,11 @@ struct tegra_spi_soc_data {
bool has_intr_mask_reg;
};
+struct tegra_spi_client_data {
+ int tx_clk_tap_delay;
+ int rx_clk_tap_delay;
+};
+
struct tegra_spi_data {
struct device *dev;
struct spi_master *master;
@@ -193,6 +200,7 @@ struct tegra_spi_data {
unsigned dma_buf_size;
unsigned max_buf_size;
bool is_curr_dma_xfer;
+ bool use_hw_based_cs;
struct completion rx_dma_complete;
struct completion tx_dma_complete;
@@ -205,6 +213,10 @@ struct tegra_spi_data {
u32 command1_reg;
u32 dma_control_reg;
u32 def_command1_reg;
+ u32 def_command2_reg;
+ u32 spi_cs_timing1;
+ u32 spi_cs_timing2;
+ u8 last_used_cs;
struct completion xfer_completion;
struct spi_transfer *curr_xfer;
@@ -651,8 +663,9 @@ static int tegra_spi_start_cpu_based_transfer(
tspi->is_curr_dma_xfer = false;
- val |= SPI_DMA_EN;
- tegra_spi_writel(tspi, val, SPI_DMA_CTL);
+ val = tspi->command1_reg;
+ val |= SPI_PIO;
+ tegra_spi_writel(tspi, val, SPI_COMMAND1);
return 0;
}
@@ -721,14 +734,55 @@ static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
dma_release_channel(dma_chan);
}
+static void tegra_spi_set_hw_cs_timing(struct spi_device *spi, u8 setup_dly,
+ u8 hold_dly, u8 inactive_dly)
+{
+ struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ u32 setup_hold;
+ u32 spi_cs_timing;
+ u32 inactive_cycles;
+ u8 cs_state;
+
+ setup_dly = min_t(u8, setup_dly, MAX_SETUP_HOLD_CYCLES);
+ hold_dly = min_t(u8, hold_dly, MAX_SETUP_HOLD_CYCLES);
+ if (setup_dly && hold_dly) {
+ setup_hold = SPI_SETUP_HOLD(setup_dly - 1, hold_dly - 1);
+ spi_cs_timing = SPI_CS_SETUP_HOLD(tspi->spi_cs_timing1,
+ spi->chip_select,
+ setup_hold);
+ if (tspi->spi_cs_timing1 != spi_cs_timing) {
+ tspi->spi_cs_timing1 = spi_cs_timing;
+ tegra_spi_writel(tspi, spi_cs_timing, SPI_CS_TIMING1);
+ }
+ }
+
+ inactive_cycles = min_t(u8, inactive_dly, MAX_INACTIVE_CYCLES);
+ if (inactive_cycles)
+ inactive_cycles--;
+ cs_state = inactive_cycles ? 0 : 1;
+ spi_cs_timing = tspi->spi_cs_timing2;
+ SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(spi_cs_timing, spi->chip_select,
+ cs_state);
+ SPI_SET_CYCLES_BETWEEN_PACKETS(spi_cs_timing, spi->chip_select,
+ inactive_cycles);
+ if (tspi->spi_cs_timing2 != spi_cs_timing) {
+ tspi->spi_cs_timing2 = spi_cs_timing;
+ tegra_spi_writel(tspi, spi_cs_timing, SPI_CS_TIMING2);
+ }
+}
+
static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
- struct spi_transfer *t, bool is_first_of_msg)
+ struct spi_transfer *t,
+ bool is_first_of_msg,
+ bool is_single_xfer)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_spi_client_data *cdata = spi->controller_data;
u32 speed = t->speed_hz;
u8 bits_per_word = t->bits_per_word;
- u32 command1;
+ u32 command1, command2;
int req_mode;
+ u32 tx_tap = 0, rx_tap = 0;
if (speed != tspi->cur_speed) {
clk_set_rate(tspi->clk, speed);
@@ -775,13 +829,34 @@ static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
} else
tegra_spi_writel(tspi, command1, SPI_COMMAND1);
- command1 |= SPI_CS_SW_HW;
- if (spi->mode & SPI_CS_HIGH)
- command1 |= SPI_CS_SW_VAL;
- else
- command1 &= ~SPI_CS_SW_VAL;
+ /* GPIO based chip select control */
+ if (spi->cs_gpiod)
+ gpiod_set_value(spi->cs_gpiod, 1);
+
+ if (is_single_xfer && !(t->cs_change)) {
+ tspi->use_hw_based_cs = true;
+ command1 &= ~(SPI_CS_SW_HW | SPI_CS_SW_VAL);
+ } else {
+ tspi->use_hw_based_cs = false;
+ command1 |= SPI_CS_SW_HW;
+ if (spi->mode & SPI_CS_HIGH)
+ command1 |= SPI_CS_SW_VAL;
+ else
+ command1 &= ~SPI_CS_SW_VAL;
+ }
+
+ if (tspi->last_used_cs != spi->chip_select) {
+ if (cdata && cdata->tx_clk_tap_delay)
+ tx_tap = cdata->tx_clk_tap_delay;
+ if (cdata && cdata->rx_clk_tap_delay)
+ rx_tap = cdata->rx_clk_tap_delay;
+ command2 = SPI_TX_TAP_DELAY(tx_tap) |
+ SPI_RX_TAP_DELAY(rx_tap);
+ if (command2 != tspi->def_command2_reg)
+ tegra_spi_writel(tspi, command2, SPI_COMMAND2);
+ tspi->last_used_cs = spi->chip_select;
+ }
- tegra_spi_writel(tspi, 0, SPI_COMMAND2);
} else {
command1 = tspi->command1_reg;
command1 &= ~SPI_BIT_LENGTH(~0);
@@ -837,9 +912,42 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
return ret;
}
+static struct tegra_spi_client_data
+ *tegra_spi_parse_cdata_dt(struct spi_device *spi)
+{
+ struct tegra_spi_client_data *cdata;
+ struct device_node *slave_np;
+
+ slave_np = spi->dev.of_node;
+ if (!slave_np) {
+ dev_dbg(&spi->dev, "device node not found\n");
+ return NULL;
+ }
+
+ cdata = kzalloc(sizeof(*cdata), GFP_KERNEL);
+ if (!cdata)
+ return NULL;
+
+ of_property_read_u32(slave_np, "nvidia,tx-clk-tap-delay",
+ &cdata->tx_clk_tap_delay);
+ of_property_read_u32(slave_np, "nvidia,rx-clk-tap-delay",
+ &cdata->rx_clk_tap_delay);
+ return cdata;
+}
+
+static void tegra_spi_cleanup(struct spi_device *spi)
+{
+ struct tegra_spi_client_data *cdata = spi->controller_data;
+
+ spi->controller_data = NULL;
+ if (spi->dev.of_node)
+ kfree(cdata);
+}
+
static int tegra_spi_setup(struct spi_device *spi)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ struct tegra_spi_client_data *cdata = spi->controller_data;
u32 val;
unsigned long flags;
int ret;
@@ -850,9 +958,16 @@ static int tegra_spi_setup(struct spi_device *spi)
spi->mode & SPI_CPHA ? "" : "~",
spi->max_speed_hz);
+ if (!cdata) {
+ cdata = tegra_spi_parse_cdata_dt(spi);
+ spi->controller_data = cdata;
+ }
+
ret = pm_runtime_get_sync(tspi->dev);
if (ret < 0) {
dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
+ if (cdata)
+ tegra_spi_cleanup(spi);
return ret;
}
@@ -863,6 +978,10 @@ static int tegra_spi_setup(struct spi_device *spi)
}
spin_lock_irqsave(&tspi->lock, flags);
+ /* GPIO based chip select control */
+ if (spi->cs_gpiod)
+ gpiod_set_value(spi->cs_gpiod, 0);
+
val = tspi->def_command1_reg;
if (spi->mode & SPI_CS_HIGH)
val &= ~SPI_CS_POL_INACTIVE(spi->chip_select);
@@ -892,11 +1011,18 @@ static void tegra_spi_transfer_end(struct spi_device *spi)
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
- if (cs_val)
- tspi->command1_reg |= SPI_CS_SW_VAL;
- else
- tspi->command1_reg &= ~SPI_CS_SW_VAL;
- tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
+ /* GPIO based chip select control */
+ if (spi->cs_gpiod)
+ gpiod_set_value(spi->cs_gpiod, 0);
+
+ if (!tspi->use_hw_based_cs) {
+ if (cs_val)
+ tspi->command1_reg |= SPI_CS_SW_VAL;
+ else
+ tspi->command1_reg &= ~SPI_CS_SW_VAL;
+ tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
+ }
+
tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
}
@@ -923,16 +1049,19 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
struct spi_device *spi = msg->spi;
int ret;
bool skip = false;
+ int single_xfer;
msg->status = 0;
msg->actual_length = 0;
+ single_xfer = list_is_singular(&msg->transfers);
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
u32 cmd1;
reinit_completion(&tspi->xfer_completion);
- cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg);
+ cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg,
+ single_xfer);
if (!xfer->len) {
ret = 0;
@@ -965,6 +1094,7 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
reset_control_assert(tspi->rst);
udelay(2);
reset_control_deassert(tspi->rst);
+ tspi->last_used_cs = master->num_chipselect + 1;
goto complete_xfer;
}
@@ -1198,11 +1328,14 @@ static int tegra_spi_probe(struct platform_device *pdev)
master->max_speed_hz = 25000000; /* 25MHz */
/* the spi->mode bits understood by this driver: */
+ master->use_gpio_descriptors = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
SPI_TX_DUAL | SPI_RX_DUAL | SPI_3WIRE;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
master->setup = tegra_spi_setup;
+ master->cleanup = tegra_spi_cleanup;
master->transfer_one_message = tegra_spi_transfer_one_message;
+ master->set_cs_timing = tegra_spi_set_hw_cs_timing;
master->num_chipselect = MAX_CHIP_SELECT;
master->auto_runtime_pm = true;
bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
@@ -1278,6 +1411,10 @@ static int tegra_spi_probe(struct platform_device *pdev)
reset_control_deassert(tspi->rst);
tspi->def_command1_reg = SPI_M_S;
tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
+ tspi->spi_cs_timing1 = tegra_spi_readl(tspi, SPI_CS_TIMING1);
+ tspi->spi_cs_timing2 = tegra_spi_readl(tspi, SPI_CS_TIMING2);
+ tspi->def_command2_reg = tegra_spi_readl(tspi, SPI_COMMAND2);
+ tspi->last_used_cs = master->num_chipselect + 1;
pm_runtime_put(&pdev->dev);
ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
tegra_spi_isr_thread, IRQF_ONESHOT,
@@ -1350,6 +1487,8 @@ static int tegra_spi_resume(struct device *dev)
return ret;
}
tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
+ tegra_spi_writel(tspi, tspi->def_command2_reg, SPI_COMMAND2);
+ tspi->last_used_cs = master->num_chipselect + 1;
pm_runtime_put(dev);
return spi_master_resume(master);
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 9f83e1b17aa16..9850a0efe85a3 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
+#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -138,6 +139,7 @@
#define SPI_AUTOSUSPEND_TIMEOUT 3000
enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
+static const struct zynqmp_eemi_ops *eemi_ops;
/**
* struct zynqmp_qspi - Defines qspi driver instance
@@ -1021,6 +1023,10 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
struct resource *res;
struct device *dev = &pdev->dev;
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
master = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
if (!master)
return -ENOMEM;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 5e4654032bfa5..232ed4bb8fcac 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1090,6 +1090,60 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
return 0;
}
+static void _spi_transfer_delay_ns(u32 ns)
+{
+ if (!ns)
+ return;
+ if (ns <= 1000) {
+ ndelay(ns);
+ } else {
+ u32 us = DIV_ROUND_UP(ns, 1000);
+
+ if (us <= 10)
+ udelay(us);
+ else
+ usleep_range(us, us + DIV_ROUND_UP(us, 10));
+ }
+}
+
+static void _spi_transfer_cs_change_delay(struct spi_message *msg,
+ struct spi_transfer *xfer)
+{
+ u32 delay = xfer->cs_change_delay;
+ u32 unit = xfer->cs_change_delay_unit;
+ u32 hz;
+
+ /* return early on "fast" mode - for everything but USECS */
+ if (!delay && unit != SPI_DELAY_UNIT_USECS)
+ return;
+
+ switch (unit) {
+ case SPI_DELAY_UNIT_USECS:
+ /* for compatibility use default of 10us */
+ if (!delay)
+ delay = 10000;
+ else
+ delay *= 1000;
+ break;
+ case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
+ break;
+ case SPI_DELAY_UNIT_SCK:
+ /* if there is no effective speed known, then approximate
+ * by underestimating with half the requested hz
+ */
+ hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
+ delay *= DIV_ROUND_UP(1000000000, hz);
+ break;
+ default:
+ dev_err_once(&msg->spi->dev,
+ "Use of unsupported delay unit %i, using default of 10us\n",
+ xfer->cs_change_delay_unit);
+ delay = 10000;
+ }
+ /* now sleep for the requested amount of time */
+ _spi_transfer_delay_ns(delay);
+}
+
/*
* spi_transfer_one_message - Default implementation of transfer_one_message()
*
@@ -1148,14 +1202,8 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
if (msg->status != -EINPROGRESS)
goto out;
- if (xfer->delay_usecs) {
- u16 us = xfer->delay_usecs;
-
- if (us <= 10)
- udelay(us);
- else
- usleep_range(us, us + DIV_ROUND_UP(us, 10));
- }
+ if (xfer->delay_usecs)
+ _spi_transfer_delay_ns(xfer->delay_usecs * 1000);
if (xfer->cs_change) {
if (list_is_last(&xfer->transfer_list,
@@ -1163,7 +1211,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
keep_cs = true;
} else {
spi_set_cs(msg->spi, false);
- udelay(10);
+ _spi_transfer_cs_change_delay(msg, xfer);
spi_set_cs(msg->spi, true);
}
}
@@ -3083,6 +3131,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
*/
message->frame_length = 0;
list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ xfer->effective_speed_hz = 0;
message->frame_length += xfer->len;
if (!xfer->bits_per_word)
xfer->bits_per_word = spi->bits_per_word;
@@ -3762,4 +3811,3 @@ err0:
* include needing to have boardinfo data structures be much more public.
*/
postcore_initcall(spi_init);
-
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 70966e10be7e2..df4c0a9b34dd1 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -593,7 +593,7 @@ static int spidev_open(struct inode *inode, struct file *filp)
spidev->users++;
filp->private_data = spidev;
- nonseekable_open(inode, filp);
+ stream_open(inode, filp);
mutex_unlock(&device_list_lock);
return 0;
@@ -672,6 +672,7 @@ static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "ge,achc" },
{ .compatible = "semtech,sx1301" },
{ .compatible = "lwn,bk4" },
+ { .compatible = "dh,dhcom-board" },
{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);