author     Sylvain Lesne <lesne@alse-fr.com>    2017-09-18 13:08:01 +0200
committer  Vinod Koul <vinod.koul@intel.com>    2017-09-28 13:11:46 +0530
commit     edf10919e5fc8dfd10e57ed72f651204559bc6ba (patch)
tree       29c938ba46a6352433f884d05f36192eaa696a26
parent     d9ec46416de5ef83220f1c7010ee0f5d1be1d753 (diff)
dmaengine: altera: fix spinlock usage
Since this lock is acquired in both process and IRQ context, failing to
disable IRQs when trying to acquire the lock in process context can lead
to deadlocks.

Signed-off-by: Sylvain Lesne <lesne@alse-fr.com>
Reviewed-by: Stefan Roese <sr@denx.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--  drivers/dma/altera-msgdma.c | 35
1 file changed, 21 insertions(+), 14 deletions(-)
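For context, the deadlock described in the commit message arises because spin_lock_bh() only disables softirqs: a hard IRQ can still preempt the process-context lock holder on the same CPU, and if the interrupt handler then spins on the same lock, neither side can make progress. The sketch below illustrates the pattern the patch switches to; it uses a hypothetical "foo" device, not the msgdma driver itself, purely to show the locking rule under that assumption.

/*
 * Minimal sketch (hypothetical "foo" device): one lock shared between a
 * hard-IRQ handler and process context, mirroring the situation this
 * patch fixes.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct foo_device {
	spinlock_t lock;
	unsigned int pending;
};

/*
 * Hard-IRQ context: plain spin_lock() is enough here because interrupts
 * are already disabled on this CPU while the handler runs.
 */
static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct foo_device *foo = data;

	spin_lock(&foo->lock);
	foo->pending++;
	spin_unlock(&foo->lock);

	return IRQ_HANDLED;
}

/*
 * Process context: spin_lock_bh() would only mask softirqs, so the hard
 * IRQ handler above could still fire on this CPU while the lock is held
 * and spin on it forever.  spin_lock_irqsave() closes that window by
 * disabling local interrupts for the duration of the critical section.
 */
static void foo_submit(struct foo_device *foo)
{
	unsigned long flags;

	spin_lock_irqsave(&foo->lock, flags);
	foo->pending++;
	spin_unlock_irqrestore(&foo->lock, flags);
}

This is the same conversion applied throughout the diff below: each spin_lock_bh()/spin_unlock_bh() pair in process context, and the plain spin_lock() in the tasklet, becomes spin_lock_irqsave()/spin_unlock_irqrestore() with a local flags variable.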
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 35cbf2365f68..339186f25a2a 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -212,11 +212,12 @@ struct msgdma_device {
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{
struct msgdma_sw_desc *desc;
+ unsigned long flags;
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
list_del(&desc->node);
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
INIT_LIST_HEAD(&desc->tx_list);
@@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
struct msgdma_device *mdev = to_mdev(tx->chan);
struct msgdma_sw_desc *new;
dma_cookie_t cookie;
+ unsigned long flags;
new = tx_to_desc(tx);
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
cookie = dma_cookie_assign(tx);
list_add_tail(&new->node, &mdev->pending_list);
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
return cookie;
}
@@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
struct msgdma_extended_desc *desc;
size_t copy;
u32 desc_cnt;
+ unsigned long irqflags;
desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, irqflags);
if (desc_cnt > mdev->desc_free_cnt) {
spin_unlock_bh(&mdev->lock);
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
return NULL;
}
mdev->desc_free_cnt -= desc_cnt;
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, irqflags);
do {
/* Allocate and populate the descriptor */
@@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
u32 desc_cnt = 0, i;
struct scatterlist *sg;
u32 stride;
+ unsigned long irqflags;
for_each_sg(sgl, sg, sg_len, i)
desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, irqflags);
if (desc_cnt > mdev->desc_free_cnt) {
spin_unlock_bh(&mdev->lock);
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
return NULL;
}
mdev->desc_free_cnt -= desc_cnt;
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, irqflags);
avail = sg_dma_len(sgl);
@@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev)
static void msgdma_issue_pending(struct dma_chan *chan)
{
struct msgdma_device *mdev = to_mdev(chan);
+ unsigned long flags;
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
msgdma_start_transfer(mdev);
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
}
/**
@@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev)
static void msgdma_free_chan_resources(struct dma_chan *dchan)
{
struct msgdma_device *mdev = to_mdev(dchan);
+ unsigned long flags;
- spin_lock_bh(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
msgdma_free_descriptors(mdev);
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
kfree(mdev->sw_desq);
}
@@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data)
u32 count;
u32 __maybe_unused size;
u32 __maybe_unused status;
+ unsigned long flags;
- spin_lock(&mdev->lock);
+ spin_lock_irqsave(&mdev->lock, flags);
/* Read number of responses that are available */
count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
@@ -704,7 +711,7 @@ static void msgdma_tasklet(unsigned long data)
msgdma_chan_desc_cleanup(mdev);
}
- spin_unlock(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, flags);
}
/**