author     Daniel Vetter <daniel.vetter@ffwll.ch>    2013-12-16 11:21:06 +0100
committer  Daniel Vetter <daniel.vetter@ffwll.ch>    2014-04-23 10:32:43 +0200
commit     2177a2182f3f375f64d9938dd884895c3872c380 (patch)
tree       d34e7ccc73ba72cac17ad1449400e2a4ea8cc23e /drivers/gpu/drm/drm_bufs.c
parent     fc8fd40eb29a936cc689d0008863d39a67741c67 (diff)
drm: rename dev->count_lock to dev->buf_lock
Since really that's all it protects - legacy horror stories in
drm_bufs.c. Since I don't want to waste any more time on this I didn't
bother to actually look at what it protects in there, but it's at least
contained now.

v2: Move the spurious hunk to the right patch (Thierry).

Cc: Thierry Reding <thierry.reding@gmail.com>
Reviewed-by: Thierry Reding <treding@nvidia.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
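For readers skimming the hunks below, here is a condensed sketch of the two-sided handshake that dev->buf_lock serializes. The lock and field names (dev->buf_lock, dev->buf_use, dev->buf_alloc) come straight from the patch; the two helper functions and their names are hypothetical, written only to illustrate the pattern, and this is kernel-context code (drmP.h era), not a standalone program.

#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <drm/drmP.h>

/* Hypothetical helper, condensed from drm_addbufs_agp/_pci/_sg below:
 * refuse to start an allocation once buffers have been handed to
 * userspace, and record the allocation in flight so the mapping side
 * backs off. */
static int buf_alloc_begin(struct drm_device *dev)
{
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {		/* buffers already in use */
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);	/* allocation in progress */
	spin_unlock(&dev->buf_lock);
	return 0;
}

/* Hypothetical helper, condensed from drm_infobufs/drm_mapbufs below:
 * refuse to expose buffers while an allocation is in flight, and freeze
 * further allocations by bumping buf_use. */
static int buf_use_begin(struct drm_device *dev)
{
	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	dev->buf_use++;			/* can't allocate more after this */
	spin_unlock(&dev->buf_lock);
	return 0;
}

The rename makes the design explicit: the allocation paths (drm_addbufs_agp/_pci/_sg) may only proceed while buf_use is zero, and the paths that expose buffers to userspace (drm_infobufs, drm_mapbufs) may only proceed while buf_alloc is zero, so the spinlock now guards exactly this buffer-setup handshake and nothing else.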
Diffstat (limited to 'drivers/gpu/drm/drm_bufs.c')
-rw-r--r--	drivers/gpu/drm/drm_bufs.c	32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index edec31fe3fed..ef7f0199a0f1 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -656,13 +656,13 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
 		DRM_DEBUG("zone invalid\n");
 		return -EINVAL;
 	}
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (dev->buf_use) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	atomic_inc(&dev->buf_alloc);
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
@@ -805,13 +805,13 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 	total = PAGE_SIZE << page_order;
 
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (dev->buf_use) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	atomic_inc(&dev->buf_alloc);
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
@@ -1015,13 +1015,13 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 		return -EINVAL;
 
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (dev->buf_use) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	atomic_inc(&dev->buf_alloc);
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
@@ -1175,7 +1175,7 @@ int drm_addbufs(struct drm_device *dev, void *data,
  * \param arg pointer to a drm_buf_info structure.
  * \return zero on success or a negative number on failure.
  *
- * Increments drm_device::buf_use while holding the drm_device::count_lock
+ * Increments drm_device::buf_use while holding the drm_device::buf_lock
  * lock, preventing of allocating more buffers after this call. Information
  * about each requested buffer is then copied into user space.
  */
@@ -1196,13 +1196,13 @@ int drm_infobufs(struct drm_device *dev, void *data,
 	if (!dma)
 		return -EINVAL;
 
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (atomic_read(&dev->buf_alloc)) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	++dev->buf_use;		/* Can't allocate more after this call */
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
 		if (dma->bufs[i].buf_count)
@@ -1381,13 +1381,13 @@ int drm_mapbufs(struct drm_device *dev, void *data,
 	if (!dma)
 		return -EINVAL;
 
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (atomic_read(&dev->buf_alloc)) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	dev->buf_use++;		/* Can't allocate more after this call */
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	if (request->count >= dma->buf_count) {
 		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))