author     Chris Mason <clm@fb.com>    2015-02-04 13:47:52 +1100
committer  Stephen Rothwell <sfr@canb.auug.org.au>    2015-02-09 14:26:41 +1100
commit     f35380d0cc3acee4ca5b75114945d07c967ba184 (patch)
tree       eea6764e487a6be93388c7546a6852cca862ab73
parent     adb3eb33a84ee889d87701a1a51b1438917b92cf (diff)
eventfd: don't take the spinlock in eventfd_poll
The spinlock in eventfd_poll is trying to protect the count of events so it can decide if it should return POLLIN, POLLERR, or POLLOUT. But, because of the way we take the lock after calling poll_wait and drop it again before returning, we have the same pile of races with the lock as we do with a single read of ctx->count.

This replaces the lock with a read barrier and a single read. eventfd_write does a single bump of ctx->count, so this should not add new races with adding events. eventfd_read is similar: it does a single decrement with the lock held, so at worst we make the race with concurrent readers slightly larger.

This spinlock is the top CPU user in kernel code during one of our workloads. Removing it gives us a ~2% boost.

Signed-off-by: Chris Mason <clm@fb.com>
Cc: Davide Libenzi <davidel@xmailserver.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
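For context, the counter-to-events mapping this function computes (POLLIN when the count is non-zero, POLLERR at ULLONG_MAX, POLLOUT while there is still room) is observable from userspace. The sketch below is not part of the patch; it assumes Linux with eventfd(2) and poll(2), omits error handling, and simply prints the events an eventfd reports before and after its counter is bumped:

#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	int efd = eventfd(0, 0);	/* counter starts at 0 */
	struct pollfd pfd = { .fd = efd, .events = POLLIN | POLLOUT };
	uint64_t one = 1;

	poll(&pfd, 1, 0);
	printf("count == 0: revents = 0x%x\n", pfd.revents);	/* POLLOUT only */

	write(efd, &one, sizeof(one));	/* like eventfd_write: a single bump of the counter */
	poll(&pfd, 1, 0);
	printf("count == 1: revents = 0x%x\n", pfd.revents);	/* POLLIN | POLLOUT */

	close(efd);
	return 0;
}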
-rw-r--r--    fs/eventfd.c    11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 4b0a226024fa..439e6f0177f3 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -119,17 +119,18 @@ static unsigned int eventfd_poll(struct file *file, poll_table *wait)
 	struct eventfd_ctx *ctx = file->private_data;
 	unsigned int events = 0;
 	unsigned long flags;
+	u64 count;
 
 	poll_wait(file, &ctx->wqh, wait);
+	smp_rmb();
+	count = ctx->count;
 
-	spin_lock_irqsave(&ctx->wqh.lock, flags);
-	if (ctx->count > 0)
+	if (count > 0)
 		events |= POLLIN;
-	if (ctx->count == ULLONG_MAX)
+	if (count == ULLONG_MAX)
 		events |= POLLERR;
-	if (ULLONG_MAX - 1 > ctx->count)
+	if (ULLONG_MAX - 1 > count)
 		events |= POLLOUT;
-	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return events;
 }
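With the patch applied, eventfd_poll() reads roughly as follows (reassembled from the hunk above; nothing else in the file changes):

static unsigned int eventfd_poll(struct file *file, poll_table *wait)
{
	struct eventfd_ctx *ctx = file->private_data;
	unsigned int events = 0;
	unsigned long flags;	/* left over from the locked version; no longer used here */
	u64 count;

	poll_wait(file, &ctx->wqh, wait);

	/*
	 * One barrier plus one snapshot of the counter replaces the
	 * spinlocked read: the lock never made the three checks below
	 * atomic against a concurrent writer anyway (see the commit
	 * message above).
	 */
	smp_rmb();
	count = ctx->count;

	if (count > 0)
		events |= POLLIN;
	if (count == ULLONG_MAX)
		events |= POLLERR;
	if (ULLONG_MAX - 1 > count)
		events |= POLLOUT;

	return events;
}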