path: root/lib/rhashtable.c
author    Thomas Graf <tgraf@suug.ch>    2015-03-16 10:42:26 +0100
committer David S. Miller <davem@davemloft.net>    2015-03-16 17:14:34 -0400
commit 617011e7d5559046e4fc8f87793c8a5d9c3431b0 (patch)
tree   43dc9257e68f0bb7c5a3ce132932ecf632654d22 /lib/rhashtable.c
parent 9f1ab18672bee992b6169bbfa2b5ae86b42e88a8 (diff)
rhashtable: Avoid calculating hash again to unlock
Caching the lock pointer avoids having to hash on the object again
to unlock the bucket locks.

Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--  lib/rhashtable.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index eae26a67bd18..09a7ada89ade 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -384,14 +384,16 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 	struct rhash_head *head;
 	bool no_resize_running;
 	unsigned hash;
+	spinlock_t *old_lock;
 	bool success = true;
 
 	rcu_read_lock();
 
 	old_tbl = rht_dereference_rcu(ht->tbl, ht);
 	hash = head_hashfn(ht, old_tbl, obj);
+	old_lock = bucket_lock(old_tbl, hash);
 
-	spin_lock_bh(bucket_lock(old_tbl, hash));
+	spin_lock_bh(old_lock);
 
 	/* Because we have already taken the bucket lock in old_tbl,
 	 * if we find that future_tbl is not yet visible then that
@@ -428,13 +430,10 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 		schedule_work(&ht->run_work);
 
 exit:
-	if (tbl != old_tbl) {
-		hash = head_hashfn(ht, tbl, obj);
+	if (tbl != old_tbl)
 		spin_unlock(bucket_lock(tbl, hash));
-	}
 
-	hash = head_hashfn(ht, old_tbl, obj);
-	spin_unlock_bh(bucket_lock(old_tbl, hash));
+	spin_unlock_bh(old_lock);
 
 	rcu_read_unlock();
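
The pattern the patch applies is general: when a per-bucket lock is derived by
hashing the object, computing the lock pointer once and carrying it to the
unlock path removes a redundant hash computation. Below is a minimal
user-space sketch of the idea using pthreads; every toy_* name is invented for
illustration and nothing here is a kernel API — only the cache-the-lock-pointer
pattern mirrors the patch.

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical table: one mutex per bucket. */
struct toy_table {
	size_t nbuckets;
	pthread_mutex_t *locks;
};

/* Stand-in for head_hashfn(): pretend this is expensive. */
static size_t toy_hash(const struct toy_table *tbl, const void *obj)
{
	return ((uintptr_t)obj >> 4) % tbl->nbuckets;
}

/* Stand-in for bucket_lock(). */
static pthread_mutex_t *toy_bucket_lock(struct toy_table *tbl, size_t hash)
{
	return &tbl->locks[hash];
}

void toy_insert(struct toy_table *tbl, void *obj)
{
	size_t hash = toy_hash(tbl, obj);
	/* Cache the lock pointer once, as the patch does with
	 * old_lock, so the unlock below needs no second hash. */
	pthread_mutex_t *lock = toy_bucket_lock(tbl, hash);

	pthread_mutex_lock(lock);
	/* ... link obj into the bucket selected by hash ... */
	pthread_mutex_unlock(lock);
}

As the exit path in the diff shows, both head_hashfn() recomputations
disappear: the old table's lock is the cached old_lock, and the new table's
unlock reuses the hash value already in scope.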