author     Omar Sandoval <osandov@fb.com>  2016-09-17 01:28:25 -0700
committer  Jens Axboe <axboe@fb.com>       2016-09-17 08:39:14 -0600
commit     98d95416dbfaf4910caadfb4ddc75e4aacbdff8c (patch)
tree       ed6a08e6d4358da522265ac1e6a595fe8db35572 /block/blk-mq-tag.c
parent     f4a644db86669d938c71f19560aebf69d4720d63 (diff)
download   linux-0-day-98d95416dbfaf4910caadfb4ddc75e4aacbdff8c.tar.gz
           linux-0-day-98d95416dbfaf4910caadfb4ddc75e4aacbdff8c.tar.xz
sbitmap: randomize initial alloc_hint values
In order to get good cache behavior from a sbitmap, we want each CPU to
stick to its own cacheline(s) as much as possible. This might happen
naturally as the bitmap gets filled up and the alloc_hint values spread
out, but we really want this behavior from the start. blk-mq apparently
intended to do this, but the code to do it was never wired up. Get rid
of the dead code and make it part of the sbitmap library.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
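The randomization itself is the same trick the dead helper below used: pick a
starting tag uniformly below the usable depth, once per CPU, at initialization
time. A minimal sketch of what the sbitmap-side counterpart could look like,
assuming the sbitmap_queue API's per-CPU alloc_hint field; the helper name is
hypothetical, not the literal code added to lib/sbitmap.c:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/random.h>
#include <linux/sbitmap.h>

/*
 * Sketch (assumed placement in the sbitmap_queue init path): seed each
 * CPU's allocation hint with a random tag in [0, depth), so different
 * CPUs start out probing different cachelines of the bitmap instead of
 * all colliding on hint 0.
 */
static void sbq_randomize_alloc_hints(struct sbitmap_queue *sbq,
				      unsigned int depth)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(sbq->alloc_hint, cpu) = prandom_u32() % depth;
}

Doing this at init rather than on each allocation keeps the fast path
untouched; the hints then spread out further on their own as tags are
allocated and freed.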
Diffstat (limited to 'block/blk-mq-tag.c')
-rw-r--r--  block/blk-mq-tag.c  8
1 file changed, 0 insertions(+), 8 deletions(-)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index e1c2bedb0bf9c..cef618f6fc921 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -7,7 +7,6 @@
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/random.h>
 
 #include <linux/blk-mq.h>
 #include "blk.h"
@@ -419,13 +418,6 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
 	kfree(tags);
 }
 
-void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
-{
-	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
-
-	*tag = prandom_u32() % depth;
-}
-
 int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
 {
 	tdepth -= tags->nr_reserved_tags;