author     Shaohua Li <shli@fb.com>    2017-10-06 17:55:59 -0700
committer  Jens Axboe <axboe@kernel.dk>    2017-10-10 13:48:14 -0600
commit     eca8b53a6769e60d6d8240d71202d73b0af81901
tree       6a042bf2b35595851ca83b9e77eec8ca05f1e38a /block/blk-stat.c
parent     53cfdc10a95d03fbc82970d682a32696d19ef886
blk-stat: delete useless code
Fix two issues:

- The per-cpu stat flush is unnecessary: nobody uses the per-cpu stats
  except to sum them into the global stat, so the mean can be computed
  at that point. The flush just wastes CPU time.
- Some fields are signed (int/s64) for no reason; the values they hold
  (latencies, sample counts) are never negative, so make them unsigned.

Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Shaohua Li <shli@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-stat.c')
-rw-r--r--  block/blk-stat.c | 45
1 file changed, 7 insertions(+), 38 deletions(-)
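
For context, here is a minimal user-space sketch (not kernel code) of the
accumulation scheme this patch switches to: each per-cpu bucket keeps only a
raw sum ("batch") and a sample count, and the mean is computed once, when the
buckets are folded into the global stat. The field names mirror
struct blk_rq_stat; everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

struct rq_stat {
	uint64_t min, max;
	uint64_t batch;		/* raw sum of samples, no running mean */
	uint64_t nr_samples;
	uint64_t mean;		/* only meaningful after folding */
};

static void stat_add(struct rq_stat *s, uint64_t value)
{
	if (value < s->min)
		s->min = value;
	if (value > s->max)
		s->max = value;
	s->batch += value;	/* no periodic flush needed */
	s->nr_samples++;
}

static void stat_fold(struct rq_stat *dst, const struct rq_stat *src)
{
	if (!src->nr_samples)
		return;
	if (src->min < dst->min)
		dst->min = src->min;
	if (src->max > dst->max)
		dst->max = src->max;
	/* dst->mean * dst->nr_samples is 0 on the first fold, so the
	 * empty-destination case needs no special handling. */
	dst->mean = (src->batch + dst->mean * dst->nr_samples) /
		    (dst->nr_samples + src->nr_samples);
	dst->nr_samples += src->nr_samples;
}

int main(void)
{
	struct rq_stat cpu0 = { .min = UINT64_MAX };
	struct rq_stat cpu1 = { .min = UINT64_MAX };
	struct rq_stat global = { .min = UINT64_MAX };

	stat_add(&cpu0, 100);
	stat_add(&cpu0, 200);		/* cpu0: sum 300, 2 samples */
	stat_add(&cpu1, 400);		/* cpu1: sum 400, 1 sample  */

	stat_fold(&global, &cpu0);	/* mean = 300 / 2 = 150 */
	stat_fold(&global, &cpu1);	/* mean = (400 + 150*2) / 3 = 233 */

	printf("mean=%llu nr_samples=%llu\n",
	       (unsigned long long)global.mean,
	       (unsigned long long)global.nr_samples);
	return 0;
}
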
diff --git a/block/blk-stat.c b/block/blk-stat.c
index c52356d90fe38..3a2f3c96f3672 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -11,8 +11,6 @@
#include "blk-mq.h"
#include "blk.h"
-#define BLK_RQ_STAT_BATCH 64
-
struct blk_queue_stats {
struct list_head callbacks;
spinlock_t lock;
@@ -23,45 +21,21 @@ static void blk_stat_init(struct blk_rq_stat *stat)
{
stat->min = -1ULL;
stat->max = stat->nr_samples = stat->mean = 0;
- stat->batch = stat->nr_batch = 0;
-}
-
-static void blk_stat_flush_batch(struct blk_rq_stat *stat)
-{
- const s32 nr_batch = READ_ONCE(stat->nr_batch);
- const s32 nr_samples = READ_ONCE(stat->nr_samples);
-
- if (!nr_batch)
- return;
- if (!nr_samples)
- stat->mean = div64_s64(stat->batch, nr_batch);
- else {
- stat->mean = div64_s64((stat->mean * nr_samples) +
- stat->batch,
- nr_batch + nr_samples);
- }
-
- stat->nr_samples += nr_batch;
- stat->nr_batch = stat->batch = 0;
+ stat->batch = 0;
}
+/* src is a per-cpu stat, mean isn't initialized */
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
- blk_stat_flush_batch(src);
-
if (!src->nr_samples)
return;
dst->min = min(dst->min, src->min);
dst->max = max(dst->max, src->max);
- if (!dst->nr_samples)
- dst->mean = src->mean;
- else {
- dst->mean = div64_s64((src->mean * src->nr_samples) +
- (dst->mean * dst->nr_samples),
- dst->nr_samples + src->nr_samples);
- }
+ dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
+ dst->nr_samples + src->nr_samples);
+
dst->nr_samples += src->nr_samples;
}
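
The old code special-cased an empty destination (!dst->nr_samples) before
combining means. The unified formula above makes that branch redundant: the
dst->mean * dst->nr_samples term vanishes on the first fold, leaving the
plain per-cpu mean. A quick user-space check of both cases (the helper name
is illustrative, not from the patch):

#include <assert.h>
#include <stdint.h>

/* Same arithmetic as the new blk_stat_sum(), extracted for testing. */
static uint64_t fold_mean(uint64_t dst_mean, uint64_t dst_n,
			  uint64_t src_batch, uint64_t src_n)
{
	return (src_batch + dst_mean * dst_n) / (dst_n + src_n);
}

int main(void)
{
	/* empty dst: reduces to src_batch / src_n, the plain per-cpu mean */
	assert(fold_mean(0, 0, 300, 2) == 150);
	/* non-empty dst: weighted combination of both sides */
	assert(fold_mean(150, 2, 400, 1) == 233);
	return 0;
}
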
@@ -69,13 +43,8 @@ static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{
stat->min = min(stat->min, value);
stat->max = max(stat->max, value);
-
- if (stat->batch + value < stat->batch ||
- stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
- blk_stat_flush_batch(stat);
-
stat->batch += value;
- stat->nr_batch++;
+ stat->nr_samples++;
}
void blk_stat_add(struct request *rq)
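
The deleted batching logic flushed the running sum into the mean every
BLK_RQ_STAT_BATCH (64) samples, or earlier if the 64-bit sum was about to
wrap. Dropping the flush means the sum can in principle grow unbounded
within a window, but the headroom of a u64 makes that a non-issue in
practice; a rough back-of-the-envelope (my numbers, not from the commit):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Samples are nanosecond latencies; assume a pathological 1s. */
	uint64_t one_second_ns = 1000000000ULL;

	/* ~1.8e19 / 1e9 = ~1.8e10 one-second samples before a u64 sum
	 * wraps -- far beyond what a stat window can accumulate. */
	printf("samples before overflow: %llu\n",
	       (unsigned long long)(UINT64_MAX / one_second_ns));
	return 0;
}
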
@@ -84,7 +53,7 @@ void blk_stat_add(struct request *rq)
struct blk_stat_callback *cb;
struct blk_rq_stat *stat;
int bucket;
- s64 now, value;
+ u64 now, value;
now = __blk_stat_time(ktime_to_ns(ktime_get()));
if (now < blk_stat_time(&rq->issue_stat))