[PATCH v2 1/2] blk-iolatency: Fix zero mean in previous stats

From: Pavel Begunkov (Silence)
Date: Fri Sep 06 2019 - 10:42:22 EST


From: Pavel Begunkov <asml.silence@xxxxxxxxx>

struct blk_rq_stat has two implicit states in which it can be:
(1) per-cpu intermediate stats (i.e. staging)
(2) final stats / aggregation of (1) (see blk_rq_stat_collect)

The states use different sets of fields. E.g. (1) uses @batch but not
@mean, and vice versa for (2). Functions operating on struct blk_rq_stat
have implicit assumptions about its state.

blk_rq_stat_sum() requires its @src argument to be in state (1) and @dst in state (2).
iolatency_check_latencies() violates that, and as a result,
iolat->cur_stat.rqs.mean is always 0 for non-ssd devices.

Use 2 distinct functions instead:
blk_rq_stat_collect() to collect intermediate stats (1)
blk_rq_stat_merge() to merge accumulated stats (2)

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
block/blk-iolatency.c | 21 ++++++++++++++++-----
block/blk-stat.c | 20 ++++++++++++++++++--
block/blk-stat.h | 3 ++-
3 files changed, 36 insertions(+), 8 deletions(-)

diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index c128d50cb410..895c6e955f97 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -199,7 +199,7 @@ static inline void latency_stat_init(struct iolatency_grp *iolat,
blk_rq_stat_init(&stat->rqs);
}

-static inline void latency_stat_sum(struct iolatency_grp *iolat,
+static inline void latency_stat_merge(struct iolatency_grp *iolat,
struct latency_stat *sum,
struct latency_stat *stat)
{
@@ -207,7 +207,18 @@ static inline void latency_stat_sum(struct iolatency_grp *iolat,
sum->ps.total += stat->ps.total;
sum->ps.missed += stat->ps.missed;
} else
- blk_rq_stat_sum(&sum->rqs, &stat->rqs);
+ blk_rq_stat_merge(&sum->rqs, &stat->rqs);
+}
+
+static inline void latency_stat_collect(struct iolatency_grp *iolat,
+ struct latency_stat *sum,
+ struct latency_stat *stat)
+{
+ if (iolat->ssd) {
+ sum->ps.total += stat->ps.total;
+ sum->ps.missed += stat->ps.missed;
+ } else
+ blk_rq_stat_collect(&sum->rqs, &stat->rqs);
}

static inline void latency_stat_record_time(struct iolatency_grp *iolat,
@@ -531,7 +542,7 @@ static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
for_each_online_cpu(cpu) {
struct latency_stat *s;
s = per_cpu_ptr(iolat->stats, cpu);
- latency_stat_sum(iolat, &stat, s);
+ latency_stat_collect(iolat, &stat, s);
latency_stat_init(iolat, s);
}
preempt_enable();
@@ -552,7 +563,7 @@ static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
/* Somebody beat us to the punch, just bail. */
spin_lock_irqsave(&lat_info->lock, flags);

- latency_stat_sum(iolat, &iolat->cur_stat, &stat);
+ latency_stat_merge(iolat, &iolat->cur_stat, &stat);
lat_info->nr_samples -= iolat->nr_samples;
lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);
@@ -896,7 +907,7 @@ static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
for_each_online_cpu(cpu) {
struct latency_stat *s;
s = per_cpu_ptr(iolat->stats, cpu);
- latency_stat_sum(iolat, &stat, s);
+ latency_stat_collect(iolat, &stat, s);
}
preempt_enable();

diff --git a/block/blk-stat.c b/block/blk-stat.c
index 940f15d600f8..78389182b5d0 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -26,7 +26,7 @@ void blk_rq_stat_init(struct blk_rq_stat *stat)
}

/* src is a per-cpu stat, mean isn't initialized */
-void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
+void blk_rq_stat_collect(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
if (!src->nr_samples)
return;
@@ -40,6 +40,21 @@ void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
dst->nr_samples += src->nr_samples;
}

+void blk_rq_stat_merge(struct blk_rq_stat *dst, struct blk_rq_stat *src)
+{
+ if (!src->nr_samples)
+ return;
+
+ dst->min = min(dst->min, src->min);
+ dst->max = max(dst->max, src->max);
+
+ dst->mean = div_u64(src->mean * src->nr_samples +
+ dst->mean * dst->nr_samples,
+ dst->nr_samples + src->nr_samples);
+
+ dst->nr_samples += src->nr_samples;
+}
+
void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
stat->min = min(stat->min, value);
@@ -90,7 +105,8 @@ static void blk_stat_timer_fn(struct timer_list *t)

cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
for (bucket = 0; bucket < cb->buckets; bucket++) {
- blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
+ blk_rq_stat_collect(&cb->stat[bucket],
+ &cpu_stat[bucket]);
blk_rq_stat_init(&cpu_stat[bucket]);
}
}
diff --git a/block/blk-stat.h b/block/blk-stat.h
index 17b47a86eefb..5597ecc34ef5 100644
--- a/block/blk-stat.h
+++ b/block/blk-stat.h
@@ -165,7 +165,8 @@ static inline void blk_stat_activate_msecs(struct blk_stat_callback *cb,
}

void blk_rq_stat_add(struct blk_rq_stat *, u64);
-void blk_rq_stat_sum(struct blk_rq_stat *, struct blk_rq_stat *);
+void blk_rq_stat_collect(struct blk_rq_stat *dst, struct blk_rq_stat *src);
+void blk_rq_stat_merge(struct blk_rq_stat *dst, struct blk_rq_stat *src);
void blk_rq_stat_init(struct blk_rq_stat *);

#endif
--
2.22.0