Re: Bug in kernel 2.6.31, Slow wb_kupdate writeout

From: Wu Fengguang
Date: Fri Jul 31 2009 - 22:02:58 EST


On Fri, Jul 31, 2009 at 03:55:44AM +0800, Martin Bligh wrote:
> > Note that this is a simple fix that may have suboptimal write performance.
> > Here is an old reasoning:
> >
> >     http://lkml.org/lkml/2009/3/28/235
>
> The other thing I've been experimenting with is to disable the per-page
> check in write_cache_pages, ie:
>
> if (wbc->nonblocking && bdi_write_congested(bdi)) {
>         wb_stats_inc(WB_STATS_WCP_SECTION_CONG);
>         wbc->encountered_congestion = 1;
>         /* done = 1; */
>
> This treats the congestion limits as soft, but encourages us to write
> back in larger, more efficient chunks. If that's not going to scare
> people unduly, I can submit that as well.

This risks hitting the hard limit (nr_requests) and blocking everyone,
including submitters with higher priority (e.g. kswapd).
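
To make the soft/hard distinction concrete, here is an illustrative
sketch (not the real block layer code; the struct and helper names
below are made up):

/*
 * Illustrative sketch only.  The congestion threshold is advisory:
 * pollers like write_cache_pages() may back off voluntarily.  The
 * nr_requests limit is not: it is enforced in the request allocator.
 */
struct q_limits_demo {
	int in_flight;		/* requests currently allocated */
	int nr_congestion_on;	/* soft limit: advisory, pollable */
	int nr_requests;	/* hard limit: enforced on allocation */
};

/* Soft: writeback may poll this and voluntarily stop submitting. */
static int demo_congested(struct q_limits_demo *q)
{
	return q->in_flight >= q->nr_congestion_on;
}

/*
 * Hard: once in_flight reaches nr_requests, every new submitter
 * sleeps waiting for a free request -- kswapd included.
 */
static int demo_may_allocate(struct q_limits_demo *q)
{
	return q->in_flight < q->nr_requests;
}

With the per-page congestion check disabled, nothing backs off
voluntarily, so heavy writers run straight into the hard limit.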

On the other hand, the simple fix from the previous mails won't
necessarily behave all that suboptimally; the performance loss is only
a potential one. There is a window of (1/16)*(nr_requests)*request_size
(= 128*256KB/16 = 2MB) between the congestion-on and congestion-off
states. So at best we can inject a big 2MB chunk into the async write
queue as soon as it becomes uncongested.

I have a writeback debug patch (below) that can help show how that
works out in your real-world workloads (by monitoring nr_to_write).
You can also try doubling the 1/16 ratio in
blk_queue_congestion_threshold(), to see how an increased
congestion-on/off window may help.
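
For reference, the thresholds come from blk_queue_congestion_threshold()
in block/blk-core.c, which in this kernel reads roughly as the sketch
below (please check your tree). Congestion switches on near 7/8 of
nr_requests and off near 13/16, so the on/off window is about
nr_requests/16; doubling the q->nr_requests / 16 term is the change
suggested above:

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	/* congestion on: roughly 7/8 of nr_requests allocated */
	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	/* congestion off: roughly 13/16, i.e. nr_requests/16 lower */
	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}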

Thanks,
Fengguang
---
 mm/page-writeback.c |   38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

--- sound-2.6.orig/mm/page-writeback.c
+++ sound-2.6/mm/page-writeback.c
@@ -116,6 +116,33 @@ EXPORT_SYMBOL(laptop_mode);
 
 /* End of sysctl-exported parameters */
 
+#define writeback_debug_report(n, wbc) do {				     \
+	__writeback_debug_report(n, wbc, __FILE__, __LINE__, __FUNCTION__); \
+} while (0)
+
+void print_writeback_control(struct writeback_control *wbc)
+{
+	printk(KERN_DEBUG
+	       "global dirty %lu writeback %lu nfs %lu "
+	       "flags %c%c towrite %ld skipped %ld\n",
+	       global_page_state(NR_FILE_DIRTY),
+	       global_page_state(NR_WRITEBACK),
+	       global_page_state(NR_UNSTABLE_NFS),
+	       wbc->encountered_congestion ? 'C':'_',
+	       wbc->more_io ? 'M':'_',
+	       wbc->nr_to_write,
+	       wbc->pages_skipped);
+}
+
+void __writeback_debug_report(long n, struct writeback_control *wbc,
+			      const char *file, int line, const char *func)
+{
+	printk(KERN_DEBUG "%s %d %s: %s(%d) %ld\n",
+	       file, line, func,
+	       current->comm, current->pid,
+	       n);
+	print_writeback_control(wbc);
+}
 
 static void background_writeout(unsigned long _min_pages);
 
@@ -550,6 +577,7 @@ static void balance_dirty_pages(struct a
 			pages_written += write_chunk - wbc.nr_to_write;
 			get_dirty_limits(&background_thresh, &dirty_thresh,
 					 &bdi_thresh, bdi);
+			writeback_debug_report(pages_written, &wbc);
 		}
 
 		/*
@@ -576,6 +604,7 @@ static void balance_dirty_pages(struct a
 			break;		/* We've done our duty */
 
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
+		writeback_debug_report(-pages_written, &wbc);
 	}
 
 	if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
@@ -670,6 +699,11 @@ void throttle_vm_writeout(gfp_t gfp_mask
 		    global_page_state(NR_WRITEBACK) <= dirty_thresh)
 			break;
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
+		printk(KERN_DEBUG "throttle_vm_writeout: "
+		       "congestion_wait on %lu+%lu > %lu\n",
+		       global_page_state(NR_UNSTABLE_NFS),
+		       global_page_state(NR_WRITEBACK),
+		       dirty_thresh);
 
 		/*
 		 * The caller might hold locks which can prevent IO completion
@@ -719,7 +753,9 @@ static void background_writeout(unsigned
 			else
 				break;
 		}
+		writeback_debug_report(min_pages, &wbc);
 	}
+	writeback_debug_report(min_pages, &wbc);
 }
 
 /*
@@ -792,7 +828,9 @@ static void wb_kupdate(unsigned long arg
 				break;	/* All the old data is written */
 		}
 		nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
+		writeback_debug_report(nr_to_write, &wbc);
 	}
+	writeback_debug_report(nr_to_write, &wbc);
 	if (time_before(next_jif, jiffies + HZ))
 		next_jif = jiffies + HZ;
 	if (dirty_writeback_interval)