[lkp] [f2fs] 7b51bf49f4: -31.2% fsmark.files_per_sec

From: kernel test robot
Date: Mon Feb 22 2016 - 19:59:51 EST


FYI, we noticed the following changes (a -31.2% fsmark.files_per_sec regression) on

https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs dev-test
commit 7b51bf49f4825da09206c6d89e4aad5b4faa0a14 ("f2fs: set flush_merge by default")
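
The commit subject suggests that flush_merge is now enabled unconditionally at
mount time rather than only via the flush_merge mount option. As a hedged
sketch only (not the verbatim diff; set_opt() and default_options() are the
usual f2fs idioms in fs/f2fs/super.c), the change plausibly amounts to:

/* Sketch only: enable FLUSH_MERGE among the mount-time defaults. */
static void default_options(struct f2fs_sb_info *sbi)
{
	/* ... existing default options ... */
	set_opt(sbi, FLUSH_MERGE);	/* previously opt-in via -o flush_merge */
}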


=========================================================================================
compiler/cpufreq_governor/disk/filesize/fs/iterations/kconfig/nr_directories/nr_files_per_directory/nr_threads/rootfs/sync_method/tbox_group/test_size/testcase:
gcc-4.9/performance/1HDD/8K/f2fs/1x/x86_64-rhel/16d/256fpd/32t/debian-x86_64-2015-02-07.cgz/fsyncBeforeClose/lkp-ne04/400M/fsmark
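
Here sync_method=fsyncBeforeClose means every one of the 8K files is fsync()ed
before it is closed, so per-file flush latency directly bounds
fsmark.files_per_sec. A minimal userspace sketch of that per-file pattern
(paths, counts and error handling are illustrative, not fsmark's actual code):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	static char buf[8192];			/* filesize: 8K */
	char name[64];
	int i, fd;

	for (i = 0; i < 256; i++) {		/* nr_files_per_directory: 256fpd */
		snprintf(name, sizeof(name), "file.%d", i);
		fd = open(name, O_CREAT | O_WRONLY | O_TRUNC, 0644);
		if (fd < 0) { perror("open"); exit(1); }
		if (write(fd, buf, sizeof(buf)) != sizeof(buf)) { perror("write"); exit(1); }
		if (fsync(fd) < 0) { perror("fsync"); exit(1); }	/* sync before close */
		close(fd);
	}
	return 0;
}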

commit:
312564ace97b1a18d02cd49c35948c82da441f14
7b51bf49f4825da09206c6d89e4aad5b4faa0a14

312564ace97b1a18 7b51bf49f4825da09206c6d89e
---------------- --------------------------
         %stddev     %change         %stddev
             \          |                \
4229542 ± 7% -49.8% 2123569 ± 4% fsmark.app_overhead
480.02 ± 0% -31.2% 330.03 ± 0% fsmark.files_per_sec
107.44 ± 0% +44.7% 155.46 ± 0% fsmark.time.elapsed_time
107.44 ± 0% +44.7% 155.46 ± 0% fsmark.time.elapsed_time.max
1317328 ± 0% -2.9% 1279518 ± 0% fsmark.time.file_system_outputs
16.50 ± 3% -30.3% 11.50 ± 4% fsmark.time.percent_of_cpu_this_job_got
479210 ± 0% -5.3% 453623 ± 1% fsmark.time.voluntary_context_switches
135.22 ± 3% +35.3% 183.00 ± 2% uptime.boot
1384 ± 5% +23.4% 1708 ± 3% uptime.idle
20471 ± 1% -42.0% 11875 ± 0% softirqs.BLOCK
19962 ± 1% +28.8% 25718 ± 11% softirqs.RCU
23562 ± 10% +19.2% 28089 ± 8% softirqs.SCHED
12.75 ± 1% -15.8% 10.74 ± 6% turbostat.CPU%c1
30.81 ± 1% -12.0% 27.13 ± 4% turbostat.CPU%c3
46.03 ± 1% +12.8% 51.95 ± 1% turbostat.CPU%c6
5992 ± 0% -32.3% 4058 ± 0% vmstat.io.bo
12690 ± 0% -21.8% 9921 ± 4% vmstat.system.cs
1082 ± 1% -29.0% 768.50 ± 16% vmstat.system.in
107.44 ± 0% +44.7% 155.46 ± 0% time.elapsed_time
107.44 ± 0% +44.7% 155.46 ± 0% time.elapsed_time.max
16.50 ± 3% -30.3% 11.50 ± 4% time.percent_of_cpu_this_job_got
0.79 ± 6% +20.5% 0.95 ± 2% time.user_time
197837 ± 3% +21.5% 240374 ± 1% numa-numastat.node0.local_node
197837 ± 3% +21.5% 240376 ± 1% numa-numastat.node0.numa_hit
0.50 ±173% +450.0% 2.75 ± 47% numa-numastat.node0.other_node
181435 ± 4% +23.8% 224592 ± 2% numa-numastat.node1.local_node
181437 ± 4% +23.8% 224592 ± 2% numa-numastat.node1.numa_hit
374626 ± 0% +22.8% 460072 ± 0% proc-vmstat.numa_hit
374623 ± 0% +22.8% 460069 ± 0% proc-vmstat.numa_local
33085 ± 0% -29.8% 23229 ± 3% proc-vmstat.pgactivate
77779 ± 3% +20.9% 94024 ± 1% proc-vmstat.pgalloc_dma32
322625 ± 1% +22.4% 394983 ± 0% proc-vmstat.pgalloc_normal
246180 ± 0% +40.7% 346265 ± 0% proc-vmstat.pgfault
215983 ± 0% +40.3% 303074 ± 0% proc-vmstat.pgfree
20880 ± 1% -46.6% 11151 ± 4% cpuidle.C1-NHM.usage
31361970 ± 4% -21.7% 24554439 ± 14% cpuidle.C1E-NHM.time
30519 ± 1% -46.3% 16394 ± 3% cpuidle.C1E-NHM.usage
4.521e+08 ± 1% +27.7% 5.772e+08 ± 6% cpuidle.C3-NHM.time
177038 ± 1% +20.7% 213730 ± 14% cpuidle.C3-NHM.usage
1.045e+09 ± 0% +54.9% 1.618e+09 ± 1% cpuidle.C6-NHM.time
260935 ± 2% +22.0% 318337 ± 6% cpuidle.C6-NHM.usage
1.562e+08 ± 2% +46.7% 2.292e+08 ± 1% cpuidle.POLL.time
0.00 ± -1% +Inf% 46222 ± 0% latency_stats.avg.f2fs_issue_flush.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
26436 ± 0% -100.0% 0.00 ± -1% latency_stats.avg.submit_bio_wait.f2fs_issue_flush.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
0.00 ± -1% +Inf% 51120 ± 0% latency_stats.hits.f2fs_issue_flush.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
51073 ± 0% -100.0% 0.00 ± -1% latency_stats.hits.submit_bio_wait.f2fs_issue_flush.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
0.00 ± -1% +Inf% 131989 ± 1% latency_stats.max.f2fs_issue_flush.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
104864 ± 4% -100.0% 0.00 ± -1% latency_stats.max.submit_bio_wait.f2fs_issue_flush.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
43003 ± 17% -78.7% 9176 ± 49% latency_stats.sum.alloc_nid.[f2fs].f2fs_new_inode.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
74613055 ± 3% -85.8% 10617474 ± 11% latency_stats.sum.call_rwsem_down_read_failed.f2fs_new_inode.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
3206072 ± 9% -65.8% 1097191 ± 7% latency_stats.sum.call_rwsem_down_read_failed.f2fs_new_inode.[f2fs].f2fs_mkdir.[f2fs].vfs_mkdir.SyS_mkdir.entry_SYSCALL_64_fastpath
0.00 ± -1% +Inf% 2.363e+09 ± 0% latency_stats.sum.f2fs_issue_flush.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
4821755 ± 4% +137.7% 11463656 ± 38% latency_stats.sum.f2fs_sync_fs.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
1.35e+09 ± 0% -100.0% 0.00 ± -1% latency_stats.sum.submit_bio_wait.f2fs_issue_flush.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
44021 ± 1% +20.0% 52824 ± 2% numa-vmstat.node0.nr_active_file
117.75 ± 5% +42.0% 167.25 ± 4% numa-vmstat.node0.nr_dirty
113890 ± 1% +10.0% 125333 ± 1% numa-vmstat.node0.nr_file_pages
82.75 ± 16% +1248.0% 1115 ± 91% numa-vmstat.node0.nr_inactive_anon
125.25 ± 17% +828.7% 1163 ± 87% numa-vmstat.node0.nr_shmem
11813 ± 2% +15.0% 13580 ± 1% numa-vmstat.node0.nr_slab_reclaimable
4274 ± 13% -24.3% 3234 ± 14% numa-vmstat.node1.nr_active_anon
28009 ± 1% -22.5% 21700 ± 4% numa-vmstat.node1.nr_active_file
4244 ± 13% -24.4% 3209 ± 14% numa-vmstat.node1.nr_anon_pages
40753 ± 33% -47.2% 21497 ± 61% numa-vmstat.node1.nr_dirtied
87167 ± 1% -14.0% 75003 ± 1% numa-vmstat.node1.nr_file_pages
8376 ± 2% -22.2% 6517 ± 3% numa-vmstat.node1.nr_slab_reclaimable
40696 ± 33% -47.3% 21465 ± 61% numa-vmstat.node1.nr_written
188090 ± 1% +20.9% 227409 ± 1% numa-meminfo.node0.Active
176095 ± 1% +20.0% 211303 ± 2% numa-meminfo.node0.Active(file)
455580 ± 1% +10.0% 501341 ± 1% numa-meminfo.node0.FilePages
333.75 ± 16% +1237.2% 4462 ± 91% numa-meminfo.node0.Inactive(anon)
47258 ± 2% +15.0% 54325 ± 1% numa-meminfo.node0.SReclaimable
503.25 ± 17% +824.8% 4654 ± 87% numa-meminfo.node0.Shmem
129176 ± 3% -22.8% 99752 ± 2% numa-meminfo.node1.Active
17133 ± 13% -24.4% 12949 ± 14% numa-meminfo.node1.Active(anon)
112043 ± 1% -22.5% 86802 ± 4% numa-meminfo.node1.Active(file)
17007 ± 13% -24.4% 12851 ± 14% numa-meminfo.node1.AnonPages
348677 ± 1% -14.0% 300017 ± 1% numa-meminfo.node1.FilePages
236455 ± 1% -9.9% 213055 ± 1% numa-meminfo.node1.Inactive
470973 ± 3% -12.6% 411429 ± 3% numa-meminfo.node1.MemUsed
33510 ± 2% -22.2% 26069 ± 3% numa-meminfo.node1.SReclaimable
584.68 ± 18% +51.8% 887.72 ± 5% sched_debug.cfs_rq:/.exec_clock.10
531.49 ± 42% +170.7% 1438 ± 72% sched_debug.cfs_rq:/.exec_clock.11
610.54 ± 17% +94.1% 1185 ± 47% sched_debug.cfs_rq:/.exec_clock.14
1172 ± 20% +31.4% 1540 ± 6% sched_debug.cfs_rq:/.exec_clock.2
981.99 ± 7% +38.9% 1364 ± 9% sched_debug.cfs_rq:/.exec_clock.4
1133 ± 31% +78.3% 2020 ± 13% sched_debug.cfs_rq:/.exec_clock.5
637.47 ± 24% +76.7% 1126 ± 44% sched_debug.cfs_rq:/.exec_clock.8
1381 ± 0% +16.0% 1602 ± 0% sched_debug.cfs_rq:/.exec_clock.avg
361.27 ± 16% +31.3% 474.40 ± 17% sched_debug.cfs_rq:/.exec_clock.min
146.25 ± 17% +55.6% 227.50 ± 18% sched_debug.cfs_rq:/.load_avg.0
36.25 ± 72% -69.0% 11.25 ±113% sched_debug.cfs_rq:/.load_avg.14
67.25 ± 83% -83.6% 11.00 ± 52% sched_debug.cfs_rq:/.load_avg.6
0.20 ± 17% -44.0% 0.11 ± 28% sched_debug.cfs_rq:/.nr_running.avg
0.38 ± 4% -20.1% 0.31 ± 13% sched_debug.cfs_rq:/.nr_running.stddev
2.75 ± 90% -100.0% 0.00 ± -1% sched_debug.cfs_rq:/.nr_spread_over.12
3.25 ±102% -100.0% 0.00 ± 0% sched_debug.cfs_rq:/.runnable_load_avg.14
4562 ±126% -167.7% -3089 ±-133% sched_debug.cfs_rq:/.spread0.6
9049 ± 29% -49.5% 4566 ± 61% sched_debug.cfs_rq:/.spread0.max
126.25 ± 32% -55.8% 55.75 ± 39% sched_debug.cfs_rq:/.util_avg.1
124.29 ± 17% -41.1% 73.21 ± 15% sched_debug.cfs_rq:/.util_avg.avg
322.00 ± 35% -44.3% 179.50 ± 27% sched_debug.cfs_rq:/.util_avg.max
879630 ± 8% -15.9% 739419 ± 11% sched_debug.cpu.avg_idle.14
906683 ± 8% -15.2% 768624 ± 10% sched_debug.cpu.avg_idle.3
58510 ± 7% +50.9% 88305 ± 4% sched_debug.cpu.clock.0
58514 ± 7% +50.9% 88306 ± 4% sched_debug.cpu.clock.1
58516 ± 7% +50.9% 88309 ± 4% sched_debug.cpu.clock.10
58510 ± 7% +50.9% 88308 ± 4% sched_debug.cpu.clock.11
58516 ± 7% +50.9% 88308 ± 4% sched_debug.cpu.clock.12
58517 ± 7% +50.9% 88306 ± 4% sched_debug.cpu.clock.13
58518 ± 7% +50.9% 88308 ± 4% sched_debug.cpu.clock.14
58517 ± 7% +50.9% 88297 ± 4% sched_debug.cpu.clock.15
58511 ± 7% +50.9% 88303 ± 4% sched_debug.cpu.clock.2
58512 ± 7% +50.9% 88307 ± 4% sched_debug.cpu.clock.3
58513 ± 7% +50.9% 88306 ± 4% sched_debug.cpu.clock.4
58515 ± 7% +50.9% 88309 ± 4% sched_debug.cpu.clock.5
58516 ± 7% +50.9% 88307 ± 4% sched_debug.cpu.clock.6
58516 ± 7% +50.9% 88309 ± 4% sched_debug.cpu.clock.7
58516 ± 7% +50.9% 88306 ± 4% sched_debug.cpu.clock.8
58516 ± 7% +50.9% 88303 ± 4% sched_debug.cpu.clock.9
58515 ± 7% +50.9% 88306 ± 4% sched_debug.cpu.clock.avg
58518 ± 7% +50.9% 88310 ± 4% sched_debug.cpu.clock.max
58504 ± 7% +50.9% 88291 ± 4% sched_debug.cpu.clock.min
58510 ± 7% +50.9% 88305 ± 4% sched_debug.cpu.clock_task.0
58514 ± 7% +50.9% 88306 ± 4% sched_debug.cpu.clock_task.1
58516 ± 7% +50.9% 88309 ± 4% sched_debug.cpu.clock_task.10
58510 ± 7% +50.9% 88308 ± 4% sched_debug.cpu.clock_task.11
58516 ± 7% +50.9% 88308 ± 4% sched_debug.cpu.clock_task.12
58517 ± 7% +50.9% 88306 ± 4% sched_debug.cpu.clock_task.13
58518 ± 7% +50.9% 88308 ± 4% sched_debug.cpu.clock_task.14
58517 ± 7% +50.9% 88297 ± 4% sched_debug.cpu.clock_task.15
58511 ± 7% +50.9% 88303 ± 4% sched_debug.cpu.clock_task.2
58512 ± 7% +50.9% 88307 ± 4% sched_debug.cpu.clock_task.3
58513 ± 7% +50.9% 88306 ± 4% sched_debug.cpu.clock_task.4
58515 ± 7% +50.9% 88309 ± 4% sched_debug.cpu.clock_task.5
58516 ± 7% +50.9% 88307 ± 4% sched_debug.cpu.clock_task.6
58516 ± 7% +50.9% 88309 ± 4% sched_debug.cpu.clock_task.7
58516 ± 7% +50.9% 88306 ± 4% sched_debug.cpu.clock_task.8
58516 ± 7% +50.9% 88303 ± 4% sched_debug.cpu.clock_task.9
58515 ± 7% +50.9% 88306 ± 4% sched_debug.cpu.clock_task.avg
58518 ± 7% +50.9% 88310 ± 4% sched_debug.cpu.clock_task.max
58504 ± 7% +50.9% 88291 ± 4% sched_debug.cpu.clock_task.min
1791 ± 0% +34.7% 2413 ± 9% sched_debug.cpu.curr->pid.max
530.14 ± 8% +16.6% 618.02 ± 8% sched_debug.cpu.curr->pid.stddev
209.00 ± 33% -100.0% 0.00 ± -1% sched_debug.cpu.load.14
38.27 ± 24% -28.4% 27.41 ± 16% sched_debug.cpu.load.avg
9384 ± 3% +58.1% 14840 ± 12% sched_debug.cpu.nr_load_updates.0
7128 ± 10% +50.5% 10727 ± 5% sched_debug.cpu.nr_load_updates.10
5615 ± 10% +22.5% 6878 ± 4% sched_debug.cpu.nr_load_updates.11
6988 ± 5% +58.1% 11047 ± 4% sched_debug.cpu.nr_load_updates.12
6989 ± 2% +58.4% 11074 ± 3% sched_debug.cpu.nr_load_updates.14
5668 ± 11% +27.4% 7223 ± 6% sched_debug.cpu.nr_load_updates.15
9117 ± 2% +54.8% 14117 ± 7% sched_debug.cpu.nr_load_updates.2
9689 ± 4% +44.3% 13978 ± 5% sched_debug.cpu.nr_load_updates.4
7610 ± 7% +18.6% 9027 ± 8% sched_debug.cpu.nr_load_updates.5
9283 ± 4% +46.6% 13611 ± 5% sched_debug.cpu.nr_load_updates.6
7378 ± 7% +28.5% 9482 ± 10% sched_debug.cpu.nr_load_updates.7
7014 ± 5% +47.6% 10350 ± 2% sched_debug.cpu.nr_load_updates.8
5428 ± 9% +24.7% 6772 ± 4% sched_debug.cpu.nr_load_updates.9
7531 ± 4% +37.4% 10346 ± 3% sched_debug.cpu.nr_load_updates.avg
10658 ± 2% +46.5% 15612 ± 12% sched_debug.cpu.nr_load_updates.max
5170 ± 8% +23.5% 6383 ± 5% sched_debug.cpu.nr_load_updates.min
1642 ± 5% +74.6% 2868 ± 16% sched_debug.cpu.nr_load_updates.stddev
0.19 ± 11% -41.7% 0.11 ± 15% sched_debug.cpu.nr_running.avg
0.40 ± 9% -21.9% 0.31 ± 7% sched_debug.cpu.nr_running.stddev
42853 ± 9% +57.2% 67344 ± 18% sched_debug.cpu.nr_switches.0
20979 ± 6% +101.3% 42226 ± 11% sched_debug.cpu.nr_switches.12
20834 ± 10% +110.4% 43828 ± 23% sched_debug.cpu.nr_switches.14
31923 ± 3% +111.3% 67446 ± 21% sched_debug.cpu.nr_switches.2
42296 ± 12% +72.2% 72821 ± 30% sched_debug.cpu.nr_switches.4
32511 ± 3% +88.7% 61355 ± 20% sched_debug.cpu.nr_switches.6
22802 ± 9% +64.6% 37532 ± 1% sched_debug.cpu.nr_switches.8
26514 ± 1% +51.3% 40127 ± 5% sched_debug.cpu.nr_switches.avg
58800 ± 28% +55.9% 91652 ± 11% sched_debug.cpu.nr_switches.max
14014 ± 19% +68.8% 23661 ± 19% sched_debug.cpu.nr_switches.stddev
-171.00 ±-29% -83.5% -28.25 ±-65% sched_debug.cpu.nr_uninterruptible.1
591.00 ± 3% -33.0% 395.75 ± 16% sched_debug.cpu.nr_uninterruptible.10
240.00 ± 24% -60.5% 94.75 ± 35% sched_debug.cpu.nr_uninterruptible.11
535.25 ± 11% -41.4% 313.75 ± 25% sched_debug.cpu.nr_uninterruptible.12
525.75 ± 18% -37.3% 329.75 ± 15% sched_debug.cpu.nr_uninterruptible.14
212.75 ± 19% -53.2% 99.50 ± 58% sched_debug.cpu.nr_uninterruptible.15
-201.50 ±-28% -65.1% -70.25 ±-17% sched_debug.cpu.nr_uninterruptible.3
-252.50 ±-31% -51.1% -123.50 ±-18% sched_debug.cpu.nr_uninterruptible.4
-235.75 ±-15% -84.1% -37.50 ±-61% sched_debug.cpu.nr_uninterruptible.5
-284.75 ±-14% -94.2% -16.50 ±-317% sched_debug.cpu.nr_uninterruptible.7
162.50 ± 13% -43.1% 92.50 ± 17% sched_debug.cpu.nr_uninterruptible.9
1.06 ± 13% +29.9% 1.38 ± 1% sched_debug.cpu.nr_uninterruptible.avg
527.73 ± 5% -18.1% 432.06 ± 12% sched_debug.cpu.nr_uninterruptible.stddev
21300 ± 5% +98.4% 42250 ± 11% sched_debug.cpu.sched_count.12
20849 ± 10% +110.3% 43854 ± 23% sched_debug.cpu.sched_count.14
34572 ± 5% +100.5% 69304 ± 18% sched_debug.cpu.sched_count.2
22820 ± 9% +67.8% 38294 ± 1% sched_debug.cpu.sched_count.8
148192 ± 0% +9.7% 162582 ± 1% sched_debug.cpu.sched_count.avg
19147 ± 10% +52.7% 29229 ± 21% sched_debug.cpu.sched_goidle.0
8541 ± 6% +120.4% 18821 ± 12% sched_debug.cpu.sched_goidle.12
8536 ± 11% +130.5% 19673 ± 26% sched_debug.cpu.sched_goidle.14
13824 ± 3% +124.6% 31048 ± 23% sched_debug.cpu.sched_goidle.2
19038 ± 13% +78.5% 33981 ± 32% sched_debug.cpu.sched_goidle.4
14186 ± 3% +99.3% 28272 ± 22% sched_debug.cpu.sched_goidle.6
9322 ± 10% +71.9% 16023 ± 1% sched_debug.cpu.sched_goidle.8
11604 ± 2% +57.5% 18282 ± 6% sched_debug.cpu.sched_goidle.avg
27549 ± 30% +59.8% 44033 ± 10% sched_debug.cpu.sched_goidle.max
6704 ± 20% +66.7% 11174 ± 19% sched_debug.cpu.sched_goidle.stddev
53560 ± 3% +48.0% 79293 ± 3% sched_debug.cpu.ttwu_count.0
9727 ± 27% +107.8% 20210 ± 45% sched_debug.cpu.ttwu_count.10
7802 ± 8% +152.5% 19704 ± 33% sched_debug.cpu.ttwu_count.12
8037 ± 11% +204.4% 24466 ± 48% sched_debug.cpu.ttwu_count.14
15381 ± 5% +43.2% 22034 ± 13% sched_debug.cpu.ttwu_count.4
15299 ± 10% +52.0% 23253 ± 19% sched_debug.cpu.ttwu_count.6
14342 ± 1% +47.0% 21090 ± 5% sched_debug.cpu.ttwu_count.avg
53630 ± 3% +47.9% 79315 ± 3% sched_debug.cpu.ttwu_count.max
11752 ± 5% +52.6% 17935 ± 9% sched_debug.cpu.ttwu_count.stddev
13481 ± 4% +23.5% 16655 ± 4% sched_debug.cpu.ttwu_local.0
3872 ± 9% -22.1% 3016 ± 9% sched_debug.cpu.ttwu_local.1
1972 ± 19% +62.3% 3200 ± 7% sched_debug.cpu.ttwu_local.10
1900 ± 8% +85.7% 3528 ± 11% sched_debug.cpu.ttwu_local.12
2000 ± 17% +71.8% 3436 ± 7% sched_debug.cpu.ttwu_local.14
3086 ± 6% +20.9% 3732 ± 8% sched_debug.cpu.ttwu_local.2
2968 ± 14% -21.6% 2326 ± 17% sched_debug.cpu.ttwu_local.3
2882 ± 9% -23.8% 2195 ± 18% sched_debug.cpu.ttwu_local.7
2057 ± 9% +61.1% 3313 ± 3% sched_debug.cpu.ttwu_local.8
13534 ± 4% +23.4% 16700 ± 4% sched_debug.cpu.ttwu_local.max
1344 ± 10% -18.5% 1095 ± 10% sched_debug.cpu.ttwu_local.min
2862 ± 4% +25.1% 3581 ± 5% sched_debug.cpu.ttwu_local.stddev
58515 ± 7% +50.9% 88309 ± 4% sched_debug.cpu_clk
56859 ± 8% +52.4% 86644 ± 5% sched_debug.ktime
58515 ± 7% +50.9% 88309 ± 4% sched_debug.sched_clk
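
The latency_stats rows above tell the story: after the commit, the
submit_bio_wait() entries under f2fs_issue_flush disappear from the fsync
stacks, and the wait is instead accounted to f2fs_issue_flush() itself, whose
summed latency reaches 2.363e+09 versus 1.35e+09 before. With flush_merge
enabled, each fsync no longer submits and waits on its own flush bio; it
queues a command and sleeps until a single issuer thread completes one merged
flush for the whole batch. A hedged, from-memory sketch of that era's
f2fs_issue_flush() in fs/f2fs/segment.c (field and helper names are
approximate, not verbatim):

int f2fs_issue_flush(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
	struct flush_cmd cmd;

	if (!test_opt(sbi, FLUSH_MERGE)) {
		/* old default: each fsync submits and waits on its own flush
		 * bio, so the wait is accounted to submit_bio_wait() */
		struct bio *bio = f2fs_bio_alloc(0);
		int ret;

		bio->bi_bdev = sbi->sb->s_bdev;
		ret = submit_bio_wait(WRITE_FLUSH, bio);
		bio_put(bio);
		return ret;
	}

	/* flush_merge: queue the command and sleep until the issuer thread
	 * completes one merged flush for the whole batch; the wait now
	 * shows up under f2fs_issue_flush() itself */
	init_completion(&cmd.wait);
	llist_add(&cmd.llnode, &fcc->issue_list);

	if (!fcc->dispatch_list)
		wake_up(&fcc->flush_wait_queue);

	wait_for_completion(&cmd.wait);
	return cmd.ret;
}

Merging trades per-fsync flush bios for a shared queue; on this single-HDD,
32-thread fsync-heavy run the batching appears to cost throughput rather than
help it, consistent with the +44.7% elapsed time above.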


lkp-ne04: Nehalem-EP
Memory: 12G





1.2e+06 ++----------------------------------------------------------------+
| |
1e+06 ++ O O O O O O O O O O |
O O O O O O |
| O O O O
800000 ++ |
| |
600000 ++ |
| |
400000 ++ |
| |
| |
200000 ++ |
| |
0 ++---------------------O------------------------------------------+




60000 ++------------------------------------------------------------------+
| |
50000 O+O O O O O O O O O O O O O O O O O O O O O O O O O O O
| |
| |
40000 ++ |
| |
30000 ++ |
| |
20000 ++ |
| |
| |
10000 ++ |
| |
0 ++---------------O--------------------------------------------------+




2.5e+09 ++----------------------------------------------------------------+
O O O O O O O O O O O O O O O O O O O O O O O O O O O O
| |
2e+09 ++ |
| |
| |
1.5e+09 ++ |
| |
1e+09 ++ |
| |
| |
5e+08 ++ |
| |
| |
0 ++---------------O------------------------------------------------+




50000 ++------------------------------------------------------------------+
45000 O+O O O O O O O O O O O O O O O O O O O O O O O O O O O
| |
40000 ++ |
35000 ++ |
| |
30000 ++ |
25000 ++ |
20000 ++ |
| |
15000 ++ |
10000 ++ |
| |
5000 ++ |
0 ++---------------O--------------------------------------------------+




160000 ++-----------------------------------------------------------------+
O O O O O O |
140000 ++ O O O O O O O O |
120000 ++O O O O O O O O O O O O O O
| |
100000 ++ |
| |
80000 ++ |
| |
60000 ++ |
40000 ++ |
| |
20000 ++ |
| |
0 ++---------------O-------------------------------------------------+


softirqs.BLOCK

25000 ++------------------------------------------------------------------+
| |
*.**.**.**.**.**. .**.**.**.**.* **.**.**.**.* **.**.* .**. *.|
20000 ++ ** * : : : : * * *
| : : : : : : |
| : : : : : : |
15000 ++ : : : : : : |
O OO OO OO O OO OO:OO OO OO OO OO OO:OO OO : : |
10000 ++ : : :: :: |
| :: :: :: |
| :: :: :: |
5000 ++ :: :: :: |
| : : : |
| : : : |
0 ++---------O--------*---------------*--------------*----------------+


cpuidle.C1-NHM.usage

25000 *+---**--------------------*-**-*--*------*-**-*--*-----------------+
|+ * **.**.**. : * : **.* * : **.**. |
| * ** *.**.* : : : : **.**.* .*
20000 ++ : : : : : : * |
| : : : : : : |
| : : : : : : |
15000 ++ : : : : : : |
| : : :: :: |
10000 O+OO OO OO O OO OO:OO OO OO OO OO OO:OO OO :: |
| :: :: :: |
| :: :: :: |
5000 ++ : : : |
| : : : |
| : : : |
0 ++---------O--------*---------------*--------------*----------------+


cpuidle.C1E-NHM.usage

40000 ++------------------------------------------------------------------+
*. *.**. *.**.**. .**.* .* .**.* .* |
35000 ++* * ** *.**.** * : **.** * : **.**.* |
30000 ++ : : : : : : *.**.**.*
| : : : : : : |
25000 ++ : : : : : : |
| : : : : : : |
20000 ++ : : : : : : |
O O OO O OO OO:OO OO OO OO OO OO:OO OO :: |
15000 ++OO O :: :: :: |
10000 ++ :: :: :: |
| : :: :: |
5000 ++ : : : |
| : : : |
0 ++---------O--------*---------------*--------------*----------------+


cpuidle.C3-NHM.time

7e+08 ++------------------------------------------------------------------+
| O O |
6e+08 O+OO OO O O O O |
| O OO OO OO O OO OO OO O OO |
5e+08 ++ |
*.**.**.* .**.**.** *.**.**. *.**.* **. *. *.**.* **. *.**.**.**.*
4e+08 ++ * : : * : : * * : : * |
| : : : : : : |
3e+08 ++ : : : : : : |
| : : : : : : |
2e+08 ++ :: :: :: |
| :: :: :: |
1e+08 ++ : : : |
| : : : |
0 ++---------O--------*---------------*--------------*----------------+


cpuidle.C6-NHM.time

1.8e+09 ++----------------------------------------------------------------+
| OO O OO OO OO OOO OO OO OO OO O OO |
1.6e+09 OO O O O |
1.4e+09 ++ |
| |
1.2e+09 ++ |
1e+09 **.**.**.**.**.**.* ***.**.**.**.* **.**.***.** *.**.**.**.**.**
| : : : : : : |
8e+08 ++ : : : : : : |
6e+08 ++ : : : : : : |
| :: :: : : |
4e+08 ++ :: :: :: |
2e+08 ++ :: :: : |
| : : : |
0 ++--------O--------*---------------*--------------*---------------+


turbostat.CPU_c6

60 ++---------------------------------------------------------------------+
| O |
50 O+OO OO OO OO OO O O OO OO O OO OO OO O OO |
*.**.**.**.*.**.**.* .**.**.*.**.** .*.**.**.**.* .**.*.**.**.**.*
| : * : * : * |
40 ++ : : : : : : |
| : : : : : : |
30 ++ : : : : : : |
| : : : : : : |
20 ++ : : : : : : |
| :: :: :: |
| :: :: :: |
10 ++ : : : |
| : : : |
0 ++---------O---------*----------------*---------------*----------------+


fsmark.files_per_sec

500 ++------------------------------------------------------------*--*----+
450 *+**.**.**.**.*.**.* **.**.*.**.**.* **.*.**.**.** *.*.**.* * **.*
| : : : : : : |
400 ++ : : : : : : |
350 ++ : : : : : : |
O OO OO OO O O OO OO:OO OO O OO OO OO:OO O O : : |
300 ++ : : : : : : |
250 ++ : : : : : : |
200 ++ :: :: :: |
| :: :: :: |
150 ++ :: :: :: |
100 ++ : : : |
| : : : |
50 ++ : : : |
0 ++---------O--------*----------------*---------------*----------------+


fsmark.time.elapsed_time

160 O+OO-OO-OO--O-O-OO-OO-OO-OO-O-OO-OO-OO-OO-O-O-------------------------+
| |
140 ++ |
120 ++ |
| .**. .**. .* .* *.* .*.**.**.* *. .**.**.** .*. *. |
100 *+** ** * * : * * : * * : * * **.**.**.*
| : : : : : : |
80 ++ : : : : : : |
| : : : : : : |
60 ++ :: :: : : |
40 ++ :: :: :: |
| :: :: :: |
20 ++ : : : |
| : : : |
0 ++---------O--------*----------------*---------------*----------------+


fsmark.time.elapsed_time.max

160 O+OO-OO-OO--O-O-OO-OO-OO-OO-O-OO-OO-OO-OO-O-O-------------------------+
| |
140 ++ |
120 ++ |
| .**. .**. .* .* *.* .*.**.**.* *. .**.**.** .*. *. |
100 *+** ** * * : * * : * * : * * **.**.**.*
| : : : : : : |
80 ++ : : : : : : |
| : : : : : : |
60 ++ :: :: : : |
40 ++ :: :: :: |
| :: :: :: |
20 ++ : : : |
| : : : |
0 ++---------O--------*----------------*---------------*----------------+


time.elapsed_time

160 O+OO-OO-OO--O-O-OO-OO-OO-OO-O-OO-OO-OO-OO-O-O-------------------------+
| |
140 ++ |
120 ++ |
| .**. .**. .* .* *.* .*.**.**.* *. .**.**.** .*. *. |
100 *+** ** * * : * * : * * : * * **.**.**.*
| : : : : : : |
80 ++ : : : : : : |
| : : : : : : |
60 ++ :: :: : : |
40 ++ :: :: :: |
| :: :: :: |
20 ++ : : : |
| : : : |
0 ++---------O--------*----------------*---------------*----------------+


time.elapsed_time.max

160 O+OO-OO-OO--O-O-OO-OO-OO-OO-O-OO-OO-OO-OO-O-O-------------------------+
| |
140 ++ |
120 ++ |
| .**. .**. .* .* *.* .*.**.**.* *. .**.**.** .*. *. |
100 *+** ** * * : * * : * * : * * **.**.**.*
| : : : : : : |
80 ++ : : : : : : |
| : : : : : : |
60 ++ :: :: : : |
40 ++ :: :: :: |
| :: :: :: |
20 ++ : : : |
| : : : |
0 ++---------O--------*----------------*---------------*----------------+


vmstat.io.bo

7000 ++-------------------------------------------------------------------+
| |
6000 *+**.**.**.**.**.** *.*.**.**.**.** *.**.**.*.**.* **.**.**.**.**.*
| : : : : : : |
5000 ++ : : : : : : |
| : : : : : : |
4000 O+OO OO OO O OO OO:OO O OO OO OO OO:OO OO O : : |
| : : : : : : |
3000 ++ : : : : :: |
| :: :: :: |
2000 ++ :: :: :: |
| : : :: |
1000 ++ : : : |
| : : : |
0 ++---------O--------*----------------*--------------*----------------+


proc-vmstat.numa_hit

500000 ++-----------------------------------------------------------------+
450000 OO OO OO O OO OO OO OO OO OO OO OO OO OO O |
| |
400000 **. *.* .**.**.**.* **.* .**.**.** *.**.**.**.** *.**.**. *. *
350000 ++ * * : : * : : : : **.* *|
| : : : : : : |
300000 ++ : : : : : : |
250000 ++ : : : : : : |
200000 ++ : : : : : : |
| :: :: :: |
150000 ++ :: :: :: |
100000 ++ :: :: :: |
| : : : |
50000 ++ : : : |
0 ++--------O--------*----------------*--------------*---------------+


proc-vmstat.numa_local

500000 ++-----------------------------------------------------------------+
450000 OO OO OO O OO OO OO OO OO OO OO OO OO OO O |
| |
400000 **. *.* .**.**.**.* **.* .**.**.** *.**.**.**.** *.**.**. *. *
350000 ++ * * : : * : : : : **.* *|
| : : : : : : |
300000 ++ : : : : : : |
250000 ++ : : : : : : |
200000 ++ : : : : : : |
| :: :: :: |
150000 ++ :: :: :: |
100000 ++ :: :: :: |
| : : : |
50000 ++ : : : |
0 ++--------O--------*----------------*--------------*---------------+


proc-vmstat.pgalloc_normal

450000 ++-----------------------------------------------------------------+
| |
400000 OO OO OO O OO OO OO OO OO OO OO OO OO OO O |
350000 ++ |
**.**.**.**.**.**.* **.**.**.**.** *.**.**.**.** *.**.**.**.**.**
300000 ++ : : : : : : |
250000 ++ : : : : : : |
| : : : : : : |
200000 ++ : : : : : : |
150000 ++ :: : : : : |
| :: :: :: |
100000 ++ :: :: :: |
50000 ++ : : : |
| : : : |
0 ++--------O--------*----------------*--------------*---------------+


proc-vmstat.pgfree

350000 ++-----------------------------------------------------------------+
| |
300000 OO OO OO O OO OO OO OO OO OO OO OO OO OO O |
| |
250000 ++ |
**.**.**.**.**.**.* **.**.**.**.** *.**.**.**.** *.**.**.**.**.**
200000 ++ : : : : : : |
| : : : : : : |
150000 ++ : : : : : : |
| : : : : : : |
100000 ++ :: :: :: |
| :: :: :: |
50000 ++ : : : |
| : : : |
0 ++--------O--------*----------------*--------------*---------------+


proc-vmstat.pgfault

350000 OO-OO-OO-O--OO-OO-OO-OO-OO-OO-OO-OO-OO-OO-O------------------------+
| |
300000 ++ |
| |
250000 **.**.**.**.**.**.* **.**.**.**.** *.**.**.**.** *.**.**.**.**.**
| : : : : : : |
200000 ++ : : : : : : |
| : : : : : : |
150000 ++ : : : : : : |
| :: : : : : |
100000 ++ :: :: :: |
| :: :: :: |
50000 ++ : : : |
| : : : |
0 ++--------O--------*----------------*--------------*---------------+


sched_debug.cpu.nr_load_updates.2

16000 ++------------------------------------------------------------------+
O O O OO OO |
14000 ++O O O O O OO O OO OO O OO O |
12000 ++ O O O O O |
| |
10000 ++ *. *. *. * * * |
|.* .**.* * * ** *.**.* + : *.* + : *.**.* + *.**.*
8000 *+ * : : * *.**.* * * *.**.* * * |
| : : : : : : |
6000 ++ : : : : : : |
4000 ++ : : : : : : |
| :: :: :: |
2000 ++ : :: :: |
| : : : |
0 ++---------O--------*---------------*--------------*----------------+


sched_debug.cpu.nr_load_updates.6

16000 ++------------------------------------------------------------------+
| O |
14000 O+ O O O OO O O O OO OO OO O O O |
12000 ++OO O O O O O O O |
| |
10000 ++ * |
|.* .**. *.**.**.** :+ *.* .* *.* *.* .* *.* *.**.**.**.**.*
8000 *+ * * : : * * *.* : * * *.* : * |
| : : : : : : |
6000 ++ : : : : : : |
4000 ++ :: :: :: |
| :: :: :: |
2000 ++ : :: :: |
| : : : |
0 ++---------O--------*---------------*--------------*----------------+


sched_debug.cpu.nr_switches.8

60000 ++------------------------------------------------------------------+
| O |
50000 ++ O |
| |
| OO O O O OO OO |
40000 O+ O O OO OO O O O OO OO |
| O O O |
30000 ++ |
| .* .* *.|
20000 *+**.**.**.**.**.** *.**.**.** :.* **.**.** :.* **.**.**.**.* *
| : : * : : * : : |
| : : : : : : |
10000 ++ :: :: :: |
| : : : |
0 ++---------O--------*---------------*--------------*----------------+


sched_debug.cpu.nr_load_updates.8

12000 ++------------------------------------------------------------------+
O O OO OO O OO O O O O O O O |
10000 ++O O O O O O O O O O OO |
| |
| |
8000 ++ .* * *. .* .* *.|
|.* .**. * *.**.*: *. : **. * *. * *.**. *.* *
6000 *+ * * : : * **.**.* * **.**.* * * |
| : : : : : : |
4000 ++ : : : : : : |
| : : : : : : |
| :: :: :: |
2000 ++ : :: :: |
| : : : |
0 ++---------O--------*---------------*--------------*----------------+


sched_debug.cpu.sched_count.8

80000 ++------------------------------------------------------------------+
| O |
70000 ++ |
60000 ++ |
| O |
50000 ++ O |
| O O O |
40000 O+O O OO O OO O OO O O O O OO OO |
| O O O |
30000 ++ *.|
20000 *+ *.**. *.**.**.** *.**.**. *.**.* * .**. *.**.* * .**. *.**. : *
| * * : : * : :* * : :* * * |
10000 ++ : : :: :: |
| : :: :: |
0 ++---------O--------*---------------*--------------*----------------+


sched_debug.cpu.sched_goidle.8

25000 ++-------------------O----------------------------------------------+
| O |
| |
20000 ++ |
| OO O O OO OO OO |
O O O OO OO OO O OO OO |
15000 ++ O O |
| |
10000 ++ .* .* *.|
*. *.**. .**.**.** *.**.**. * :.* .**. * :.* .**. *.**. : *
| * ** : : * * : ** * * : ** * * |
5000 ++ : : : : : : |
| :: :: :: |
| : :: :: |
0 ++---------O--------*---------------*--------------*----------------+


sched_debug.cpu.nr_load_updates.12

12000 ++-------------O-----------------------O----------------------------+
O OO O OO O O O O O OO OO O OO O O |
10000 ++ O O O O O O |
| |
| |
8000 ++ .**. *.* *. *. *.|
|.* .**. * * * *. *.* : * : **.**. .* *
6000 *+ * * : : * *.**.**.* * *.**.**.* * ** |
| : : : : : : |
4000 ++ : : : : : : |
| : : : : : : |
| :: :: :: |
2000 ++ : :: :: |
| : : : |
0 ++---------O--------*---------------*--------------*----------------+

[*] bisect-good sample
[O] bisect-bad sample

To reproduce:

git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/lkp install job.yaml # job file is attached in this email
bin/lkp run job.yaml


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Ying Huang
---
LKP_SERVER: inn
LKP_CGI_PORT: 80
LKP_CIFS_PORT: 139
testcase: fsmark
default-monitors:
  wait: activate-monitor
  kmsg:
  uptime:
  iostat:
  heartbeat:
  vmstat:
  numa-numastat:
  numa-vmstat:
  numa-meminfo:
  proc-vmstat:
  proc-stat:
    interval: 10
  meminfo:
  slabinfo:
  interrupts:
  lock_stat:
  latency_stats:
  softirqs:
  bdi_dev_mapping:
  diskstats:
  nfsstat:
  cpuidle:
  cpufreq-stats:
  turbostat:
  pmeter:
  sched_debug:
    interval: 60
cpufreq_governor: performance
default-watchdogs:
  oom-killer:
  watchdog:
commit: 7b51bf49f4825da09206c6d89e4aad5b4faa0a14
model: Nehalem-EP
memory: 12G
hdd_partitions: "/dev/disk/by-id/ata-ST3500514NS_9WJ03EBA-part3"
swap_partitions: "/dev/disk/by-id/ata-ST3120026AS_5MS07HA2-part2"
rootfs_partition: "/dev/disk/by-id/ata-ST3500514NS_9WJ03EBA-part1"
category: benchmark
iterations: 1x
nr_threads: 32t
disk: 1HDD
fs: f2fs
fs2:
fsmark:
  filesize: 8K
  test_size: 400M
  sync_method: fsyncBeforeClose
  nr_directories: 16d
  nr_files_per_directory: 256fpd
queue: bisect
testbox: lkp-ne04
tbox_group: lkp-ne04
kconfig: x86_64-rhel
enqueue_time: 2016-02-20 15:23:41.150317439 +08:00
compiler: gcc-4.9
rootfs: debian-x86_64-2015-02-07.cgz
id: a83dad66c2d28d23d61f7db436e6861bf87bb018
user: lkp
head_commit: 40ab400dacd1bd3ffbdeb8aa078e2885bdd3cfd5
base_commit: 18558cae0272f8fd9647e69d3fec1565a7949865
branch: linux-devel/devel-hourly-2016021912
result_root: "/result/fsmark/performance-1x-32t-1HDD-f2fs-8K-400M-fsyncBeforeClose-16d-256fpd/lkp-ne04/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/7b51bf49f4825da09206c6d89e4aad5b4faa0a14/0"
job_file: "/lkp/scheduled/lkp-ne04/bisect_fsmark-performance-1x-32t-1HDD-f2fs-8K-400M-fsyncBeforeClose-16d-256fpd-debian-x86_64-2015-02-07.cgz-x86_64-rhel-7b51bf49f4825da09206c6d89e4aad5b4faa0a14-20160220-49742-16x72kv-0.yaml"
nr_cpu: "$(nproc)"
max_uptime: 933.5200000000001
initrd: "/osimage/debian/debian-x86_64-2015-02-07.cgz"
bootloader_append:
- root=/dev/ram0
- user=lkp
- job=/lkp/scheduled/lkp-ne04/bisect_fsmark-performance-1x-32t-1HDD-f2fs-8K-400M-fsyncBeforeClose-16d-256fpd-debian-x86_64-2015-02-07.cgz-x86_64-rhel-7b51bf49f4825da09206c6d89e4aad5b4faa0a14-20160220-49742-16x72kv-0.yaml
- ARCH=x86_64
- kconfig=x86_64-rhel
- branch=linux-devel/devel-hourly-2016021912
- commit=7b51bf49f4825da09206c6d89e4aad5b4faa0a14
- BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/7b51bf49f4825da09206c6d89e4aad5b4faa0a14/vmlinuz-4.5.0-rc2-00306-g7b51bf4
- max_uptime=933
- RESULT_ROOT=/result/fsmark/performance-1x-32t-1HDD-f2fs-8K-400M-fsyncBeforeClose-16d-256fpd/lkp-ne04/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/7b51bf49f4825da09206c6d89e4aad5b4faa0a14/0
- LKP_SERVER=inn
- |2-


  earlyprintk=ttyS0,115200 systemd.log_level=err
  debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100
  panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0
  console=ttyS0,115200 console=tty0 vga=normal

  rw
lkp_initrd: "/lkp/lkp/lkp-x86_64.cgz"
modules_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/7b51bf49f4825da09206c6d89e4aad5b4faa0a14/modules.cgz"
bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/run-ipconfig.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fs.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fs2.cgz,/lkp/benchmarks/fsmark.cgz"
linux_headers_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/7b51bf49f4825da09206c6d89e4aad5b4faa0a14/linux-headers.cgz"
repeat_to: 2
kernel: "/pkg/linux/x86_64-rhel/gcc-4.9/7b51bf49f4825da09206c6d89e4aad5b4faa0a14/vmlinuz-4.5.0-rc2-00306-g7b51bf4"
dequeue_time: 2016-02-20 15:32:56.765912672 +08:00
job_state: finished
loadavg: 30.80 13.82 5.29 2/251 5046
start_time: '1455953610'
end_time: '1455953766'
version: "/lkp/lkp/.src-20160219-224205"

Attachment: reproduce.sh
Description: Bourne shell script