[lkp] [x86/entry/64] 62c79204783: +5.9% aim7.jobs-per-min

From: Huang Ying
Date: Mon Jul 13 2015 - 22:35:58 EST


FYI, we noticed the following changes on

git://git.kernel.org/pub/scm/linux/kernel/git/luto/linux.git x86/entry
commit 62c79204783e188291d880f23d49c02d8c8f498b ("x86/entry/64: When returning via SYSRET, POP regs instead of using MOV")
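
The mechanism named in the commit subject: on the SYSRET (syscall-exit)
fast path, the saved user registers are restored from the pt_regs frame
with a sequence of POP instructions instead of MOV loads at fixed stack
offsets. A minimal sketch of the two styles, assuming the standard
x86-64 pt_regs slot order -- this is an illustration, not the actual
entry_64.S diff:

    /* Illustrative only -- not the patch itself.  pt_regs stores
     * r15 at the lowest address, so the two sequences below are
     * equivalent when %rsp points at the frame. */

    /* Old style: MOV each register out of its pt_regs slot, then
     * step over the restored slots with one ADD. */
    movq    0*8(%rsp), %r15
    movq    1*8(%rsp), %r14
    movq    2*8(%rsp), %r13
    movq    3*8(%rsp), %r12
    movq    4*8(%rsp), %rbp
    movq    5*8(%rsp), %rbx
    addq    $6*8, %rsp

    /* New style: each POP both loads a register and advances %rsp.
     * POPs have shorter encodings and are cheap on modern cores,
     * which trims the syscall-return fast path. */
    popq    %r15
    popq    %r14
    popq    %r13
    popq    %r12
    popq    %rbp
    popq    %rbx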


=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/load/test:
lkp-a06/aim7/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/4000/misc_rtns_1

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
108044 ± 0% +5.9% 114416 ± 0% aim7.jobs-per-min
223.70 ± 0% -5.6% 211.11 ± 0% aim7.time.elapsed_time
223.70 ± 0% -5.6% 211.11 ± 0% aim7.time.elapsed_time.max
2113772 ± 0% -83.5% 349283 ± 2% aim7.time.involuntary_context_switches
599.10 ± 0% -2.2% 585.90 ± 0% aim7.time.system_time
203.87 ± 1% -4.7% 194.25 ± 0% aim7.time.user_time
2113772 ± 0% -83.5% 349283 ± 2% time.involuntary_context_switches
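
(Reading these tables: the left pair of columns is the parent commit's
mean and relative standard deviation, the right pair the same for the
patched commit; %change is the relative difference of the two means.
For the first row above: 100 * (114416 - 108044) / 108044 ~= +5.9%.)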

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/load/test:
lkp-a06/aim7/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/4000/new_raph

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
184357 ± 0% +10.8% 204299 ± 0% aim7.jobs-per-min
131.71 ± 0% -9.8% 118.82 ± 0% aim7.time.elapsed_time
131.71 ± 0% -9.8% 118.82 ± 0% aim7.time.elapsed_time.max
2193692 ± 0% -92.5% 164787 ± 0% aim7.time.involuntary_context_switches
18.86 ± 2% -68.5% 5.94 ± 4% aim7.time.system_time
435.63 ± 0% -2.7% 424.04 ± 0% aim7.time.user_time
58879 ± 2% -19.5% 47402 ± 2% aim7.time.voluntary_context_switches
131.71 ± 0% -9.8% 118.82 ± 0% time.elapsed_time
131.71 ± 0% -9.8% 118.82 ± 0% time.elapsed_time.max
2193692 ± 0% -92.5% 164787 ± 0% time.involuntary_context_switches
18.86 ± 2% -68.5% 5.94 ± 4% time.system_time
58879 ± 2% -19.5% 47402 ± 2% time.voluntary_context_switches

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/load/test:
lkp-a06/aim7/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/4000/pipe_cpy

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
240551 ± 0% +13.9% 273904 ± 2% aim7.jobs-per-min
101.42 ± 0% -12.2% 89.04 ± 2% aim7.time.elapsed_time
101.42 ± 0% -12.2% 89.04 ± 2% aim7.time.elapsed_time.max
1981819 ± 0% -93.0% 138430 ± 3% aim7.time.involuntary_context_switches
278.03 ± 0% -6.5% 259.97 ± 2% aim7.time.system_time
54936 ± 1% -19.6% 44147 ± 2% aim7.time.voluntary_context_switches
101.42 ± 0% -12.2% 89.04 ± 2% time.elapsed_time
101.42 ± 0% -12.2% 89.04 ± 2% time.elapsed_time.max
1981819 ± 0% -93.0% 138430 ± 3% time.involuntary_context_switches
54936 ± 1% -19.6% 44147 ± 2% time.voluntary_context_switches

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/iterations/nr_threads/disk/fs/filesize/test_size/sync_method/nr_directories/nr_files_per_directory:
lkp-sb02/fsmark/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1x/32t/1HDD/btrfs/9B/400M/fsyncBeforeClose/16d/256fpd

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
2554844 ± 1% -13.1% 2221395 ± 1% fsmark.app_overhead
223789 ± 0% -57.2% 95777 ± 6% fsmark.time.involuntary_context_switches
32.50 ± 1% +5.4% 34.25 ± 1% fsmark.time.percent_of_cpu_this_job_got
2118350 ± 0% +5.2% 2229178 ± 0% fsmark.time.voluntary_context_switches
223789 ± 0% -57.2% 95777 ± 6% time.involuntary_context_switches
39831 ± 0% -9.9% 35903 ± 1% softirqs.SCHED
59651 ± 1% -8.7% 54478 ± 0% softirqs.TIMER
64594 ± 0% -69.9% 19433 ± 0% vmstat.system.cs
25161 ± 0% -94.2% 1465 ± 3% vmstat.system.in
4366124 ± 0% -90.0% 434486 ± 4% cpuidle.C1-SNB.usage
17993557 ± 4% +17.6% 21163962 ± 3% cpuidle.C1E-SNB.time
42154 ± 3% +15.4% 48646 ± 6% cpuidle.C1E-SNB.usage
9261281 ± 5% +28.4% 11895838 ± 4% cpuidle.C3-SNB.time
6639 ± 2% +14.5% 7601 ± 3% cpuidle.C3-SNB.usage
9194 ± 2% -97.7% 208.25 ± 6% cpuidle.POLL.usage
11.44 ± 0% -20.0% 9.14 ± 0% turbostat.%Busy
333.25 ± 0% -21.0% 263.25 ± 1% turbostat.Avg_MHz
1.32 ± 5% +28.4% 1.70 ± 4% turbostat.CPU%c3
11.04 ± 1% -13.0% 9.61 ± 1% turbostat.CorWatt
1.47 ± 3% +26.7% 1.86 ± 4% turbostat.Pkg%pc2
1.19 ± 8% +58.6% 1.89 ± 5% turbostat.Pkg%pc3
19.86 ± 1% +35.2% 26.86 ± 1% turbostat.Pkg%pc6
14.61 ± 0% -9.9% 13.16 ± 1% turbostat.PkgWatt
2143346 ± 57% -100.0% 0.00 ± -1% latency_stats.avg.btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
30524 ± 3% +20.3% 36728 ± 1% latency_stats.hits.btrfs_tree_lock.[btrfs].btrfs_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].btrfs_insert_delayed_items.[btrfs].btrfs_commit_inode_delayed_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync
18837 ± 4% +56.0% 29390 ± 7% latency_stats.hits.btrfs_tree_lock.[btrfs].btrfs_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].copy_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync
12096 ± 6% +46.5% 17718 ± 4% latency_stats.hits.btrfs_tree_lock.[btrfs].btrfs_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].drop_objectid_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
62486 ± 1% +19.6% 74731 ± 4% latency_stats.hits.btrfs_tree_read_lock.[btrfs].btrfs_read_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].copy_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync
52038 ± 2% +16.7% 60742 ± 3% latency_stats.hits.btrfs_tree_read_lock.[btrfs].btrfs_read_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].drop_objectid_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
3558104 ± 57% -100.0% 0.00 ± -1% latency_stats.max.btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
4.111e+08 ± 57% -100.0% 0.00 ± -1% latency_stats.sum.btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
1403516 ± 2% +31.1% 1840040 ± 2% latency_stats.sum.btrfs_tree_lock.[btrfs].btrfs_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].btrfs_insert_delayed_items.[btrfs].btrfs_commit_inode_delayed_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync
3349730 ± 2% +19.6% 4005849 ± 2% latency_stats.sum.btrfs_tree_lock.[btrfs].btrfs_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].btrfs_new_inode.[btrfs].btrfs_create.[btrfs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
178060 ± 6% +74.3% 310377 ± 11% latency_stats.sum.btrfs_tree_lock.[btrfs].btrfs_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].copy_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync
1322666 ± 4% +27.8% 1690982 ± 1% latency_stats.sum.btrfs_tree_lock.[btrfs].btrfs_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].insert_with_overflow.[btrfs].btrfs_insert_dir_item.[btrfs].btrfs_add_link.[btrfs].btrfs_create.[btrfs].vfs_create.path_openat.do_filp_open.do_sys_open
42794 ± 5% +63.1% 69810 ± 11% latency_stats.sum.btrfs_tree_lock.[btrfs].btrfs_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_truncate_inode_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
122546 ± 7% +60.4% 196523 ± 7% latency_stats.sum.btrfs_tree_lock.[btrfs].btrfs_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].drop_objectid_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
24192 ± 3% +29.8% 31401 ± 6% latency_stats.sum.btrfs_tree_lock.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].copy_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
697815 ± 1% +11.1% 775306 ± 2% latency_stats.sum.btrfs_tree_lock.[btrfs].btrfs_search_slot.[btrfs].btrfs_lookup_inode.[btrfs].__btrfs_update_delayed_inode.[btrfs].btrfs_commit_inode_delayed_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync
566092 ± 2% +29.3% 732008 ± 7% latency_stats.sum.btrfs_tree_read_lock.[btrfs].btrfs_read_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].copy_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync
283356 ± 4% +33.1% 377256 ± 7% latency_stats.sum.btrfs_tree_read_lock.[btrfs].btrfs_read_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].btrfs_truncate_inode_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
525841 ± 2% +24.2% 653227 ± 4% latency_stats.sum.btrfs_tree_read_lock.[btrfs].btrfs_read_lock_root_node.[btrfs].btrfs_search_slot.[btrfs].drop_objectid_items.[btrfs].btrfs_log_inode.[btrfs].btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
759116 ± 2% -9.4% 687617 ± 1% latency_stats.sum.btrfs_tree_read_lock.[btrfs].btrfs_search_slot.[btrfs].btrfs_lookup_dir_item.[btrfs].btrfs_lookup_dentry.[btrfs].btrfs_lookup.[btrfs].lookup_real.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
12244 ± 8% -11.0% 10892 ± 3% sched_debug.cfs_rq[0]:/.avg->runnable_avg_sum
8078 ± 6% +20.1% 9700 ± 5% sched_debug.cfs_rq[0]:/.exec_clock
12997 ± 8% +25.1% 16258 ± 2% sched_debug.cfs_rq[0]:/.min_vruntime
269.00 ± 8% -11.3% 238.50 ± 3% sched_debug.cfs_rq[0]:/.tg_runnable_contrib
553.11 ±128% -403.8% -1680 ±-72% sched_debug.cfs_rq[2]:/.spread0
28.75 ± 35% -43.5% 16.25 ± 42% sched_debug.cfs_rq[3]:/.nr_spread_over
42568 ± 29% -52.2% 20355 ± 4% sched_debug.cpu#0.nr_load_updates
4438150 ± 51% -92.6% 328982 ± 4% sched_debug.cpu#0.nr_switches
4438484 ± 51% -92.6% 329332 ± 4% sched_debug.cpu#0.sched_count
2060550 ± 53% -96.5% 72130 ± 7% sched_debug.cpu#0.sched_goidle
2323810 ± 48% -87.5% 290944 ± 5% sched_debug.cpu#0.ttwu_count
2136144 ± 53% -97.7% 48220 ± 2% sched_debug.cpu#0.ttwu_local
1705455 ±133% -80.1% 339746 ± 5% sched_debug.cpu#1.nr_switches
1705520 ±133% -80.1% 339819 ± 5% sched_debug.cpu#1.sched_count
739354 ±148% -89.5% 77619 ± 11% sched_debug.cpu#1.sched_goidle
926939 ±122% -74.2% 238908 ± 6% sched_debug.cpu#1.ttwu_count
742260 ±155% -95.8% 31432 ± 1% sched_debug.cpu#1.ttwu_local
968.25 ± 11% -89.3% 104.00 ±102% sched_debug.cpu#2.nr_uninterruptible
63733 ± 9% +21.4% 77348 ± 12% sched_debug.cpu#2.sched_goidle
33525 ± 3% -13.7% 28932 ± 1% sched_debug.cpu#2.ttwu_local
1264 ± 17% -101.5% -18.50 ±-378% sched_debug.cpu#3.nr_uninterruptible

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/iterations/nr_threads/disk/fs/filesize/test_size/sync_method/nr_directories/nr_files_per_directory:
nhm4/fsmark/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1x/32t/1HDD/btrfs/9B/400M/fsyncBeforeClose/16d/256fpd

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
3294133 ± 2% -6.2% 3088824 ± 2% fsmark.app_overhead
463365 ± 1% -51.7% 223905 ± 7% fsmark.time.involuntary_context_switches
140.50 ± 1% +8.2% 152.00 ± 1% fsmark.time.percent_of_cpu_this_job_got
213.09 ± 1% +7.2% 228.37 ± 1% fsmark.time.system_time
4278018 ± 1% +3.6% 4432123 ± 1% fsmark.time.voluntary_context_switches
463365 ± 1% -51.7% 223905 ± 7% time.involuntary_context_switches
7.75 ± 5% +18.4% 9.17 ± 0% turbostat.CPU%c6
5214507 ± 0% -70.1% 1561193 ± 2% cpuidle.C1-NHM.usage
23195 ± 5% -97.2% 641.50 ± 5% cpuidle.POLL.usage
96711 ± 1% -51.9% 46555 ± 0% vmstat.system.cs
30013 ± 1% -87.8% 3649 ± 3% vmstat.system.in
5154 ± 4% -10.8% 4599 ± 3% slabinfo.btrfs_extent_buffer.active_objs
5154 ± 4% -10.8% 4599 ± 3% slabinfo.btrfs_extent_buffer.num_objs
1674 ± 7% -13.2% 1453 ± 5% slabinfo.buffer_head.active_objs
1680 ± 7% -13.1% 1459 ± 5% slabinfo.buffer_head.num_objs
1145192 ± 70% -100.0% 0.00 ± -1% latency_stats.avg.btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
137982 ± 2% +73.7% 239720 ± 75% latency_stats.hits.btrfs_tree_lock.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].insert_with_overflow.[btrfs].btrfs_insert_dir_item.[btrfs].btrfs_add_link.[btrfs].btrfs_create.[btrfs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open
1540569 ± 70% -100.0% 0.00 ± -1% latency_stats.max.btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
1.741e+08 ± 70% -100.0% 0.00 ± -1% latency_stats.sum.btrfs_log_inode_parent.[btrfs].btrfs_log_dentry_safe.[btrfs].btrfs_sync_file.[btrfs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
2427124 ± 2% +60.9% 3906177 ± 78% latency_stats.sum.btrfs_tree_lock.[btrfs].btrfs_search_slot.[btrfs].btrfs_insert_empty_items.[btrfs].insert_with_overflow.[btrfs].btrfs_insert_dir_item.[btrfs].btrfs_add_link.[btrfs].btrfs_create.[btrfs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open
47047 ± 9% +15.6% 54373 ± 2% sched_debug.cfs_rq[0]:/.min_vruntime
23933 ± 18% +24.7% 29834 ± 2% sched_debug.cfs_rq[0]:/.tg_load_avg
788.25 ± 74% +312.2% 3249 ± 37% sched_debug.cfs_rq[1]:/.blocked_load_avg
-297.49 ±-1471% +989.4% -3240 ±-11% sched_debug.cfs_rq[1]:/.spread0
23934 ± 18% +24.7% 29840 ± 2% sched_debug.cfs_rq[1]:/.tg_load_avg
815.00 ± 73% +303.0% 3284 ± 37% sched_debug.cfs_rq[1]:/.tg_load_contrib
42646 ± 8% +20.4% 51331 ± 2% sched_debug.cfs_rq[2]:/.min_vruntime
23848 ± 17% +23.9% 29555 ± 4% sched_debug.cfs_rq[2]:/.tg_load_avg
4349 ± 24% -41.6% 2541 ± 40% sched_debug.cfs_rq[3]:/.blocked_load_avg
119.25 ± 42% -86.6% 16.00 ±141% sched_debug.cfs_rq[3]:/.load
133.50 ± 32% -93.0% 9.33 ±141% sched_debug.cfs_rq[3]:/.runnable_load_avg
23820 ± 17% +23.6% 29446 ± 3% sched_debug.cfs_rq[3]:/.tg_load_avg
4486 ± 24% -41.7% 2617 ± 42% sched_debug.cfs_rq[3]:/.tg_load_contrib
23822 ± 17% +23.5% 29431 ± 3% sched_debug.cfs_rq[4]:/.tg_load_avg
43836 ± 2% +10.4% 48383 ± 2% sched_debug.cfs_rq[5]:/.min_vruntime
23750 ± 17% +23.9% 29419 ± 3% sched_debug.cfs_rq[5]:/.tg_load_avg
41692 ± 4% +15.9% 48332 ± 3% sched_debug.cfs_rq[6]:/.min_vruntime
23751 ± 17% +23.8% 29398 ± 3% sched_debug.cfs_rq[6]:/.tg_load_avg
42923 ± 6% +13.2% 48585 ± 3% sched_debug.cfs_rq[7]:/.min_vruntime
23749 ± 17% +23.7% 29373 ± 3% sched_debug.cfs_rq[7]:/.tg_load_avg
253.00 ± 69% -180.0% -202.50 ±-55% sched_debug.cpu#1.nr_uninterruptible
150.00 ± 43% -81.6% 27.67 ±141% sched_debug.cpu#3.cpu_load[0]
88.50 ± 32% -80.8% 17.00 ±136% sched_debug.cpu#3.cpu_load[1]
59.50 ± 30% -70.2% 17.75 ± 92% sched_debug.cpu#3.cpu_load[2]
46.50 ± 34% -53.2% 21.75 ± 55% sched_debug.cpu#3.cpu_load[3]
1208 ± 19% -91.8% 99.33 ±141% sched_debug.cpu#3.curr->pid
127.50 ± 49% -87.5% 16.00 ±141% sched_debug.cpu#3.load
1447 ± 34% -50.6% 715.00 ± 21% sched_debug.cpu#4.nr_uninterruptible
1029 ± 18% -38.7% 631.00 ± 10% sched_debug.cpu#5.nr_uninterruptible
1244821 ± 70% -70.6% 366365 ± 3% sched_debug.cpu#6.nr_switches
1136 ± 14% -60.6% 447.75 ± 15% sched_debug.cpu#6.nr_uninterruptible
1244958 ± 70% -70.6% 366498 ± 3% sched_debug.cpu#6.sched_count
845.25 ± 17% -30.1% 591.00 ± 7% sched_debug.cpu#7.nr_uninterruptible

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/iterations/nr_threads/disk/fs/filesize/test_size/sync_method/nr_directories/nr_files_per_directory:
nhm4/fsmark/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1x/32t/1HDD/f2fs/16MB/60G/fsyncBeforeClose/16d/256fpd

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
98844 ± 2% -75.2% 24496 ± 1% fsmark.time.involuntary_context_switches
98844 ± 2% -75.2% 24496 ± 1% time.involuntary_context_switches
22613 ± 0% -78.7% 4810 ± 0% vmstat.system.cs
9686 ± 0% -93.5% 631.00 ± 2% vmstat.system.in
1.703e+08 ± 3% -21.2% 1.342e+08 ± 5% cpuidle.C1-NHM.time
4972587 ± 0% -88.7% 560540 ± 2% cpuidle.C1-NHM.usage
2092 ± 5% -91.8% 171.75 ± 6% cpuidle.POLL.usage
3.44 ± 0% -11.9% 3.04 ± 0% turbostat.%Busy
112.00 ± 0% -12.9% 97.50 ± 0% turbostat.Avg_MHz
12.01 ± 1% -15.8% 10.12 ± 3% turbostat.CPU%c1
42309 ±125% -92.6% 3129 ± 72% latency_stats.avg.wait_on_page_bit.f2fs_wait_on_page_writeback.[f2fs].f2fs_wait_on_page_writeback.[f2fs].get_dnode_of_data.[f2fs].f2fs_reserve_block.[f2fs].f2fs_write_begin.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs].__vfs_write.vfs_write
153873 ±159% -90.8% 14192 ± 51% latency_stats.max.alloc_nid.[f2fs].f2fs_new_inode.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
1458401 ±133% -69.6% 444016 ± 59% latency_stats.sum.alloc_nid.[f2fs].f2fs_new_inode.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
20021 ± 29% +6098.0% 1240911 ±137% latency_stats.sum.wait_on_page_bit.f2fs_wait_on_page_writeback.[f2fs].f2fs_wait_on_page_writeback.[f2fs].__f2fs_add_link.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
13943 ±132% -53.5% 6479 ±100% latency_stats.sum.wait_on_page_bit.find_data_page.[f2fs].f2fs_find_entry.[f2fs].f2fs_lookup.[f2fs].lookup_real.path_openat.do_filp_open.do_sys_open.SyS_open.entry_SYSCALL_64_fastpath
4129 ± 27% -34.1% 2721 ± 21% sched_debug.cfs_rq[0]:/.avg->runnable_avg_sum
89.00 ± 27% -34.0% 58.75 ± 22% sched_debug.cfs_rq[0]:/.tg_runnable_contrib
30.50 ± 36% -59.0% 12.50 ± 74% sched_debug.cfs_rq[3]:/.runnable_load_avg
3952 ± 34% -41.6% 2306 ± 39% sched_debug.cfs_rq[6]:/.avg->runnable_avg_sum
85.25 ± 34% -41.9% 49.50 ± 40% sched_debug.cfs_rq[6]:/.tg_runnable_contrib
3552 ± 36% -44.5% 1971 ± 23% sched_debug.cfs_rq[7]:/.avg->runnable_avg_sum
207.00 ± 47% -72.9% 56.00 ± 93% sched_debug.cfs_rq[7]:/.load
76.50 ± 36% -45.1% 42.00 ± 23% sched_debug.cfs_rq[7]:/.tg_runnable_contrib
3.67 ± 89% +445.5% 20.00 ± 23% sched_debug.cpu#0.cpu_load[0]
4.75 ±102% +163.2% 12.50 ± 6% sched_debug.cpu#0.cpu_load[1]
16.50 ± 53% -78.8% 3.50 ±140% sched_debug.cpu#1.cpu_load[1]
12.25 ± 49% -77.6% 2.75 ± 39% sched_debug.cpu#1.cpu_load[4]
33933 ± 41% -54.9% 15287 ± 1% sched_debug.cpu#2.nr_load_updates
3238629 ± 74% -96.6% 108909 ± 12% sched_debug.cpu#2.nr_switches
3238752 ± 74% -96.6% 109056 ± 12% sched_debug.cpu#2.sched_count
1570598 ± 75% -97.8% 35193 ± 18% sched_debug.cpu#2.sched_goidle
1582367 ± 76% -98.6% 21962 ± 2% sched_debug.cpu#2.ttwu_local
3380681 ± 73% -96.1% 132270 ± 13% sched_debug.cpu#6.nr_switches
3380800 ± 73% -96.1% 132424 ± 13% sched_debug.cpu#6.sched_count
1648415 ± 74% -97.0% 50035 ± 17% sched_debug.cpu#6.sched_goidle
203.25 ± 47% -72.4% 56.00 ± 93% sched_debug.cpu#7.load

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/iterations/nr_threads/disk/fs/filesize/test_size/sync_method/nr_directories/nr_files_per_directory:
nhm4/fsmark/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1x/32t/1HDD/f2fs/5K/400M/fsyncBeforeClose/16d/256fpd

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
35663 ± 8% -44.4% 19828 ± 0% fsmark.time.involuntary_context_switches
13.00 ± 0% +7.7% 14.00 ± 0% fsmark.time.percent_of_cpu_this_job_got
645071 ± 0% +1.9% 657186 ± 0% fsmark.time.voluntary_context_switches
35663 ± 8% -44.4% 19828 ± 0% time.involuntary_context_switches
69916 ± 0% -83.1% 11789 ± 0% vmstat.system.cs
30442 ± 0% -96.3% 1137 ± 1% vmstat.system.in
3.49 ± 0% -37.3% 2.19 ± 0% turbostat.%Busy
110.50 ± 1% -42.8% 63.25 ± 0% turbostat.Avg_MHz
41.09 ± 1% -11.4% 36.40 ± 0% turbostat.CPU%c1
7.23 ± 4% +22.1% 8.83 ± 5% turbostat.CPU%c6
20352 ± 83% -59.3% 8281 ± 49% latency_stats.sum.call_rwsem_down_read_failed.f2fs_mkdir.[f2fs].vfs_mkdir.SyS_mkdir.entry_SYSCALL_64_fastpath
6985436 ± 1% +13.7% 7940904 ± 3% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].f2fs_write_begin.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs].__vfs_write.vfs_write.SyS_write
3675763 ± 1% +12.9% 4149821 ± 4% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].f2fs_reserve_block.[f2fs].f2fs_write_begin.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs].__vfs_write
5179210 ± 1% +13.6% 5882087 ± 2% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].f2fs_reserve_block.[f2fs].get_new_data_page.[f2fs].__f2fs_add_link.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open
412246 ± 0% +8.4% 446769 ± 2% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].write_data_page.[f2fs].do_write_data_page.[f2fs].f2fs_write_data_page.[f2fs].__f2fs_writepage.[f2fs].write_cache_pages.f2fs_write_data_pages.[f2fs].do_writepages.__filemap_fdatawrite_range.filemap_write_and_wait_range.f2fs_sync_file.[f2fs]
2.715e+08 ± 2% -11.1% 2.414e+08 ± 1% cpuidle.C1-NHM.time
4779213 ± 0% -93.2% 324049 ± 2% cpuidle.C1-NHM.usage
2.285e+08 ± 2% +13.2% 2.587e+08 ± 2% cpuidle.C6-NHM.time
105567 ± 1% +13.5% 119864 ± 2% cpuidle.C6-NHM.usage
31639 ± 11% -87.1% 4072 ± 57% cpuidle.POLL.time
8304 ± 10% -98.8% 101.00 ± 7% cpuidle.POLL.usage
-1817 ± -4% +37.1% -2491 ±-25% sched_debug.cfs_rq[3]:/.spread0
1313 ± 3% +12.7% 1480 ± 3% sched_debug.cfs_rq[5]:/.exec_clock
-1878 ± -5% +16.6% -2190 ± -1% sched_debug.cpu#0.nr_uninterruptible
375233 ±118% -90.3% 36371 ± 4% sched_debug.cpu#3.ttwu_count
503266 ± 30% +63.0% 820371 ± 8% sched_debug.cpu#5.avg_idle
15.50 ± 29% -59.7% 6.25 ± 70% sched_debug.cpu#5.cpu_load[3]
13.75 ± 18% -47.3% 7.25 ± 44% sched_debug.cpu#5.cpu_load[4]
743.00 ± 7% -18.3% 607.25 ± 5% sched_debug.cpu#5.nr_uninterruptible
774.25 ± 15% -21.1% 610.50 ± 6% sched_debug.cpu#6.nr_uninterruptible
23789 ± 58% -59.0% 9764 ± 2% sched_debug.cpu#7.nr_load_updates

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/iterations/nr_threads/disk/fs/filesize/test_size/sync_method/nr_directories/nr_files_per_directory:
nhm4/fsmark/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/1x/32t/1HDD/f2fs/8K/400M/fsyncBeforeClose/16d/256fpd

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
28399 ± 12% -57.0% 12208 ± 1% fsmark.time.involuntary_context_switches
12.50 ± 4% +12.0% 14.00 ± 0% fsmark.time.percent_of_cpu_this_job_got
400598 ± 0% +1.9% 408272 ± 0% fsmark.time.voluntary_context_switches
104101 ± 0% -88.9% 11556 ± 0% vmstat.system.cs
47821 ± 0% -97.6% 1125 ± 0% vmstat.system.in
27507 ± 3% -7.3% 25490 ± 3% meminfo.Active(anon)
27135 ± 3% -7.4% 25120 ± 3% meminfo.AnonPages
13576 ± 0% -11.3% 12048 ± 0% meminfo.Mapped
6878 ± 3% -7.4% 6370 ± 3% proc-vmstat.nr_active_anon
6796 ± 3% -7.5% 6288 ± 3% proc-vmstat.nr_anon_pages
3392 ± 0% -11.2% 3011 ± 0% proc-vmstat.nr_mapped
19880 ± 6% -17.3% 16446 ± 4% softirqs.RCU
17934 ± 2% -16.1% 15052 ± 4% softirqs.SCHED
29061 ± 3% -13.6% 25095 ± 6% softirqs.TIMER
28399 ± 12% -57.0% 12208 ± 1% time.involuntary_context_switches
11.65 ± 1% +12.1% 13.06 ± 0% time.system_time
0.49 ± 2% +20.7% 0.60 ± 3% time.user_time
1.821e+08 ± 1% -17.5% 1.502e+08 ± 2% cpuidle.C1-NHM.time
4650494 ± 0% -95.7% 199638 ± 1% cpuidle.C1-NHM.usage
1.391e+08 ± 1% +14.3% 1.59e+08 ± 1% cpuidle.C6-NHM.time
61904 ± 1% +20.1% 74370 ± 1% cpuidle.C6-NHM.usage
34499 ± 12% -90.5% 3270 ±113% cpuidle.POLL.time
8367 ± 16% -99.2% 64.00 ± 9% cpuidle.POLL.usage
4.26 ± 0% -48.8% 2.18 ± 0% turbostat.%Busy
139.00 ± 0% -54.7% 63.00 ± 0% turbostat.Avg_MHz
3255 ± 0% -11.1% 2892 ± 0% turbostat.Bzy_MHz
43.70 ± 1% -16.5% 36.48 ± 1% turbostat.CPU%c1
44.77 ± 1% +18.3% 52.95 ± 1% turbostat.CPU%c3
7.27 ± 2% +15.3% 8.38 ± 3% turbostat.CPU%c6
8144 ± 20% +112.7% 17320 ± 59% latency_stats.sum.call_rwsem_down_read_failed.f2fs_write_node_page.[f2fs].sync_node_pages.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
161602 ± 5% +27.7% 206366 ± 6% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].f2fs_write_node_page.[f2fs].sync_node_pages.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
1263747 ± 3% +19.4% 1508845 ± 4% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].new_node_page.[f2fs].new_inode_page.[f2fs].init_inode_metadata.[f2fs].__f2fs_add_link.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open
662165 ± 2% +17.5% 777739 ± 2% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].f2fs_convert_inline_inode.[f2fs].f2fs_write_begin.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs].__vfs_write.vfs_write
3956363 ± 1% +22.3% 4840014 ± 1% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].f2fs_write_begin.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs].__vfs_write.vfs_write.SyS_write
436691 ± 1% +15.4% 504042 ± 0% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].do_write_data_page.[f2fs].f2fs_write_data_page.[f2fs].__f2fs_writepage.[f2fs].write_cache_pages.f2fs_write_data_pages.[f2fs].do_writepages.__filemap_fdatawrite_range
2101052 ± 1% +20.7% 2536800 ± 2% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].f2fs_reserve_block.[f2fs].f2fs_write_begin.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs].__vfs_write
2931560 ± 2% +21.5% 3562381 ± 2% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].f2fs_reserve_block.[f2fs].get_new_data_page.[f2fs].__f2fs_add_link.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open
882333 ± 1% +18.1% 1042172 ± 1% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].get_dnode_of_data.[f2fs].get_read_data_page.[f2fs].find_data_page.[f2fs].f2fs_find_entry.[f2fs].f2fs_lookup.[f2fs].lookup_real.path_openat.do_filp_open
86763 ± 3% +24.8% 108290 ± 2% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].update_inode_page.[f2fs].__f2fs_add_link.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open
1293620 ± 2% +18.0% 1526759 ± 2% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].read_node_page.[f2fs].get_node_page.[f2fs].update_inode_page.[f2fs].f2fs_write_end.[f2fs].generic_perform_write.__generic_file_write_iter.generic_file_write_iter.f2fs_file_write_iter.[f2fs].__vfs_write.vfs_write
241754 ± 1% +14.7% 277276 ± 3% latency_stats.sum.call_rwsem_down_read_failed.get_node_info.[f2fs].write_data_page.[f2fs].do_write_data_page.[f2fs].f2fs_write_data_page.[f2fs].__f2fs_writepage.[f2fs].write_cache_pages.f2fs_write_data_pages.[f2fs].do_writepages.__filemap_fdatawrite_range.filemap_write_and_wait_range.f2fs_sync_file.[f2fs]
285314 ± 4% +31.5% 375080 ± 6% latency_stats.sum.call_rwsem_down_read_failed.need_dentry_mark.[f2fs].sync_node_pages.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
298289 ± 4% +36.2% 406142 ± 7% latency_stats.sum.call_rwsem_down_read_failed.need_inode_block_update.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
1885703 ± 2% +17.4% 2213119 ± 2% latency_stats.sum.call_rwsem_down_write_failed.get_node_info.[f2fs].new_node_page.[f2fs].new_inode_page.[f2fs].init_inode_metadata.[f2fs].__f2fs_add_link.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open
198811 ± 3% +34.9% 268119 ± 3% latency_stats.sum.call_rwsem_down_write_failed.set_node_addr.[f2fs].f2fs_write_node_page.[f2fs].sync_node_pages.[f2fs].f2fs_sync_file.[f2fs].vfs_fsync_range.do_fsync.SyS_fsync.entry_SYSCALL_64_fastpath
910989 ± 2% +17.8% 1072972 ± 2% latency_stats.sum.call_rwsem_down_write_failed.set_node_addr.[f2fs].new_node_page.[f2fs].new_inode_page.[f2fs].init_inode_metadata.[f2fs].__f2fs_add_link.[f2fs].f2fs_create.[f2fs].vfs_create.path_openat.do_filp_open.do_sys_open.SyS_open
363528 ± 0% +3.4% 375985 ± 0% latency_stats.sum.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
9590 ± 9% -20.1% 7663 ± 5% sched_debug.cfs_rq[0]:/.avg->runnable_avg_sum
211.25 ± 9% -21.1% 166.75 ± 6% sched_debug.cfs_rq[0]:/.tg_runnable_contrib
7864 ± 14% -22.2% 6117 ± 10% sched_debug.cfs_rq[1]:/.avg->runnable_avg_sum
172.00 ± 13% -22.7% 133.00 ± 11% sched_debug.cfs_rq[1]:/.tg_runnable_contrib
6243 ± 10% +32.8% 8292 ± 12% sched_debug.cfs_rq[4]:/.avg->runnable_avg_sum
1910 ± 14% +19.0% 2273 ± 4% sched_debug.cfs_rq[4]:/.min_vruntime
136.50 ± 11% +32.8% 181.25 ± 13% sched_debug.cfs_rq[4]:/.tg_runnable_contrib
726.92 ± 6% +13.6% 825.51 ± 5% sched_debug.cfs_rq[6]:/.exec_clock
1844 ± 9% +17.6% 2169 ± 6% sched_debug.cfs_rq[6]:/.min_vruntime
8.00 ± 45% +209.4% 24.75 ± 11% sched_debug.cpu#0.cpu_load[3]
9.00 ± 22% +141.7% 21.75 ± 8% sched_debug.cpu#0.cpu_load[4]
-701.75 ±-39% +56.6% -1098 ± -2% sched_debug.cpu#0.nr_uninterruptible
546261 ± 90% -85.3% 80139 ± 3% sched_debug.cpu#0.ttwu_count
483716 ±101% -95.6% 21310 ± 2% sched_debug.cpu#0.ttwu_local
547901 ±166% -96.5% 19272 ± 14% sched_debug.cpu#3.ttwu_count
24.00 ± 59% -64.6% 8.50 ± 55% sched_debug.cpu#4.cpu_load[3]
20.50 ± 32% -57.3% 8.75 ± 38% sched_debug.cpu#4.cpu_load[4]
13541 ± 61% -62.1% 5134 ± 3% sched_debug.cpu#4.nr_load_updates
1533535 ± 99% -97.1% 43964 ± 29% sched_debug.cpu#4.nr_switches
1533560 ± 99% -97.1% 43984 ± 29% sched_debug.cpu#4.sched_count
760621 ± 99% -97.5% 19182 ± 34% sched_debug.cpu#4.sched_goidle
750180 ±102% -99.8% 1305 ± 5% sched_debug.cpu#4.ttwu_local
414.75 ± 6% -23.3% 318.25 ± 5% sched_debug.cpu#6.nr_uninterruptible
20.25 ± 30% -56.8% 8.75 ± 74% sched_debug.cpu#7.cpu_load[4]

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/nr_threads/iterations/samples:
lituya/ftq/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/100%/20x/100000ss

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
7640 ± 1% +5.5% 8056 ± 0% ftq.counts
0.17 ± 11% -82.1% 0.03 ± 6% ftq.stddev
1806627 ± 4% -98.6% 25037 ± 32% ftq.time.involuntary_context_switches
1437 ± 2% +5.6% 1518 ± 0% ftq.time.percent_of_cpu_this_job_got
546.95 ± 0% +3.5% 566.01 ± 0% ftq.time.user_time
16653 ± 0% -13.2% 14452 ± 0% meminfo.Mapped
4163 ± 0% -13.3% 3611 ± 0% proc-vmstat.nr_mapped
9329 ± 4% -17.0% 7746 ± 9% softirqs.SCHED
1.20 ± 2% -52.7% 0.57 ± 12% turbostat.CPU%c1
330.20 ± 5% -7.2% 306.54 ± 4% uptime.idle
96844 ± 0% -97.0% 2925 ± 4% vmstat.system.cs
61693 ± 0% -76.0% 14816 ± 1% vmstat.system.in
1806627 ± 4% -98.6% 25037 ± 32% time.involuntary_context_switches
2.15 ± 8% -50.9% 1.06 ± 1% time.system_time
820.50 ± 33% +45.4% 1192 ± 3% time.voluntary_context_switches
1857689 ± 17% -85.0% 278914 ± 73% cpuidle.C1-HSW.time
110169 ± 31% -99.3% 786.25 ± 4% cpuidle.C1-HSW.usage
779.50 ± 69% +78.4% 1390 ± 4% cpuidle.C3-HSW.usage
2663 ± 41% -48.2% 1380 ± 1% cpuidle.C6-HSW.usage
193.50 ± 95% -99.6% 0.75 ±173% cpuidle.POLL.time
125.00 ± 94% -99.8% 0.25 ±173% cpuidle.POLL.usage
6.25 ± 17% -36.0% 4.00 ± 17% sched_debug.cfs_rq[0]:/.nr_spread_over
118.31 ± 19% +50.0% 177.51 ± 18% sched_debug.cfs_rq[11]:/.exec_clock
1410 ± 23% -32.8% 948.03 ± 23% sched_debug.cfs_rq[14]:/.min_vruntime
16706 ± 38% -72.2% 4641 ± 40% sched_debug.cfs_rq[15]:/.avg->runnable_avg_sum
367.50 ± 38% -72.2% 102.00 ± 41% sched_debug.cfs_rq[15]:/.tg_runnable_contrib
6553 ± 29% -34.8% 4269 ± 12% sched_debug.cfs_rq[2]:/.avg->runnable_avg_sum
143.25 ± 30% -35.1% 93.00 ± 12% sched_debug.cfs_rq[2]:/.tg_runnable_contrib
1304 ± 17% +43.7% 1873 ± 28% sched_debug.cfs_rq[3]:/.min_vruntime
3651 ±139% +234.7% 12223 ± 54% sched_debug.cfs_rq[6]:/.blocked_load_avg
3651 ±139% +240.9% 12449 ± 53% sched_debug.cfs_rq[6]:/.tg_load_contrib
1316 ± 70% +500.7% 7907 ± 51% sched_debug.cfs_rq[8]:/.blocked_load_avg
1330 ± 71% +494.4% 7907 ± 51% sched_debug.cfs_rq[8]:/.tg_load_contrib
111.43 ± 6% -14.1% 95.77 ± 7% sched_debug.cfs_rq[9]:/.exec_clock
238565 ± 59% +271.1% 885357 ± 15% sched_debug.cpu#0.avg_idle
5.75 ± 85% +152.2% 14.50 ± 55% sched_debug.cpu#0.cpu_load[3]
3.25 ± 95% +1215.4% 42.75 ± 78% sched_debug.cpu#1.cpu_load[1]
5.25 ± 78% -119.0% -1.00 ±-300% sched_debug.cpu#10.nr_uninterruptible
2.50 ± 34% +230.0% 8.25 ± 40% sched_debug.cpu#12.nr_uninterruptible
468.25 ± 12% -21.3% 368.50 ± 8% sched_debug.cpu#13.sched_goidle
1.25 ±131% -420.0% -4.00 ±-20% sched_debug.cpu#14.nr_uninterruptible
73.25 ± 4% +39.6% 102.25 ± 13% sched_debug.cpu#14.ttwu_local
-3.50 ±-76% -228.6% 4.50 ± 57% sched_debug.cpu#15.nr_uninterruptible
319447 ± 52% +169.7% 861647 ± 13% sched_debug.cpu#2.avg_idle
3181 ± 38% -65.7% 1090 ± 20% sched_debug.cpu#4.nr_load_updates
787.75 ± 15% -23.5% 602.50 ± 6% sched_debug.cpu#5.sched_goidle
4.25 ± 45% +494.1% 25.25 ± 41% sched_debug.cpu#8.cpu_load[3]
9.00 ± 54% +200.0% 27.00 ± 27% sched_debug.cpu#8.cpu_load[4]
2045 ± 34% -41.3% 1201 ± 14% sched_debug.cpu#8.nr_switches
2048 ± 33% -41.2% 1203 ± 14% sched_debug.cpu#8.sched_count
789.00 ± 24% -47.5% 414.25 ± 26% sched_debug.cpu#8.sched_goidle
20.59 ± 92% -74.0% 5.36 ± 12% sched_debug.rt_rq[0]:/.rt_time

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/nr_threads/iterations/samples:
lituya/ftq/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/powersave/100%/20x/100000ss

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
7580 ± 0% +6.1% 8044 ± 0% ftq.counts
0.18 ± 7% -82.1% 0.03 ± 8% ftq.stddev
1761823 ± 3% -98.3% 29477 ± 38% ftq.time.involuntary_context_switches
1454 ± 1% +4.3% 1517 ± 0% ftq.time.percent_of_cpu_this_job_got
547.59 ± 0% +3.4% 566.04 ± 0% ftq.time.user_time
562.25 ± 23% +96.8% 1106 ± 5% ftq.time.voluntary_context_switches
16809 ± 0% -13.8% 14482 ± 0% meminfo.Mapped
4198 ± 0% -13.7% 3623 ± 0% proc-vmstat.nr_mapped
484.00 ± 6% +18.2% 572.00 ± 7% slabinfo.blkdev_requests.active_objs
484.00 ± 6% +18.2% 572.00 ± 7% slabinfo.blkdev_requests.num_objs
1.17 ± 2% -50.9% 0.57 ± 8% turbostat.CPU%c1
0.21 ±127% +215.5% 0.66 ± 33% turbostat.RAMWatt
96716 ± 0% -96.9% 2979 ± 10% vmstat.system.cs
61578 ± 0% -75.7% 14933 ± 1% vmstat.system.in
1761823 ± 3% -98.3% 29477 ± 38% time.involuntary_context_switches
1.52 ± 5% -28.3% 1.09 ± 2% time.system_time
562.25 ± 23% +96.8% 1106 ± 5% time.voluntary_context_switches
1633723 ± 55% -89.4% 172951 ± 3% cpuidle.C1-HSW.time
124268 ± 20% -99.4% 713.50 ± 8% cpuidle.C1-HSW.usage
104432 ± 22% +110.9% 220296 ± 63% cpuidle.C1E-HSW.time
552.75 ± 33% +33.6% 738.50 ± 3% cpuidle.C1E-HSW.usage
23.00 ± 43% -100.0% 0.00 ± 0% cpuidle.POLL.time
11.25 ± 51% -100.0% 0.00 ± 0% cpuidle.POLL.usage
49106 ± 3% +32.3% 64986 ± 8% sched_debug.cfs_rq[0]:/.tg_load_avg
47692 ± 6% +35.4% 64593 ± 8% sched_debug.cfs_rq[10]:/.tg_load_avg
47692 ± 6% +35.4% 64593 ± 8% sched_debug.cfs_rq[11]:/.tg_load_avg
47692 ± 6% +35.4% 64593 ± 8% sched_debug.cfs_rq[12]:/.tg_load_avg
47692 ± 6% +35.4% 64593 ± 8% sched_debug.cfs_rq[13]:/.tg_load_avg
860.75 ±116% +570.6% 5772 ± 87% sched_debug.cfs_rq[14]:/.blocked_load_avg
47692 ± 6% +35.4% 64593 ± 8% sched_debug.cfs_rq[14]:/.tg_load_avg
860.75 ±116% +617.8% 6178 ± 92% sched_debug.cfs_rq[14]:/.tg_load_contrib
2268 ± 20% -42.9% 1296 ± 24% sched_debug.cfs_rq[15]:/.min_vruntime
47692 ± 6% +35.4% 64593 ± 8% sched_debug.cfs_rq[15]:/.tg_load_avg
48510 ± 5% +34.0% 64986 ± 8% sched_debug.cfs_rq[1]:/.tg_load_avg
48510 ± 5% +33.9% 64934 ± 8% sched_debug.cfs_rq[2]:/.tg_load_avg
48510 ± 5% +33.9% 64934 ± 8% sched_debug.cfs_rq[3]:/.tg_load_avg
48510 ± 5% +33.9% 64934 ± 8% sched_debug.cfs_rq[4]:/.tg_load_avg
48494 ± 5% +33.9% 64916 ± 8% sched_debug.cfs_rq[5]:/.tg_load_avg
48494 ± 5% +33.9% 64916 ± 8% sched_debug.cfs_rq[6]:/.tg_load_avg
48455 ± 5% +33.9% 64905 ± 8% sched_debug.cfs_rq[7]:/.tg_load_avg
0.50 ±100% +300.0% 2.00 ± 0% sched_debug.cfs_rq[8]:/.nr_spread_over
48404 ± 5% +34.1% 64905 ± 8% sched_debug.cfs_rq[8]:/.tg_load_avg
47692 ± 6% +35.5% 64637 ± 8% sched_debug.cfs_rq[9]:/.tg_load_avg
12.00 ± 27% -52.1% 5.75 ± 43% sched_debug.cpu#1.nr_uninterruptible
347.00 ± 7% +42.0% 492.75 ± 20% sched_debug.cpu#10.sched_goidle
518.00 ± 8% +24.0% 642.50 ± 6% sched_debug.cpu#11.sched_goidle
9.25 ± 62% +278.4% 35.00 ± 67% sched_debug.cpu#12.cpu_load[4]
5.75 ± 74% -100.0% 0.00 ± 0% sched_debug.cpu#14.nr_uninterruptible
1.00 ± 70% +2800.0% 29.00 ± 88% sched_debug.cpu#15.cpu_load[2]
3.00 ± 91% +608.3% 21.25 ± 62% sched_debug.cpu#15.cpu_load[3]
4.00 ±117% +275.0% 15.00 ± 54% sched_debug.cpu#15.cpu_load[4]
3.00 ±131% +91.7% 5.75 ±125% sched_debug.cpu#2.nr_uninterruptible
2443 ± 35% -46.9% 1296 ± 14% sched_debug.cpu#3.nr_load_updates
6257 ± 21% -47.1% 3309 ± 36% sched_debug.cpu#3.nr_switches
6262 ± 21% -47.1% 3315 ± 36% sched_debug.cpu#3.sched_count
2859 ± 24% -54.1% 1313 ± 35% sched_debug.cpu#3.sched_goidle
3022 ± 19% -46.9% 1604 ± 19% sched_debug.cpu#3.ttwu_count
1491 ± 55% -78.3% 323.00 ± 69% sched_debug.cpu#3.ttwu_local
994307 ± 0% -27.9% 716731 ± 21% sched_debug.cpu#6.avg_idle

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/nr_threads/iterations/samples:
lituya/fwq/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/100%/20x/100000ss

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
0.11 ± 3% -43.2% 0.06 ± 1% fwq.stddev
3230702 ± 1% -86.7% 430932 ± 3% fwq.time.involuntary_context_switches
159977 ± 11% -25.2% 119739 ± 0% latency_stats.sum.do_wait.SyS_wait4.entry_SYSCALL_64_fastpath
3230702 ± 1% -86.7% 430932 ± 3% time.involuntary_context_switches
1.28 ± 35% -32.7% 0.86 ± 2% time.system_time
0.21 ± 42% -70.6% 0.06 ± 17% turbostat.CPU%c1
0.73 ± 23% -31.1% 0.51 ± 15% turbostat.CPU%c6
16514 ± 0% -79.4% 3404 ± 2% vmstat.system.cs
23255 ± 0% -28.0% 16754 ± 0% vmstat.system.in
1081329 ± 53% -93.7% 67884 ± 33% cpuidle.C1-HSW.time
45064 ± 0% -99.4% 278.25 ± 22% cpuidle.C1-HSW.usage
64741983 ± 8% -39.7% 39054828 ± 14% cpuidle.C6-HSW.time
2214 ± 19% -45.3% 1211 ± 11% cpuidle.C6-HSW.usage
4.50 ± 74% -100.0% 0.00 ± -1% cpuidle.POLL.time
2.50 ± 60% -100.0% 0.00 ± -1% cpuidle.POLL.usage
152.75 ±127% +449.4% 839.25 ± 60% sched_debug.cfs_rq[10]:/.blocked_load_avg
204.75 ± 94% +339.8% 900.50 ± 56% sched_debug.cfs_rq[10]:/.tg_load_contrib
-73145 ±-135% -141.7% 30484 ± 48% sched_debug.cfs_rq[14]:/.spread0
51.50 ± 0% +14.1% 58.75 ± 9% sched_debug.cpu#0.cpu_load[1]
51.75 ± 1% +13.0% 58.50 ± 7% sched_debug.cpu#0.cpu_load[2]
51.75 ± 1% +11.1% 57.50 ± 6% sched_debug.cpu#0.cpu_load[3]
11.25 ± 9% -64.4% 4.00 ± 63% sched_debug.cpu#11.nr_uninterruptible
-7.50 ±-35% -153.3% 4.00 ± 93% sched_debug.cpu#2.nr_uninterruptible
1419 ± 26% +119.7% 3117 ± 22% sched_debug.cpu#3.sched_goidle
124974 ± 61% -72.4% 34432 ±108% sched_debug.cpu#5.nr_switches
4.50 ± 71% -177.8% -3.50 ±-123% sched_debug.cpu#5.nr_uninterruptible
125045 ± 61% -72.4% 34489 ±108% sched_debug.cpu#5.sched_count
47153 ± 75% -52.2% 22527 ±124% sched_debug.cpu#5.ttwu_count
33719 ± 76% -70.3% 10005 ±131% sched_debug.cpu#5.ttwu_local
53.75 ± 4% +18.1% 63.50 ± 18% sched_debug.cpu#7.cpu_load[1]
5925 ± 34% +483.7% 34588 ± 82% sched_debug.cpu#8.sched_count
56.25 ± 8% -10.2% 50.50 ± 0% sched_debug.cpu#9.cpu_load[1]
58.75 ± 10% -13.2% 51.00 ± 1% sched_debug.cpu#9.cpu_load[2]
61.50 ± 14% -16.3% 51.50 ± 2% sched_debug.cpu#9.cpu_load[3]
63.00 ± 17% -18.3% 51.50 ± 2% sched_debug.cpu#9.cpu_load[4]

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/nr_threads/iterations/samples:
lituya/fwq/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/powersave/100%/20x/100000ss

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
0.11 ± 2% -41.0% 0.06 ± 1% fwq.stddev
3249189 ± 1% -86.8% 428991 ± 8% fwq.time.involuntary_context_switches
0.19 ± 24% -58.1% 0.08 ± 10% turbostat.CPU%c1
3249189 ± 1% -86.8% 428991 ± 8% time.involuntary_context_switches
1.81 ± 12% -48.6% 0.93 ± 2% time.system_time
16527 ± 1% -79.2% 3430 ± 3% vmstat.system.cs
23258 ± 0% -28.0% 16746 ± 0% vmstat.system.in
41773 ± 2% -99.2% 318.75 ± 13% cpuidle.C1-HSW.usage
61756882 ± 15% -30.4% 43002599 ± 8% cpuidle.C6-HSW.time
2117 ± 9% -33.5% 1407 ± 3% cpuidle.C6-HSW.usage
66.00 ± 26% -21.6% 51.75 ± 5% sched_debug.cfs_rq[0]:/.load
0.75 ± 57% +633.3% 5.50 ± 58% sched_debug.cfs_rq[13]:/.nr_spread_over
181.50 ±139% +229.6% 598.25 ± 44% sched_debug.cfs_rq[15]:/.blocked_load_avg
239.00 ±104% +175.8% 659.25 ± 42% sched_debug.cfs_rq[15]:/.tg_load_contrib
893.00 ± 0% +14.5% 1022 ± 12% sched_debug.cfs_rq[1]:/.utilization_load_avg
-66095 ±-174% -144.5% 29384 ± 74% sched_debug.cfs_rq[4]:/.spread0
-48436 ±-144% -146.3% 22425 ± 36% sched_debug.cfs_rq[5]:/.spread0
66.00 ± 26% -21.6% 51.75 ± 5% sched_debug.cpu#0.load
807.00 ± 0% +7.3% 866.00 ± 6% sched_debug.cpu#11.curr->pid
4.25 ± 76% -84.3% 0.67 ±604% sched_debug.cpu#14.nr_uninterruptible
370327 ±158% -95.9% 15153 ± 80% sched_debug.cpu#15.nr_switches
370400 ±158% -95.9% 15239 ± 80% sched_debug.cpu#15.sched_count
186416 ±158% -97.0% 5614 ± 81% sched_debug.cpu#15.ttwu_count
-4.00 ±-81% -187.5% 3.50 ± 24% sched_debug.cpu#5.nr_uninterruptible
0.00 ± 71% +10603.7% 0.19 ± 13% sched_debug.rt_rq[2]:/.rt_time

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/runtime:
wsm/ku-latency/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/300s

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
1207 ± 4% -27.5% 876.00 ± 22% proc-vmstat.pgactivate
26029 ± 2% -21.2% 20524 ± 2% softirqs.SCHED
12.52 ± 5% -19.2% 10.12 ± 9% turbostat.CPU%c1
46.75 ± 2% -7.0% 43.50 ± 3% turbostat.CoreTmp
31128 ± 0% -92.1% 2449 ± 1% vmstat.system.cs
15692 ± 0% -92.3% 1214 ± 0% vmstat.system.in
5760097 ±125% -100.0% 0.00 ± -1% latency_stats.avg.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
5760097 ±125% -100.0% 0.00 ± -1% latency_stats.max.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
5760097 ±125% -100.0% 0.00 ± -1% latency_stats.sum.nfs_wait_on_request.nfs_updatepage.nfs_write_end.generic_perform_write.__generic_file_write_iter.generic_file_write_iter.nfs_file_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
55981055 ± 4% -65.3% 19428998 ± 21% cpuidle.C1-NHM.time
4373782 ± 0% -99.5% 22463 ± 9% cpuidle.C1-NHM.usage
312.50 ± 78% -72.3% 86.50 ± 14% cpuidle.C1E-NHM.usage
2690 ± 28% -37.2% 1689 ± 16% cpuidle.C3-NHM.usage
1071 ± 23% -80.9% 204.25 ± 63% cpuidle.POLL.usage
97.00 ± 45% -83.8% 15.67 ± 70% sched_debug.cfs_rq[10]:/.runnable_load_avg
425.75 ± 34% -74.4% 109.00 ± 70% sched_debug.cfs_rq[10]:/.utilization_load_avg
-6567 ±-65% -1590.9% 97909 ±167% sched_debug.cfs_rq[11]:/.spread0
85.25 ± 74% +2038.1% 1822 ± 58% sched_debug.cfs_rq[2]:/.blocked_load_avg
217733 ± 95% -57.1% 93407 ±164% sched_debug.cfs_rq[2]:/.min_vruntime
209.25 ± 66% +813.3% 1911 ± 58% sched_debug.cfs_rq[2]:/.tg_load_contrib
2123 ± 49% -68.8% 662.75 ±100% sched_debug.cfs_rq[4]:/.blocked_load_avg
86.00 ± 53% -100.0% 0.00 ± -1% sched_debug.cfs_rq[4]:/.load
58.50 ± 51% -100.0% 0.00 ± -1% sched_debug.cfs_rq[4]:/.runnable_load_avg
2197 ± 48% -69.5% 671.25 ± 98% sched_debug.cfs_rq[4]:/.tg_load_contrib
300.25 ± 48% -100.0% 0.00 ± -1% sched_debug.cfs_rq[4]:/.utilization_load_avg
3463 ± 55% -48.3% 1789 ± 4% sched_debug.cfs_rq[6]:/.min_vruntime
1227502 ± 99% -98.3% 20386 ± 42% sched_debug.cpu#0.ttwu_count
62332 ± 85% -77.9% 13783 ± 3% sched_debug.cpu#1.nr_load_updates
14318 ± 18% +37.4% 19675 ± 19% sched_debug.cpu#1.ttwu_count
4361 ± 11% -20.1% 3483 ± 7% sched_debug.cpu#1.ttwu_local
146.25 ± 33% -82.2% 26.00 ± 13% sched_debug.cpu#10.cpu_load[0]
79.00 ± 36% -81.4% 14.67 ± 21% sched_debug.cpu#10.cpu_load[1]
44.00 ± 36% -83.5% 7.25 ± 58% sched_debug.cpu#10.cpu_load[2]
25.50 ± 37% -77.5% 5.75 ± 33% sched_debug.cpu#10.cpu_load[3]
15.00 ± 41% -70.0% 4.50 ± 24% sched_debug.cpu#10.cpu_load[4]
89157 ± 69% -47.0% 47258 ±126% sched_debug.cpu#2.nr_load_updates
9.50 ± 33% -76.3% 2.25 ±148% sched_debug.cpu#4.cpu_load[3]
7.25 ± 36% -82.8% 1.25 ±131% sched_debug.cpu#4.cpu_load[4]
1030 ± 36% -100.0% 0.00 ± -1% sched_debug.cpu#4.curr->pid
86.00 ± 53% -100.0% 0.00 ± -1% sched_debug.cpu#4.load
850245 ± 5% +11.7% 949380 ± 3% sched_debug.cpu#5.avg_idle
3578 ± 8% -48.9% 1830 ± 64% sched_debug.cpu#5.ttwu_local
925565 ± 7% -17.7% 761616 ± 6% sched_debug.cpu#6.avg_idle
649243 ± 95% -98.0% 12905 ± 31% sched_debug.cpu#6.ttwu_count
2529 ± 49% -63.9% 911.75 ± 83% sched_debug.cpu#9.ttwu_local

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/runtime/nr_threads/cluster/test:
lkp-t410/netperf/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/300s/200%/cs-localhost/TCP_SENDFILE

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
4404 ± 0% +6.5% 4689 ± 0% netperf.Throughput_Mbps
1902815 ± 4% -98.0% 38389 ± 9% netperf.time.involuntary_context_switches
208.00 ± 0% +2.3% 212.75 ± 0% netperf.time.percent_of_cpu_this_job_got
610.08 ± 0% +1.6% 619.92 ± 0% netperf.time.system_time
291566 ± 0% +12.0% 326523 ± 0% netperf.time.voluntary_context_switches
1.03 ± 4% +10.4% 1.14 ± 3% perf-profile.cpu-cycles.rw_verify_area.do_splice_to.splice_direct_to_actor.do_splice_direct.do_sendfile
2545 ± 2% -17.3% 2103 ± 1% proc-vmstat.pgactivate
0.21 ± 3% -95.2% 0.01 ± 0% turbostat.CPU%c1
0.22 ± 5% +146.0% 0.54 ± 3% turbostat.CPU%c6
21522 ± 1% -76.5% 5056 ± 0% vmstat.system.cs
11704 ± 1% -62.7% 4363 ± 0% vmstat.system.in
1902815 ± 4% -98.0% 38389 ± 9% time.involuntary_context_switches
17.50 ± 0% +19.8% 20.97 ± 0% time.user_time
291566 ± 0% +12.0% 326523 ± 0% time.voluntary_context_switches
291365 ± 0% +12.0% 326363 ± 0% latency_stats.hits.sk_stream_wait_memory.tcp_sendpage.inet_sendpage.kernel_sendpage.sock_sendpage.pipe_to_sendpage.__splice_from_pipe.splice_from_pipe.generic_splice_sendpage.direct_splice_actor.splice_direct_to_actor.do_splice_direct
975206 ± 4% -61.5% 374990 ± 1% latency_stats.hits.sk_wait_data.tcp_recvmsg.inet_recvmsg.sock_recvmsg.SYSC_recvfrom.SyS_recvfrom.entry_SYSCALL_64_fastpath
52419094 ± 1% +7.2% 56170711 ± 0% latency_stats.sum.sk_stream_wait_memory.tcp_sendpage.inet_sendpage.kernel_sendpage.sock_sendpage.pipe_to_sendpage.__splice_from_pipe.splice_from_pipe.generic_splice_sendpage.direct_splice_actor.splice_direct_to_actor.do_splice_direct
1.009e+09 ± 0% +5.3% 1.062e+09 ± 0% latency_stats.sum.sk_wait_data.tcp_recvmsg.inet_recvmsg.sock_recvmsg.SYSC_recvfrom.SyS_recvfrom.entry_SYSCALL_64_fastpath
794782 ± 8% -99.4% 4498 ± 40% cpuidle.C1-NHM.time
104769 ± 2% -99.8% 166.25 ± 31% cpuidle.C1-NHM.usage
482872 ± 14% -32.6% 325328 ± 28% cpuidle.C3-NHM.time
4632646 ± 1% +44.6% 6697379 ± 1% cpuidle.C6-NHM.time
337.25 ± 8% +35.4% 456.75 ± 9% cpuidle.C6-NHM.usage
1156 ± 15% -100.0% 0.00 ± 0% cpuidle.POLL.time
180.50 ± 17% -100.0% 0.00 ± 0% cpuidle.POLL.usage
505.75 ± 19% -25.4% 377.25 ± 7% sched_debug.cfs_rq[0]:/.load
443.50 ± 5% -13.0% 386.00 ± 6% sched_debug.cfs_rq[0]:/.runnable_load_avg
6167 ± 13% +18.1% 7285 ± 16% sched_debug.cfs_rq[0]:/.tg_load_avg
6057 ± 13% +17.6% 7121 ± 15% sched_debug.cfs_rq[1]:/.tg_load_avg
431.50 ± 14% +228.2% 1416 ± 5% sched_debug.cfs_rq[2]:/.blocked_load_avg
33.75 ± 11% -18.5% 27.50 ± 11% sched_debug.cfs_rq[2]:/.nr_spread_over
818.50 ± 6% +118.3% 1786 ± 5% sched_debug.cfs_rq[2]:/.tg_load_contrib
301.25 ± 14% +38.5% 417.25 ± 16% sched_debug.cfs_rq[3]:/.load
5609 ± 10% +21.4% 6807 ± 16% sched_debug.cfs_rq[3]:/.tg_load_avg
607848 ± 32% +35.5% 823868 ± 6% sched_debug.cpu#0.avg_idle
435380 ± 40% -52.0% 208833 ± 35% sched_debug.cpu#0.nr_switches
435948 ± 40% -52.0% 209079 ± 35% sched_debug.cpu#0.sched_count
1118781 ±122% -82.2% 198660 ± 26% sched_debug.cpu#1.nr_switches
1119297 ±122% -82.2% 198883 ± 26% sched_debug.cpu#1.sched_count
649029 ±121% -77.9% 143430 ± 19% sched_debug.cpu#1.ttwu_count
594549 ±120% -77.7% 132447 ± 20% sched_debug.cpu#1.ttwu_local
273528 ± 94% +214.0% 858787 ± 11% sched_debug.cpu#2.avg_idle
2797014 ± 51% -92.5% 209149 ± 21% sched_debug.cpu#2.nr_switches
2797284 ± 51% -92.5% 209354 ± 21% sched_debug.cpu#2.sched_count
1635953 ± 51% -90.9% 148709 ± 15% sched_debug.cpu#2.ttwu_count
1457289 ± 50% -90.5% 137841 ± 15% sched_debug.cpu#2.ttwu_local
331.25 ± 11% +26.0% 417.50 ± 15% sched_debug.cpu#3.load
617159 ± 28% -70.0% 185105 ± 33% sched_debug.cpu#3.nr_switches
617564 ± 28% -70.0% 185318 ± 33% sched_debug.cpu#3.sched_count
271272 ± 20% -49.9% 135909 ± 21% sched_debug.cpu#3.ttwu_count
254215 ± 19% -50.3% 126227 ± 22% sched_debug.cpu#3.ttwu_local

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/test/cluster:
wsm/netpipe/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/tcp/cs-localhost

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
9.39 ± 1% -7.6% 8.68 ± 1% netpipe.less_8K_usec.avg
17994 ± 3% -19.9% 14406 ± 5% softirqs.RCU
0.29 ± 4% -13.7% 0.25 ± 6% time.user_time
106713 ± 1% -67.9% 34294 ± 4% vmstat.system.cs
37772 ± 0% -95.5% 1693 ± 0% vmstat.system.in
87490550 ± 4% -54.1% 40120043 ± 2% cpuidle.C1-NHM.time
6115692 ± 1% -72.2% 1698584 ± 4% cpuidle.C1-NHM.usage
2583 ± 5% -93.4% 170.00 ± 23% cpuidle.POLL.usage
16.39 ± 3% -10.9% 14.59 ± 2% turbostat.%Busy
585.25 ± 2% -9.7% 528.25 ± 2% turbostat.Avg_MHz
26.28 ± 2% -25.9% 19.47 ± 2% turbostat.CPU%c1
56.33 ± 0% +14.6% 64.54 ± 1% turbostat.CPU%c6
2320 ± 6% -10.0% 2087 ± 2% sched_debug.cfs_rq[0]:/.tg->runnable_avg
2328 ± 5% -9.6% 2104 ± 2% sched_debug.cfs_rq[10]:/.tg->runnable_avg
2329 ± 5% -9.4% 2109 ± 2% sched_debug.cfs_rq[11]:/.tg->runnable_avg
8.50 ± 50% -64.7% 3.00 ±-33% sched_debug.cfs_rq[1]:/.nr_spread_over
3012 ±687% +900.1% 30128 ±106% sched_debug.cfs_rq[1]:/.spread0
2326 ± 6% -10.2% 2088 ± 2% sched_debug.cfs_rq[1]:/.tg->runnable_avg
2326 ± 6% -10.0% 2094 ± 2% sched_debug.cfs_rq[2]:/.tg->runnable_avg
2327 ± 6% -9.9% 2096 ± 2% sched_debug.cfs_rq[3]:/.tg->runnable_avg
2330 ± 6% -10.0% 2096 ± 2% sched_debug.cfs_rq[4]:/.tg->runnable_avg
2326 ± 6% -9.8% 2097 ± 2% sched_debug.cfs_rq[5]:/.tg->runnable_avg
2326 ± 6% -9.6% 2102 ± 2% sched_debug.cfs_rq[6]:/.tg->runnable_avg
34300 ± 99% -96.8% 1105 ± 16% sched_debug.cfs_rq[7]:/.min_vruntime
2326 ± 6% -9.6% 2102 ± 2% sched_debug.cfs_rq[7]:/.tg->runnable_avg
2326 ± 6% -9.6% 2103 ± 2% sched_debug.cfs_rq[8]:/.tg->runnable_avg
2328 ± 5% -9.7% 2103 ± 2% sched_debug.cfs_rq[9]:/.tg->runnable_avg
29.00 ±141% +330.2% 124.75 ± 65% sched_debug.cpu#2.cpu_load[1]
520151 ±141% -87.1% 66855 ± 50% sched_debug.cpu#2.ttwu_count
52023 ± 71% -88.3% 6101 ± 92% sched_debug.cpu#6.nr_switches
52035 ± 71% -88.3% 6112 ± 92% sched_debug.cpu#6.sched_count
25756 ± 72% -89.0% 2837 ±100% sched_debug.cpu#6.sched_goidle

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/group:
lkp-t410/piglit/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/igt-044

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
83.99 ± 0% -14.7% 71.66 ± 0% piglit.time.elapsed_time
83.99 ± 0% -14.7% 71.66 ± 0% piglit.time.elapsed_time.max
3034690 ± 0% -97.2% 85516 ± 2% piglit.time.involuntary_context_switches
352.25 ± 0% +8.9% 383.75 ± 0% piglit.time.percent_of_cpu_this_job_got
285.25 ± 0% -5.8% 268.60 ± 0% piglit.time.user_time
16.75 ± 13% -25.4% 12.50 ± 12% vmstat.procs.r
85019 ± 0% -86.1% 11835 ± 3% vmstat.system.cs
42758 ± 0% -88.2% 5029 ± 0% vmstat.system.in
83.99 ± 0% -14.7% 71.66 ± 0% time.elapsed_time
83.99 ± 0% -14.7% 71.66 ± 0% time.elapsed_time.max
3034690 ± 0% -97.2% 85516 ± 2% time.involuntary_context_switches
11.15 ± 0% -40.4% 6.65 ± 0% time.system_time

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/nr_threads/blocksize:
lkp-sb02/pigz/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/100%/512K

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
65777721 ± 0% +1.5% 66758347 ± 0% pigz.throughput
4731453 ± 0% -95.3% 223426 ± 2% pigz.time.involuntary_context_switches
17391 ± 1% +2.3% 17800 ± 0% pigz.time.minor_page_faults
394.00 ± 0% +1.0% 398.00 ± 0% pigz.time.percent_of_cpu_this_job_got
1170 ± 0% +1.3% 1185 ± 0% pigz.time.user_time
244236 ± 0% +2.8% 250954 ± 0% pigz.time.voluntary_context_switches
34446 ± 32% -99.6% 129.50 ± 60% latency_stats.sum.pipe_read.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
13004 ± 2% -6.2% 12197 ± 4% meminfo.AnonHugePages
4731453 ± 0% -95.3% 223426 ± 2% time.involuntary_context_switches
13.98 ± 0% -13.6% 12.07 ± 0% time.system_time
0.06 ± 9% -81.8% 0.01 ± 0% turbostat.CPU%c1
0.14 ± 3% +27.3% 0.18 ± 2% turbostat.CPU%c6
33869 ± 0% -89.4% 3581 ± 2% vmstat.system.cs
19774 ± 0% -76.7% 4609 ± 0% vmstat.system.in
591398 ± 9% -94.5% 32236 ± 4% cpuidle.C1-SNB.time
59726 ± 10% -97.7% 1370 ± 84% cpuidle.C1-SNB.usage
2880102 ± 1% +17.3% 3377630 ± 0% cpuidle.C6-SNB.time
76.75 ± 10% -100.0% 0.00 ± 0% cpuidle.POLL.time
43.25 ± 18% -100.0% 0.00 ± 0% cpuidle.POLL.usage
0.74 ± 33% -59.1% 0.30 ± 75% perf-profile.cpu-cycles.call_timer_fn.run_timer_softirq.__do_softirq.irq_exit.smp_apic_timer_interrupt
1.14 ± 72% -100.0% 0.00 ± -1% perf-profile.cpu-cycles.copy_process.part.27._do_fork.sys_clone.entry_SYSCALL_64_fastpath
0.00 ± -1% +Inf% 0.56 ± 56% perf-profile.cpu-cycles.copy_process.part.28._do_fork.sys_clone.entry_SYSCALL_64_fastpath
0.95 ± 21% +33.9% 1.27 ± 3% perf-profile.cpu-cycles.free_hot_cold_page.put_page.anon_pipe_buf_release.pipe_read.__vfs_read
0.51 ± 43% +243.1% 1.75 ± 34% perf-profile.cpu-cycles.kthread.ret_from_fork
0.26 ± 66% +316.3% 1.08 ± 39% perf-profile.cpu-cycles.rcu_nocb_kthread.kthread.ret_from_fork
0.51 ± 43% +243.1% 1.75 ± 34% perf-profile.cpu-cycles.ret_from_fork
198.25 ± 10% +26.1% 250.00 ± 9% sched_debug.cfs_rq[1]:/.runnable_load_avg
26484 ± 49% -107.5% -1995 ±-124% sched_debug.cfs_rq[2]:/.spread0
26370 ± 45% -105.4% -1415 ±-116% sched_debug.cfs_rq[3]:/.spread0
5582478 ± 33% -97.5% 142112 ± 16% sched_debug.cpu#0.nr_switches
5582876 ± 33% -97.4% 142504 ± 16% sched_debug.cpu#0.sched_count
40451 ± 39% -87.2% 5176 ± 26% sched_debug.cpu#0.sched_goidle
2803309 ± 33% -97.2% 77183 ± 14% sched_debug.cpu#0.ttwu_count
2781915 ± 33% -98.0% 55288 ± 16% sched_debug.cpu#0.ttwu_local
2354598 ± 80% -93.5% 153545 ± 7% sched_debug.cpu#1.nr_switches
2354718 ± 80% -93.5% 153683 ± 7% sched_debug.cpu#1.sched_count
1185125 ± 79% -93.2% 80529 ± 10% sched_debug.cpu#1.ttwu_count
1164849 ± 81% -94.8% 60184 ± 14% sched_debug.cpu#1.ttwu_local
3834 ± 21% -35.2% 2483 ± 8% sched_debug.cpu#2.sched_goidle

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/test:
nhm-white/unixbench/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/shell1

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
2494800 ± 0% -6.0% 2345617 ± 0% unixbench.time.involuntary_context_switches
1.085e+09 ± 0% -2.4% 1.059e+09 ± 0% latency_stats.sum.do_wait.SyS_wait4.entry_SYSCALL_64_fastpath
52548 ± 5% -33.6% 34875 ± 0% vmstat.system.cs
14831 ± 9% -59.3% 6034 ± 0% vmstat.system.in
47.37 ± 0% -2.0% 46.41 ± 0% turbostat.%Busy
1360 ± 0% -2.9% 1321 ± 0% turbostat.Avg_MHz
7.69 ± 0% +16.3% 8.94 ± 3% turbostat.CPU%c6
3.51 ± 0% +31.3% 4.61 ± 6% turbostat.Pkg%pc6
65744708 ± 2% -50.6% 32458256 ± 0% cpuidle.C1-NHM.time
3629185 ± 14% -88.0% 435580 ± 2% cpuidle.C1-NHM.usage
4.728e+08 ± 0% +13.3% 5.358e+08 ± 2% cpuidle.C6-NHM.time
602583 ± 0% +11.6% 672654 ± 0% cpuidle.C6-NHM.usage
141592 ± 20% -75.6% 34556 ± 25% cpuidle.POLL.time
14080 ± 14% -91.1% 1252 ± 2% cpuidle.POLL.usage
18338 ± 14% +16.4% 21343 ± 4% sched_debug.cfs_rq[2]:/.blocked_load_avg
18489 ± 14% +16.1% 21467 ± 4% sched_debug.cfs_rq[2]:/.tg_load_contrib
146020 ± 5% -7.3% 135370 ± 0% sched_debug.cpu#0.nr_load_updates
3000951 ± 78% -78.9% 633028 ± 2% sched_debug.cpu#0.nr_switches
3001380 ± 78% -78.9% 633584 ± 2% sched_debug.cpu#0.sched_count
1354868 ± 82% -82.8% 233428 ± 3% sched_debug.cpu#0.sched_goidle
1426794 ± 82% -83.9% 230146 ± 1% sched_debug.cpu#0.ttwu_count
1287294 ± 92% -93.0% 90490 ± 0% sched_debug.cpu#0.ttwu_local
7751 ± 16% +37.1% 10628 ± 6% sched_debug.cpu#1.curr->pid
685476 ± 64% -65.6% 235696 ± 4% sched_debug.cpu#1.ttwu_count
535746 ± 83% -83.4% 88784 ± 1% sched_debug.cpu#1.ttwu_local
69.00 ± 4% -15.9% 58.00 ± 13% sched_debug.cpu#2.cpu_load[2]
69.00 ± 3% -17.8% 56.75 ± 10% sched_debug.cpu#2.cpu_load[3]
68.25 ± 4% -17.2% 56.50 ± 10% sched_debug.cpu#2.cpu_load[4]
-69.25 ±-14% -33.2% -46.25 ±-20% sched_debug.cpu#3.nr_uninterruptible
7500 ± 19% +62.1% 12161 ± 17% sched_debug.cpu#4.curr->pid
219010 ± 3% -8.1% 201264 ± 3% sched_debug.cpu#4.sched_goidle
93751 ± 5% -7.3% 86883 ± 0% sched_debug.cpu#4.ttwu_local
137580 ± 4% -5.7% 129702 ± 0% sched_debug.cpu#5.nr_load_updates
2993388 ± 79% -80.6% 581607 ± 1% sched_debug.cpu#5.nr_switches
2993731 ± 79% -80.6% 581990 ± 1% sched_debug.cpu#5.sched_count
1373366 ± 84% -85.1% 204662 ± 1% sched_debug.cpu#5.sched_goidle
1296037 ± 93% -93.0% 90170 ± 0% sched_debug.cpu#5.ttwu_local
78.50 ± 28% -43.0% 44.75 ± 45% sched_debug.cpu#7.nr_uninterruptible
0.01 ± 59% +21923.0% 1.25 ±171% sched_debug.rt_rq[3]:/.rt_time
0.00 ± 58% +7898.3% 0.38 ±169% sched_debug.rt_rq[7]:/.rt_time

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/test:
nhm-white/unixbench/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/shell8

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
5856190 ± 1% -25.5% 4360557 ± 0% unixbench.time.involuntary_context_switches
5856190 ± 1% -25.5% 4360557 ± 0% time.involuntary_context_switches
3.14 ± 3% +29.0% 4.05 ± 2% turbostat.Pkg%pc6
61840 ± 2% -26.2% 45627 ± 0% vmstat.system.cs
18904 ± 3% -44.2% 10543 ± 0% vmstat.system.in
44596831 ± 2% -34.1% 29385365 ± 1% cpuidle.C1-NHM.time
1853539 ± 8% -87.6% 229126 ± 1% cpuidle.C1-NHM.usage
246889 ± 17% -90.1% 24414 ± 38% cpuidle.POLL.time
31710 ± 8% -96.8% 1025 ± 3% cpuidle.POLL.usage
24168 ± 12% +18.9% 28748 ± 4% sched_debug.cfs_rq[0]:/.blocked_load_avg
24325 ± 13% +18.8% 28899 ± 3% sched_debug.cfs_rq[0]:/.tg_load_contrib
91.50 ± 13% +31.7% 120.50 ± 21% sched_debug.cfs_rq[1]:/.runnable_load_avg
83.00 ± 7% +29.2% 107.25 ± 14% sched_debug.cfs_rq[5]:/.runnable_load_avg
70.75 ± 18% +31.1% 92.75 ± 11% sched_debug.cfs_rq[7]:/.runnable_load_avg
7677 ± 34% +46.0% 11210 ± 10% sched_debug.cpu#2.curr->pid
389892 ± 17% +27.3% 496226 ± 13% sched_debug.cpu#4.avg_idle
6249 ± 27% +87.1% 11693 ± 7% sched_debug.cpu#6.curr->pid

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/test:
wsm/will-it-scale/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/getppid1

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
18473072 ± 3% -11.3% 16379556 ± 0% will-it-scale.per_process_ops
18119608 ± 1% -9.5% 16402159 ± 0% will-it-scale.per_thread_ops
0.74 ± 7% +18.5% 0.87 ± 2% will-it-scale.scalability
628.18 ± 0% +4.4% 655.83 ± 0% will-it-scale.time.system_time
434.65 ± 0% -6.3% 407.30 ± 0% will-it-scale.time.user_time
22243 ± 4% -3.3% 21501 ± 4% meminfo.AnonPages
5560 ± 4% -3.3% 5374 ± 4% proc-vmstat.nr_anon_pages
23934 ± 4% -22.1% 18653 ± 3% softirqs.SCHED
18.03 ± 7% +14.8% 20.69 ± 0% turbostat.CPU%c6
1802 ± 2% +21.0% 2181 ± 4% slabinfo.kmalloc-512.active_objs
1802 ± 2% +22.4% 2206 ± 3% slabinfo.kmalloc-512.num_objs
30351 ± 0% -93.9% 1837 ± 0% vmstat.system.cs
21766 ± 0% -65.9% 7418 ± 0% vmstat.system.in
49070159 ± 12% -85.0% 7354447 ± 24% cpuidle.C1-NHM.time
4289833 ± 8% -97.5% 107531 ± 5% cpuidle.C1-NHM.usage
1606 ± 7% -27.1% 1171 ± 10% cpuidle.POLL.usage
21.60 ± 1% -22.8% 16.68 ± 0% perf-profile.cpu-cycles.entry_SYSCALL_64
30.96 ± 2% -22.8% 23.90 ± 0% perf-profile.cpu-cycles.entry_SYSCALL_64_after_swapgs
44.96 ± 2% +25.8% 56.58 ± 2% perf-profile.cpu-cycles.entry_SYSCALL_64_fastpath
9.80 ± 16% +156.6% 25.16 ± 2% perf-profile.cpu-cycles.pid_vnr.entry_SYSCALL_64_fastpath
6.40 ± 13% -50.6% 3.16 ± 4% perf-profile.cpu-cycles.pid_vnr.sys_getppid.entry_SYSCALL_64_fastpath
11.04 ± 7% +15.1% 12.70 ± 2% perf-profile.cpu-cycles.sys_getppid.entry_SYSCALL_64_fastpath
13943 ± 10% +25.4% 17491 ± 6% sched_debug.cfs_rq[0]:/.tg_load_avg
13886 ± 10% +25.1% 17373 ± 6% sched_debug.cfs_rq[10]:/.tg_load_avg
13888 ± 10% +24.9% 17352 ± 6% sched_debug.cfs_rq[11]:/.tg_load_avg
2.25 ± 19% +66.7% 3.75 ± 22% sched_debug.cfs_rq[1]:/.nr_spread_over
13949 ± 10% +25.1% 17452 ± 6% sched_debug.cfs_rq[1]:/.tg_load_avg
13949 ± 10% +25.1% 17456 ± 6% sched_debug.cfs_rq[2]:/.tg_load_avg
13948 ± 10% +25.1% 17447 ± 6% sched_debug.cfs_rq[3]:/.tg_load_avg
13929 ± 10% +25.2% 17441 ± 6% sched_debug.cfs_rq[4]:/.tg_load_avg
13931 ± 10% +24.9% 17406 ± 6% sched_debug.cfs_rq[5]:/.tg_load_avg
13929 ± 10% +24.8% 17379 ± 6% sched_debug.cfs_rq[6]:/.tg_load_avg
13910 ± 10% +24.9% 17379 ± 6% sched_debug.cfs_rq[7]:/.tg_load_avg
13890 ± 10% +25.1% 17371 ± 6% sched_debug.cfs_rq[8]:/.tg_load_avg
13888 ± 10% +25.1% 17370 ± 6% sched_debug.cfs_rq[9]:/.tg_load_avg
144.75 ± 52% -47.8% 75.50 ± 31% sched_debug.cpu#1.cpu_load[0]
128.25 ± 38% -43.3% 72.75 ± 14% sched_debug.cpu#1.cpu_load[1]
106.25 ± 25% -33.4% 70.75 ± 7% sched_debug.cpu#1.cpu_load[2]
90.25 ± 15% -23.0% 69.50 ± 4% sched_debug.cpu#1.cpu_load[3]
82.00 ± 7% -15.9% 69.00 ± 3% sched_debug.cpu#1.cpu_load[4]
1005269 ±166% -98.4% 16346 ± 25% sched_debug.cpu#1.nr_switches
1005304 ±166% -98.4% 16385 ± 25% sched_debug.cpu#1.sched_count
499712 ±168% -98.6% 6885 ± 37% sched_debug.cpu#1.sched_goidle
1.50 ±145% +83.3% 2.75 ±150% sched_debug.cpu#10.nr_uninterruptible
2154 ± 14% -38.3% 1330 ± 32% sched_debug.cpu#3.curr->pid
1562 ± 33% +40.0% 2187 ± 9% sched_debug.cpu#4.curr->pid
42.25 ± 2% +31.4% 55.50 ± 20% sched_debug.cpu#7.cpu_load[1]
42.75 ± 3% +19.3% 51.00 ± 12% sched_debug.cpu#7.cpu_load[2]
43.50 ± 5% +13.2% 49.25 ± 10% sched_debug.cpu#7.cpu_load[3]
441090 ±167% -99.5% 2153 ± 51% sched_debug.cpu#7.ttwu_count
72.75 ± 32% -39.9% 43.75 ± 9% sched_debug.cpu#8.cpu_load[0]
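
The getppid1 testcase is essentially a tight getppid() loop, so nearly all of
its per-operation cost is syscall entry/exit; that is why the entry_SYSCALL_64*
entries dominate the profile above, and why this testcase is so sensitive to a
change in the SYSRET return path. A minimal stand-in (a hedged sketch, not the
benchmark's actual source; the file name and iteration count are made up):

/* getppid_loop.c: approximate the syscall-bound inner loop */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned long long iterations = 0;

	/* each getppid() is one round trip through entry_SYSCALL_64_fastpath */
	while (iterations < 100000000ULL) {
		getppid();
		iterations++;
	}
	printf("%llu getppid() calls\n", iterations);
	return 0;
}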

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/test:
wsm/will-it-scale/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/open1

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
801101 ± 4% +6.5% 853345 ± 0% will-it-scale.per_process_ops
0.56 ± 10% -11.5% 0.50 ± 0% will-it-scale.scalability
2004 ±100% +233.7% 6688 ±100% latency_stats.sum.wait_woken.inotify_read.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
40.09 ± 1% -9.6% 36.25 ± 0% time.user_time
357.00 ± 13% +32.4% 472.50 ± 7% slabinfo.mnt_cache.active_objs
357.00 ± 13% +32.4% 472.50 ± 7% slabinfo.mnt_cache.num_objs
90485 ± 2% -7.4% 83796 ± 0% softirqs.RCU
49819 ± 2% -10.7% 44472 ± 1% softirqs.SCHED
30306 ± 2% -91.1% 2685 ± 1% vmstat.system.cs
21928 ± 1% -63.7% 7960 ± 0% vmstat.system.in
41567129 ± 11% -90.4% 3994402 ± 30% cpuidle.C1-NHM.time
3844950 ± 16% -97.0% 115267 ± 3% cpuidle.C1-NHM.usage
16389120 ± 54% -56.8% 7074969 ± 15% cpuidle.C3-NHM.time
1491 ± 49% -91.1% 132.00 ± 17% cpuidle.POLL.usage
1.54 ± 3% -18.8% 1.26 ± 4% perf-profile.cpu-cycles.__call_rcu.call_rcu_sched.__fput.____fput.task_work_run
1.21 ± 5% -18.7% 0.98 ± 2% perf-profile.cpu-cycles.__fd_install.fd_install.do_sys_open.sys_open.entry_SYSCALL_64_fastpath
3.01 ± 4% +11.0% 3.34 ± 3% perf-profile.cpu-cycles.__inode_permission.inode_permission.may_open.path_openat.do_filp_open
1.81 ± 2% +11.1% 2.01 ± 3% perf-profile.cpu-cycles.__memset.get_empty_filp.path_openat.do_filp_open.do_sys_open
1.47 ± 2% -11.4% 1.31 ± 3% perf-profile.cpu-cycles.__slab_alloc.kmem_cache_alloc.get_empty_filp.path_openat.do_filp_open
3.61 ± 2% -10.2% 3.25 ± 1% perf-profile.cpu-cycles.call_rcu_sched.__fput.____fput.task_work_run.prepare_exit_to_usermode
10.03 ± 1% -7.9% 9.24 ± 1% perf-profile.cpu-cycles.do_dentry_open.vfs_open.path_openat.do_filp_open.do_sys_open
1.39 ± 4% -18.2% 1.14 ± 3% perf-profile.cpu-cycles.fd_install.do_sys_open.sys_open.entry_SYSCALL_64_fastpath
2.50 ± 2% +8.9% 2.72 ± 2% perf-profile.cpu-cycles.get_unused_fd_flags.do_sys_open.sys_open.entry_SYSCALL_64_fastpath
3.31 ± 5% +10.4% 3.66 ± 2% perf-profile.cpu-cycles.inode_permission.may_open.path_openat.do_filp_open.do_sys_open
1.15 ± 5% +9.8% 1.26 ± 3% perf-profile.cpu-cycles.kfree.selinux_file_free_security.security_file_free.__fput.____fput
2.14 ± 2% -13.6% 1.85 ± 2% perf-profile.cpu-cycles.kmem_cache_alloc_trace.selinux_file_alloc_security.security_file_alloc.get_empty_filp.path_openat
3.62 ± 2% -8.8% 3.30 ± 1% perf-profile.cpu-cycles.security_file_alloc.get_empty_filp.path_openat.do_filp_open.do_sys_open
1.45 ± 5% +11.9% 1.62 ± 3% perf-profile.cpu-cycles.security_file_free.__fput.____fput.task_work_run.prepare_exit_to_usermode
2.13 ± 6% +15.2% 2.46 ± 4% perf-profile.cpu-cycles.security_inode_permission.__inode_permission.inode_permission.may_open.path_openat
3.30 ± 2% -8.7% 3.01 ± 2% perf-profile.cpu-cycles.selinux_file_alloc_security.security_file_alloc.get_empty_filp.path_openat.do_filp_open
1.18 ± 5% +8.7% 1.29 ± 2% perf-profile.cpu-cycles.selinux_file_free_security.security_file_free.__fput.____fput.task_work_run
3.21 ± 3% +12.8% 3.63 ± 1% perf-profile.cpu-cycles.selinux_inode_permission.security_inode_permission.__inode_permission.inode_permission.link_path_walk
1.83 ± 7% +17.2% 2.15 ± 5% perf-profile.cpu-cycles.selinux_inode_permission.security_inode_permission.__inode_permission.inode_permission.may_open
10.79 ± 1% -9.4% 9.78 ± 1% perf-profile.cpu-cycles.vfs_open.path_openat.do_filp_open.do_sys_open.sys_open
1138 ± 30% +58.8% 1807 ± 9% sched_debug.cfs_rq[11]:/.blocked_load_avg
1199 ± 27% +57.1% 1884 ± 8% sched_debug.cfs_rq[11]:/.tg_load_contrib
3.00 ± 84% +233.3% 10.00 ± 7% sched_debug.cfs_rq[2]:/.nr_spread_over
30468 ± 13% -17.6% 25115 ± 3% sched_debug.cfs_rq[7]:/.avg->runnable_avg_sum
663.25 ± 13% -17.4% 547.75 ± 3% sched_debug.cfs_rq[7]:/.tg_runnable_contrib
3028 ± 59% -57.3% 1291 ± 56% sched_debug.cfs_rq[8]:/.blocked_load_avg
3.25 ± 50% +169.2% 8.75 ± 21% sched_debug.cfs_rq[8]:/.nr_spread_over
3092 ± 57% -56.7% 1340 ± 54% sched_debug.cfs_rq[8]:/.tg_load_contrib
109897 ± 9% -16.2% 92140 ± 3% sched_debug.cpu#0.nr_load_updates
97.75 ± 23% -32.7% 65.75 ± 14% sched_debug.cpu#1.cpu_load[0]
88.00 ± 10% -26.7% 64.50 ± 7% sched_debug.cpu#1.cpu_load[1]
2.00 ± 93% -325.0% -4.50 ±-114% sched_debug.cpu#1.nr_uninterruptible
62.25 ± 8% -12.9% 54.25 ± 13% sched_debug.cpu#11.cpu_load[4]
24550 ± 18% -31.1% 16923 ± 24% sched_debug.cpu#2.ttwu_count
12972 ± 9% -37.0% 8171 ± 30% sched_debug.cpu#2.ttwu_local
1038 ± 41% +92.4% 1998 ± 9% sched_debug.cpu#4.curr->pid
15880 ± 17% +19.8% 19022 ± 4% sched_debug.cpu#4.sched_goidle
1492076 ± 99% -99.4% 8330 ± 50% sched_debug.cpu#6.nr_switches
1492358 ± 99% -99.4% 8627 ± 48% sched_debug.cpu#6.sched_count
614310 ±100% -99.6% 2432 ± 64% sched_debug.cpu#6.sched_goidle
62.75 ± 16% -23.9% 47.75 ± 18% sched_debug.cpu#7.cpu_load[2]
59.50 ± 16% -22.7% 46.00 ± 12% sched_debug.cpu#7.cpu_load[3]
1.00 ±212% +775.0% 8.75 ± 28% sched_debug.cpu#8.nr_uninterruptible
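
Most of the shifted profile entries above sit on the open(2)/close(2) path:
get_empty_filp and the SELinux file alloc/free hooks on open, and __fput run
via task_work in prepare_exit_to_usermode on close. The open1 testcase
repeatedly opens and closes one file. A rough, self-contained approximation
(a hedged sketch, not the benchmark's actual source; the scratch path and
loop count are made up):

/* open1_loop.c: open/close the same file in a tight loop */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/tmp/open1_scratch";	/* hypothetical scratch file */
	long i;
	int fd = open(path, O_RDWR | O_CREAT, 0600);	/* create it once */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	close(fd);

	for (i = 0; i < 10000000L; i++) {
		fd = open(path, O_RDWR);	/* path_openat, get_empty_filp, LSM hooks */
		if (fd < 0) {
			perror("open");
			return 1;
		}
		close(fd);			/* queues __fput via task_work */
	}
	unlink(path);
	return 0;
}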

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/test:
wsm/will-it-scale/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/pthread_mutex2

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
25290 ± 8% -28.3% 18121 ± 3% softirqs.SCHED
30175 ± 0% -94.6% 1620 ± 5% vmstat.system.cs
21733 ± 0% -66.0% 7399 ± 0% vmstat.system.in
50171812 ± 9% -80.7% 9701068 ± 55% cpuidle.C1-NHM.time
4178650 ± 12% -98.0% 83770 ± 12% cpuidle.C1-NHM.usage
1465 ± 6% -27.9% 1057 ± 17% cpuidle.POLL.usage
60.19 ± 0% -1.5% 59.27 ± 0% turbostat.%Busy
2118 ± 0% -1.6% 2085 ± 0% turbostat.Avg_MHz
17.82 ± 6% +15.7% 20.61 ± 0% turbostat.CPU%c6
0.07 ±133% +1864.3% 1.38 ± 73% perf-profile.cpu-cycles.__libc_start_main
0.07 ±133% +1814.3% 1.34 ± 75% perf-profile.cpu-cycles._start.main.__libc_start_main
1.72 ±110% -100.0% 0.00 ± -1% perf-profile.cpu-cycles.copy_process.part.27._do_fork.sys_clone.entry_SYSCALL_64_fastpath
0.07 ±133% +1814.3% 1.34 ± 75% perf-profile.cpu-cycles.main.__libc_start_main
807.25 ± 17% -26.5% 593.25 ± 24% sched_debug.cfs_rq[3]:/.utilization_load_avg
2069 ± 45% -80.7% 398.50 ± 40% sched_debug.cfs_rq[4]:/.blocked_load_avg
2162 ± 45% -78.0% 475.25 ± 38% sched_debug.cfs_rq[4]:/.tg_load_contrib
419067 ±169% -98.3% 7041 ± 4% sched_debug.cpu#0.sched_goidle
81.50 ± 3% -22.1% 63.50 ± 22% sched_debug.cpu#1.cpu_load[2]
16437 ± 46% -76.4% 3884 ± 57% sched_debug.cpu#10.ttwu_count
80953 ± 18% -24.0% 61501 ± 14% sched_debug.cpu#2.nr_load_updates
68.50 ± 15% -27.7% 49.50 ± 18% sched_debug.cpu#3.cpu_load[0]
67.25 ± 8% -15.2% 57.00 ± 12% sched_debug.cpu#4.cpu_load[3]
65.75 ± 8% -16.7% 54.75 ± 10% sched_debug.cpu#4.cpu_load[4]
39032 ± 14% -38.8% 23893 ± 40% sched_debug.cpu#4.nr_switches
39071 ± 14% -38.8% 23926 ± 40% sched_debug.cpu#4.sched_count
19248 ± 12% -42.4% 11089 ± 26% sched_debug.cpu#4.ttwu_count
9496 ± 6% -67.8% 3057 ± 30% sched_debug.cpu#4.ttwu_local

=========================================================================================
tbox_group/testcase/rootfs/kconfig/compiler/cpufreq_governor/test:
wsm/will-it-scale/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/performance/readseek3

commit:
cad8c9e5d6a97898d37b1a8e5cdf838d79ba6e50
62c79204783e188291d880f23d49c02d8c8f498b

cad8c9e5d6a97898 62c79204783e188291d880f23d
---------------- --------------------------
%stddev %change %stddev
\ | \
2439605 ± 0% +1.8% 2484631 ± 0% will-it-scale.per_process_ops
1842733 ± 1% +5.8% 1949535 ± 1% will-it-scale.per_thread_ops
0.54 ± 0% -1.8% 0.53 ± 1% will-it-scale.scalability
14706 ± 31% +52.6% 22440 ± 16% will-it-scale.time.involuntary_context_switches
136239 ± 2% +44.4% 196725 ± 3% latency_stats.hits.pipe_wait.pipe_read.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
7.40 ± 3% -9.7% 6.68 ± 2% perf-profile.cpu-cycles.rw_verify_area.vfs_read.sys_read.entry_SYSCALL_64_fastpath
25530 ± 1% -19.8% 20467 ± 1% softirqs.SCHED
14706 ± 31% +52.6% 22440 ± 16% time.involuntary_context_switches
30169 ± 0% -93.6% 1945 ± 2% vmstat.system.cs
21810 ± 0% -65.8% 7449 ± 0% vmstat.system.in
59.90 ± 0% -1.5% 59.02 ± 0% turbostat.%Busy
2108 ± 0% -1.5% 2076 ± 0% turbostat.Avg_MHz
17.43 ± 0% +17.6% 20.50 ± 0% turbostat.CPU%c6
46658026 ± 4% -77.3% 10588249 ± 14% cpuidle.C1-NHM.time
4461627 ± 0% -97.5% 112631 ± 5% cpuidle.C1-NHM.usage
135274 ± 85% +363.4% 626895 ± 80% cpuidle.C1E-NHM.time
103.25 ± 12% +71.4% 177.00 ± 20% cpuidle.C1E-NHM.usage
38945629 ± 6% -29.3% 27546659 ± 19% cpuidle.POLL.time
1523 ± 10% -26.2% 1124 ± 3% cpuidle.POLL.usage
14514 ± 6% +21.2% 17593 ± 6% sched_debug.cfs_rq[0]:/.tg_load_avg
-69524 ±-63% +63.0% -113345 ±-40% sched_debug.cfs_rq[10]:/.spread0
14486 ± 6% +20.9% 17508 ± 6% sched_debug.cfs_rq[10]:/.tg_load_avg
14468 ± 6% +21.0% 17508 ± 6% sched_debug.cfs_rq[11]:/.tg_load_avg
58.25 ± 7% +32.6% 77.25 ± 25% sched_debug.cfs_rq[1]:/.runnable_load_avg
14520 ± 6% +21.1% 17583 ± 6% sched_debug.cfs_rq[1]:/.tg_load_avg
14468 ± 6% +21.4% 17560 ± 6% sched_debug.cfs_rq[2]:/.tg_load_avg
14471 ± 6% +21.3% 17557 ± 6% sched_debug.cfs_rq[3]:/.tg_load_avg
14483 ± 6% +21.1% 17545 ± 6% sched_debug.cfs_rq[4]:/.tg_load_avg
14489 ± 6% +20.8% 17503 ± 6% sched_debug.cfs_rq[5]:/.tg_load_avg
14493 ± 6% +20.7% 17487 ± 6% sched_debug.cfs_rq[6]:/.tg_load_avg
52254 ± 84% -94.8% 2722 ±1818% sched_debug.cfs_rq[7]:/.spread0
14492 ± 6% +20.6% 17481 ± 6% sched_debug.cfs_rq[7]:/.tg_load_avg
14492 ± 6% +20.9% 17523 ± 6% sched_debug.cfs_rq[8]:/.tg_load_avg
-34317 ±-276% +228.6% -112775 ±-40% sched_debug.cfs_rq[9]:/.spread0
14486 ± 6% +20.9% 17508 ± 6% sched_debug.cfs_rq[9]:/.tg_load_avg
14454 ± 6% +32.2% 19105 ± 13% sched_debug.cpu#0.ttwu_count
11838 ± 20% -37.9% 7351 ± 30% sched_debug.cpu#1.ttwu_count
-6.75 ±-36% -96.3% -0.25 ±-1424% sched_debug.cpu#4.nr_uninterruptible
6151 ± 23% +113.3% 13119 ± 24% sched_debug.cpu#6.nr_switches
6185 ± 23% +112.6% 13147 ± 24% sched_debug.cpu#6.sched_count
1375 ± 29% +242.6% 4711 ± 35% sched_debug.cpu#6.sched_goidle
4277 ± 21% +88.5% 8063 ± 19% sched_debug.cpu#6.ttwu_count
2263 ± 12% -19.2% 1829 ± 0% sched_debug.cpu#9.curr->pid
72929 ± 11% -20.4% 58086 ± 0% sched_debug.cpu#9.nr_load_updates
356400 ±171% -99.6% 1252 ± 11% sched_debug.cpu#9.ttwu_local
0.05 ±100% +430.4% 0.28 ± 98% sched_debug.rt_rq[1]:/.rt_time


lkp-a06: Atom
Memory: 8G

lkp-sb02: Sandy Bridge-EP
Memory: 4G

nhm4: Nehalem
Memory: 4G

lituya: Grantley Haswell
Memory: 16G

wsm: Westmere
Memory: 6G

lkp-t410: Westmere
Memory: 2G

nhm-white: Nehalem
Memory: 6G


To reproduce:

git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/lkp install job.yaml # job file is attached in this email
bin/lkp run job.yaml


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Ying Huang
---
LKP_SERVER: inn
LKP_CGI_PORT: 80
LKP_CIFS_PORT: 139
testcase: fsmark
default-monitors:
  wait: activate-monitor
  kmsg:
  uptime:
  iostat:
  vmstat:
  numa-numastat:
  numa-vmstat:
  numa-meminfo:
  proc-vmstat:
  proc-stat:
    interval: 10
  meminfo:
  slabinfo:
  interrupts:
  lock_stat:
  latency_stats:
  softirqs:
  bdi_dev_mapping:
  diskstats:
  nfsstat:
  cpuidle:
  cpufreq-stats:
  turbostat:
  pmeter:
  sched_debug:
    interval: 60
cpufreq_governor: performance
default-watchdogs:
  oom-killer:
  watchdog:
commit: ba19772fefe56fca1548d91218c3aeca8f207919
model: Sandy Bridge-EP
memory: 4G
hdd_partitions: "/dev/disk/by-id/ata-HDT722516DLA380_VDSD1DTCEKYAEJ-part2"
swap_partitions:
rootfs_partition: "/dev/disk/by-id/ata-HDT722516DLA380_VDSD1DTCEKYAEJ-part1"
category: benchmark
iterations: 1x
nr_threads: 32t
disk: 1HDD
fs: btrfs
fs2:
fsmark:
  filesize: 9B
  test_size: 400M
  sync_method: fsyncBeforeClose
  nr_directories: 16d
  nr_files_per_directory: 256fpd
queue: cyclic
testbox: lkp-sb02
tbox_group: lkp-sb02
kconfig: x86_64-rhel
enqueue_time: 2015-07-08 12:46:57.152422017 +08:00
user: wfg
compiler: gcc-4.9
head_commit: ba19772fefe56fca1548d91218c3aeca8f207919
base_commit: d770e558e21961ad6cfdf0ff7df0eb5d7d4f0754
branch: linux-devel/devel-hourly-2015071021
kernel: "/pkg/linux/x86_64-rhel/gcc-4.9/ba19772fefe56fca1548d91218c3aeca8f207919/vmlinuz-4.2.0-rc1-wl-00882-gba19772"
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/fsmark/performance-1x-32t-1HDD-btrfs-9B-400M-fsyncBeforeClose-16d-256fpd/lkp-sb02/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/ba19772fefe56fca1548d91218c3aeca8f207919/0"
job_file: "/lkp/scheduled/lkp-sb02/cyclic_fsmark-performance-1x-32t-1HDD-btrfs-9B-400M-fsyncBeforeClose-16d-256fpd-x86_64-rhel-CYCLIC_HEAD-ba19772fefe56fca1548d91218c3aeca8f207919-20150708-88967-m5o3fs-0.yaml"
dequeue_time: 2015-07-11 12:17:26.250837417 +08:00
nr_cpu: "$(nproc)"
max_uptime: 1063.0800000000002
initrd: "/osimage/debian/debian-x86_64-2015-02-07.cgz"
bootloader_append:
- root=/dev/ram0
- user=wfg
- job=/lkp/scheduled/lkp-sb02/cyclic_fsmark-performance-1x-32t-1HDD-btrfs-9B-400M-fsyncBeforeClose-16d-256fpd-x86_64-rhel-CYCLIC_HEAD-ba19772fefe56fca1548d91218c3aeca8f207919-20150708-88967-m5o3fs-0.yaml
- ARCH=x86_64
- kconfig=x86_64-rhel
- branch=linux-devel/devel-hourly-2015071021
- commit=ba19772fefe56fca1548d91218c3aeca8f207919
- BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/ba19772fefe56fca1548d91218c3aeca8f207919/vmlinuz-4.2.0-rc1-wl-00882-gba19772
- max_uptime=1063
- RESULT_ROOT=/result/fsmark/performance-1x-32t-1HDD-btrfs-9B-400M-fsyncBeforeClose-16d-256fpd/lkp-sb02/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/ba19772fefe56fca1548d91218c3aeca8f207919/0
- LKP_SERVER=inn
- |2-


  earlyprintk=ttyS0,115200 systemd.log_level=err
  debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100
  panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0
  console=ttyS0,115200 console=tty0 vga=normal

  rw
lkp_initrd: "/lkp/wfg/lkp-x86_64.cgz"
modules_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/ba19772fefe56fca1548d91218c3aeca8f207919/modules.cgz"
bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/run-ipconfig.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fs.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/fs2.cgz,/lkp/benchmarks/fsmark.cgz"
job_state: finished
loadavg: 27.66 13.44 5.22 1/170 3028
start_time: '1436588285'
end_time: '1436588450'
version: "/lkp/wfg/.src-20150711-100505"
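# What follows is the executed command trail (as I read the attachment): pin
# every CPU to the performance governor, matching "cpufreq_governor:
# performance" above, make and mount the btrfs test filesystem, then run fs_mark.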
echo performance > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu2/cpufreq/scaling_governor
echo performance > /sys/devices/system/cpu/cpu3/cpufreq/scaling_governor
mkfs -t btrfs /dev/sda2
mount -t btrfs /dev/sda2 /fs/sda2
./fs_mark -d /fs/sda2/1 -d /fs/sda2/2 -d /fs/sda2/3 -d /fs/sda2/4 -d /fs/sda2/5 -d /fs/sda2/6 -d /fs/sda2/7 -d /fs/sda2/8 -d /fs/sda2/9 -d /fs/sda2/10 -d /fs/sda2/11 -d /fs/sda2/12 -d /fs/sda2/13 -d /fs/sda2/14 -d /fs/sda2/15 -d /fs/sda2/16 -d /fs/sda2/17 -d /fs/sda2/18 -d /fs/sda2/19 -d /fs/sda2/20 -d /fs/sda2/21 -d /fs/sda2/22 -d /fs/sda2/23 -d /fs/sda2/24 -d /fs/sda2/25 -d /fs/sda2/26 -d /fs/sda2/27 -d /fs/sda2/28 -d /fs/sda2/29 -d /fs/sda2/30 -d /fs/sda2/31 -d /fs/sda2/32 -D 16 -N 256 -n 3200 -L 1 -S 1 -s 9
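# fs_mark options, as I read its usage text (hedged, from memory): each -d adds
# one working directory per thread (32 here, matching nr_threads: 32t); -D 16
# and -N 256 match nr_directories: 16d and nr_files_per_directory: 256fpd;
# -n 3200 files per loop; -L 1 runs a single loop; -S 1 selects
# fsync-before-close (sync_method); -s 9 writes 9-byte files (filesize: 9B).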