[lkp] [mm/vmpressure.c] 3c1da7beee: No primary result change, 278.5% vm-scalability.time.involuntary_context_switches

From: kernel test robot
Date: Sun Feb 14 2016 - 01:25:04 EST


FYI, we noticed the following changes on

https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
commit 3c1da7beeee02560cd0f0c66c5a59fce3c6746e3 ("mm/vmpressure.c: fix subtree pressure detection")


=========================================================================================
compiler/cpufreq_governor/kconfig/rootfs/runtime/tbox_group/test/testcase:
gcc-4.9/performance/x86_64-rhel/debian-x86_64-2015-02-07.cgz/300s/ivb43/lru-file-mmap-read/vm-scalability

commit:
30bdbb78009e67767983085e302bec6d97afc679
3c1da7beeee02560cd0f0c66c5a59fce3c6746e3

30bdbb78009e6776 3c1da7beeee02560cd0f0c66c5
---------------- --------------------------
%stddev %change %stddev
\ | \
193661 ± 1% +278.5% 733007 ± 1% vm-scalability.time.involuntary_context_switches
906499 ± 2% +18.1% 1070404 ± 1% softirqs.RCU
193661 ± 1% +278.5% 733007 ± 1% time.involuntary_context_switches
4216 ± 3% +86.5% 7863 ± 1% vmstat.system.cs
0.74 ± 85% -80.1% 0.15 ±113% perf-profile.cycles-pp.__alloc_pages_nodemask.alloc_pages_current.__page_cache_alloc.__do_page_cache_readahead.filemap_fault
0.74 ± 85% -80.1% 0.15 ±113% perf-profile.cycles-pp.__page_cache_alloc.__do_page_cache_readahead.filemap_fault.xfs_filemap_fault.__do_fault
0.74 ± 85% -80.1% 0.15 ±113% perf-profile.cycles-pp.alloc_pages_current.__page_cache_alloc.__do_page_cache_readahead.filemap_fault.xfs_filemap_fault
1378 ± 5% -9.8% 1242 ± 9% slabinfo.file_lock_cache.active_objs
1378 ± 5% -9.8% 1242 ± 9% slabinfo.file_lock_cache.num_objs
14388 ± 3% -7.8% 13262 ± 7% slabinfo.kmalloc-512.num_objs
16441 ± 75% -100.0% 0.00 ± -1% latency_stats.avg.down.console_lock.do_con_write.con_write.n_tty_write.tty_write.redirected_tty_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
15932 ± 45% +233.0% 53047 ± 43% latency_stats.avg.pipe_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
16991 ± 74% -100.0% 0.00 ± -1% latency_stats.max.down.console_lock.do_con_write.con_write.n_tty_write.tty_write.redirected_tty_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
189128 ± 86% -72.1% 52770 ± 63% latency_stats.max.pipe_read.__vfs_read.vfs_read.SyS_read.entry_SYSCALL_64_fastpath
36438 ± 58% +417.4% 188546 ±112% latency_stats.max.pipe_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
7680 ±102% -90.3% 741.25 ± 17% latency_stats.max.rpc_wait_bit_killable.__rpc_wait_for_completion_task.nfs4_run_open_task.[nfsv4]._nfs4_open_and_get_state.[nfsv4].nfs4_do_open.[nfsv4].nfs4_atomic_open.[nfsv4].nfs4_file_open.[nfsv4].do_dentry_open.vfs_open.path_openat.do_filp_open.do_sys_open
0.00 ± -1% +Inf% 20319 ±100% latency_stats.sum.call_rwsem_down_read_failed.page_lock_anon_vma_read.rmap_walk.try_to_unmap.migrate_pages.migrate_misplaced_page.handle_mm_fault.__do_page_fault.do_page_fault.page_fault
0.00 ± -1% +Inf% 20492 ± 98% latency_stats.sum.call_rwsem_down_read_failed.rmap_walk.remove_migration_ptes.migrate_pages.migrate_misplaced_page.handle_mm_fault.__do_page_fault.do_page_fault.page_fault
22837 ± 72% -100.0% 0.00 ± -1% latency_stats.sum.down.console_lock.do_con_write.con_write.n_tty_write.tty_write.redirected_tty_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
0.00 ± -1% +Inf% 5388 ±106% latency_stats.sum.wait_on_page_bit.__migration_entry_wait.migration_entry_wait.do_swap_page.handle_mm_fault.__do_page_fault.do_page_fault.page_fault
17.00 ± 10% +30.9% 22.25 ± 27% sched_debug.cfs_rq:/.load.18
18.00 ± 11% +56.9% 28.25 ± 24% sched_debug.cfs_rq:/.load_avg.13
15.50 ± 9% +88.7% 29.25 ± 32% sched_debug.cfs_rq:/.load_avg.44
2.00 ±-50% +25.0% 2.50 ± 66% sched_debug.cfs_rq:/.nr_spread_over.13
2.50 ±100% +690.0% 19.75 ±100% sched_debug.cfs_rq:/.nr_spread_over.7
98.25 ± 82% -83.5% 16.25 ± 2% sched_debug.cfs_rq:/.runnable_load_avg.35
7118 ±387% +637.5% 52502 ± 57% sched_debug.cfs_rq:/.spread0.16
-275108 ±-167% -132.9% 90504 ± 43% sched_debug.cfs_rq:/.spread0.27
-524687 ±-161% -118.7% 98148 ± 49% sched_debug.cfs_rq:/.spread0.35
46300 ± 50% +97.7% 91531 ± 40% sched_debug.cfs_rq:/.spread0.39
72286 ± 17% +51.4% 109469 ± 21% sched_debug.cfs_rq:/.spread0.40
913.75 ± 6% -7.6% 844.00 ± 0% sched_debug.cfs_rq:/.util_avg.0
19.75 ± 9% -17.7% 16.25 ± 2% sched_debug.cpu.cpu_load[0].0
98.25 ± 82% -83.7% 16.00 ± 0% sched_debug.cpu.cpu_load[0].35
19.75 ± 9% -16.5% 16.50 ± 3% sched_debug.cpu.cpu_load[1].0
98.25 ± 82% -83.7% 16.00 ± 0% sched_debug.cpu.cpu_load[1].35
97.75 ± 82% -83.6% 16.00 ± 0% sched_debug.cpu.cpu_load[2].35
19.50 ± 10% -16.7% 16.25 ± 2% sched_debug.cpu.cpu_load[3].0
97.25 ± 82% -83.5% 16.00 ± 0% sched_debug.cpu.cpu_load[3].35
102.50 ± 83% -84.4% 16.00 ± 0% sched_debug.cpu.cpu_load[4].35
1425 ± 7% -9.9% 1283 ± 2% sched_debug.cpu.curr->pid.0
1198 ± 12% +45.3% 1740 ± 25% sched_debug.cpu.curr->pid.11
0.00 ± 8% -13.5% 0.00 ± 3% sched_debug.cpu.next_balance.stddev
9927 ± 35% +355.5% 45218 ± 48% sched_debug.cpu.nr_switches.10
12682 ± 62% +140.1% 30446 ± 43% sched_debug.cpu.nr_switches.15
10994 ± 37% +157.0% 28251 ± 55% sched_debug.cpu.nr_switches.16
12178 ± 91% +233.5% 40619 ± 47% sched_debug.cpu.nr_switches.19
5602 ± 17% +266.9% 20553 ± 14% sched_debug.cpu.nr_switches.22
10024 ± 50% +135.1% 23568 ± 45% sched_debug.cpu.nr_switches.23
8392 ± 44% +132.9% 19546 ± 39% sched_debug.cpu.nr_switches.26
10036 ± 63% +227.2% 32837 ± 56% sched_debug.cpu.nr_switches.27
9508 ± 65% +114.9% 20434 ± 32% sched_debug.cpu.nr_switches.30
8428 ± 79% +219.0% 26886 ± 46% sched_debug.cpu.nr_switches.36
9121 ± 41% +138.2% 21724 ± 32% sched_debug.cpu.nr_switches.37
9108 ± 86% +220.0% 29147 ± 51% sched_debug.cpu.nr_switches.41
8646 ± 56% +106.7% 17867 ± 25% sched_debug.cpu.nr_switches.42
10638 ± 93% +113.8% 22749 ± 30% sched_debug.cpu.nr_switches.43
11113 ± 62% +106.3% 22927 ± 24% sched_debug.cpu.nr_switches.44
7673 ± 56% +171.6% 20841 ± 14% sched_debug.cpu.nr_switches.45
9427 ± 56% +154.6% 24003 ± 39% sched_debug.cpu.nr_switches.6
8095 ± 49% +268.0% 29790 ± 36% sched_debug.cpu.nr_switches.7
8992 ± 35% +334.1% 39034 ± 52% sched_debug.cpu.nr_switches.8
14331 ± 5% +76.4% 25278 ± 1% sched_debug.cpu.nr_switches.avg
1926 ± 11% +563.7% 12786 ± 3% sched_debug.cpu.nr_switches.min
-29.75 ±-29% -68.9% -9.25 ±-74% sched_debug.cpu.nr_uninterruptible.1
1.75 ± 74% -185.7% -1.50 ±-152% sched_debug.cpu.nr_uninterruptible.17
2.00 ± 93% -212.5% -2.25 ±-57% sched_debug.cpu.nr_uninterruptible.25
5.00 ± 28% -60.0% 2.00 ± 70% sched_debug.cpu.nr_uninterruptible.39
5.00 ± 14% -100.0% 0.00 ± 3% sched_debug.cpu.nr_uninterruptible.42
-0.25 ±-714% +1800.0% -4.75 ±-52% sched_debug.cpu.nr_uninterruptible.8
7.00 ± 8% -28.7% 4.99 ± 28% sched_debug.cpu.nr_uninterruptible.stddev
11319 ± 34% +307.9% 46174 ± 46% sched_debug.cpu.sched_count.10
13423 ± 58% +144.3% 32793 ± 47% sched_debug.cpu.sched_count.15
11721 ± 34% +148.0% 29069 ± 52% sched_debug.cpu.sched_count.16
6279 ± 15% +242.8% 21525 ± 15% sched_debug.cpu.sched_count.22
12086 ± 33% +126.7% 27396 ± 34% sched_debug.cpu.sched_count.23
9054 ± 69% +101.6% 18255 ± 13% sched_debug.cpu.sched_count.28
10732 ± 52% +93.1% 20720 ± 31% sched_debug.cpu.sched_count.30
9223 ± 57% +140.3% 22163 ± 45% sched_debug.cpu.sched_count.33
9052 ± 74% +200.7% 27217 ± 45% sched_debug.cpu.sched_count.36
9726 ± 39% +126.4% 22023 ± 31% sched_debug.cpu.sched_count.37
11611 ± 96% +153.2% 29399 ± 50% sched_debug.cpu.sched_count.41
9206 ± 52% +97.5% 18177 ± 25% sched_debug.cpu.sched_count.42
11171 ± 88% +110.1% 23466 ± 32% sched_debug.cpu.sched_count.43
11767 ± 58% +104.0% 24000 ± 24% sched_debug.cpu.sched_count.44
8230 ± 53% +156.8% 21133 ± 13% sched_debug.cpu.sched_count.45
9179 ± 45% +230.1% 30298 ± 36% sched_debug.cpu.sched_count.7
10854 ± 21% +264.5% 39568 ± 51% sched_debug.cpu.sched_count.8
107080 ± 3% +11.8% 119744 ± 0% sched_debug.cpu.sched_count.avg
2510 ± 8% +428.6% 13273 ± 4% sched_debug.cpu.sched_count.min
558.75 ± 17% +38.7% 774.75 ± 24% sched_debug.cpu.sched_goidle.17
479.50 ± 30% +90.0% 911.25 ± 34% sched_debug.cpu.sched_goidle.22
236.50 ± 23% -26.4% 174.00 ± 16% sched_debug.cpu.sched_goidle.28
397.75 ± 17% -44.8% 219.50 ± 16% sched_debug.cpu.sched_goidle.29
1191 ± 68% -50.6% 588.50 ± 10% sched_debug.cpu.sched_goidle.3
612.75 ± 94% -71.6% 174.25 ± 18% sched_debug.cpu.sched_goidle.35
642.25 ± 12% -26.0% 475.25 ± 14% sched_debug.cpu.sched_goidle.6
5010 ± 34% +356.2% 22857 ± 48% sched_debug.cpu.ttwu_count.10
5491 ± 36% +153.7% 13931 ± 57% sched_debug.cpu.ttwu_count.16
5673 ± 96% +260.9% 20475 ± 49% sched_debug.cpu.ttwu_count.19
6418 ± 67% +217.4% 20374 ± 30% sched_debug.cpu.ttwu_count.21
2930 ± 22% +261.2% 10582 ± 10% sched_debug.cpu.ttwu_count.22
5068 ± 49% +126.3% 11470 ± 45% sched_debug.cpu.ttwu_count.23
4517 ± 75% +271.6% 16787 ± 54% sched_debug.cpu.ttwu_count.27
4272 ± 64% +99.3% 8514 ± 18% sched_debug.cpu.ttwu_count.28
5269 ± 57% +95.2% 10285 ± 31% sched_debug.cpu.ttwu_count.30
4064 ± 84% +246.0% 14063 ± 49% sched_debug.cpu.ttwu_count.36
5228 ± 34% +114.1% 11196 ± 34% sched_debug.cpu.ttwu_count.37
5493 ± 66% +174.7% 15088 ± 50% sched_debug.cpu.ttwu_count.41
4911 ± 47% +81.5% 8917 ± 24% sched_debug.cpu.ttwu_count.42
5411 ± 92% +116.6% 11720 ± 25% sched_debug.cpu.ttwu_count.43
6081 ± 57% +103.1% 12350 ± 24% sched_debug.cpu.ttwu_count.44
3662 ± 55% +178.7% 10205 ± 14% sched_debug.cpu.ttwu_count.45
4798 ± 60% +153.3% 12155 ± 39% sched_debug.cpu.ttwu_count.6
3983 ± 32% +283.8% 15290 ± 32% sched_debug.cpu.ttwu_count.7
4638 ± 39% +312.2% 19120 ± 51% sched_debug.cpu.ttwu_count.8
7331 ± 5% +75.8% 12885 ± 1% sched_debug.cpu.ttwu_count.avg
885.92 ± 8% +624.4% 6417 ± 3% sched_debug.cpu.ttwu_count.min
4298 ± 39% +411.6% 21987 ± 49% sched_debug.cpu.ttwu_local.10
5488 ± 71% +164.2% 14498 ± 46% sched_debug.cpu.ttwu_local.15
4367 ± 39% +197.5% 12994 ± 62% sched_debug.cpu.ttwu_local.16
4650 ±113% +307.6% 18953 ± 53% sched_debug.cpu.ttwu_local.19
5618 ± 76% +239.1% 19051 ± 33% sched_debug.cpu.ttwu_local.21
2151 ± 26% +326.4% 9175 ± 11% sched_debug.cpu.ttwu_local.22
3923 ± 59% +168.4% 10531 ± 47% sched_debug.cpu.ttwu_local.23
4148 ± 49% +133.2% 9673 ± 39% sched_debug.cpu.ttwu_local.26
4081 ± 81% +295.7% 16152 ± 56% sched_debug.cpu.ttwu_local.27
3699 ± 75% +114.3% 7929 ± 19% sched_debug.cpu.ttwu_local.28
3528 ± 93% +266.9% 12942 ± 49% sched_debug.cpu.ttwu_local.36
4522 ± 36% +132.3% 10507 ± 32% sched_debug.cpu.ttwu_local.37
4322 ± 91% +229.3% 14233 ± 51% sched_debug.cpu.ttwu_local.41
3951 ± 55% +105.9% 8136 ± 20% sched_debug.cpu.ttwu_local.42
4658 ±107% +127.5% 10596 ± 28% sched_debug.cpu.ttwu_local.43
5376 ± 62% +112.7% 11437 ± 24% sched_debug.cpu.ttwu_local.44
2935 ± 54% +223.0% 9481 ± 17% sched_debug.cpu.ttwu_local.45
3848 ± 74% +191.2% 11208 ± 44% sched_debug.cpu.ttwu_local.6
2776 ± 58% +394.2% 13718 ± 37% sched_debug.cpu.ttwu_local.7
3609 ± 47% +407.6% 18320 ± 54% sched_debug.cpu.ttwu_local.8
6411 ± 6% +85.8% 11911 ± 1% sched_debug.cpu.ttwu_local.avg
521.25 ± 19% +1052.5% 6007 ± 3% sched_debug.cpu.ttwu_local.min

=========================================================================================
compiler/cpufreq_governor/kconfig/rootfs/runtime/tbox_group/test/testcase:
gcc-4.9/performance/x86_64-rhel/debian-x86_64-2015-02-07.cgz/300s/lkp-hsw-ep2/lru-file-readonce/vm-scalability

commit:
30bdbb78009e67767983085e302bec6d97afc679
3c1da7beeee02560cd0f0c66c5a59fce3c6746e3

30bdbb78009e6776 3c1da7beeee02560cd0f0c66c5
---------------- --------------------------
%stddev %change %stddev
\ | \
77598 ± 1% +257.0% 276990 ± 1% vm-scalability.time.involuntary_context_switches
6834637 ± 33% +94.8% 13311724 ± 31% cpuidle.C1E-HSW.time
24793 ± 42% -48.6% 12753 ± 6% latency_stats.sum.rpc_wait_bit_killable.__rpc_wait_for_completion_task.nfs4_do_close.[nfsv4].__nfs4_close.[nfsv4].nfs4_close_sync.[nfsv4].nfs4_close_context.[nfsv4].__put_nfs_open_context.nfs_file_clear_open_context.nfs_file_release.__fput.____fput.task_work_run
16965 ± 3% +14.6% 19434 ± 7% slabinfo.kmalloc-256.active_objs
77598 ± 1% +257.0% 276990 ± 1% time.involuntary_context_switches
0.53 ± 9% -65.1% 0.18 ± 68% turbostat.Pkg%pc6
4315 ± 3% +59.7% 6894 ± 3% vmstat.system.cs
11.86 ± 9% +19.3% 14.14 ± 9% perf-profile.cycles-pp._raw_spin_lock_irq.shrink_inactive_list.shrink_zone_memcg.shrink_zone.do_try_to_free_pages
11.92 ± 10% +19.2% 14.21 ± 9% perf-profile.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock_irq.shrink_inactive_list.shrink_zone_memcg.shrink_zone
22522 ± 8% -15.3% 19071 ± 11% numa-meminfo.node0.AnonPages
16685 ± 10% -24.0% 12686 ± 17% numa-meminfo.node0.Mapped
57137 ±124% +149.2% 142408 ± 50% numa-meminfo.node1.Active(anon)
6462560 ± 8% +14.7% 7415691 ± 8% numa-numastat.node0.numa_foreign
6462560 ± 8% +14.7% 7415691 ± 8% numa-numastat.node1.numa_miss
6469513 ± 8% +14.7% 7418019 ± 8% numa-numastat.node1.other_node
9909140 ± 3% +16.6% 11557657 ± 8% proc-vmstat.numa_foreign
9909140 ± 3% +16.6% 11557657 ± 8% proc-vmstat.numa_miss
9918420 ± 3% +16.6% 11566942 ± 8% proc-vmstat.numa_other
5620 ± 8% -15.2% 4765 ± 11% numa-vmstat.node0.nr_anon_pages
326.75 ± 10% -14.2% 280.25 ± 11% numa-vmstat.node0.nr_isolated_file
3316583 ± 11% +21.3% 4022573 ± 6% numa-vmstat.node0.numa_foreign
14519 ±125% +146.5% 35788 ± 50% numa-vmstat.node1.nr_active_anon
3317096 ± 11% +21.3% 4023320 ± 6% numa-vmstat.node1.numa_miss
3353496 ± 11% +20.9% 4053916 ± 6% numa-vmstat.node1.numa_other
801.94 ± 7% -9.6% 725.29 ± 2% sched_debug.cfs_rq:/.exec_clock.stddev
8.75 ± 4% +1111.4% 106.00 ±133% sched_debug.cfs_rq:/.load.28
10.00 ± 7% +287.5% 38.75 ± 88% sched_debug.cfs_rq:/.load_avg.20
58.25 ± 51% -83.3% 9.75 ± 4% sched_debug.cfs_rq:/.load_avg.3
21.75 ± 62% +109.2% 45.50 ± 51% sched_debug.cfs_rq:/.load_avg.35
17.50 ± 37% -50.0% 8.75 ± 4% sched_debug.cfs_rq:/.load_avg.41
8.75 ± 9% +797.1% 78.50 ± 97% sched_debug.cfs_rq:/.load_avg.56
5.25 ± 8% -23.8% 4.00 ± 0% sched_debug.cfs_rq:/.nr_spread_over.5
0.36 ± 8% +1233.3% 4.81 ± 1% sched_debug.cfs_rq:/.nr_spread_over.avg
6.67 ± 7% +1785.0% 125.67 ± 10% sched_debug.cfs_rq:/.nr_spread_over.max
1.09 ± 4% +1726.0% 19.85 ± 6% sched_debug.cfs_rq:/.nr_spread_over.stddev
8.50 ± 5% +288.2% 33.00 ±100% sched_debug.cfs_rq:/.runnable_load_avg.28
8.00 ± 0% +28.1% 10.25 ± 21% sched_debug.cfs_rq:/.runnable_load_avg.64
7.17 ± 5% -16.3% 6.00 ± 6% sched_debug.cfs_rq:/.runnable_load_avg.min
-6138 ±-160% -1981.1% 115460 ±162% sched_debug.cfs_rq:/.spread0.1
-9332 ±-76% -1329.1% 114701 ±165% sched_debug.cfs_rq:/.spread0.17
2045 ±1264% +6402.0% 132988 ±139% sched_debug.cfs_rq:/.spread0.44
-228895 ±-167% -147.3% 108207 ±168% sched_debug.cfs_rq:/.spread0.5
6476 ±127% +1852.9% 126479 ±150% sched_debug.cfs_rq:/.spread0.52
-138220 ±-172% -190.0% 124395 ±154% sched_debug.cfs_rq:/.spread0.70
-1043476 ± -9% -32.3% -706862 ±-34% sched_debug.cfs_rq:/.spread0.min
736.75 ± 10% +17.4% 865.00 ± 13% sched_debug.cfs_rq:/.util_avg.4
959274 ± 4% -7.8% 884275 ± 6% sched_debug.cpu.avg_idle.48
8.25 ± 5% +300.0% 33.00 ±100% sched_debug.cpu.cpu_load[0].28
8.00 ± 0% +28.1% 10.25 ± 21% sched_debug.cpu.cpu_load[0].64
7.17 ± 5% -16.3% 6.00 ± 6% sched_debug.cpu.cpu_load[0].min
8.25 ± 5% +281.8% 31.50 ± 96% sched_debug.cpu.cpu_load[1].28
8.00 ± 0% +28.1% 10.25 ± 21% sched_debug.cpu.cpu_load[1].64
7.33 ± 7% -18.2% 6.00 ± 5% sched_debug.cpu.cpu_load[1].min
8.25 ± 5% +239.4% 28.00 ± 87% sched_debug.cpu.cpu_load[2].28
8.00 ± 0% +28.1% 10.25 ± 21% sched_debug.cpu.cpu_load[2].64
7.33 ± 7% -11.4% 6.50 ± 7% sched_debug.cpu.cpu_load[2].min
8.25 ± 5% +178.8% 23.00 ± 69% sched_debug.cpu.cpu_load[3].28
8.00 ± 0% +37.5% 11.00 ± 19% sched_debug.cpu.cpu_load[3].64
7.33 ± 7% -11.4% 6.50 ± 7% sched_debug.cpu.cpu_load[3].min
8.25 ± 5% +127.3% 18.75 ± 46% sched_debug.cpu.cpu_load[4].28
8.00 ± 0% +62.5% 13.00 ± 34% sched_debug.cpu.cpu_load[4].64
7.33 ± 7% -11.4% 6.50 ± 7% sched_debug.cpu.cpu_load[4].min
1238 ± 1% +28.8% 1596 ± 25% sched_debug.cpu.curr->pid.25
1187 ± 1% +28.1% 1521 ± 30% sched_debug.cpu.curr->pid.38
1194 ± 20% +27.1% 1517 ± 17% sched_debug.cpu.curr->pid.4
1196 ± 3% +9.2% 1307 ± 3% sched_debug.cpu.curr->pid.46
8.75 ± 4% +1111.4% 106.00 ±133% sched_debug.cpu.load.28
0.00 ± 0% +Inf% 1.00 ± 0% sched_debug.cpu.nr_running.28
7011 ± 8% +61.2% 11303 ± 21% sched_debug.cpu.nr_switches.0
1997 ± 36% +356.7% 9122 ± 63% sched_debug.cpu.nr_switches.10
2119 ± 50% +236.6% 7135 ± 31% sched_debug.cpu.nr_switches.11
1955 ± 60% +470.9% 11163 ± 43% sched_debug.cpu.nr_switches.12
1654 ± 13% +727.1% 13686 ± 89% sched_debug.cpu.nr_switches.14
1632 ± 58% +668.5% 12547 ±108% sched_debug.cpu.nr_switches.19
1100 ± 17% +941.1% 11457 ±107% sched_debug.cpu.nr_switches.28
1704 ± 40% +128.6% 3896 ± 17% sched_debug.cpu.nr_switches.32
3189 ± 73% +194.5% 9391 ± 74% sched_debug.cpu.nr_switches.33
1668 ± 66% +351.9% 7539 ± 95% sched_debug.cpu.nr_switches.36
600.75 ± 24% +574.8% 4054 ± 22% sched_debug.cpu.nr_switches.38
1842 ± 71% +360.5% 8484 ± 92% sched_debug.cpu.nr_switches.43
1396 ± 73% +135.6% 3289 ± 3% sched_debug.cpu.nr_switches.44
712.50 ± 53% +1300.2% 9976 ±115% sched_debug.cpu.nr_switches.45
580.75 ± 12% +671.1% 4478 ± 41% sched_debug.cpu.nr_switches.46
597.50 ± 30% +785.6% 5291 ± 58% sched_debug.cpu.nr_switches.47
1229 ± 98% +294.0% 4843 ± 56% sched_debug.cpu.nr_switches.48
1056 ± 25% +421.6% 5510 ± 51% sched_debug.cpu.nr_switches.50
1606 ± 55% +167.9% 4304 ± 18% sched_debug.cpu.nr_switches.52
886.50 ± 64% +485.9% 5194 ± 47% sched_debug.cpu.nr_switches.54
767.25 ± 53% +1503.8% 12305 ± 27% sched_debug.cpu.nr_switches.56
1204 ± 56% +225.1% 3914 ± 30% sched_debug.cpu.nr_switches.59
554.75 ± 11% +2075.1% 12066 ±104% sched_debug.cpu.nr_switches.64
402.00 ± 5% +688.3% 3169 ± 15% sched_debug.cpu.nr_switches.66
984.00 ± 84% +320.6% 4139 ± 50% sched_debug.cpu.nr_switches.68
618.25 ± 5% +1947.4% 12657 ±118% sched_debug.cpu.nr_switches.71
1360 ± 6% +665.9% 10418 ± 63% sched_debug.cpu.nr_switches.9
4649 ± 4% +52.2% 7076 ± 4% sched_debug.cpu.nr_switches.avg
65202 ± 10% -37.3% 40871 ± 9% sched_debug.cpu.nr_switches.max
381.17 ± 2% +631.2% 2787 ± 1% sched_debug.cpu.nr_switches.min
10116 ± 12% -22.4% 7848 ± 8% sched_debug.cpu.nr_switches.stddev
-1.00 ±-141% +550.0% -6.50 ±-33% sched_debug.cpu.nr_uninterruptible.10
-1.75 ±-116% -185.7% 1.50 ±110% sched_debug.cpu.nr_uninterruptible.34
0.75 ±145% +233.3% 2.50 ± 87% sched_debug.cpu.nr_uninterruptible.45
3.25 ±117% -153.8% -1.75 ±-84% sched_debug.cpu.nr_uninterruptible.5
2860 ± 18% +232.2% 9501 ± 61% sched_debug.cpu.sched_count.10
3222 ± 53% +468.5% 18321 ±112% sched_debug.cpu.sched_count.11
3239 ± 53% +382.5% 15631 ±109% sched_debug.cpu.sched_count.18
2535 ± 36% +412.1% 12983 ±103% sched_debug.cpu.sched_count.19
1680 ± 12% +595.2% 11680 ±105% sched_debug.cpu.sched_count.28
1795 ± 69% +132.4% 4172 ± 22% sched_debug.cpu.sched_count.38
1130 ± 7% +305.4% 4583 ± 40% sched_debug.cpu.sched_count.46
1138 ± 12% +375.5% 5415 ± 57% sched_debug.cpu.sched_count.47
2161 ± 48% +160.0% 5619 ± 50% sched_debug.cpu.sched_count.50
2147 ± 41% +105.3% 4407 ± 18% sched_debug.cpu.sched_count.52
1422 ± 39% +274.7% 5331 ± 45% sched_debug.cpu.sched_count.54
1312 ± 30% +905.7% 13200 ± 33% sched_debug.cpu.sched_count.56
1757 ± 38% +131.5% 4067 ± 29% sched_debug.cpu.sched_count.59
1093 ± 6% +1106.3% 13188 ± 91% sched_debug.cpu.sched_count.64
935.00 ± 3% +255.0% 3319 ± 15% sched_debug.cpu.sched_count.66
1161 ± 1% +1002.8% 12809 ±117% sched_debug.cpu.sched_count.71
2123 ± 4% +403.3% 10689 ± 60% sched_debug.cpu.sched_count.9
916.58 ± 2% +219.9% 2931 ± 1% sched_debug.cpu.sched_count.min
366.25 ± 13% -21.0% 289.50 ± 8% sched_debug.cpu.sched_goidle.13
407.75 ± 13% -30.7% 282.50 ± 16% sched_debug.cpu.sched_goidle.17
224.75 ± 13% -42.6% 129.00 ± 18% sched_debug.cpu.sched_goidle.50
146.75 ± 6% -23.2% 112.75 ± 16% sched_debug.cpu.sched_goidle.59
173.50 ± 12% -20.2% 138.50 ± 16% sched_debug.cpu.sched_goidle.64
101.25 ± 5% +33.1% 134.75 ± 11% sched_debug.cpu.sched_goidle.66
160.75 ± 22% -33.6% 106.75 ± 7% sched_debug.cpu.sched_goidle.68
108.75 ± 7% +173.1% 297.00 ± 44% sched_debug.cpu.sched_goidle.69
346.25 ± 86% -65.1% 120.75 ± 21% sched_debug.cpu.sched_goidle.70
5894 ± 4% +30.8% 7712 ± 16% sched_debug.cpu.ttwu_count.0
922.50 ± 36% +436.3% 4947 ± 67% sched_debug.cpu.ttwu_count.10
851.00 ± 36% +419.9% 4424 ± 45% sched_debug.cpu.ttwu_count.11
899.75 ± 67% +449.4% 4943 ± 42% sched_debug.cpu.ttwu_count.12
887.25 ± 18% +587.8% 6102 ± 93% sched_debug.cpu.ttwu_count.14
898.50 ± 94% +583.8% 6144 ±110% sched_debug.cpu.ttwu_count.19
676.50 ± 72% +746.3% 5725 ±108% sched_debug.cpu.ttwu_count.28
1424 ± 46% +312.8% 5882 ± 87% sched_debug.cpu.ttwu_count.3
676.50 ± 46% +186.9% 1940 ± 22% sched_debug.cpu.ttwu_count.32
1420 ± 96% +301.4% 5701 ± 62% sched_debug.cpu.ttwu_count.33
398.25 ± 69% +452.6% 2200 ± 25% sched_debug.cpu.ttwu_count.38
555.25 ± 52% +803.5% 5016 ±118% sched_debug.cpu.ttwu_count.45
238.25 ± 21% +889.1% 2356 ± 54% sched_debug.cpu.ttwu_count.46
238.75 ± 18% +965.0% 2542 ± 55% sched_debug.cpu.ttwu_count.47
572.50 ± 23% +487.9% 3365 ± 56% sched_debug.cpu.ttwu_count.50
828.50 ± 76% +174.6% 2274 ± 29% sched_debug.cpu.ttwu_count.52
452.00 ± 94% +573.1% 3042 ± 59% sched_debug.cpu.ttwu_count.54
349.25 ± 60% +2416.0% 8787 ± 34% sched_debug.cpu.ttwu_count.56
745.00 ± 76% +193.1% 2183 ± 42% sched_debug.cpu.ttwu_count.59
246.00 ± 35% +2286.2% 5870 ±101% sched_debug.cpu.ttwu_count.64
327.00 ± 80% +395.3% 1619 ± 18% sched_debug.cpu.ttwu_count.66
460.00 ± 91% +406.3% 2329 ± 70% sched_debug.cpu.ttwu_count.68
456.50 ± 16% +1324.1% 6501 ±112% sched_debug.cpu.ttwu_count.71
1015 ± 71% +415.0% 5228 ± 59% sched_debug.cpu.ttwu_count.9
2476 ± 4% +49.3% 3696 ± 4% sched_debug.cpu.ttwu_count.avg
33209 ± 10% -38.1% 20568 ± 8% sched_debug.cpu.ttwu_count.max
156.67 ± 3% +764.3% 1354 ± 1% sched_debug.cpu.ttwu_count.min
5248 ± 12% -21.7% 4111 ± 7% sched_debug.cpu.ttwu_count.stddev
2942 ± 6% +73.1% 5095 ± 19% sched_debug.cpu.ttwu_local.0
491.75 ± 78% +756.0% 4209 ± 66% sched_debug.cpu.ttwu_local.10
375.50 ± 56% +792.9% 3353 ± 44% sched_debug.cpu.ttwu_local.11
620.25 ± 90% +641.0% 4596 ± 45% sched_debug.cpu.ttwu_local.12
352.00 ± 28% +1570.7% 5880 ± 96% sched_debug.cpu.ttwu_local.14
661.75 ±126% +788.9% 5882 ±115% sched_debug.cpu.ttwu_local.19
237.25 ± 27% +2054.8% 5112 ±121% sched_debug.cpu.ttwu_local.28
765.75 ± 75% +588.1% 5268 ± 97% sched_debug.cpu.ttwu_local.3
424.00 ± 84% +278.3% 1604 ± 21% sched_debug.cpu.ttwu_local.32
1082 ±123% +326.3% 4613 ± 80% sched_debug.cpu.ttwu_local.33
117.50 ± 10% +1533.2% 1919 ± 26% sched_debug.cpu.ttwu_local.38
518.00 ±101% +185.5% 1479 ± 1% sched_debug.cpu.ttwu_local.44
142.50 ± 30% +3284.7% 4823 ±119% sched_debug.cpu.ttwu_local.45
142.75 ± 35% +1427.7% 2180 ± 51% sched_debug.cpu.ttwu_local.46
120.75 ± 18% +1807.2% 2303 ± 50% sched_debug.cpu.ttwu_local.47
353.25 ± 36% +645.6% 2634 ± 53% sched_debug.cpu.ttwu_local.50
687.00 ± 89% +194.9% 2026 ± 24% sched_debug.cpu.ttwu_local.52
351.25 ±111% +692.1% 2782 ± 55% sched_debug.cpu.ttwu_local.54
237.75 ± 76% +2972.0% 7303 ± 35% sched_debug.cpu.ttwu_local.56
562.00 ±102% +260.7% 2027 ± 41% sched_debug.cpu.ttwu_local.59
125.75 ± 33% +4401.2% 5660 ±105% sched_debug.cpu.ttwu_local.64
101.75 ± 5% +1334.6% 1459 ± 17% sched_debug.cpu.ttwu_local.66
328.75 ±111% +519.2% 2035 ± 62% sched_debug.cpu.ttwu_local.68
124.00 ± 7% +4767.5% 6035 ±119% sched_debug.cpu.ttwu_local.71
224.75 ± 15% +2012.6% 4748 ± 66% sched_debug.cpu.ttwu_local.9
2060 ± 5% +56.6% 3226 ± 5% sched_debug.cpu.ttwu_local.avg
31657 ± 10% -38.8% 19368 ± 10% sched_debug.cpu.ttwu_local.max
95.42 ± 2% +1233.2% 1272 ± 0% sched_debug.cpu.ttwu_local.min
5013 ± 12% -23.3% 3845 ± 8% sched_debug.cpu.ttwu_local.stddev

=========================================================================================
compiler/cpufreq_governor/kconfig/rootfs/runtime/tbox_group/test/testcase:
gcc-4.9/performance/x86_64-rhel/debian-x86_64-2015-02-07.cgz/300s/ivb43/lru-file-mmap-read-rand/vm-scalability

commit:
30bdbb78009e67767983085e302bec6d97afc679
3c1da7beeee02560cd0f0c66c5a59fce3c6746e3

30bdbb78009e6776 3c1da7beeee02560cd0f0c66c5
---------------- --------------------------
%stddev %change %stddev
\ | \
215592 ± 0% +227.4% 705778 ± 2% vm-scalability.time.involuntary_context_switches
120383 ± 57% +68.5% 202856 ± 34% numa-meminfo.node0.Shmem
30143 ± 58% +68.2% 50702 ± 34% numa-vmstat.node0.nr_shmem
573860 ± 0% +24.8% 716372 ± 1% softirqs.RCU
215592 ± 0% +227.4% 705778 ± 2% time.involuntary_context_switches
4095 ± 4% +79.3% 7344 ± 3% vmstat.system.cs
874.50 ± 7% +14.0% 996.75 ± 10% slabinfo.nfs_commit_data.active_objs
874.50 ± 7% +14.0% 996.75 ± 10% slabinfo.nfs_commit_data.num_objs
87184 ±110% -80.6% 16919 ± 34% latency_stats.avg.pipe_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
276126 ±143% -90.5% 26288 ± 60% latency_stats.max.pipe_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
313115 ±129% -84.1% 49634 ± 71% latency_stats.sum.pipe_write.__vfs_write.vfs_write.SyS_write.entry_SYSCALL_64_fastpath
2.99 ± 7% -27.3% 2.17 ± 23% perf-profile.cycles-pp._raw_spin_lock.get_page_from_freelist.__alloc_pages_nodemask.alloc_pages_current.__page_cache_alloc
7.14 ± 14% +23.1% 8.79 ± 3% perf-profile.cycles-pp._raw_spin_lock_irq.shrink_inactive_list.shrink_zone_memcg.shrink_zone.do_try_to_free_pages
4.82 ± 5% -15.6% 4.07 ± 9% perf-profile.cycles-pp.get_page_from_freelist.__alloc_pages_nodemask.alloc_pages_current.__page_cache_alloc.filemap_fault
2.98 ± 7% -24.0% 2.26 ± 16% perf-profile.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock.get_page_from_freelist.__alloc_pages_nodemask.alloc_pages_current
7.43 ± 13% +22.2% 9.08 ± 2% perf-profile.cycles-pp.native_queued_spin_lock_slowpath._raw_spin_lock_irq.shrink_inactive_list.shrink_zone_memcg.shrink_zone
4037084 ± 13% -36.2% 2575072 ± 39% sched_debug.cfs_rq:/.MIN_vruntime.max
19.75 ± 19% +236.7% 66.50 ±109% sched_debug.cfs_rq:/.load.10
34.75 ± 55% -51.8% 16.75 ± 4% sched_debug.cfs_rq:/.load_avg.26
24.00 ± 25% +236.5% 80.75 ± 74% sched_debug.cfs_rq:/.load_avg.3
116.75 ± 45% -70.7% 34.25 ± 31% sched_debug.cfs_rq:/.load_avg.32
23.25 ± 25% +392.5% 114.50 ± 76% sched_debug.cfs_rq:/.load_avg.34
62.25 ± 48% -61.8% 23.75 ± 17% sched_debug.cfs_rq:/.load_avg.6
4037085 ± 13% -36.2% 2575073 ± 39% sched_debug.cfs_rq:/.max_vruntime.max
317558 ± 4% +15.0% 365053 ± 8% sched_debug.cfs_rq:/.min_vruntime.stddev
0.75 ±173% +400.0% 3.75 ± 57% sched_debug.cfs_rq:/.nr_spread_over.10
5.00 ± 54% -70.0% 1.50 ±100% sched_debug.cfs_rq:/.nr_spread_over.19
1.00 ± 70% +300.0% 4.00 ± 30% sched_debug.cfs_rq:/.nr_spread_over.34
1.75 ± 24% +200.0% 5.25 ± 34% sched_debug.cfs_rq:/.nr_spread_over.38
1.75 ± 74% +185.7% 5.00 ± 37% sched_debug.cfs_rq:/.nr_spread_over.39
0.50 ±100% +800.0% 4.50 ± 72% sched_debug.cfs_rq:/.nr_spread_over.9
2.55 ± 7% +14.8% 2.92 ± 5% sched_debug.cfs_rq:/.nr_spread_over.avg
-344654 ±-94% -107.9% 27111 ±112% sched_debug.cfs_rq:/.spread0.1
68436 ± 45% -330.2% -157521 ±-170% sched_debug.cfs_rq:/.spread0.11
100673 ± 48% -618.1% -521616 ±-151% sched_debug.cfs_rq:/.spread0.14
-22429 ±-381% -513.5% 92753 ± 47% sched_debug.cfs_rq:/.spread0.29
38527 ± 39% -282.6% -70354 ±-120% sched_debug.cfs_rq:/.spread0.3
160362 ± 28% -372.4% -436883 ±-145% sched_debug.cfs_rq:/.spread0.38
-1254087 ± -5% +33.4% -1672637 ± -9% sched_debug.cfs_rq:/.spread0.min
317498 ± 4% +15.0% 365006 ± 8% sched_debug.cfs_rq:/.spread0.stddev
15.25 ± 2% +11.5% 17.00 ± 4% sched_debug.cpu.cpu_load[0].43
15.25 ± 2% +9.8% 16.75 ± 4% sched_debug.cpu.cpu_load[1].43
21.75 ± 24% -25.3% 16.25 ± 6% sched_debug.cpu.cpu_load[2].31
58.25 ±115% -72.1% 16.25 ± 2% sched_debug.cpu.cpu_load[2].32
22.75 ± 12% -28.6% 16.25 ± 6% sched_debug.cpu.cpu_load[3].31
62.75 ±102% -74.1% 16.25 ± 2% sched_debug.cpu.cpu_load[3].32
23.50 ± 16% -30.9% 16.25 ± 6% sched_debug.cpu.cpu_load[4].31
66.75 ± 92% -75.3% 16.50 ± 3% sched_debug.cpu.cpu_load[4].32
15.25 ± 2% +47.5% 22.50 ± 44% sched_debug.cpu.cpu_load[4].46
1320 ± 1% +22.8% 1622 ± 19% sched_debug.cpu.curr->pid.11
20.00 ± 18% +223.8% 64.75 ±108% sched_debug.cpu.load.10
0.29 ± 5% +20.1% 0.35 ± 6% sched_debug.cpu.nr_running.stddev
14798 ± 65% +154.1% 37601 ± 15% sched_debug.cpu.nr_switches.10
9937 ± 21% +88.2% 18701 ± 22% sched_debug.cpu.nr_switches.14
7656 ± 21% +150.3% 19166 ± 20% sched_debug.cpu.nr_switches.16
7017 ± 50% +207.7% 21591 ± 34% sched_debug.cpu.nr_switches.17
7556 ± 53% +150.3% 18913 ± 21% sched_debug.cpu.nr_switches.21
8638 ± 28% +174.9% 23748 ± 60% sched_debug.cpu.nr_switches.22
9322 ± 55% +240.1% 31706 ± 46% sched_debug.cpu.nr_switches.26
8696 ± 43% +274.7% 32590 ± 79% sched_debug.cpu.nr_switches.27
6753 ± 78% +153.2% 17099 ± 25% sched_debug.cpu.nr_switches.28
14641 ± 77% +145.6% 35953 ± 36% sched_debug.cpu.nr_switches.3
7807 ± 28% +269.8% 28873 ± 51% sched_debug.cpu.nr_switches.30
8506 ± 47% +91.1% 16259 ± 11% sched_debug.cpu.nr_switches.31
11117 ± 68% +103.3% 22602 ± 45% sched_debug.cpu.nr_switches.32
6943 ± 65% +167.3% 18556 ± 22% sched_debug.cpu.nr_switches.33
10171 ± 68% +155.8% 26020 ± 41% sched_debug.cpu.nr_switches.37
6877 ± 24% +318.2% 28758 ± 69% sched_debug.cpu.nr_switches.39
5277 ± 58% +184.4% 15009 ± 20% sched_debug.cpu.nr_switches.40
6602 ± 52% +167.9% 17686 ± 5% sched_debug.cpu.nr_switches.41
7276 ± 51% +111.3% 15374 ± 17% sched_debug.cpu.nr_switches.42
8423 ± 41% +148.7% 20950 ± 37% sched_debug.cpu.nr_switches.43
4539 ± 24% +238.0% 15343 ± 8% sched_debug.cpu.nr_switches.46
14412 ± 59% +291.1% 56362 ± 32% sched_debug.cpu.nr_switches.9
14161 ± 4% +70.5% 24144 ± 3% sched_debug.cpu.nr_switches.avg
2185 ± 8% +440.8% 11818 ± 2% sched_debug.cpu.nr_switches.min
0.50 ±223% -750.0% -3.25 ±-70% sched_debug.cpu.nr_uninterruptible.15
3.00 ± 40% -166.7% -2.00 ±-150% sched_debug.cpu.nr_uninterruptible.19
-7.50 ±-48% -66.7% -2.50 ±-34% sched_debug.cpu.nr_uninterruptible.3
3.00 ± 33% -100.0% 0.00 ± 1% sched_debug.cpu.nr_uninterruptible.35
39452 ± 46% -47.4% 20741 ± 28% sched_debug.cpu.sched_count.1
16801 ± 57% +127.9% 38287 ± 15% sched_debug.cpu.sched_count.10
12187 ± 22% +149.2% 30367 ± 56% sched_debug.cpu.sched_count.14
9397 ± 12% +111.1% 19833 ± 19% sched_debug.cpu.sched_count.16
8959 ± 38% +167.7% 23988 ± 31% sched_debug.cpu.sched_count.17
10426 ± 36% +114.0% 22314 ± 18% sched_debug.cpu.sched_count.21
11202 ± 45% +190.9% 32583 ± 45% sched_debug.cpu.sched_count.26
8481 ± 61% +106.8% 17542 ± 24% sched_debug.cpu.sched_count.28
17072 ± 71% +114.1% 36550 ± 35% sched_debug.cpu.sched_count.3
9549 ± 23% +207.1% 29323 ± 50% sched_debug.cpu.sched_count.30
13333 ± 54% +73.7% 23165 ± 43% sched_debug.cpu.sched_count.32
8786 ± 52% +116.3% 19005 ± 22% sched_debug.cpu.sched_count.33
12829 ± 50% +105.7% 26389 ± 40% sched_debug.cpu.sched_count.37
8399 ± 19% +255.9% 29897 ± 71% sched_debug.cpu.sched_count.39
7169 ± 36% +114.4% 15372 ± 20% sched_debug.cpu.sched_count.40
8177 ± 41% +120.9% 18060 ± 5% sched_debug.cpu.sched_count.41
8804 ± 40% +79.4% 15794 ± 17% sched_debug.cpu.sched_count.42
10142 ± 32% +110.4% 21339 ± 36% sched_debug.cpu.sched_count.43
6520 ± 9% +141.1% 15723 ± 8% sched_debug.cpu.sched_count.46
16597 ± 52% +263.4% 60313 ± 32% sched_debug.cpu.sched_count.9
3900 ± 4% +216.0% 12328 ± 2% sched_debug.cpu.sched_count.min
880.50 ± 25% -48.6% 452.50 ± 12% sched_debug.cpu.sched_goidle.18
193.75 ± 9% +32.3% 256.25 ± 15% sched_debug.cpu.sched_goidle.25
1987 ±144% -88.3% 232.00 ± 7% sched_debug.cpu.sched_goidle.29
213.50 ± 16% +358.9% 979.75 ± 79% sched_debug.cpu.sched_goidle.34
986.00 ± 51% -37.2% 619.00 ± 17% sched_debug.cpu.sched_goidle.6
16830 ± 52% -44.3% 9375 ± 28% sched_debug.cpu.ttwu_count.1
7039 ± 68% +166.3% 18748 ± 19% sched_debug.cpu.ttwu_count.10
3459 ± 47% +185.6% 9878 ± 20% sched_debug.cpu.ttwu_count.16
3433 ± 81% +184.1% 9753 ± 39% sched_debug.cpu.ttwu_count.17
3440 ± 51% +177.2% 9537 ± 13% sched_debug.cpu.ttwu_count.21
4884 ± 25% +253.3% 17253 ± 77% sched_debug.cpu.ttwu_count.27
2988 ± 89% +211.2% 9302 ± 30% sched_debug.cpu.ttwu_count.28
7481 ± 83% +142.4% 18133 ± 37% sched_debug.cpu.ttwu_count.3
4037 ± 37% +262.8% 14647 ± 52% sched_debug.cpu.ttwu_count.30
5684 ± 72% +92.2% 10924 ± 49% sched_debug.cpu.ttwu_count.32
4041 ± 73% +134.9% 9491 ± 25% sched_debug.cpu.ttwu_count.33
5213 ± 76% +161.0% 13609 ± 36% sched_debug.cpu.ttwu_count.37
4406 ± 7% +257.9% 15771 ± 67% sched_debug.cpu.ttwu_count.39
3562 ± 69% +124.8% 8009 ± 27% sched_debug.cpu.ttwu_count.40
4205 ± 50% +135.0% 9883 ± 8% sched_debug.cpu.ttwu_count.41
4752 ± 41% +64.2% 7804 ± 16% sched_debug.cpu.ttwu_count.42
4313 ± 53% +162.1% 11304 ± 42% sched_debug.cpu.ttwu_count.43
3358 ± 40% +143.8% 8189 ± 5% sched_debug.cpu.ttwu_count.46
5767 ± 87% +123.1% 12865 ± 34% sched_debug.cpu.ttwu_count.8
7369 ± 50% +274.2% 27574 ± 34% sched_debug.cpu.ttwu_count.9
7267 ± 4% +69.9% 12345 ± 3% sched_debug.cpu.ttwu_count.avg
724.92 ± 24% +670.8% 5587 ± 2% sched_debug.cpu.ttwu_count.min
5707 ± 77% +195.4% 16860 ± 18% sched_debug.cpu.ttwu_local.10
3420 ± 32% +114.0% 7321 ± 25% sched_debug.cpu.ttwu_local.14
2103 ± 45% +276.7% 7923 ± 18% sched_debug.cpu.ttwu_local.16
2145 ± 88% +306.4% 8719 ± 43% sched_debug.cpu.ttwu_local.17
4593 ± 47% +106.0% 9460 ± 24% sched_debug.cpu.ttwu_local.19
2291 ± 77% +228.7% 7529 ± 19% sched_debug.cpu.ttwu_local.21
3154 ± 41% +234.0% 10536 ± 65% sched_debug.cpu.ttwu_local.22
3999 ± 64% +271.5% 14859 ± 48% sched_debug.cpu.ttwu_local.26
3459 ± 47% +343.9% 15358 ± 84% sched_debug.cpu.ttwu_local.27
2257 ±110% +247.8% 7852 ± 29% sched_debug.cpu.ttwu_local.28
5672 ± 98% +182.6% 16029 ± 42% sched_debug.cpu.ttwu_local.3
3129 ± 30% +325.9% 13329 ± 52% sched_debug.cpu.ttwu_local.30
3304 ± 65% +123.3% 7379 ± 11% sched_debug.cpu.ttwu_local.31
4521 ± 87% +122.7% 10070 ± 50% sched_debug.cpu.ttwu_local.32
2686 ± 82% +213.3% 8414 ± 25% sched_debug.cpu.ttwu_local.33
4285 ± 83% +176.6% 11854 ± 42% sched_debug.cpu.ttwu_local.37
2851 ± 15% +372.4% 13470 ± 74% sched_debug.cpu.ttwu_local.39
1973 ± 72% +234.2% 6595 ± 18% sched_debug.cpu.ttwu_local.40
2780 ± 66% +185.7% 7942 ± 5% sched_debug.cpu.ttwu_local.41
2870 ± 57% +143.5% 6990 ± 16% sched_debug.cpu.ttwu_local.42
3013 ± 51% +227.4% 9865 ± 39% sched_debug.cpu.ttwu_local.43
1891 ± 41% +257.7% 6765 ± 7% sched_debug.cpu.ttwu_local.46
5932 ± 67% +345.9% 26456 ± 35% sched_debug.cpu.ttwu_local.9
5805 ± 5% +84.3% 10697 ± 4% sched_debug.cpu.ttwu_local.avg
401.12 ± 26% +1175.6% 5116 ± 4% sched_debug.cpu.ttwu_local.min


ivb43: Ivytown Ivy Bridge-EP
Memory: 64G

lkp-hsw-ep2: Brickland Haswell-EP
Memory: 128G


To reproduce:

git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/lkp install job.yaml # job file is attached in this email
bin/lkp run job.yaml


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Ying Huang
---
LKP_SERVER: inn
LKP_CGI_PORT: 80
LKP_CIFS_PORT: 139
testcase: vm-scalability
default-monitors:
wait: activate-monitor
kmsg:
uptime:
iostat:
vmstat:
numa-numastat:
numa-vmstat:
numa-meminfo:
proc-vmstat:
proc-stat:
interval: 10
meminfo:
slabinfo:
interrupts:
lock_stat:
latency_stats:
softirqs:
bdi_dev_mapping:
diskstats:
nfsstat:
cpuidle:
cpufreq-stats:
turbostat:
pmeter:
sched_debug:
interval: 60
cpufreq_governor: performance
default-watchdogs:
oom-killer:
watchdog:
commit: 3c1da7beeee02560cd0f0c66c5a59fce3c6746e3
model: Ivytown Ivy Bridge-EP
nr_cpu: 48
memory: 64G
nr_hdd_partitions: 0
hdd_partitions:
rootfs_partition:
swap_partitions:
category: benchmark
perf-profile:
runtime: 300s
size:
vm-scalability:
test: lru-file-mmap-read-rand
queue: bisect
testbox: ivb43
tbox_group: ivb43
kconfig: x86_64-rhel
enqueue_time: 2016-02-07 15:24:35.445869585 +08:00
compiler: gcc-4.9
rootfs: debian-x86_64-2015-02-07.cgz
id: e7e7e31b789b2c834a15c63baad4abc08e6e7e71
user: lkp
head_commit: 7295f5f143aa915ec468192384088fae9b13ab3e
base_commit: 36f90b0a2ddd60823fe193a85e60ff1906c2a9b3
branch: linux-devel/devel-hourly-2016020701
result_root: "/result/vm-scalability/performance-300s-lru-file-mmap-read-rand/ivb43/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/3c1da7beeee02560cd0f0c66c5a59fce3c6746e3/0"
job_file: "/lkp/scheduled/ivb43/bisect_vm-scalability-performance-300s-lru-file-mmap-read-rand-debian-x86_64-2015-02-07.cgz-x86_64-rhel-3c1da7beeee02560cd0f0c66c5a59fce3c6746e3-20160207-109523-1pvb270-0.yaml"
max_uptime: 1500
initrd: "/osimage/debian/debian-x86_64-2015-02-07.cgz"
bootloader_append:
- root=/dev/ram0
- user=lkp
- job=/lkp/scheduled/ivb43/bisect_vm-scalability-performance-300s-lru-file-mmap-read-rand-debian-x86_64-2015-02-07.cgz-x86_64-rhel-3c1da7beeee02560cd0f0c66c5a59fce3c6746e3-20160207-109523-1pvb270-0.yaml
- ARCH=x86_64
- kconfig=x86_64-rhel
- branch=linux-devel/devel-hourly-2016020701
- commit=3c1da7beeee02560cd0f0c66c5a59fce3c6746e3
- BOOT_IMAGE=/pkg/linux/x86_64-rhel/gcc-4.9/3c1da7beeee02560cd0f0c66c5a59fce3c6746e3/vmlinuz-4.5.0-rc2-00177-g3c1da7b
- max_uptime=1500
- RESULT_ROOT=/result/vm-scalability/performance-300s-lru-file-mmap-read-rand/ivb43/debian-x86_64-2015-02-07.cgz/x86_64-rhel/gcc-4.9/3c1da7beeee02560cd0f0c66c5a59fce3c6746e3/0
- LKP_SERVER=inn
- |2-


earlyprintk=ttyS0,115200 systemd.log_level=err
debug apic=debug sysrq_always_enabled rcupdate.rcu_cpu_stall_timeout=100
panic=-1 softlockup_panic=1 nmi_watchdog=panic oops=panic load_ramdisk=2 prompt_ramdisk=0
console=ttyS0,115200 console=tty0 vga=normal

rw
lkp_initrd: "/lkp/lkp/lkp-x86_64.cgz"
modules_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/3c1da7beeee02560cd0f0c66c5a59fce3c6746e3/modules.cgz"
bm_initrd: "/osimage/deps/debian-x86_64-2015-02-07.cgz/lkp.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/run-ipconfig.cgz,/osimage/deps/debian-x86_64-2015-02-07.cgz/turbostat.cgz,/lkp/benchmarks/turbostat.cgz,/lkp/benchmarks/vm-scalability.cgz"
linux_headers_initrd: "/pkg/linux/x86_64-rhel/gcc-4.9/3c1da7beeee02560cd0f0c66c5a59fce3c6746e3/linux-headers.cgz"
repeat_to: 2
kernel: "/pkg/linux/x86_64-rhel/gcc-4.9/3c1da7beeee02560cd0f0c66c5a59fce3c6746e3/vmlinuz-4.5.0-rc2-00177-g3c1da7b"
dequeue_time: 2016-02-07 15:36:28.483289901 +08:00
job_state: finished
loadavg: 22.38 27.30 13.31 2/524 8526
start_time: '1454830630'
end_time: '1454830951'
version: "/lkp/lkp/.src-20160206-221639"

Attachment: reproduce.sh
Description: Bourne shell script