[LKP] [mm] 8a0516ed8b9: -1.7% netperf.Throughput_Mbps, +2189.6% netperf.time.minor_page_faults, +3987.5% proc-vmstat.numa_pte_updates

From: Huang Ying
Date: Thu Feb 26 2015 - 00:20:31 EST


FYI, we noticed the below changes on

git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
commit 8a0516ed8b90c95ffa1363b420caa37418149f21 ("mm: convert p[te|md]_numa users to p[te|md]_protnone_numa")
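For context, the commit replaces the dedicated pte_numa()/pmd_numa() checks with protnone-style ones, so NUMA hinting faults are taken as ordinary protection (minor) faults. A simplified, self-contained sketch of that kind of "present for hinting but not accessible" test (illustrative bit values only, not the kernel's real pgtable headers):

	/*
	 * Illustrative sketch only -- made-up bit layout, not the kernel's
	 * actual headers. The old pte_numa() test on a dedicated _PAGE_NUMA
	 * bit is replaced by a protnone-style test, so a NUMA hinting fault
	 * shows up as an ordinary protection/minor fault.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_PRESENT   (1u << 0)   /* illustrative bit positions */
	#define PAGE_PROTNONE  (1u << 8)

	typedef uint32_t pte_t;

	static int pte_protnone_like(pte_t pte)
	{
		/* mapped for hinting purposes, but any access will fault */
		return (pte & (PAGE_PROTNONE | PAGE_PRESENT)) == PAGE_PROTNONE;
	}

	int main(void)
	{
		pte_t hinting = PAGE_PROTNONE;  /* PTE marked for a NUMA hinting fault */
		pte_t normal  = PAGE_PRESENT;   /* ordinary accessible mapping */

		printf("hinting pte: %d, normal pte: %d\n",
		       pte_protnone_like(hinting), pte_protnone_like(normal));
		return 0;
	}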


testbox/testcase/testparams: lkp-sbx04/netperf/performance-900s-200%-TCP_MAERTS

e7bb4b6d1609cce3 8a0516ed8b90c95ffa1363b420
---------------- --------------------------
%stddev %change %stddev
\ | \
226261 ± 1% +2189.6% 5180560 ± 0% netperf.time.minor_page_faults
721 ± 0% -1.7% 709 ± 0% netperf.Throughput_Mbps
12341 ± 16% -100.0% 0 ± 0% proc-vmstat.numa_pages_migrated
364595 ± 3% -100.0% 0 ± 0% proc-vmstat.numa_hint_faults_local
388922 ± 4% -100.0% 0 ± 0% proc-vmstat.numa_hint_faults
226261 ± 1% +2189.6% 5180560 ± 0% time.minor_page_faults
388831 ± 3% +3987.5% 15893407 ± 0% proc-vmstat.numa_pte_updates
12341 ± 16% -100.0% 0 ± 0% proc-vmstat.pgmigrate_success
47 ± 42% -60.3% 18 ± 13% sched_debug.cfs_rq[5]:/.blocked_load_avg
73 ± 19% -53.9% 34 ± 18% sched_debug.cfs_rq[46]:/.load
32 ± 20% +75.0% 56 ± 32% sched_debug.cpu#32.load
27 ± 37% +61.1% 43 ± 27% sched_debug.cfs_rq[15]:/.blocked_load_avg
54 ± 20% -43.8% 30 ± 5% sched_debug.cfs_rq[17]:/.load
57 ± 30% -39.8% 34 ± 17% sched_debug.cfs_rq[53]:/.load
70 ± 29% -41.3% 41 ± 8% sched_debug.cfs_rq[5]:/.tg_load_contrib
64 ± 20% -27.9% 46 ± 14% sched_debug.cpu#26.load
34 ± 21% +68.6% 57 ± 1% sched_debug.cfs_rq[15]:/.load
60 ± 21% -28.2% 43 ± 26% sched_debug.cfs_rq[6]:/.load
50 ± 18% +33.2% 67 ± 18% sched_debug.cfs_rq[15]:/.tg_load_contrib
62 ± 28% -40.6% 37 ± 32% sched_debug.cfs_rq[30]:/.load
59 ± 18% -33.5% 39 ± 14% sched_debug.cfs_rq[62]:/.load
556 ± 25% -54.2% 255 ± 36% sched_debug.cpu#59.sched_goidle
1.63 ± 2% -31.2% 1.12 ± 0% perf-profile.cpu-cycles._raw_spin_lock.free_one_page.__free_pages_ok.free_compound_page.put_compound_page
50 ± 40% -35.5% 32 ± 16% sched_debug.cpu#43.load
31 ± 18% +39.7% 44 ± 22% sched_debug.cpu#53.load
2.18 ± 3% -29.0% 1.55 ± 3% perf-profile.cpu-cycles.free_one_page.__free_pages_ok.free_compound_page.put_compound_page.put_page
46 ± 13% -37.6% 29 ± 31% sched_debug.cfs_rq[16]:/.blocked_load_avg
51 ± 26% -36.6% 32 ± 6% sched_debug.cpu#7.load
73 ± 13% -20.8% 58 ± 9% sched_debug.cfs_rq[51]:/.tg_load_contrib
1.77 ± 2% -25.1% 1.33 ± 1% perf-profile.cpu-cycles._raw_spin_lock_irqsave.get_page_from_freelist.__alloc_pages_nodemask.alloc_pages_current.skb_page_frag_refill
58 ± 23% -38.4% 35 ± 24% sched_debug.cfs_rq[2]:/.load
8833788 ± 8% +22.5% 10821104 ± 12% sched_debug.cfs_rq[12]:/.max_vruntime
8833787 ± 8% +22.5% 10821104 ± 12% sched_debug.cfs_rq[12]:/.MIN_vruntime
1951 ± 12% +20.1% 2343 ± 12% sched_debug.cpu#9.curr->pid
112948 ± 2% +25.6% 141909 ± 11% sched_debug.cpu#32.sched_count
1955 ± 9% +17.3% 2293 ± 9% sched_debug.cpu#46.curr->pid
9533920 ± 16% +31.8% 12561711 ± 13% sched_debug.cfs_rq[53]:/.max_vruntime
9533919 ± 16% +31.8% 12561711 ± 13% sched_debug.cfs_rq[53]:/.MIN_vruntime
0.97 ± 10% -15.7% 0.82 ± 6% perf-profile.cpu-cycles.tcp_send_mss.tcp_sendmsg.inet_sendmsg.do_sock_sendmsg.SYSC_sendto
59313 ± 24% -21.3% 46703 ± 2% sched_debug.cpu#25.ttwu_count
3.92 ± 2% -17.1% 3.25 ± 0% perf-profile.cpu-cycles.put_compound_page.put_page.skb_release_data.skb_release_all.__kfree_skb
3.72 ± 2% -16.4% 3.11 ± 0% perf-profile.cpu-cycles.free_compound_page.put_compound_page.put_page.skb_release_data.skb_release_all
3.65 ± 1% -16.8% 3.04 ± 0% perf-profile.cpu-cycles.__free_pages_ok.free_compound_page.put_compound_page.put_page.skb_release_data
1853 ± 9% +15.7% 2144 ± 5% sched_debug.cpu#45.curr->pid
1769 ± 4% +19.9% 2121 ± 6% sched_debug.cpu#61.curr->pid
5.97 ± 2% -16.1% 5.01 ± 0% perf-profile.cpu-cycles.tcp_rcv_established.tcp_v4_do_rcv.tcp_v4_rcv.ip_local_deliver_finish.ip_local_deliver
1.59 ± 2% -14.2% 1.37 ± 2% perf-profile.cpu-cycles.sk_stream_alloc_skb.tcp_sendmsg.inet_sendmsg.do_sock_sendmsg.SYSC_sendto
2.65 ± 3% -17.1% 2.20 ± 1% perf-profile.cpu-cycles.tcp_transmit_skb.tcp_write_xmit.__tcp_push_pending_frames.tcp_rcv_established.tcp_v4_do_rcv
6.19 ± 1% -15.3% 5.25 ± 0% perf-profile.cpu-cycles.tcp_v4_do_rcv.tcp_v4_rcv.ip_local_deliver_finish.ip_local_deliver.ip_rcv_finish
4.45 ± 1% -15.6% 3.75 ± 0% perf-profile.cpu-cycles.put_page.skb_release_data.skb_release_all.__kfree_skb.tcp_recvmsg
693150 ± 3% +14.6% 794663 ± 6% sched_debug.cpu#1.avg_idle
8.14 ± 1% -14.7% 6.94 ± 0% perf-profile.cpu-cycles.ip_rcv.__netif_receive_skb_core.__netif_receive_skb.process_backlog.net_rx_action
7.37 ± 2% -14.8% 6.28 ± 1% perf-profile.cpu-cycles.tcp_v4_rcv.ip_local_deliver_finish.ip_local_deliver.ip_rcv_finish.ip_rcv
6.70 ± 1% -14.3% 5.74 ± 1% perf-profile.cpu-cycles.__kfree_skb.tcp_recvmsg.inet_recvmsg.sock_recvmsg.SYSC_recvfrom
1988 ± 7% -8.0% 1829 ± 7% sched_debug.cpu#33.curr->pid
5.21 ± 1% -14.0% 4.49 ± 0% perf-profile.cpu-cycles.skb_release_data.skb_release_all.__kfree_skb.tcp_recvmsg.inet_recvmsg
7.93 ± 2% -14.5% 6.78 ± 0% perf-profile.cpu-cycles.ip_rcv_finish.ip_rcv.__netif_receive_skb_core.__netif_receive_skb.process_backlog
7.73 ± 2% -14.3% 6.62 ± 0% perf-profile.cpu-cycles.ip_local_deliver.ip_rcv_finish.ip_rcv.__netif_receive_skb_core.__netif_receive_skb
7.70 ± 2% -14.6% 6.58 ± 0% perf-profile.cpu-cycles.ip_local_deliver_finish.ip_local_deliver.ip_rcv_finish.ip_rcv.__netif_receive_skb_core
8.90 ± 1% -14.1% 7.65 ± 0% perf-profile.cpu-cycles.__do_softirq.do_softirq_own_stack.do_softirq.__local_bh_enable_ip.ip_finish_output
39826 ± 8% -11.0% 35446 ± 1% sched_debug.cpu#22.ttwu_local
6.29 ± 1% -14.3% 5.38 ± 0% perf-profile.cpu-cycles.skb_release_all.__kfree_skb.tcp_recvmsg.inet_recvmsg.sock_recvmsg
8.68 ± 2% -14.1% 7.45 ± 0% perf-profile.cpu-cycles.net_rx_action.__do_softirq.do_softirq_own_stack.do_softirq.__local_bh_enable_ip
8.43 ± 2% -13.5% 7.29 ± 0% perf-profile.cpu-cycles.process_backlog.net_rx_action.__do_softirq.do_softirq_own_stack.do_softirq
8.30 ± 2% -13.5% 7.18 ± 0% perf-profile.cpu-cycles.__netif_receive_skb_core.__netif_receive_skb.process_backlog.net_rx_action.__do_softirq
8.32 ± 2% -13.4% 7.21 ± 0% perf-profile.cpu-cycles.__netif_receive_skb.process_backlog.net_rx_action.__do_softirq.do_softirq_own_stack
8.99 ± 2% -13.3% 7.79 ± 1% perf-profile.cpu-cycles.do_softirq.part.13.__local_bh_enable_ip.ip_finish_output.ip_output.ip_local_out_sk
9.02 ± 1% -13.3% 7.82 ± 1% perf-profile.cpu-cycles.__local_bh_enable_ip.ip_finish_output.ip_output.ip_local_out_sk.ip_queue_xmit
1.02 ± 6% -15.9% 0.86 ± 3% perf-profile.cpu-cycles.ip_queue_xmit.tcp_transmit_skb.tcp_write_xmit.__tcp_push_pending_frames.tcp_rcv_established
10430785 ± 7% -16.8% 8673532 ± 9% sched_debug.cfs_rq[49]:/.max_vruntime
10430785 ± 7% -16.8% 8673530 ± 9% sched_debug.cfs_rq[49]:/.MIN_vruntime
11509303 ± 9% -23.1% 8846526 ± 14% sched_debug.cfs_rq[13]:/.MIN_vruntime
11509303 ± 9% -23.1% 8846526 ± 14% sched_debug.cfs_rq[13]:/.max_vruntime
8.95 ± 1% -13.2% 7.77 ± 0% perf-profile.cpu-cycles.do_softirq_own_stack.do_softirq.__local_bh_enable_ip.ip_finish_output.ip_output
40589 ± 6% -10.4% 36365 ± 3% sched_debug.cpu#54.ttwu_local
4.34 ± 1% -12.8% 3.78 ± 1% perf-profile.cpu-cycles.alloc_pages_current.skb_page_frag_refill.sk_page_frag_refill.tcp_sendmsg.inet_sendmsg
3.74 ± 1% -14.7% 3.19 ± 1% perf-profile.cpu-cycles.get_page_from_freelist.__alloc_pages_nodemask.alloc_pages_current.skb_page_frag_refill.sk_page_frag_refill
1.02 ± 6% -14.0% 0.88 ± 4% perf-profile.cpu-cycles.skb_release_head_state.skb_release_all.__kfree_skb.tcp_recvmsg.inet_recvmsg
15.16 ± 0% +15.4% 17.50 ± 0% perf-profile.cpu-cycles.copy_user_generic_string.tcp_sendmsg.inet_sendmsg.do_sock_sendmsg.SYSC_sendto
10.04 ± 1% -12.1% 8.82 ± 0% perf-profile.cpu-cycles.ip_output.ip_local_out_sk.ip_queue_xmit.tcp_transmit_skb.tcp_send_ack
10.11 ± 1% -12.1% 8.88 ± 0% perf-profile.cpu-cycles.ip_local_out_sk.ip_queue_xmit.tcp_transmit_skb.tcp_send_ack.tcp_cleanup_rbuf
38453 ± 6% -9.4% 34832 ± 2% sched_debug.cpu#12.ttwu_local
10.22 ± 1% -12.5% 8.94 ± 0% perf-profile.cpu-cycles.ip_finish_output.ip_output.ip_local_out_sk.ip_queue_xmit.tcp_transmit_skb
4.16 ± 1% -13.3% 3.60 ± 1% perf-profile.cpu-cycles.__alloc_pages_nodemask.alloc_pages_current.skb_page_frag_refill.sk_page_frag_refill.tcp_sendmsg
3.71 ± 2% -11.8% 3.27 ± 0% perf-profile.cpu-cycles.__tcp_push_pending_frames.tcp_rcv_established.tcp_v4_do_rcv.tcp_v4_rcv.ip_local_deliver_finish
24 ± 9% -14.4% 20 ± 3% sched_debug.cfs_rq[23]:/.runnable_load_avg
23 ± 4% -10.6% 21 ± 3% sched_debug.cpu#23.cpu_load[0]
5196 ± 6% +19.6% 6213 ± 18% numa-vmstat.node1.nr_anon_pages
20787 ± 6% +19.6% 24855 ± 18% numa-meminfo.node1.AnonPages
3.57 ± 2% -12.9% 3.11 ± 0% perf-profile.cpu-cycles.tcp_write_xmit.__tcp_push_pending_frames.tcp_rcv_established.tcp_v4_do_rcv.tcp_v4_rcv
10.44 ± 1% -12.2% 9.17 ± 0% perf-profile.cpu-cycles.ip_queue_xmit.tcp_transmit_skb.tcp_send_ack.tcp_cleanup_rbuf.tcp_recvmsg
52615 ± 7% -10.5% 47095 ± 3% sched_debug.cpu#27.ttwu_count
747189 ± 2% +9.4% 817106 ± 5% sched_debug.cpu#5.avg_idle
4.58 ± 2% -13.0% 3.98 ± 1% perf-profile.cpu-cycles.skb_page_frag_refill.sk_page_frag_refill.tcp_sendmsg.inet_sendmsg.do_sock_sendmsg
4.95 ± 1% -12.0% 4.36 ± 1% perf-profile.cpu-cycles.sk_page_frag_refill.tcp_sendmsg.inet_sendmsg.do_sock_sendmsg.SYSC_sendto
12.85 ± 1% -11.8% 11.33 ± 0% perf-profile.cpu-cycles.tcp_cleanup_rbuf.tcp_recvmsg.inet_recvmsg.sock_recvmsg.SYSC_recvfrom
9.45 ± 1% -11.5% 8.36 ± 0% perf-profile.cpu-cycles.tcp_transmit_skb.tcp_send_ack.tcp_cleanup_rbuf.tcp_recvmsg.inet_recvmsg
12.65 ± 1% -11.8% 11.16 ± 0% perf-profile.cpu-cycles.tcp_send_ack.tcp_cleanup_rbuf.tcp_recvmsg.inet_recvmsg.sock_recvmsg
1967 ± 5% -11.3% 1744 ± 4% sched_debug.cpu#38.curr->pid
1.50 ± 3% -11.2% 1.33 ± 2% perf-profile.cpu-cycles.__alloc_skb.sk_stream_alloc_skb.tcp_sendmsg.inet_sendmsg.do_sock_sendmsg
1796 ± 7% +16.9% 2101 ± 3% sched_debug.cpu#20.curr->pid
52803 ± 8% -9.1% 48004 ± 2% sched_debug.cpu#22.ttwu_count
121926 ± 3% -7.3% 113073 ± 3% sched_debug.cpu#27.nr_switches
122808 ± 3% -7.8% 113235 ± 3% sched_debug.cpu#27.sched_count
121572 ± 5% -7.1% 112948 ± 2% sched_debug.cpu#46.nr_switches
39590 ± 3% -6.3% 37095 ± 4% sched_debug.cpu#43.ttwu_local
18366 ± 0% -5.0% 17440 ± 0% vmstat.system.cs
70241 ± 0% -2.8% 68273 ± 0% vmstat.system.in

lkp-sbx04: Sandy Bridge-EX
Memory: 64G




time.minor_page_faults

  [plot, y range 0 to 5.5e+06: the bulk of samples sit near 5e+05, while bisect-bad (O) samples on 8a0516ed8b90 jump to roughly 5e+06]


netperf.time.minor_page_faults

  [plot, y range 0 to 5.5e+06: same pattern; samples near 5e+05 before, roughly 5e+06 on the bisect-bad commit]

To reproduce:

apt-get install ruby
git clone git://git.kernel.org/pub/scm/linux/kernel/git/wfg/lkp-tests.git
cd lkp-tests
bin/setup-local job.yaml # the job file attached in this email
bin/run-local job.yaml
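
To look at the counters flagged above around a run, a small stand-alone helper such as the illustrative sketch below (not part of lkp-tests) can dump them from /proc/vmstat before and after the job:

	/* Illustrative helper, not part of lkp-tests: prints the /proc/vmstat
	 * counters that moved in this report (numa_pte_updates, numa_hint_faults,
	 * numa_hint_faults_local, numa_pages_migrated, pgmigrate_success).
	 */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		static const char *keys[] = {
			"numa_pte_updates", "numa_hint_faults",
			"numa_hint_faults_local", "numa_pages_migrated",
			"pgmigrate_success",
		};
		char name[64];
		unsigned long long val;
		FILE *f = fopen("/proc/vmstat", "r");

		if (!f) {
			perror("/proc/vmstat");
			return 1;
		}
		while (fscanf(f, "%63s %llu", name, &val) == 2)
			for (size_t i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
				if (strcmp(name, keys[i]) == 0)
					printf("%-24s %llu\n", name, val);
		fclose(f);
		return 0;
	}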


Disclaimer:
Results have been estimated based on internal Intel analysis and are provided
for informational purposes only. Any difference in system hardware or software
design or configuration may affect actual performance.


Thanks,
Ying Huang

---
testcase: netperf
default-monitors:
  wait: pre-test
  uptime:
  iostat:
  vmstat:
  numa-numastat:
  numa-vmstat:
  numa-meminfo:
  proc-vmstat:
  proc-stat:
  meminfo:
  slabinfo:
  interrupts:
  lock_stat:
  latency_stats:
  softirqs:
  bdi_dev_mapping:
  diskstats:
  nfsstat:
  cpuidle:
  cpufreq-stats:
  turbostat:
  pmeter:
  sched_debug:
    interval: 10
default_watchdogs:
  watch-oom:
  watchdog:
cpufreq_governor: performance
commit: 5aeb2a3dc4f0ea47fe0df3cb3af75ef813dda833
model: Sandy Bridge-EX
nr_cpu: 64
memory: 64G
nr_ssd_partitions: 4
ssd_partitions: "/dev/disk/by-id/ata-INTEL_SSDSC2CW240A3_CVCV20430*-part1"
swap_partitions:
runtime: 900s
nr_threads: 200%
perf-profile:
  freq: 800
netperf:
  test: TCP_MAERTS
testbox: lkp-sbx04
tbox_group: lkp-sbx04
kconfig: x86_64-rhel
enqueue_time: 2015-02-16 11:17:01.042650836 +08:00
head_commit: 5aeb2a3dc4f0ea47fe0df3cb3af75ef813dda833
base_commit: bfa76d49576599a4b9f9b7a71f23d73d6dcff735
branch: linux-devel/devel-hourly-2015021623
kernel: "/kernel/x86_64-rhel/5aeb2a3dc4f0ea47fe0df3cb3af75ef813dda833/vmlinuz-3.19.0-wl-ath-02305-g5aeb2a3"
user: lkp
queue: cyclic
rootfs: debian-x86_64-2015-02-07.cgz
result_root: "/result/lkp-sbx04/netperf/performance-900s-200%-TCP_MAERTS/debian-x86_64-2015-02-07.cgz/x86_64-rhel/5aeb2a3dc4f0ea47fe0df3cb3af75ef813dda833/0"
job_file: "/lkp/scheduled/lkp-sbx04/cyclic_netperf-performance-900s-200%-TCP_MAERTS-x86_64-rhel-HEAD-5aeb2a3dc4f0ea47fe0df3cb3af75ef813dda833-0-20150216-93206-1sf912c.yaml"
dequeue_time: 2015-02-17 06:06:04.095206083 +08:00
job_state: finished
loadavg: 108.28 143.27 98.93 1/578 21192
start_time: '1424124423'
end_time: '1424125325'
version: "/lkp/lkp/.src-20150216-162040"
netserver
netperf -t TCP_MAERTS -c -C -l 900   # 128 concurrent instances (nr_threads: 200% of 64 CPUs)