Score:0

High cpu usage over TCP

mw flag

I'm running HAProxy 2.4, and when HAProxy is configured in TCP mode I get high CPU usage.

Example:

Running the stress tool to generate around 30k connections:

./pst -c 100000 -r 100000 -u urlist.txt -p http://192.168.58.13:3128

The CPU usage in all HAProxy processes spikes to near 100%, and the load is 4+. Running the same test in HTTP mode, the CPU usage is around 10% on each process and the load is 1.20.

I also tried using nbthread instead of nbproc, but got the same results.

Also, this problem continues on HAProxy 2.5.

Config file in HTTP mode:

#  1,5
# CPU(s) 11
global
    log 127.0.0.1 local0
    maxconn 200000
    nbproc           11
    cpu-map           1 0
    cpu-map           2 1
    cpu-map           3 2
    cpu-map           4 3
    cpu-map           5 4
    cpu-map           6 5
    cpu-map           7 6
    cpu-map           8 7
    cpu-map           9 8
    cpu-map           10 9
    cpu-map           11 10
    uid 0
    gid 0
    chroot  /tmp
    pidfile /var/run/haproxy.pid
    stats socket /var/run/haproxy.stat mode 600 level admin
    stats bind-process 1
    daemon
#   debug
#   quiet

defaults
    log global
    log-format  HASTATS:::%{+Q}o:::client_ip=%ci:::client_port=%cp:::datetime_of_request=[%tr]:::frontend_name_transport=%ft:::backend_name=%b:::server_name=%s:::time_to_receive_full_request=%TR:::Tw=%Tw:::Tc=%Tc:::response_time=%Tr:::active_time_of_request=%Ta:::status_code=%ST:::bytes_read=%B:::captured_request_cookie=%CC:::captured_response_cookie=%CS:::termination_state_with_cookie_status=%tsc:::actconn=%ac:::feconn=%fc:::beconn=%bc:::srv_conn=%sc:::retries=%rc:::srv_queue=%sq:::backend_queue=%bq:::captured_request_headers_default_style=%hr:::captured_response_headers_default_style=%hs:::server_ip=%si:::server_port=%sp:::frontend_name=%f:::http_method=%HM:::http_request_uri_without_query=%HP:::http_request_query_string=%HQ:::http_request_uri=%HU:::bytes_uploaded=%U:::ssl_ciphers=%sslc:::ssl_version=%sslv:::%[capture.res.hdr(0)]
    mode    http
    option  dontlognull
    retries 3
    option  redispatch
    maxconn 200000
    timeout connect 5000
    timeout client  50000
    timeout server  50000

frontend ddsds
    bind    :3128 name ddsds
    capture request header Host len 1024
    capture request header Content-Type len 1024
    capture request header User-Agent len 1024
    capture request header Referer len 1024
    capture request header X-Forwarded-For len 1024
    capture response header Content-Type len 1024
    capture cookie Cookie_2 len 100
    http-request set-header mode mode:tcp
    http-request capture hdr(mode)  len 10
    mode http
    option  httplog
#   http-keep-alive: * 1 *
    option http-keep-alive
    timeout http-keep-alive 15000
    option prefer-last-server
    option redispatch
    no option httpclose
    option http-tunnel
    timeout client  15000
    timeout http-request    50000
    timeout queue   50000

    default_backend default_ddsds

backend default_ddsds
    mode http
    balance leastconn
#   http-keep-alive: * 1 *
    option http-keep-alive
    timeout http-keep-alive 15000
    option prefer-last-server
    option redispatch
    no option httpclose
    option http-tunnel
    timeout connect 4000
    timeout server  50000
    timeout http-request    50000
    timeout queue   50000
    retries 3

    server normal_port 192.168.58.12:50877 weight 1 maxconn 10000 check inter 60000 rise 2 fall 3 source 192.168.58.13

# ddsds no result
frontend admin_page
    bind    127.0.0.1:64741
    mode http
    stats enable
    stats refresh 10s
    stats uri /stats

Config file in TCP mode:

#  1,5
# CPU(s) 11
global
    log 127.0.0.1 local0
    maxconn 200000
    nbproc           11
    cpu-map           1 0
    cpu-map           2 1
    cpu-map           3 2
    cpu-map           4 3
    cpu-map           5 4
    cpu-map           6 5
    cpu-map           7 6
    cpu-map           8 7
    cpu-map           9 8
    cpu-map           10 9
    cpu-map           11 10
    uid 0
    gid 0
    chroot  /tmp
    pidfile /var/run/haproxy.pid
    stats socket /var/run/haproxy.stat mode 600 level admin
    stats bind-process 1
    daemon
#   debug
#   quiet

defaults
    log global
    log-format  HASTATS:::%{+Q}o:::client_ip=%ci:::client_port=%cp:::datetime_of_request=[%tr]:::frontend_name_transport=%ft:::backend_name=%b:::server_name=%s:::time_to_receive_full_request=%TR:::Tw=%Tw:::Tc=%Tc:::response_time=%Tr:::active_time_of_request=%Ta:::status_code=%ST:::bytes_read=%B:::captured_request_cookie=%CC:::captured_response_cookie=%CS:::termination_state_with_cookie_status=%tsc:::actconn=%ac:::feconn=%fc:::beconn=%bc:::srv_conn=%sc:::retries=%rc:::srv_queue=%sq:::backend_queue=%bq:::captured_request_headers_default_style=%hr:::captured_response_headers_default_style=%hs:::server_ip=%si:::server_port=%sp:::frontend_name=%f:::http_method=%HM:::http_request_uri_without_query=%HP:::http_request_query_string=%HQ:::http_request_uri=%HU:::bytes_uploaded=%U:::ssl_ciphers=%sslc:::ssl_version=%sslv:::%[capture.res.hdr(0)]
    mode    http
    option  dontlognull
    retries 3
    option  redispatch
    maxconn 200000
    timeout connect 5000
    timeout client  50000
    timeout server  50000

frontend ddsds
    bind    :3128 name ddsds
    capture request header Host len 1024
    capture request header Content-Type len 1024
    capture request header User-Agent len 1024
    capture request header Referer len 1024
    capture request header X-Forwarded-For len 1024
    capture response header Content-Type len 1024
    capture cookie Cookie_2 len 100
    http-request set-header mode mode:tcp
    http-request capture hdr(mode)  len 10
    mode tcp
    option  httplog
    option  dontlognull
    option http-tunnel
    timeout client  15000
    timeout http-request    50000
    timeout queue   50000

    default_backend default_ddsds

backend default_ddsds
    mode tcp
    balance leastconn
    option http-tunnel
    timeout connect 4000
    timeout server  50000
    timeout http-request    50000
    timeout queue   50000
    retries 3

    server normal_port 192.168.58.12:50877 weight 1 maxconn 10000 check inter 60000 rise 2 fall 3 source 192.168.58.13

# ddsds no result
frontend admin_page
    bind    127.0.0.1:64741
    mode http
    stats enable
    stats refresh 10s
    stats uri /stats

Can anyone help me fix this issue?

Thanks in advance.

Steffen Ullrich avatar
se flag
I don't know what you expected here. HTTPS is way more compute intensive than HTTP. This is especially true for the initial TLS handshake with the key exchange.
c4f4t0r avatar
nl flag
does your http test using keep-alive?
mw flag
Hi, the stress test is over HTTP, not HTTPS. Also, the problem only happens when HAProxy is configured in TCP mode; with HTTP mode it works fine. Can I use keep-alive in TCP mode?
jp flag
I would suspect all these `capture request header` directives in `http` mode require some additional CPU cycles.
mw flag
Hi, the problem occurs only in TCP mode; in HTTP mode it is fine — no CPU issue.
mw flag
I have removed the capture headers and the result is the same. In HTTP mode with 20k connections, CPU usage is 18% and the load is 2; in TCP mode with only 10k connections, CPU usage is 90% and the load is 8.
mw flag
Hi, any update on this, please?
mangohost

Post an answer

Most people don’t grasp that asking a lot of questions unlocks learning and improves interpersonal bonding. In Alison’s studies, for example, though people could accurately recall how many questions had been asked in their conversations, they didn’t intuit the link between questions and liking. Across four studies, in which participants were engaged in conversations themselves or read transcripts of others’ conversations, people tended not to realize that question asking would influence—or had influenced—the level of amity between the conversationalists.