One of two load-balanced nginx nodes returns the nginx welcome page behind an HAProxy balancer

I have an issue. I have two production servers with working nginx on the two domains production-app1.example.com and production-app2.example.com. Each works fine on its own, but when I add them to an HAProxy balancer and use just the first or just the second, the second one works while the first one returns the nginx welcome page.

The nginx configuration is the same on both servers; only the server_name differs. I cannot figure out where the problem is.
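
For reference, the behaviour can be reproduced with curl like this (balancer.example.com is just a stand-in for my HAProxy box):

# Direct request to app1 -- the app responds as expected
curl -i http://production-app1.example.com/

# Same request through the balancer, with an explicit Host header --
# app1 answers with the stock nginx welcome page instead
curl -i -H "Host: production-app1.example.com" http://balancer.example.com/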

This is my HAProxy configuration:

global
    maxconn 500000
    log /dev/log    local0
    log /dev/log    local1 notice
    chroot /var/lib/haproxy
    stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
    stats timeout 30s
    user haproxy
    group haproxy
    daemon

    # Default SSL material locations
    ca-base /etc/ssl/certs
    crt-base /etc/ssl/private

    # Default ciphers to use on SSL-enabled listening sockets.
    # For more information, see ciphers(1SSL). This list is from:
    #  https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
    # An alternative list with additional directives can be obtained from
    #  https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=haproxy
    ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS
    ssl-default-bind-options no-sslv3

defaults
    log global
    mode    http
    option  httplog
    option  dontlognull
    option  forwardfor
    option  http-server-close
    timeout connect 10s
    timeout client  30s
    timeout server  120s
    errorfile 400 /etc/haproxy/errors/400.http
    errorfile 403 /etc/haproxy/errors/403.http
    errorfile 408 /etc/haproxy/errors/408.http
    errorfile 500 /etc/haproxy/errors/500.http
    errorfile 502 /etc/haproxy/errors/502.http
    errorfile 503 /etc/haproxy/errors/503.http
    errorfile 504 /etc/haproxy/errors/504.http

frontend http_front
   bind *:80
   stats uri /haproxy?stats
   default_backend http_back

backend http_back
   balance source
   cookie SERVERUSED insert indirect nocache
   mode http
   http-request set-header X-Client-IP %[src]
   option httpchk HEAD / HTTP/1.0
   server production-app1.example.com production-app1.example.com:80 check fall 2 rise 2
   server production-app2.example.com production-app2.example.com:80 check fall 2 rise 2
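
As a side note, I understand the httpchk line above goes out as a bare HTTP/1.0 request with no Host header, so nginx answers the health checks from whatever it treats as its default server block. A variant that pins the Host header would look roughly like this (the domain is just for illustration):

   # Illustrative only: HTTP/1.1 health check carrying an explicit Host header,
   # so nginx can match it against server_name
   option httpchk HEAD / HTTP/1.1\r\nHost:\ production-app1.example.com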

This is the nginx virtual host configuration on app 1 (the one that is not working):

upstream app_production {
    # Path to Puma SOCK file, as defined previously
    server unix:/var/www/production/app/shared/tmp/sockets/puma.sock fail_timeout=0;
}

server {
    server_name production-app1.example.com;

    root /var/www/production/app/shared/public;

    try_files $uri/index.html $uri @app;

    location @app {
        proxy_pass http://app_production;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host $http_host;
        proxy_redirect off;
    }

    # logging
    #

    access_log /var/www/log/production/app/access.log;
    error_log /var/www/log/production/app/error.log;

    error_page 500 502 503 504 /500.html;
    client_max_body_size 4G;
    #keepalive_timeout 10;

    listen 80;
}

And this is the global nginx configuration:

# production server nginx config
user deploy;
worker_processes 2; # better performance with IO peaks
worker_rlimit_nofile 65535;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;


events
{
    worker_connections 4096;
}

http
{
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    ## HTTP2 tuning
    http2_max_concurrent_pushes 512;
    http2_max_concurrent_streams 512;
    http2_chunk_size 16k;
    http2_body_preread_size 256k;
    connection_pool_size 5600;
    # keepalive needs this
    proxy_http_version 1.1;
    proxy_set_header Connection "";

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 360;
    keepalive_requests 10000;
    types_hash_max_size 2048;
    server_names_hash_bucket_size 128;
    large_client_header_buffers 8 5600;

    # proxy timeouts
    proxy_connect_timeout 120s;
    proxy_send_timeout 300s;
    proxy_read_timeout 300s;
    send_timeout 440s;

    # buffering and caching
    proxy_cache_path /tmp/nginx-cache keys_zone=nginx-cache:10m loader_threshold=300 loader_files=200 max_size=200m;
    proxy_cache nginx-cache;
    proxy_buffers 40 16k;
    proxy_buffer_size 512k;
    proxy_busy_buffers_size 512k;
    proxy_cache_valid 200 302 10m;
    proxy_cache_valid 404 1m;

    # SSL settings (will be added)

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
    '$status $body_bytes_sent "$http_referer" '
    '"$http_user_agent" "$http_x_forwarded_for"'
    'request_time=$request_time '
    'upstream_response_time=$upstream_response_time '
    'upstream_connect_time=$upstream_connect_time '
    'upstream_header_time=$upstream_header_time';

    access_log /var/log/nginx/access.log main buffer=2048k flush=120;

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}
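
As far as I understand nginx, a request whose Host header matches no server_name falls through to the default server for that listen port, and on a stock install that default is roughly the following (illustrative, not from my config; paths vary by distro). That would be where the welcome page comes from:

# Illustrative stock default block -- serves the nginx welcome page
# whenever no server_name matches the incoming Host header
server {
    listen 80 default_server;
    root /usr/share/nginx/html;
    index index.html;
}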

The second server has the same configuration, but it works like a charm.

Thanks for any help.
