Score:0

nginx forwarding requests to a different URL with load balancing

au flag

I am new to nginx and I am using it for request forwarding: each incoming request is forwarded to another server/URL. Below is my configuration:

    http {
        include       mime.types;
        default_type  application/octet-stream;
        sendfile on;  
        keepalive_timeout  65;
        
        server {
            listen       8008 ssl;
            listen       localhost:8008 ssl;
            listen       xyz.exmaple.com:8008 ssl;
            server_name  xyz.exmaple.com;
            more_set_headers 'Server: ABC';
            
            ssl_protocols TLSv1.2;
            ssl_prefer_server_ciphers on;
            ssl_ciphers "EECDH+AESGCM,EDH+AESGCM";      
            ssl_certificate /logs/ssl/SSL.crt;      
            ssl_certificate_key /logs/ssl/key.key;

            location /app/ {
                proxy_pass      https://proxy1.example.com:8888/abc/efg;
                proxy_read_timeout 60s;

                # May not need or want to set Host. Should default to the above hostname.
                proxy_pass_header Server;
                proxy_set_header Host            $host;
                proxy_set_header X-Real-IP       $remote_addr;
                proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                add_header  X-Frame-Options "SAMEORIGIN" always;
                more_set_headers 'Server: ABC';
            }
        }
    }

Every request to https://xyz.exmaple.com:8008/app/ is forwarded to https://proxy1.example.com:8888/abc/efg.

Now I have two URLs:

1) https://proxy1.example.com:8888/abc/efg
2) https://proxy2.example.com:8888/abc/efg

I want round-robin load balancing across these two URLs. How can I achieve that?

Score:1
pr flag

Use an upstream block to solve this issue. Example:

    http {
        include mime.types;
        default_type application/octet-stream;
        sendfile on;
        keepalive_timeout 65;

        upstream backend {
            server proxy1.example.com:8888;
            server proxy2.example.com:8888;
        }

        server {
            listen 8008 ssl;
            listen localhost:8008 ssl;
            listen xyz.exmaple.com:8008 ssl;
            server_name xyz.exmaple.com;
            more_set_headers 'Server: ABC';

            ssl_protocols TLSv1.2;
            ssl_prefer_server_ciphers on;
            ssl_ciphers "EECDH+AESGCM,EDH+AESGCM";
            ssl_certificate /logs/ssl/SSL.crt;
            ssl_certificate_key /logs/ssl/key.key;

            location /app/ {
                proxy_pass https://backend/abc/efg;
                proxy_read_timeout 60s;

                # May not need or want to set Host. Should default to the above hostname.
                proxy_pass_header Server;
                proxy_set_header Host $host;
                proxy_set_header X-Real-IP $remote_addr;
                proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                add_header X-Frame-Options "SAMEORIGIN" always;
                more_set_headers 'Server: ABC';
            }
        }
    }
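
With this upstream group nginx already balances requests between proxy1.example.com and proxy2.example.com in round-robin order by default, so no extra directive is needed for that. If you later want to bias traffic toward one backend or take a failing one out of rotation sooner, per-server parameters can be added; a minimal sketch (the weight and failure values here are illustrative assumptions, not tuned recommendations):

    upstream backend {
        # roughly two out of every three requests go to proxy1
        server proxy1.example.com:8888 weight=2;
        # after 3 failed attempts, skip proxy2 for 30 seconds
        server proxy2.example.com:8888 max_fails=3 fail_timeout=30s;
    }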
user3782114
au flag
I tried this. The problem I face now is that when I forward the request directly to the proxy1 or proxy2 domain, I get a response within a second, but with the above upstream configuration a single request takes about 2 minutes to get a response.
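
A possible explanation, assuming nothing else changed: the roughly two-minute delay matches nginx waiting out its default 60-second proxy_connect_timeout on an upstream peer it cannot reach and then retrying the other one. Reachable from your browser is not the same as reachable from the nginx host itself, so it is worth checking that both proxy1.example.com:8888 and proxy2.example.com:8888 accept connections from that machine. A sketch of directives that make such a failure show up (and fail over) much faster, with illustrative timeout values rather than recommendations:

    location /app/ {
        proxy_pass https://backend/abc/efg;
        proxy_read_timeout 60s;

        # give up on an unreachable peer after 5s instead of the 60s default
        proxy_connect_timeout 5s;
        # retry the next upstream server only on connection errors/timeouts
        proxy_next_upstream error timeout;
        # try at most two servers per request
        proxy_next_upstream_tries 2;
    }

If the slowness persists even when both backends are reachable from the nginx host, the TLS handshake toward the upstream is another thing to look at (proxy_ssl_server_name / proxy_ssl_name, since the upstream group name "backend" is what nginx would otherwise use as the server name).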