
Load balancer algorithm of the Bitnami NGINX ingress controller


I installed the ingress controller via Helm using the Bitnami NGINX Ingress Controller chart. I'm pretty sure the Bitnami chart uses kubernetes/ingress-nginx from here: https://github.com/kubernetes/ingress-nginx
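
For reference, this is roughly how I installed it, from memory (release name and namespace inferred from the pod name shown below; the exact chart name and version may differ slightly):

root# helm repo add bitnami https://charts.bitnami.com/bitnami
root# helm install ingress-controller bitnami/nginx-ingress-controller --namespace ingress   # release "ingress-controller" in namespace "ingress"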

I read an article saying that the default algorithm for the ingress controller is round-robin. So I checked the details, but I'm a little doubtful because it shows the EWMA algorithm there.

root# kubectl exec ingress-controller-nginx-ingress-controller-ldqz4 -n ingress -- nginx -T |more

2022/03/16 08:17:53 [warn] 87#87: the "http2_max_field_size" directive is obsolete, use the "large_client_header_buffers" directive instead in /opt/bitnami/nginx/conf/nginx.conf:143
nginx: [warn] the "http2_max_field_size" directive is obsolete, use the "large_client_header_buffers" directive instead in /opt/bitnami/nginx/conf/nginx.conf:143
2022/03/16 08:17:53 [warn] 87#87: the "http2_max_header_size" directive is obsolete, use the "large_client_header_buffers" directive instead in /opt/bitnami/nginx/conf/nginx.conf:144
nginx: [warn] the "http2_max_header_size" directive is obsolete, use the "large_client_header_buffers" directive instead in /opt/bitnami/nginx/conf/nginx.conf:144
2022/03/16 08:17:53 [warn] 87#87: the "http2_max_requests" directive is obsolete, use the "keepalive_requests" directive instead in /opt/bitnami/nginx/conf/nginx.conf:145
nginx: [warn] the "http2_max_requests" directive is obsolete, use the "keepalive_requests" directive instead in /opt/bitnami/nginx/conf/nginx.conf:145
nginx: the configuration file /opt/bitnami/nginx/conf/nginx.conf syntax is ok
nginx: configuration file /opt/bitnami/nginx/conf/nginx.conf test is successful

# configuration file /opt/bitnami/nginx/conf/nginx.conf:

# Configuration checksum: 14885012042408604827

# setup custom paths that do not require root access
pid /tmp/nginx.pid;

daemon off;

worker_processes 1;

worker_rlimit_nofile 1047552;

worker_shutdown_timeout 240s ;

events {
    multi_accept        on;
    worker_connections  16384;
    use                 epoll;
}

http {
    lua_package_path "/etc/nginx/lua/?.lua;;";

    lua_shared_dict balancer_ewma 10M;
    lua_shared_dict balancer_ewma_last_touched_at 10M;
    lua_shared_dict balancer_ewma_locks 1M;
    lua_shared_dict certificate_data 20M;
    lua_shared_dict certificate_servers 5M;
    lua_shared_dict configuration_data 20M;
    lua_shared_dict global_throttle_cache 10M;
    lua_shared_dict ocsp_response_cache 5M;

    init_by_lua_block {
        collectgarbage("collect")

This is my nginx.conf, located at /etc/nginx/nginx.conf:

root# cat /etc/nginx/nginx.conf 

user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;

events {
    worker_connections 768;
    # multi_accept on;
}

http {

    ##
    # Basic Settings
    ##

    sendfile on;
    tcp_nopush on;
    types_hash_max_size 2048;
    # server_tokens off;

    # server_names_hash_bucket_size 64;
    # server_name_in_redirect off;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    ##
    # SSL Settings
    ##

    ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3; # Dropping SSLv3, ref: POODLE
    ssl_prefer_server_ciphers on;

    ##
    # Logging Settings
    ##

    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    ##
    # Gzip Settings
    ##

    gzip on;

    # gzip_vary on;
    # gzip_proxied any;
    # gzip_comp_level 6;
    # gzip_buffers 16 8k;
    # gzip_http_version 1.1;
    # gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;

    ##
    # Virtual Host Configs
    ##

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}

#mail {
#   # See sample authentication script at:
#   # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
#   # auth_http localhost/auth.php;
#   # pop3_capabilities "TOP" "USER";
#   # imap_capabilities "IMAP4rev1" "UIDPLUS";
#
#   server {
#       listen     localhost:110;
#       protocol   pop3;
#       proxy      on;
#   }
#
#   server {
#       listen     localhost:143;
#       protocol   imap;
#       proxy      on;
#   }
#

I want to edit or configure the load balancer algorithm, but I don't know whether mine is configured correctly. I tried running kubectl apply -f with the manifest below; it reported the Ingress as configured, but nothing happened and nothing changed (roughly what I ran is shown after the manifest).

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nginx-ingress
  namespace: ingress
  annotations:
    nginx.ingress.kubernetes.io/load-balance: "least_conn"
spec:
  ingressClassName: nginx
  rules:
  - host: mywebsite.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: service2
            port:
              number: 80
      - path: /video
        pathType: Prefix
        backend:
          service:
            name: service2
            port:
              number: 80
      - path: /service3
        pathType: Prefix
        backend:
          service:
            name: service3
            port:
              number: 80
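
Roughly what I ran, from memory (assuming the manifest above is saved as ingress.yaml; the grep commands are just my attempt to see whether the annotation shows up anywhere):

root# kubectl apply -f ingress.yaml                                  # reports the Ingress as configured
root# kubectl get ingress nginx-ingress -n ingress -o yaml | grep load-balance
root# kubectl exec ingress-controller-nginx-ingress-controller-ldqz4 -n ingress -- nginx -T | grep -i least_conn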


