Messages - smivan

#1
Hi folks,

Did the recent OPNsense and HAProxy updates break things for anyone else? I followed this tutorial last year and everything has been flawless, but now I can't get any of my sites to load through HAProxy.

Logs indicate that connections come in to HTTPS_frontend/HTTP and then get sent to SNI_frontend/TCP, but the requests then seem to hang.

Checking haproxy/statistics#status, I see that all the servers and backends are up, and there are no errors in the log.
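
The same can be checked from the shell via the admin socket defined in the global section of the config below (a quick sketch; it assumes socat is installed, and the socket path is the one from my config):

echo "show stat" | socat stdio /var/run/haproxy.socket   # frontend/backend/server states
echo "show sess" | socat stdio /var/run/haproxy.socket   # live sessions, to see where a request is stuck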

Any hints are very much appreciated!

Config:

#
# Automatically generated configuration.
# Do not edit this file manually.
#

global
    uid                         80
    gid                         80
    chroot                      /var/haproxy
    daemon
    stats                       socket /var/run/haproxy.socket group proxy mode 775 level admin expose-fd listeners
    nbthread                    4
    hard-stop-after             60s
    no strict-limits
    maxconn                     10000
    httpclient.resolvers.prefer   ipv4
    tune.ssl.default-dh-param   4096
    spread-checks               2
    tune.bufsize                16384
    tune.lua.maxmem             0
    log                         /var/run/log local0 info
    lua-prepend-path            /tmp/haproxy/lua/?.lua
cache opnsense-haproxy-cache
    total-max-size 4
    max-age 60
    process-vary off

defaults
    log     global
    option redispatch -1
    maxconn 5000
    timeout client 30s
    timeout connect 30s
    timeout server 30s
    retries 3
    default-server init-addr last,libc
    default-server maxconn 5000

# autogenerated entries for ACLs


# autogenerated entries for config in backends/frontends
userlist list_65f3b0fe7fb250.26065529
    # Origin: docker_BACKEND
    user vania insecure-password o3djfFXbsMGMoKG
    # NOTE: UserlistAddUsers called with empty group data


# autogenerated entries for stats
userlist stats_auth
    user root insecure-password lovelife
    # NOTE: UserlistAddUsers called with empty group data





# Frontend: iron-k3s-api (k3s API endpoint for new nodes)
frontend iron-k3s-api
    bind 10.3.32.1:6443 name 10.3.32.1:6443
    mode tcp
    default_backend iron-k3s

    # logging options
    option tcplog

# Frontend: SNI_frontend (Listen *:80 and *:443, this is the first public hit.)
frontend SNI_frontend
    bind 0.0.0.0:80 name 0.0.0.0:80
    bind 0.0.0.0:443 name 0.0.0.0:443
    mode tcp
    default_backend SSL_backend

    # logging options

# Frontend: HTTP_frontend (Listen on 127.4.4.3:80 (redirect to ssl))
frontend HTTP_frontend
    bind 127.4.4.3:80 name 127.4.4.3:80 accept-proxy
    mode http
    option http-keep-alive
    option forwardfor

    # logging options
    # ACL: NoSSL_condition
    acl acl_65f2626661cd25.59982841 ssl_fc

    # ACTION: HTTPtoHTTPS_rule
    http-request redirect scheme https code 301 if !acl_65f2626661cd25.59982841

# Frontend: HTTPS_frontend (Listen on 127.4.4.3:443 local bind, expands public subdomains rules)
frontend HTTPS_frontend
    bind 127.4.4.3:443 name 127.4.4.3:443 accept-proxy ssl curves secp384r1  no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets prefer-client-ciphers ssl-min-ver TLSv1.2 ciphers ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES256-GCM-SHA384 ciphersuites TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 alpn h2,http/1.1 crt-list /tmp/haproxy/ssl/65f265db185755.94806451.certlist
    mode http
    option http-keep-alive
    option forwardfor

    # logging options

    # ACTION: PUBLIC_SUBDOMAINS_rule
    # NOTE: actions with no ACLs/conditions will always match
    use_backend %[req.hdr(host),lower,map_dom(/tmp/haproxy/mapfiles/65f262a83078b7.57120343.txt)]
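    # For reference, the map file above holds "host  backend" pairs; the entries
    # below are placeholders for illustration, not the generated file contents:
    #   emby.example.com      emby_BACKEND
    #   docker.example.com    docker_BACKEND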

# Frontend (DISABLED): haproxy-stats (HAproxy Stats Page)

# Backend: iron-k3s (Iron k3s kube-apiserver)
backend iron-k3s
    option log-health-checks
    # health check: kube-api check
    mode tcp
    balance roundrobin
    # stickiness
    stick-table type ip size 50k expire 30m 
    stick on src
    server galena2 10.3.34.212:6443 check inter 30s port 6443
    server galena3 10.3.34.212:6443 check inter 30s port 6443
    server iron1-k3s 10.3.34.201:6443 check inter 30s port 6443  ssl verify none
    server iron2-k3s 10.3.34.202:6443 check inter 30s port 6443
    server iron3-k3s 10.3.34.202:6443 check inter 30s port 6443

# Backend: emby_BACKEND (emby on puma)
backend emby_BACKEND
    option log-health-checks
    # health check: TCP no ssl check 2s
    option httpchk
    http-check send meth GET uri / ver HTTP/1.1 hdr Host emby.docker
    mode http
    balance source
    # stickiness
    stick-table type ip size 50k expire 30m 
    stick on src
    http-reuse safe
    server emby_SERVER 10.3.39.6:8096 check inter 2s no-check-ssl

# Backend: SSL_backend (haproxy ssl terminator)
backend SSL_backend
    # health checking is DISABLED
    mode tcp
    balance source
    # stickiness
    stick-table type ip size 50k expire 30m 
    stick on src
    server SSL_server 127.4.4.3 send-proxy-v2 check-send-proxy

# Backend: caddy_BACKEND (caddy on docker puma)
backend caddy_BACKEND
    # health checking is DISABLED
    mode http
    balance source
    # stickiness
    stick-table type ip size 50k expire 30m 
    stick on src
    http-reuse safe
    server caddy_SERVER 10.3.39.6:11080

# Backend: hassio_BACKEND (homeassistant on pve puma)
backend hassio_BACKEND
    # health check: TCP no ssl check 2s
    option httpchk
    http-check send meth GET uri / ver HTTP/1.1 hdr Host emby.docker
    mode http
    balance source
    # stickiness
    stick-table type ip size 50k expire 30m 
    stick on src
    http-reuse safe
    server hassio_SERVER 10.3.37.71:8123 check inter 2s no-check-ssl

# Backend: docker_BACKEND (docker on puma)
backend docker_BACKEND
    # health checking is DISABLED
    mode http
    balance source
    # stickiness
    stick-table type ip size 50k expire 30m 
    stick on src
    acl auth_ok http_auth(list_65f3b0fe7fb250.26065529)
    http-request auth if !auth_ok
    http-reuse safe
    server docker_SERVER 10.3.39.6:8085

# Backend: netmaker_BACKEND (netmaker non-ssl backend)
backend netmaker_BACKEND
    # health checking is DISABLED
    mode http
    balance source
    # stickiness
    stick-table type ip size 50k expire 30m 
    stick on src
    http-reuse safe
    server netmaker_SERVER 10.3.32.200:80

# Backend: librespeed_BACKEND (librespeed docker image on puma)
backend librespeed_BACKEND
    # health checking is DISABLED
    mode http
    balance source
    # stickiness
    stick-table type ip size 50k expire 30m 
    stick on src
    http-reuse safe
    server librespeed_SERVER 10.3.39.6:8092

# Backend: netmaker_ssl_BACKEND (netmaker ssl backend)
backend netmaker_ssl_BACKEND
    # health checking is DISABLED
    mode http
    balance source
    # stickiness
    stick-table type ip size 50k expire 30m 
    stick on src
    http-reuse safe
    server netmaker_ssl_SERVER 10.3.32.200:443

# Backend: traefik_BACKEND (traefik servers running on k3s via bgp)
backend traefik_BACKEND
    # health checking is DISABLED
    mode http
    balance source
    # stickiness
    stick-table type ip size 50k expire 30m 
    stick on src
    http-reuse safe
    server traefik_SERVER 10.5.0.10:443 ssl alpn h2,http/1.1 verify required ca-file /tmp/haproxy/ssl/667798075f75f4.30922858.calist



listen local_statistics
    bind            127.0.0.1:8822
    mode            http
    stats uri       /haproxy?stats
    stats realm     HAProxy\ statistics
    stats admin     if TRUE

listen  remote_statistics
    bind            10.3.32.1:8822
    mode            http
    stats uri       /haproxy?stats
    stats hide-version
#2
I do not use OpenVPN and would like to keep my system lean. I see that `openvpn` is installed as a core package. Is it safe to manually uninstall it (`pkg delete openvpn`), or will this impact other parts of the system?
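
I assume something like this would at least show what depends on it (pkg syntax as on stock FreeBSD; not sure it tells the whole story on OPNsense):

pkg info -r openvpn      # list packages that require openvpn
pkg delete -n openvpn    # dry run: show everything pkg would remove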