Hello
I'll explain my situation.
#
# Automatically generated configuration.
# Do not edit this file manually.
#
global
uid 80
gid 80
chroot /var/haproxy
daemon
stats socket /var/run/haproxy.socket group proxy mode 775 level admin expose-fd listeners
nbproc 1
nbthread 1
hard-stop-after 60s
no strict-limits
tune.ssl.default-dh-param 2048
spread-checks 2
tune.bufsize 16384
tune.lua.maxmem 0
log /var/run/log local0 info
lua-prepend-path /tmp/haproxy/lua/?.lua
defaults
log global
option redispatch 0
timeout client 30s
timeout connect 10s
timeout server 30s
retries 3
default-server init-addr last,libc
# autogenerated entries for ACLs
# autogenerated entries for config in backends/frontends
# autogenerated entries for stats
# Frontend: FRONTEND ()
frontend FRONTEND
bind 127.0.0.1:4443 name 127.0.0.1:4443
mode tcp
default_backend NPM_backend
# tuning options
timeout client 30s
# logging options
# ACL: exchange_condition
acl acl_63185f07e1e485.01440772 req.ssl_sni -i mail.xxxx.com
# ACL: rds_condition
acl acl_6318646aec4140.35738389 req.ssl_sni -i rds2022.xxxx.com
# ACL: exchange_autodiscover
acl acl_63186489c20228.86220338 req.ssl_sni -i autodiscover.xxxx.com
# ACTION: Exchange_rule
use_backend Exchange_backend if acl_63185f07e1e485.01440772
# ACTION: RDS_rule
use_backend RDS_backend if acl_6318646aec4140.35738389
# ACTION: Exchange_autodiscover_rule
use_backend Exchange_backend if acl_63186489c20228.86220338
# Backend: Exchange_backend ()
backend Exchange_backend
# health checking is DISABLED
mode tcp
balance source
# tuning options
timeout connect 10s
timeout server 30s
server Exchange aaaaaaaaaaaaaaa8:443 resolve-prefer ipv4
# Backend: RDS_backend ()
backend RDS_backend
# health checking is DISABLED
mode tcp
balance source
# tuning options
timeout connect 10s
timeout server 30s
server RDS bbbbbbbbbbbbbbbbbbb:443 resolve-prefer ipv4
# Backend: NPM_backend ()
backend NPM_backend
# health checking is DISABLED
mode tcp
balance source
# tuning options
timeout connect 10s
timeout server 30s
server NPM_PRIME ccccccccccccccccccccc:443 resolve-prefer ipv4
listen local_statistics
bind 127.0.0.1:8822
mode http
stats uri /haproxy?stats
stats realm HAProxy\ statistics
stats admin if TRUE
# statistics are DISABLED
It works... "almost":
When I connect to Exchange with mail.xxxx.com
I get Exchange.
If I connect right after to rds2022.xxxx.com
the address becomes rds2022.xxxx.com/owa/etccccccccc
If I wait a bit and reload the page with rds2022.xxxx.com
I get the RDS server.
Same thing with the other backends.
Why do the sessions (I presume) stay stuck for a certain time to the previous server that was selected by the URL?
Thanks for the help (I hope I'm clear).
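For context: in mode tcp, HAProxy picks the backend once per TCP connection, based on the SNI it sees in the TLS ClientHello; everything the browser then sends over that same connection keeps going to that backend, which is presumably why waiting a bit (the config above has 30s client/server timeouts) and reloading gets you the right server again. A minimal hand-written sketch of that routing logic, with a made-up frontend name and the explicit inspect-delay that standalone configs usually add so the SNI is populated when the ACLs run, looks roughly like this:

# sketch only -- simplified equivalent of the generated frontend above
frontend sni_router
    bind 127.0.0.1:4443
    mode tcp
    # wait for the TLS ClientHello so req.ssl_sni is actually available
    tcp-request inspect-delay 5s
    tcp-request content accept if { req.ssl_hello_type 1 }
    # the backend is chosen ONCE here, per TCP connection; requests reusing
    # the same connection later are never re-routed
    use_backend Exchange_backend if { req.ssl_sni -i mail.xxxx.com }
    use_backend Exchange_backend if { req.ssl_sni -i autodiscover.xxxx.com }
    use_backend RDS_backend      if { req.ssl_sni -i rds2022.xxxx.com }
    default_backend NPM_backend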
Question: Are you using the same wildcard certificate for both frontends?
I encountered something similar a couple of years ago and it was due to connection coalescing.
You can read more about it here:
https://daniel.haxx.se/blog/2016/08/18/http2-connection-coalescing/
If my memory serves me right I solved it by using a separate non-wildcard cert for one of the frontends.
There is only 1 frontend :)
It's the same wildcard xxxx.com cert for Exchange, RDS and NPM (backends).
I'll read this
Thank you
Quote from: stanthewizzard on September 07, 2022, 05:57:39 PM
Yes, it's the same wildcard xxxx.com cert for Exchange, RDS and NPM.
Ok, then connection coalescing is the root cause of your issues for sure.
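One way to double-check that from HAProxy's side (a sketch only, not something the OPNsense GUI generates: the variable name and log format below are made up) is to log the SNI and the chosen backend once per TCP connection in the frontend, assuming the ClientHello is being inspected as in the sketch above:

# sketch only -- add inside the tcp frontend to log which SNI each connection was routed on
tcp-request content set-var(sess.sni) req.ssl_sni
log-format "%ci:%cp sni=%[var(sess.sni)] backend=%b"

Each TCP connection then produces one log line when it closes, showing the SNI it was routed on; with coalescing, switching the browser to rds2022.xxxx.com opens no new connection, so no new entry shows up until the old one is closed (browser killed or the 30s idle timeout hit).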
There is only 1 frontend and 3 backends.
Hi
Have you ruled out the browser itself as the cause?
It only affects the browser.
If I kill Safari on iOS, it's good again. In fact I have to kill Safari to switch between OWA and nginx.