[Solved] Compose AIO external reverse proxy

Hello everyone,

I’ve been having a rough time trying to connect the Nextcloud AIO Docker Compose setup to my reverse proxy. I’m using HAProxy on my firewall to reach all my network devices. Here are the configurations:

services:
  nextcloud:
    container_name: nextcloud-aio-mastercontainer
    restart: always
    environment:
      - NEXTCLOUD_DATADIR=/mnt/Cloud
      - SKIP_DOMAIN_VALIDATION=true
      - APACHE_PORT=11000
    ports:
#      - 80:80 # Can be removed when running behind a reverse proxy. See https://github.com/nextcloud/all-in-one/blob/main/reverse-proxy.md
      - 8080:8080
#      - 8443:8443 # Can be removed when running behind a reverse proxy. See https://github.com/nextcloud/all-in-one/blob/main/reverse-proxy.md
    volumes:
      - nextcloud_aio_mastercontainer:/mnt/docker-aio-config # This line is not allowed to be changed
      - /var/run/docker.sock:/var/run/docker.sock:ro
    image: nextcloud/all-in-one:latest

volumes:
  nextcloud_aio_mastercontainer:
    name: nextcloud_aio_mastercontainer # This line is not allowed to be changed

The reverse proxy itself should be configured correctly, since all the other services I forward through it work perfectly. Could it be an issue with the way I use HAProxy, or with the way I use the Let’s Encrypt certificates? Here is my HAProxy config:

#
# Automatically generated configuration.
# Do not edit this file manually.
#

global
    uid                         80
    gid                         80
    chroot                      /var/haproxy
    daemon
    stats                       socket /var/run/haproxy.socket group proxy mode 775 level admin
    nbproc                      1
    nbthread                    4
    hard-stop-after             60s
    no strict-limits
    maxconn                     10000
    tune.ssl.default-dh-param   4096
    spread-checks               2
    tune.bufsize                16384
    tune.lua.maxmem             0
    log                         /var/run/log local0 info
    lua-prepend-path            /tmp/haproxy/lua/?.lua
cache opnsense-haproxy-cache
    total-max-size 2048
    max-age 60
    process-vary off

defaults
    log     global
    option redispatch -1
    maxconn 5000
    timeout client 30s
    timeout connect 30s
    timeout server 30s
    retries 3
    default-server init-addr last,libc

# autogenerated entries for ACLs


# autogenerated entries for config in backends/frontends

# autogenerated entries for stats


# Resolver: DNS
resolvers 637c80383d4fa8.00381210
    nameserver 127.0.0.1:53 127.0.0.1:53
    nameserver 192.168.5.10:53 192.168.5.10:53
    resolve_retries 3
    timeout resolve 1s
    timeout retry 1s



# Frontend: 0_SNI_frontend (0.0.0.0:80 and 0.0.0.0:443)
frontend 0_SNI_frontend
    bind 0.0.0.0:443 name 0.0.0.0:443 
    bind 0.0.0.0:80 name 0.0.0.0:80 
    mode tcp
    default_backend SSL_Backend
    # tuning options
    timeout client 30s

    # logging options

# Frontend: 1_HTTP_frontend (localhost http)
frontend 1_HTTP_frontend
    bind 127.0.0.1:80 name 127.0.0.1:80 
    mode http
    option http-keep-alive
    option forwardfor
    # tuning options
    timeout client 30s

    # logging options
    # ACL: NoSSL_condition
    acl acl_637dfe6f6c41f0.44983477 ssl_fc

    # ACTION: HTTPtoHTTPS_rule
    http-request redirect scheme https code 301 if !acl_637dfe6f6c41f0.44983477

# Frontend: 1_HTTPS_fontend (localhost 443)
frontend 1_HTTPS_fontend
    http-response set-header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload"
    bind 127.0.0.1:443 name 127.0.0.1:443 accept-proxy ssl curves secp384r1  no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets ssl-min-ver TLSv1.2 ciphers ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256 ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 alpn h2,http/1.1 crt-list /tmp/haproxy/ssl/637e02bea037c8.25886890.certlist 
    mode http
    option http-keep-alive
    option forwardfor
    # tuning options
    timeout client 15m

    # logging options

    # ACTION: PUBLIC_SUBDOMAIN_map-rule
    # NOTE: actions with no ACLs/conditions will always match
    use_backend %[req.hdr(host),lower,map_dom(/tmp/haproxy/mapfiles/637e013d3ac944.12175260.txt)] 

# Backend: acme_challenge_backend (Added by ACME Client plugin)
backend acme_challenge_backend
    # health checking is DISABLED
    mode http
    balance source
    # stickiness
    stick-table type ip size 50k expire 30m  
    stick on src
    # tuning options
    timeout connect 30s
    timeout server 30s
    http-reuse safe
    server acme_challenge_host 127.0.0.1:43580 

# Backend: Vaultwarden_backend (Vaultwarden service)
backend Vaultwarden_backend
    # health checking is DISABLED
    mode http
    balance source
    # stickiness
    stick-table type ip size 50k expire 30m  
    stick on src
    # tuning options
    timeout connect 30s
    timeout server 30s
    http-reuse safe
    server Bitwarden 192.168.5.25:88 

# Backend: SSL_Backend ()
backend SSL_Backend
    # health checking is DISABLED
    mode tcp
    balance source
    # stickiness
    stick-table type ip size 50k expire 30m  
    stick on src
    # tuning options
    timeout connect 30s
    timeout server 30s
    server SSL_Server 127.0.0.1 send-proxy-v2 check-send-proxy

# Backend: Booksonic_backend (Booksonic service)
backend Booksonic_backend
    # health checking is DISABLED
    mode http
    balance source
    # stickiness
    stick-table type ip size 50k expire 30m  
    stick on src
    # tuning options
    timeout connect 30s
    timeout server 30s
    http-reuse safe
    server Booksonic 192.168.5.25:4040 

# Backend: Nextcloud_backend (Nextcloud service)
backend Nextcloud_backend
    # health checking is DISABLED
    mode http
    balance source
    # stickiness
    stick-table type ip size 50k expire 30m  
    stick on src
    # tuning options
    timeout connect 30s
    timeout server 30s
    http-reuse safe
    server Nextcloud 192.168.5.25:11000 

# Backend: Smokeping_backend (Smokeping service)
backend Smokeping_backend
    # health checking is DISABLED
    mode http
    balance source
    # stickiness
    stick-table type ip size 50k expire 30m  
    stick on src
    # tuning options
    timeout connect 30s
    timeout server 30s
    http-reuse safe
    server Smokeping 192.168.5.25:11 



listen local_statistics
    bind            127.0.0.1:8822
    mode            http
    stats uri       /haproxy?stats
    stats realm     HAProxy\ statistics
    stats admin     if TRUE

# remote statistics are DISABLED



The only error I can find is in the Apache container:

AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 172.17.0.10. Set the 'ServerName' directive globally to suppress this message
{"level":"info","ts":1669391482.9550624,"msg":"using provided configuration","config_file":"/Caddyfile","config_adapter":""}
{"level":"warn","ts":1669391482.958633,"msg":"Caddyfile input is not formatted; run the 'caddy fmt' command to fix inconsistencies","adapter":"caddyfile","file":"/Caddyfile","line":2}
{"level":"info","ts":1669391482.9607918,"logger":"admin","msg":"admin endpoint started","address":"localhost:2019","enforce_origin":false,"origins":["//localhost:2019","//[::1]:2019","//127.0.0.1:2019"]}
{"level":"warn","ts":1669391482.9613688,"logger":"http","msg":"automatic HTTPS is completely disabled for server","server_name":"srv0"}
{"level":"info","ts":1669391482.9614027,"logger":"tls.cache.maintenance","msg":"started background certificate maintenance","cache":"0xc0002938f0"}
{"level":"info","ts":1669391482.9625843,"logger":"tls","msg":"cleaning storage unit","description":"FileStorage:/mnt/data/caddy"}
{"level":"info","ts":1669391482.9626298,"logger":"tls","msg":"finished cleaning storage units"}
{"level":"info","ts":1669391482.9626403,"logger":"http.log","msg":"server running","name":"srv0","protocols":["h1","h2","h3"]}
{"level":"info","ts":1669391482.9628708,"msg":"autosaved config (load with --resume flag)","file":"/var/www/.config/caddy/autosave.json"}
{"level":"info","ts":1669391482.9628863,"msg":"serving initial configuration"}

I’m using a Let’s Encrypt wildcard certificate and all other services work perfectly. Can anyone help me with the HAProxy config, or tell me where I’m going wrong?

I have been working on this the whole day.
I finally found the solution: removing the A record for the Nextcloud hostname from the Unbound host overrides made everything work.
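
For reference, the override I removed was roughly equivalent to the following raw Unbound entry (the hostname and IP are placeholders, not my exact values; on OPNsense this is managed through the Unbound DNS host overrides in the GUI rather than a config file):

# Rough Unbound equivalent of the removed host override (placeholder values):
server:
    local-data: "nextcloud.example.com. IN A 192.168.5.25"

With a record like that in place, LAN clients resolve the hostname to whatever the override says instead of to the address HAProxy answers on, so requests can bypass the proxy entirely.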

I used THIS guide to set up wildcard certificates on OPNsense and HAProxy. If anyone else comes across this issue, check your DNS first.
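
A quick way to verify is to compare what the internal resolver answers with what a public resolver answers (a rough sketch; the hostname and resolver addresses are placeholders for your own values, and it assumes dig is available):

dig +short nextcloud.example.com @192.168.5.1   # firewall LAN address running Unbound (placeholder)
dig +short nextcloud.example.com @9.9.9.9       # any public resolver

If the two answers differ and the internal one does not point at the address HAProxy listens on, LAN clients are not actually going through the proxy.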

Case closed.