Nginx is the most widely deployed web server and reverse proxy, powering over 30% of all websites. Its event-driven architecture handles thousands of concurrent connections with minimal memory. This guide covers server blocks, reverse proxy setup, SSL/TLS termination, load balancing, caching, and gzip compression — the building blocks of production Nginx configurations.
How Do Server Blocks Work?
Server blocks (virtual hosts) let Nginx serve multiple domains from a single instance. Nginx matches incoming requests by server_name and listen directives. If no server_name matches, the first server block defined for that port handles the request — unless another block is explicitly marked default_server on its listen directive.
# /etc/nginx/sites-available/example.com
server {
    listen 80;
    listen [::]:80;
    server_name example.com www.example.com;

    root /var/www/example.com/html;
    index index.html;

    # Per-site logging
    access_log /var/log/nginx/example.com.access.log;
    error_log /var/log/nginx/example.com.error.log;

    location / {
        # Serve the file, then a directory index, otherwise 404
        try_files $uri $uri/ =404;
    }

    # Static assets with long-lived cache headers
    location ~* \.(css|js|png|jpg|jpeg|gif|ico|svg|woff2)$ {
        expires 30d;
        add_header Cache-Control "public, immutable";
    }

    # Deny access to hidden files (.git, .env, .htaccess, ...)
    # return 404 hides the files' existence instead of answering 403
    location ~ /\. {
        deny all;
        return 404;
    }
}

# Symlink to sites-enabled
sudo ln -s /etc/nginx/sites-available/example.com /etc/nginx/sites-enabled/
# Test configuration syntax before applying — catches typos that would take the server down
sudo nginx -t
# Reload workers gracefully, without dropping active connections
sudo nginx -s reload

How Do You Configure a Reverse Proxy?
A reverse proxy forwards client requests to backend application servers (Node.js, Python, Go, etc.) and returns the response. Nginx handles TLS termination, static files, and connection management while the backend focuses on application logic.
server {
    listen 80;
    server_name api.example.com;

    location / {
        proxy_pass http://127.0.0.1:3000;

        # Forward client information to the backend
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # WebSocket support (HTTP/1.1 Upgrade handshake)
        # For mixed WebSocket/HTTP traffic, prefer a map of
        # $http_upgrade -> $connection_upgrade so plain requests keep keepalive
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";

        # Timeouts
        proxy_connect_timeout 60s;
        proxy_send_timeout 60s;
        proxy_read_timeout 60s;

        # Buffering (disable for streaming/SSE)
        proxy_buffering off;
    }
}

Tip: Always set proxy_set_header Host $host — without it, the backend receives the upstream address instead of the original hostname, which breaks virtual hosting and cookie domains.
How Do You Set Up SSL/TLS?
SSL/TLS termination at Nginx encrypts traffic between clients and the server. Use Let's Encrypt with Certbot for free, automated certificates. Always redirect HTTP to HTTPS.
# Redirect HTTP → HTTPS
server {
    listen 80;
    listen [::]:80;
    server_name example.com www.example.com;
    return 301 https://$host$request_uri;
}

# HTTPS server
server {
    # The 'listen ... http2' parameter is deprecated since nginx 1.25.1;
    # on older versions use: listen 443 ssl http2;
    listen 443 ssl;
    listen [::]:443 ssl;
    http2 on;
    server_name example.com www.example.com;

    # Certificate files (managed by Certbot)
    ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;

    # Modern TLS settings
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384;
    ssl_prefer_server_ciphers off;

    # HSTS — tell browsers to always use HTTPS
    add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload" always;

    # OCSP stapling — faster TLS handshakes
    # NOTE(review): Let's Encrypt announced the end of its OCSP service in 2025;
    # verify your CA still serves OCSP before relying on stapling
    ssl_stapling on;
    ssl_stapling_verify on;
    ssl_trusted_certificate /etc/letsencrypt/live/example.com/chain.pem;

    # Session cache for TLS resumption
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 1d;
    ssl_session_tickets off;

    location / {
        proxy_pass http://127.0.0.1:3000;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}

# Install Certbot
sudo apt install certbot python3-certbot-nginx
# Obtain a certificate and let Certbot edit the nginx config automatically
sudo certbot --nginx -d example.com -d www.example.com
# Renewal runs automatically via a systemd timer — verify it is active
sudo systemctl status certbot.timer

How Do You Configure Load Balancing?
Nginx distributes traffic across multiple backend servers using an upstream block. It supports round-robin (default), least connections, IP hash, and weighted distribution.
# Round-robin (default) — equal distribution
upstream api_servers {
    server 10.0.1.10:3000;
    server 10.0.1.11:3000;
    server 10.0.1.12:3000;
}

# Least connections — sends to the server with fewest active connections
upstream api_least {
    least_conn;
    server 10.0.1.10:3000;
    server 10.0.1.11:3000;
}

# IP hash — same client always hits the same server (sticky sessions)
upstream api_sticky {
    ip_hash;
    server 10.0.1.10:3000;
    server 10.0.1.11:3000;
}

# Weighted — send more traffic to powerful servers
upstream api_weighted {
    server 10.0.1.10:3000 weight=5; # gets 5x traffic
    server 10.0.1.11:3000 weight=1;
    server 10.0.1.12:3000 backup;   # only when others are down
}

# Passive failure detection — a server is skipped for fail_timeout after
# max_fails failed requests (active health checks require NGINX Plus)
upstream api_resilient {
    server 10.0.1.10:3000 max_fails=3 fail_timeout=30s;
    server 10.0.1.11:3000 max_fails=3 fail_timeout=30s;
}

server {
    listen 80;
    location / {
        proxy_pass http://api_servers;
    }
}

How Do You Enable Caching and Gzip?
Nginx can cache proxy responses to reduce backend load and compress responses to reduce bandwidth. Both significantly improve response times.
# Define cache zone in http block (nginx.conf)
proxy_cache_path /var/cache/nginx levels=1:2
                 keys_zone=app_cache:10m
                 max_size=1g
                 inactive=60m
                 use_temp_path=off;

server {
    location /api/ {
        proxy_pass http://api_servers;
        proxy_cache app_cache;

        # Cache successful responses for 10 minutes
        proxy_cache_valid 200 10m;
        proxy_cache_valid 404 1m;

        # Cache key
        proxy_cache_key "$scheme$request_method$host$request_uri";

        # Add header to show cache status (HIT/MISS/BYPASS)
        add_header X-Cache-Status $upstream_cache_status;

        # Bypass cache for authenticated requests
        proxy_cache_bypass $http_authorization;
        proxy_no_cache $http_authorization;
    }
}

# Enable in http block (nginx.conf)
gzip on;
gzip_vary on;         # add Vary: Accept-Encoding so caches store both variants
gzip_proxied any;     # also compress responses to proxied requests
gzip_comp_level 5;    # 1-9, 5 is a good balance of CPU vs. ratio
gzip_min_length 256;  # don't compress tiny responses
gzip_types
    text/plain
    text/css
    text/javascript
    application/javascript
    application/json
    application/xml
    image/svg+xml
    application/wasm;

# Serve pre-compressed files if available (.gz)
# Requires nginx built with --with-http_gzip_static_module
gzip_static on;

Common Nginx Patterns
Frequently used configuration snippets for real-world deployments.
# Rate limiting — 10 requests/second per IP
# ($binary_remote_addr uses less zone memory than $remote_addr)
limit_req_zone $binary_remote_addr zone=api_limit:10m rate=10r/s;

server {
    location /api/ {
        # Allow short bursts of up to 20 extra requests, served without delay
        limit_req zone=api_limit burst=20 nodelay;
        proxy_pass http://api_servers;
    }
}

# Single-page application (SPA) — fallback to index.html
location / {
    root /var/www/app;
    try_files $uri $uri/ /index.html;
}

# Custom error pages
error_page 404 /404.html;
error_page 500 502 503 504 /50x.html;
location = /50x.html {
    root /var/www/errors;
    internal;  # only reachable via internal redirects, not direct requests
}

# Security headers
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
add_header Referrer-Policy "strict-origin-when-cross-origin" always;

# File upload size limit
client_max_body_size 50m;

Key Takeaways
- Server blocks route requests by server_name — always test with nginx -t before reloading
- Reverse proxy requires proxy_set_header Host $host for correct backend behavior
- Use TLS 1.2+ with Let's Encrypt and always redirect HTTP to HTTPS
- Load balance with upstream blocks — use least_conn for uneven request durations
- Enable gzip at compression level 5 for text-based content types
- Use proxy_cache to reduce backend load on cacheable endpoints
- Rate limiting with limit_req_zone protects against abuse