# This example contains the bare minimum to get nginx going with
# unicorn servers.  Generally these configuration settings
# are applicable to other HTTP application servers (and not just Ruby
# ones), so if you have one working well for proxying another app
# server, feel free to continue using it.
#
# The only setting we feel strongly about is the fail_timeout=0
# directive in the "upstream" block.  max_fails=0 also has the same
# effect as fail_timeout=0 for current versions of nginx and may be
# used in its place.
#
# Users are strongly encouraged to refer to nginx documentation for more
# details and search for other example configs.

# you generally only need one nginx worker unless you're serving
# large amounts of static files which require blocking disk reads
worker_processes 1;

# # drop privileges, root is needed on most systems for binding to port 80
# # (or anything < 1024).  Capability-based security may be available for
# # your system and worth checking out so you won't need to be root to
# # start nginx to bind on 80
user nobody nogroup; # for systems with a "nogroup"
# user nobody nobody; # for systems with "nobody" as a group instead
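# A sketch of the capability-based approach mentioned above, assuming a
# Linux system with libcap and a hypothetical nginx binary path; this
# grants the binary permission to bind low ports without running as root:
#   $ sudo setcap 'cap_net_bind_service=+ep' /usr/local/sbin/nginx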

# Feel free to change all paths to suit your needs here, of course
pid /path/to/nginx.pid;
error_log /path/to/nginx.error.log;

events {
  worker_connections 1024; # increase if you have lots of clients
  accept_mutex off; # "on" if nginx worker_processes > 1
  # use epoll; # enable for Linux 2.6+
  # use kqueue; # enable for FreeBSD, OSX
}

http {
  # nginx will find this file in the config directory set at nginx build time
  include mime.types;

  # fallback in case we can't determine a type
  default_type application/octet-stream;

  # click tracking!
  access_log /path/to/nginx.access.log combined;

  # you generally want to serve static files with nginx since
  # unicorn is not and will never be optimized for it
  sendfile on;

  tcp_nopush on; # off may be better for *some* Comet/long-poll stuff
  tcp_nodelay off; # on may be better for some Comet/long-poll stuff

  # we haven't checked whether Rack::Deflater on the app server is
  # faster than doing compression via nginx.  It's easier
  # to configure it all in one place here for static files and also
  # to disable gzip for clients who don't get gzip/deflate right.
  # There are other gzip settings that may be needed to deal with
  # bad clients out there, see
  # https://nginx.org/en/docs/http/ngx_http_gzip_module.html
  gzip on;
  gzip_http_version 1.0;
  gzip_proxied any;
  gzip_min_length 500;
  gzip_disable "MSIE [1-6]\.";
  gzip_types text/plain text/html text/xml text/css
             text/comma-separated-values
             text/javascript application/x-javascript
             application/atom+xml;
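
  # If you'd rather compress in the Ruby application instead of nginx,
  # Rack ships a Deflater middleware; a one-line sketch for config.ru
  # (this goes in the Rack app, not in this nginx config):
  #   use Rack::Deflater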

  # this can be any application server, not just unicorn
  upstream app_server {
    # fail_timeout=0 means we always retry an upstream even if it failed
    # to return a good HTTP response (in case the unicorn master nukes a
    # single worker for timing out).

    # for UNIX domain socket setups:
    server unix:/path/to/.unicorn.sock fail_timeout=0;
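
    # per the note at the top of this file, max_fails=0 has the same
    # effect as fail_timeout=0 on current nginx versions and may be used
    # instead (same hypothetical socket path as above):
    # server unix:/path/to/.unicorn.sock max_fails=0;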

    # for TCP setups, point these to your backend servers
    # server 192.168.0.7:8080 fail_timeout=0;
    # server 192.168.0.8:8080 fail_timeout=0;
    # server 192.168.0.9:8080 fail_timeout=0;
  }

  server {
    # enable one of the following if you're on Linux or FreeBSD
    # listen 80 default deferred; # for Linux
    # listen 80 default accept_filter=httpready; # for FreeBSD

    # If you have IPv6, you'll likely want to have two separate listeners:
    # one on IPv4 only (the default), and another on IPv6 only, instead
    # of a single dual-stack listener.  A dual-stack listener will make
    # for ugly IPv4 addresses in $remote_addr (e.g. "::ffff:10.0.0.1"
    # instead of just "10.0.0.1") and potentially trigger bugs in
    # some software.
    # listen [::]:80 ipv6only=on; # deferred or accept_filter recommended
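    # A sketch of the separate-listener pair described above; the options
    # shown are assumptions, so pick the IPv4 variant matching your OS:
    # listen 80 default deferred;
    # listen [::]:80 ipv6only=on deferred;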

    client_max_body_size 4G;
    server_name _;

    # ~2 seconds is often enough for most folks to parse HTML/CSS and
    # retrieve needed images/icons/frames; connections are cheap in
    # nginx, so increasing this is generally safe...
    keepalive_timeout 5;

    # path for static files
    root /path/to/app/current/public;

    # Prefer to serve static files directly from nginx to avoid unnecessary
    # data copies from the application server.
    #
    # The try_files directive appeared in nginx 0.7.27 and has stabilized
    # over time.  Older versions of nginx (e.g. 0.6.x) required
    # "if (!-f $request_filename)" which was less efficient:
    # https://bogomips.org/unicorn.git/tree/examples/nginx.conf?id=v3.3.1#n127
    try_files $uri/index.html $uri.html $uri @app;
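
    # Roughly what the pre-try_files approach looked like, inside a
    # "location /" block (sketch for comparison only; do not combine
    # with the try_files line above):
    # location / {
    #   if (!-f $request_filename) {
    #     proxy_pass http://app_server;
    #     break;
    #   }
    # }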

    location @app {
      # an HTTP header important enough to have its own Wikipedia entry:
      #   https://en.wikipedia.org/wiki/X-Forwarded-For
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

      # enable this if you forward HTTPS traffic to unicorn,
      # this helps Rack set the proper URL scheme for doing redirects:
      # proxy_set_header X-Forwarded-Proto $scheme;

      # pass the Host: header from the client right along so redirects
      # can be set properly within the Rack application
      proxy_set_header Host $http_host;

      # we don't want nginx trying to do something clever with
      # redirects; we already set the Host: header above.
      proxy_redirect off;

      # It's also safe to disable buffering if you're only serving fast
      # clients with unicorn + nginx, but not slow clients.  You normally
      # want nginx to buffer responses to slow clients, even with Rails 3.1
      # streaming, because otherwise a slow client can become a bottleneck
      # for unicorn.
      #
      # The Rack application may also set "X-Accel-Buffering: (yes|no)"
      # in the response headers to disable/enable buffering on a
      # per-response basis.
      # proxy_buffering off;
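      #
      # A sketch of a Rack response triple that opts out of buffering for
      # a single request (header per the comment above; body hypothetical):
      #   [200, { "X-Accel-Buffering" => "no" }, ["streamed body"]]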

      proxy_pass http://app_server;
    }

    # Rails error pages
    error_page 500 502 503 504 /500.html;
    location = /500.html {
      root /path/to/app/current/public;
    }
  }
}
