From 00e04ecc9fda39fa77e2f7fd11834ea977ba9ee8 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Fri, 25 Jun 2010 11:29:13 -0700 Subject: test-exec: prefer ENV['PWD'] in working_directory tests We do an extra check in the application dispatch to ensure ENV['PWD'] is set correctly to match Dir.pwd (even if the string path is different) as this is required for Capistrano deployments. These tests should now pass under OSX where /var is apparently a symlink to /private/var. --- test/exec/test_exec.rb | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/test/exec/test_exec.rb b/test/exec/test_exec.rb index 9830683..1d24ca3 100644 --- a/test/exec/test_exec.rb +++ b/test/exec/test_exec.rb @@ -54,6 +54,20 @@ before_fork do |server, worker| end EOS + WORKING_DIRECTORY_CHECK_RU = <<-EOS +use Rack::ContentLength +run lambda { |env| + pwd = ENV['PWD'] + a = ::File.stat(pwd) + b = ::File.stat(Dir.pwd) + if (a.ino == b.ino && a.dev == b.dev) + [ 200, { 'Content-Type' => 'text/plain' }, [ pwd ] ] + else + [ 404, { 'Content-Type' => 'text/plain' }, [] ] + end +} + EOS + def setup @pwd = Dir.pwd @tmpfile = Tempfile.new('unicorn_exec_test') @@ -87,10 +101,7 @@ end File.unlink(other.path) Dir.mkdir(other.path) File.open("config.ru", "wb") do |fp| - fp.syswrite < 'text/plain' }, [ Dir.pwd ] ] } -EOF + fp.syswrite WORKING_DIRECTORY_CHECK_RU end FileUtils.cp("config.ru", other.path + "/config.ru") Dir.chdir(@tmpdir) @@ -138,10 +149,7 @@ EOF File.unlink(other.path) Dir.mkdir(other.path) File.open("config.ru", "wb") do |fp| - fp.syswrite < 'text/plain' }, [ Dir.pwd ] ] } -EOF + fp.syswrite WORKING_DIRECTORY_CHECK_RU end FileUtils.cp("config.ru", other.path + "/config.ru") tmp = Tempfile.new('unicorn.config') @@ -177,10 +185,7 @@ EOF File.unlink(other.path) Dir.mkdir(other.path) File.open("config.ru", "wb") do |fp| - fp.syswrite < 'text/plain' }, [ Dir.pwd ] ] } -EOF + fp.syswrite WORKING_DIRECTORY_CHECK_RU end FileUtils.cp("config.ru", other.path + 
"/config.ru") system('mkfifo', "#{other.path}/fifo") -- cgit v1.2.3-24-ge0c7 From 046d57cc8ff071a47a566e33a121b52be363be68 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Thu, 24 Jun 2010 03:54:40 +0000 Subject: tee_input: (nitpick) use IO#rewind instead of IO#seek(0) no need to pass an extra argument (cherry picked from commit 1a49a8295054a2e931f5288540acb858be8edcc8) --- lib/unicorn/tee_input.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/unicorn/tee_input.rb b/lib/unicorn/tee_input.rb index 8ff7258..7540e4f 100644 --- a/lib/unicorn/tee_input.rb +++ b/lib/unicorn/tee_input.rb @@ -25,7 +25,7 @@ module Unicorn if buf.size > 0 parser.filter_body(buf2, buf) and finalize_input tmp.write(buf2) - tmp.seek(0) + tmp.rewind end end -- cgit v1.2.3-24-ge0c7 From d187ff43c41881731f5b18bed4312279a54a920c Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Thu, 24 Jun 2010 04:11:35 +0000 Subject: tee_input: allow tuning of client_body_buffer_size/io_size Some folks may require more fine-grained control of buffering and I/O chunk sizes, so we'll support them (unofficially, for now). (cherry picked from commit 9f48be69bfe579dab02b5fe8d6e728ae63fd24fc) --- lib/unicorn/tee_input.rb | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/lib/unicorn/tee_input.rb b/lib/unicorn/tee_input.rb index 7540e4f..090c605 100644 --- a/lib/unicorn/tee_input.rb +++ b/lib/unicorn/tee_input.rb @@ -15,12 +15,22 @@ module Unicorn # "rack.input" of the Rack environment. class TeeInput < Struct.new(:socket, :req, :parser, :buf, :len, :tmp, :buf2) + # The maximum size (in +bytes+) to buffer in memory before + # resorting to a temporary file. Default is 112 kilobytes. + @@client_body_buffer_size = Unicorn::Const::MAX_BODY + + # The I/O chunk size (in +bytes+) for I/O operations where + # the size cannot be user-specified when a method is called. + # The default is 16 kilobytes. 
+ @@io_chunk_size = Unicorn::Const::CHUNK_SIZE + # Initializes a new TeeInput object. You normally do not have to call # this unless you are writing an HTTP server. def initialize(*args) super(*args) self.len = parser.content_length - self.tmp = len && len < Const::MAX_BODY ? StringIO.new("") : Util.tmpio + self.tmp = len && len < @@client_body_buffer_size ? + StringIO.new("") : Unicorn::Util.tmpio self.buf2 = "" if buf.size > 0 parser.filter_body(buf2, buf) and finalize_input @@ -50,7 +60,7 @@ module Unicorn if socket pos = tmp.pos - while tee(Const::CHUNK_SIZE, buf2) + while tee(@@io_chunk_size, buf2) end tmp.seek(pos) end @@ -83,7 +93,7 @@ module Unicorn length = args.shift if nil == length rv = tmp.read || "" - while tee(Const::CHUNK_SIZE, buf2) + while tee(@@io_chunk_size, buf2) rv << buf2 end rv @@ -113,7 +123,7 @@ module Unicorn orig_size = tmp.size if tmp.pos == orig_size - tee(Const::CHUNK_SIZE, buf2) or return nil + tee(@@io_chunk_size, buf2) or return nil tmp.seek(orig_size) end @@ -123,7 +133,7 @@ module Unicorn # unlikely, if we got here, then tmp is at EOF begin orig_size = tmp.pos - tee(Const::CHUNK_SIZE, buf2) or break + tee(@@io_chunk_size, buf2) or break tmp.seek(orig_size) line << tmp.gets $/ == line[-$/.size, $/.size] and return line @@ -195,7 +205,7 @@ module Unicorn # will catch EOFError when app is processing it, otherwise in # initialize we never get any chance to enter the app so the # EOFError will just get trapped by Unicorn and not the Rack app - buf << socket.readpartial(Const::CHUNK_SIZE) + buf << socket.readpartial(@@io_chunk_size) end self.socket = nil end -- cgit v1.2.3-24-ge0c7 From 0c6e5e165c6422ede694b37646c429595049deb5 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Thu, 24 Jun 2010 04:24:34 +0000 Subject: tee_input: undent, avoid (re)-declaring "module Unicorn" It makes RDoc look better and cleaner, since we don't do anything in the Unicorn namespace. 
(cherry picked from commit 6f720afd95d8131a2657c643b97cb18c750ed9f8) --- lib/unicorn/tee_input.rb | 407 +++++++++++++++++++++++------------------------ 1 file changed, 203 insertions(+), 204 deletions(-) diff --git a/lib/unicorn/tee_input.rb b/lib/unicorn/tee_input.rb index 090c605..d1d273d 100644 --- a/lib/unicorn/tee_input.rb +++ b/lib/unicorn/tee_input.rb @@ -1,233 +1,232 @@ # -*- encoding: binary -*- +require 'stringio' + +# acts like tee(1) on an input input to provide a input-like stream +# while providing rewindable semantics through a File/StringIO backing +# store. On the first pass, the input is only read on demand so your +# Rack application can use input notification (upload progress and +# like). This should fully conform to the Rack::Lint::InputWrapper +# specification on the public API. This class is intended to be a +# strict interpretation of Rack::Lint::InputWrapper functionality and +# will not support any deviations from it. +# +# When processing uploads, Unicorn exposes a TeeInput object under +# "rack.input" of the Rack environment. +class Unicorn::TeeInput < Struct.new(:socket, :req, :parser, + :buf, :len, :tmp, :buf2) + + # The maximum size (in +bytes+) to buffer in memory before + # resorting to a temporary file. Default is 112 kilobytes. + @@client_body_buffer_size = Unicorn::Const::MAX_BODY + + # The I/O chunk size (in +bytes+) for I/O operations where + # the size cannot be user-specified when a method is called. + # The default is 16 kilobytes. + @@io_chunk_size = Unicorn::Const::CHUNK_SIZE + + # Initializes a new TeeInput object. You normally do not have to call + # this unless you are writing an HTTP server. + def initialize(*args) + super(*args) + self.len = parser.content_length + self.tmp = len && len < @@client_body_buffer_size ? 
+ StringIO.new("") : Unicorn::Util.tmpio + self.buf2 = "" + if buf.size > 0 + parser.filter_body(buf2, buf) and finalize_input + tmp.write(buf2) + tmp.rewind + end + end -module Unicorn - - # acts like tee(1) on an input input to provide a input-like stream - # while providing rewindable semantics through a File/StringIO backing - # store. On the first pass, the input is only read on demand so your - # Rack application can use input notification (upload progress and - # like). This should fully conform to the Rack::Lint::InputWrapper - # specification on the public API. This class is intended to be a - # strict interpretation of Rack::Lint::InputWrapper functionality and - # will not support any deviations from it. + # :call-seq: + # ios.size => Integer + # + # Returns the size of the input. For requests with a Content-Length + # header value, this will not read data off the socket and just return + # the value of the Content-Length header as an Integer. + # + # For Transfer-Encoding:chunked requests, this requires consuming + # all of the input stream before returning since there's no other + # way to determine the size of the request body beforehand. # - # When processing uploads, Unicorn exposes a TeeInput object under - # "rack.input" of the Rack environment. - class TeeInput < Struct.new(:socket, :req, :parser, :buf, :len, :tmp, :buf2) - - # The maximum size (in +bytes+) to buffer in memory before - # resorting to a temporary file. Default is 112 kilobytes. - @@client_body_buffer_size = Unicorn::Const::MAX_BODY - - # The I/O chunk size (in +bytes+) for I/O operations where - # the size cannot be user-specified when a method is called. - # The default is 16 kilobytes. - @@io_chunk_size = Unicorn::Const::CHUNK_SIZE - - # Initializes a new TeeInput object. You normally do not have to call - # this unless you are writing an HTTP server. - def initialize(*args) - super(*args) - self.len = parser.content_length - self.tmp = len && len < @@client_body_buffer_size ? 
- StringIO.new("") : Unicorn::Util.tmpio - self.buf2 = "" - if buf.size > 0 - parser.filter_body(buf2, buf) and finalize_input - tmp.write(buf2) - tmp.rewind + # This method is no longer part of the Rack specification as of + # Rack 1.2, so its use is not recommended. This method only exists + # for compatibility with Rack applications designed for Rack 1.1 and + # earlier. Most applications should only need to call +read+ with a + # specified +length+ in a loop until it returns +nil+. + def size + len and return len + + if socket + pos = tmp.pos + while tee(@@io_chunk_size, buf2) end + tmp.seek(pos) end - # :call-seq: - # ios.size => Integer - # - # Returns the size of the input. For requests with a Content-Length - # header value, this will not read data off the socket and just return - # the value of the Content-Length header as an Integer. - # - # For Transfer-Encoding:chunked requests, this requires consuming - # all of the input stream before returning since there's no other - # way to determine the size of the request body beforehand. - # - # This method is no longer part of the Rack specification as of - # Rack 1.2, so its use is not recommended. This method only exists - # for compatibility with Rack applications designed for Rack 1.1 and - # earlier. Most applications should only need to call +read+ with a - # specified +length+ in a loop until it returns +nil+. - def size - len and return len - - if socket - pos = tmp.pos - while tee(@@io_chunk_size, buf2) - end - tmp.seek(pos) - end - - self.len = tmp.size - end + self.len = tmp.size + end - # :call-seq: - # ios.read([length [, buffer ]]) => string, buffer, or nil - # - # Reads at most length bytes from the I/O stream, or to the end of - # file if length is omitted or is nil. length must be a non-negative - # integer or nil. If the optional buffer argument is present, it - # must reference a String, which will receive the data. - # - # At end of file, it returns nil or "" depend on length. 
- # ios.read() and ios.read(nil) returns "". - # ios.read(length [, buffer]) returns nil. - # - # If the Content-Length of the HTTP request is known (as is the common - # case for POST requests), then ios.read(length [, buffer]) will block - # until the specified length is read (or it is the last chunk). - # Otherwise, for uncommon "Transfer-Encoding: chunked" requests, - # ios.read(length [, buffer]) will return immediately if there is - # any data and only block when nothing is available (providing - # IO#readpartial semantics). - def read(*args) - socket or return tmp.read(*args) - - length = args.shift - if nil == length - rv = tmp.read || "" - while tee(@@io_chunk_size, buf2) - rv << buf2 - end - rv + # :call-seq: + # ios.read([length [, buffer ]]) => string, buffer, or nil + # + # Reads at most length bytes from the I/O stream, or to the end of + # file if length is omitted or is nil. length must be a non-negative + # integer or nil. If the optional buffer argument is present, it + # must reference a String, which will receive the data. + # + # At end of file, it returns nil or "" depend on length. + # ios.read() and ios.read(nil) returns "". + # ios.read(length [, buffer]) returns nil. + # + # If the Content-Length of the HTTP request is known (as is the common + # case for POST requests), then ios.read(length [, buffer]) will block + # until the specified length is read (or it is the last chunk). + # Otherwise, for uncommon "Transfer-Encoding: chunked" requests, + # ios.read(length [, buffer]) will return immediately if there is + # any data and only block when nothing is available (providing + # IO#readpartial semantics). 
+ def read(*args) + socket or return tmp.read(*args) + + length = args.shift + if nil == length + rv = tmp.read || "" + while tee(@@io_chunk_size, buf2) + rv << buf2 + end + rv + else + rv = args.shift || "" + diff = tmp.size - tmp.pos + if 0 == diff + ensure_length(tee(length, rv), length) else - rv = args.shift || "" - diff = tmp.size - tmp.pos - if 0 == diff - ensure_length(tee(length, rv), length) - else - ensure_length(tmp.read(diff > length ? length : diff, rv), length) - end + ensure_length(tmp.read(diff > length ? length : diff, rv), length) end end + end - # :call-seq: - # ios.gets => string or nil - # - # Reads the next ``line'' from the I/O stream; lines are separated - # by the global record separator ($/, typically "\n"). A global - # record separator of nil reads the entire unread contents of ios. - # Returns nil if called at the end of file. - # This takes zero arguments for strict Rack::Lint compatibility, - # unlike IO#gets. - def gets - socket or return tmp.gets - nil == $/ and return read - - orig_size = tmp.size - if tmp.pos == orig_size - tee(@@io_chunk_size, buf2) or return nil - tmp.seek(orig_size) - end + # :call-seq: + # ios.gets => string or nil + # + # Reads the next ``line'' from the I/O stream; lines are separated + # by the global record separator ($/, typically "\n"). A global + # record separator of nil reads the entire unread contents of ios. + # Returns nil if called at the end of file. + # This takes zero arguments for strict Rack::Lint compatibility, + # unlike IO#gets. 
+ def gets + socket or return tmp.gets + nil == $/ and return read + + orig_size = tmp.size + if tmp.pos == orig_size + tee(@@io_chunk_size, buf2) or return nil + tmp.seek(orig_size) + end - line = tmp.gets # cannot be nil here since size > pos - $/ == line[-$/.size, $/.size] and return line + line = tmp.gets # cannot be nil here since size > pos + $/ == line[-$/.size, $/.size] and return line - # unlikely, if we got here, then tmp is at EOF - begin - orig_size = tmp.pos - tee(@@io_chunk_size, buf2) or break - tmp.seek(orig_size) - line << tmp.gets - $/ == line[-$/.size, $/.size] and return line - # tmp is at EOF again here, retry the loop - end while true - - line - end + # unlikely, if we got here, then tmp is at EOF + begin + orig_size = tmp.pos + tee(@@io_chunk_size, buf2) or break + tmp.seek(orig_size) + line << tmp.gets + $/ == line[-$/.size, $/.size] and return line + # tmp is at EOF again here, retry the loop + end while true - # :call-seq: - # ios.each { |line| block } => ios - # - # Executes the block for every ``line'' in *ios*, where lines are - # separated by the global record separator ($/, typically "\n"). - def each(&block) - while line = gets - yield line - end + line + end - self # Rack does not specify what the return value is here + # :call-seq: + # ios.each { |line| block } => ios + # + # Executes the block for every ``line'' in *ios*, where lines are + # separated by the global record separator ($/, typically "\n"). + def each(&block) + while line = gets + yield line end - # :call-seq: - # ios.rewind => 0 - # - # Positions the *ios* pointer to the beginning of input, returns - # the offset (zero) of the +ios+ pointer. Subsequent reads will - # start from the beginning of the previously-buffered input. 
- def rewind - tmp.rewind # Rack does not specify what the return value is here - end + self # Rack does not specify what the return value is here + end - private - - def client_error(e) - case e - when EOFError - # in case client only did a premature shutdown(SHUT_WR) - # we do support clients that shutdown(SHUT_WR) after the - # _entire_ request has been sent, and those will not have - # raised EOFError on us. - socket.close if socket - raise ClientShutdown, "bytes_read=#{tmp.size}", [] - when HttpParserError - e.set_backtrace([]) - end - raise e - end + # :call-seq: + # ios.rewind => 0 + # + # Positions the *ios* pointer to the beginning of input, returns + # the offset (zero) of the +ios+ pointer. Subsequent reads will + # start from the beginning of the previously-buffered input. + def rewind + tmp.rewind # Rack does not specify what the return value is here + end - # tees off a +length+ chunk of data from the input into the IO - # backing store as well as returning it. +dst+ must be specified. - # returns nil if reading from the input returns nil - def tee(length, dst) - unless parser.body_eof? - if parser.filter_body(dst, socket.readpartial(length, buf)).nil? - tmp.write(dst) - tmp.seek(0, IO::SEEK_END) # workaround FreeBSD/OSX + MRI 1.8.x bug - return dst - end - end - finalize_input - rescue => e - client_error(e) +private + + def client_error(e) + case e + when EOFError + # in case client only did a premature shutdown(SHUT_WR) + # we do support clients that shutdown(SHUT_WR) after the + # _entire_ request has been sent, and those will not have + # raised EOFError on us. + socket.close if socket + raise ClientShutdown, "bytes_read=#{tmp.size}", [] + when HttpParserError + e.set_backtrace([]) end + raise e + end - def finalize_input - while parser.trailers(req, buf).nil? 
- # Don't worry about raising ClientShutdown here on EOFError, tee() - # will catch EOFError when app is processing it, otherwise in - # initialize we never get any chance to enter the app so the - # EOFError will just get trapped by Unicorn and not the Rack app - buf << socket.readpartial(@@io_chunk_size) + # tees off a +length+ chunk of data from the input into the IO + # backing store as well as returning it. +dst+ must be specified. + # returns nil if reading from the input returns nil + def tee(length, dst) + unless parser.body_eof? + if parser.filter_body(dst, socket.readpartial(length, buf)).nil? + tmp.write(dst) + tmp.seek(0, IO::SEEK_END) # workaround FreeBSD/OSX + MRI 1.8.x bug + return dst end - self.socket = nil end + finalize_input + rescue => e + client_error(e) + end - # tee()s into +dst+ until it is of +length+ bytes (or until - # we've reached the Content-Length of the request body). - # Returns +dst+ (the exact object, not a duplicate) - # To continue supporting applications that need near-real-time - # streaming input bodies, this is a no-op for - # "Transfer-Encoding: chunked" requests. - def ensure_length(dst, length) - # len is nil for chunked bodies, so we can't ensure length for those - # since they could be streaming bidirectionally and we don't want to - # block the caller in that case. - return dst if dst.nil? || len.nil? - - while dst.size < length && tee(length - dst.size, buf2) - dst << buf2 - end + def finalize_input + while parser.trailers(req, buf).nil? + # Don't worry about raising ClientShutdown here on EOFError, tee() + # will catch EOFError when app is processing it, otherwise in + # initialize we never get any chance to enter the app so the + # EOFError will just get trapped by Unicorn and not the Rack app + buf << socket.readpartial(@@io_chunk_size) + end + self.socket = nil + end - dst + # tee()s into +dst+ until it is of +length+ bytes (or until + # we've reached the Content-Length of the request body). 
+ # Returns +dst+ (the exact object, not a duplicate) + # To continue supporting applications that need near-real-time + # streaming input bodies, this is a no-op for + # "Transfer-Encoding: chunked" requests. + def ensure_length(dst, length) + # len is nil for chunked bodies, so we can't ensure length for those + # since they could be streaming bidirectionally and we don't want to + # block the caller in that case. + return dst if dst.nil? || len.nil? + + while dst.size < length && tee(length - dst.size, buf2) + dst << buf2 end + dst end + end -- cgit v1.2.3-24-ge0c7 From 2b07395f33f321d14c0a252abc37d9e2966f7627 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Thu, 24 Jun 2010 04:31:37 +0000 Subject: http: avoid (re-)declaring the Unicorn module It makes for messy documentation. (cherry picked from commit b8b979d75519be1c84818f32b83d85f8ec5f6072) --- ext/unicorn_http/unicorn_http.rl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ext/unicorn_http/unicorn_http.rl b/ext/unicorn_http/unicorn_http.rl index f6c632f..1ad2a5d 100644 --- a/ext/unicorn_http/unicorn_http.rl +++ b/ext/unicorn_http/unicorn_http.rl @@ -684,7 +684,7 @@ void Init_unicorn_http(void) { VALUE mUnicorn, cHttpParser; - mUnicorn = rb_define_module("Unicorn"); + mUnicorn = rb_const_get(rb_cObject, rb_intern("Unicorn")); cHttpParser = rb_define_class_under(mUnicorn, "HttpParser", rb_cObject); eHttpParserError = rb_define_class_under(mUnicorn, "HttpParserError", rb_eIOError); -- cgit v1.2.3-24-ge0c7 From 85d55f6450f3546d3211be247919a2dae03a1110 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Mon, 28 Jun 2010 04:45:16 +0000 Subject: http_response: this should be a module, not a class This affects Rainbows!, but Rainbows! is still using the Unicorn 1.x branch. While we're at it, avoid redeclaring the "Unicorn" module, it makes documentation noisier. 
(cherry picked from commit 5769f313793ca84100f089b1911f2e22d0a31e9d) --- lib/unicorn/http_response.rb | 115 +++++++++++++++++++++---------------------- 1 file changed, 55 insertions(+), 60 deletions(-) diff --git a/lib/unicorn/http_response.rb b/lib/unicorn/http_response.rb index 96e484b..6f1cd48 100644 --- a/lib/unicorn/http_response.rb +++ b/lib/unicorn/http_response.rb @@ -1,75 +1,70 @@ # -*- encoding: binary -*- - require 'time' -module Unicorn - # Writes a Rack response to your client using the HTTP/1.1 specification. - # You use it by simply doing: - # - # status, headers, body = rack_app.call(env) - # HttpResponse.write(socket, [ status, headers, body ]) - # - # Most header correctness (including Content-Length and Content-Type) - # is the job of Rack, with the exception of the "Connection: close" - # and "Date" headers. - # - # A design decision was made to force the client to not pipeline or - # keepalive requests. HTTP/1.1 pipelining really kills the - # performance due to how it has to be handled and how unclear the - # standard is. To fix this the HttpResponse always gives a - # "Connection: close" header which forces the client to close right - # away. The bonus for this is that it gives a pretty nice speed boost - # to most clients since they can close their connection immediately. - - class HttpResponse +# Writes a Rack response to your client using the HTTP/1.1 specification. +# You use it by simply doing: +# +# status, headers, body = rack_app.call(env) +# HttpResponse.write(socket, [ status, headers, body ]) +# +# Most header correctness (including Content-Length and Content-Type) +# is the job of Rack, with the exception of the "Connection: close" +# and "Date" headers. +# +# A design decision was made to force the client to not pipeline or +# keepalive requests. HTTP/1.1 pipelining really kills the +# performance due to how it has to be handled and how unclear the +# standard is. 
To fix this the HttpResponse always gives a +# "Connection: close" header which forces the client to close right +# away. The bonus for this is that it gives a pretty nice speed boost +# to most clients since they can close their connection immediately. +module Unicorn::HttpResponse - # Every standard HTTP code mapped to the appropriate message. - CODES = Rack::Utils::HTTP_STATUS_CODES.inject({}) { |hash,(code,msg)| - hash[code] = "#{code} #{msg}" - hash - } + # Every standard HTTP code mapped to the appropriate message. + CODES = Rack::Utils::HTTP_STATUS_CODES.inject({}) { |hash,(code,msg)| + hash[code] = "#{code} #{msg}" + hash + } - # Rack does not set/require a Date: header. We always override the - # Connection: and Date: headers no matter what (if anything) our - # Rack application sent us. - SKIP = { 'connection' => true, 'date' => true, 'status' => true } + # Rack does not set/require a Date: header. We always override the + # Connection: and Date: headers no matter what (if anything) our + # Rack application sent us. + SKIP = { 'connection' => true, 'date' => true, 'status' => true } - # writes the rack_response to socket as an HTTP response - def self.write(socket, rack_response, have_header = true) - status, headers, body = rack_response + # writes the rack_response to socket as an HTTP response + def self.write(socket, rack_response, have_header = true) + status, headers, body = rack_response - if have_header - status = CODES[status.to_i] || status - out = [] + if have_header + status = CODES[status.to_i] || status + out = [] - # Don't bother enforcing duplicate supression, it's a Hash most of - # the time anyways so just hope our app knows what it's doing - headers.each do |key, value| - next if SKIP.include?(key.downcase) - if value =~ /\n/ - # avoiding blank, key-only cookies with /\n+/ - out.concat(value.split(/\n+/).map! 
{ |v| "#{key}: #{v}\r\n" }) - else - out << "#{key}: #{value}\r\n" - end + # Don't bother enforcing duplicate supression, it's a Hash most of + # the time anyways so just hope our app knows what it's doing + headers.each do |key, value| + next if SKIP.include?(key.downcase) + if value =~ /\n/ + # avoiding blank, key-only cookies with /\n+/ + out.concat(value.split(/\n+/).map! { |v| "#{key}: #{v}\r\n" }) + else + out << "#{key}: #{value}\r\n" end - - # Rack should enforce Content-Length or chunked transfer encoding, - # so don't worry or care about them. - # Date is required by HTTP/1.1 as long as our clock can be trusted. - # Some broken clients require a "Status" header so we accomodate them - socket.write("HTTP/1.1 #{status}\r\n" \ - "Date: #{Time.now.httpdate}\r\n" \ - "Status: #{status}\r\n" \ - "Connection: close\r\n" \ - "#{out.join('')}\r\n") end - body.each { |chunk| socket.write(chunk) } - socket.close # flushes and uncorks the socket immediately - ensure - body.respond_to?(:close) and body.close + # Rack should enforce Content-Length or chunked transfer encoding, + # so don't worry or care about them. + # Date is required by HTTP/1.1 as long as our clock can be trusted. + # Some broken clients require a "Status" header so we accomodate them + socket.write("HTTP/1.1 #{status}\r\n" \ + "Date: #{Time.now.httpdate}\r\n" \ + "Status: #{status}\r\n" \ + "Connection: close\r\n" \ + "#{out.join('')}\r\n") end + body.each { |chunk| socket.write(chunk) } + socket.close # flushes and uncorks the socket immediately + ensure + body.respond_to?(:close) and body.close end end -- cgit v1.2.3-24-ge0c7 From a88bed858dfa20b5131b631739b340da9dceae99 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Sat, 3 Jul 2010 09:30:57 +0000 Subject: socket_helper: tunables for tcp_defer_accept/accept_filter Under Linux, this allows users to tune the time (in seconds) to defer connections before allowing them to be accepted. 
The behavior of TCP_DEFER_ACCEPT changed with Linux 2.6.32 and idle connections may still be accept()-ed after the specified value in seconds. A small value of '1' remains the default for Unicorn as Unicorn does not worry about slow clients. Higher values provide better DoS protection for Rainbows! but also increases kernel memory usage. Allowing "dataready" for FreeBSD accept filters will allow SSL sockets to be used in the future for HTTPS, too. (cherry picked from commit 646cc762cc9297510102fc094f3af8a5a9e296c7) --- lib/unicorn/socket_helper.rb | 36 +++++++++++++++++++++++++++++------- test/unit/test_socket_helper.rb | 24 ++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 7 deletions(-) diff --git a/lib/unicorn/socket_helper.rb b/lib/unicorn/socket_helper.rb index 9a4266d..8677f70 100644 --- a/lib/unicorn/socket_helper.rb +++ b/lib/unicorn/socket_helper.rb @@ -12,6 +12,14 @@ module Unicorn # from /usr/include/linux/tcp.h TCP_DEFER_ACCEPT = 9 unless defined?(TCP_DEFER_ACCEPT) + # The semantics for TCP_DEFER_ACCEPT changed in Linux 2.6.32+ + # with commit d1b99ba41d6c5aa1ed2fc634323449dd656899e9 + # This change shouldn't affect Unicorn users behind nginx (a + # value of 1 remains an optimization), but Rainbows! users may + # want to use a higher value on Linux 2.6.32+ to protect against + # denial-of-service attacks + TCP_DEFER_ACCEPT_DEFAULT = 1 + # do not send out partial frames (Linux) TCP_CORK = 3 unless defined?(TCP_CORK) when /freebsd(([1-4]\..{1,2})|5\.[0-4])/ @@ -25,10 +33,16 @@ module Unicorn # The struct made by pack() is defined in /usr/include/sys/socket.h # as accept_filter_arg unless `/sbin/sysctl -nq net.inet.accf.http`.empty? 
- # set set the "httpready" accept filter in FreeBSD if available - # if other protocols are to be supported, this may be - # String#replace-d with "dataready" arguments instead - FILTER_ARG = ['httpready', nil].pack('a16a240') + # struct accept_filter_arg { + # char af_name[16]; + # char af_arg[240]; + # }; + # + # +af_name+ is either "httpready" or "dataready", + # though other filters may be supported by FreeBSD + def accf_arg(af_name) + [ af_name, nil ].pack('a16a240') + end end end @@ -49,10 +63,18 @@ module Unicorn end # No good reason to ever have deferred accepts off + # (except maybe benchmarking) if defined?(TCP_DEFER_ACCEPT) - sock.setsockopt(SOL_TCP, TCP_DEFER_ACCEPT, 1) - elsif defined?(SO_ACCEPTFILTER) && defined?(FILTER_ARG) - sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, FILTER_ARG) + # this differs from nginx, since nginx doesn't allow us to + # configure the the timeout... + tmp = { :tcp_defer_accept => true }.update(opt) + seconds = tmp[:tcp_defer_accept] + seconds = TCP_DEFER_ACCEPT_DEFAULT if seconds == true + seconds and sock.setsockopt(SOL_TCP, TCP_DEFER_ACCEPT, seconds) + elsif defined?(SO_ACCEPTFILTER) && respond_to?(:accf_arg) + tmp = { :accept_filter => 'httpready' }.update(opt) + name = tmp[:accept_filter] and + sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, accf_arg(name)) end end diff --git a/test/unit/test_socket_helper.rb b/test/unit/test_socket_helper.rb index 36b2dc2..bbce359 100644 --- a/test/unit/test_socket_helper.rb +++ b/test/unit/test_socket_helper.rb @@ -146,4 +146,28 @@ class TestSocketHelper < Test::Unit::TestCase sock_name(@unix_server) end + def test_tcp_defer_accept_default + port = unused_port @test_addr + name = "#@test_addr:#{port}" + sock = bind_listen(name) + cur = sock.getsockopt(Socket::SOL_TCP, TCP_DEFER_ACCEPT).unpack('i')[0] + assert cur >= 1 + end if defined?(TCP_DEFER_ACCEPT) + + def test_tcp_defer_accept_disable + port = unused_port @test_addr + name = "#@test_addr:#{port}" + sock = bind_listen(name, 
:tcp_defer_accept => false) + cur = sock.getsockopt(Socket::SOL_TCP, TCP_DEFER_ACCEPT).unpack('i')[0] + assert_equal 0, cur + end if defined?(TCP_DEFER_ACCEPT) + + def test_tcp_defer_accept_nr + port = unused_port @test_addr + name = "#@test_addr:#{port}" + sock = bind_listen(name, :tcp_defer_accept => 60) + cur = sock.getsockopt(Socket::SOL_TCP, TCP_DEFER_ACCEPT).unpack('i')[0] + assert cur > 1 + end if defined?(TCP_DEFER_ACCEPT) + end -- cgit v1.2.3-24-ge0c7 From 143f466ba0511f3e5a8feaec5d8efd6712829b2c Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Mon, 5 Jul 2010 23:14:40 +0000 Subject: doc: recommend absolute paths for -c/--config-file Suggested-by: Jeremy Evans ref: http://mid.gmane.org/AANLkTintT4vHGEdueuG45_RwJqFCToHi5pm2-WKDSUMz@mail.gmail.com (cherry picked from commit d7695c25c5e3b1c90e63bf15a5c5fdf68bfd0c34) --- Documentation/unicorn.1.txt | 3 +++ Documentation/unicorn_rails.1.txt | 7 +++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/Documentation/unicorn.1.txt b/Documentation/unicorn.1.txt index 24df7ab..c20a570 100644 --- a/Documentation/unicorn.1.txt +++ b/Documentation/unicorn.1.txt @@ -36,6 +36,9 @@ with rackup(1) but strongly discouraged. implemented as a Ruby DSL, so Ruby code may executed. See the RDoc/ri for the *Unicorn::Configurator* class for the full list of directives available from the DSL. + Using an absolute path for for CONFIG_FILE is recommended as it + makes multiple instances of Unicorn easily distinguishable when + viewing ps(1) output. -D, \--daemonize : Run daemonized in the background. The process is detached from diff --git a/Documentation/unicorn_rails.1.txt b/Documentation/unicorn_rails.1.txt index 267e425..f426b07 100644 --- a/Documentation/unicorn_rails.1.txt +++ b/Documentation/unicorn_rails.1.txt @@ -34,8 +34,11 @@ as much as possible. -c, \--config-file CONFIG_FILE : Path to the Unicorn-specific config file. The config file is implemented as a Ruby DSL, so Ruby code may executed. 
- See the RDoc/ri for the *Unicorn::Configurator* class for the - full list of directives available from the DSL. + See the RDoc/ri for the *Unicorn::Configurator* class for the full + list of directives available from the DSL. + Using an absolute path for for CONFIG_FILE is recommended as it + makes multiple instances of Unicorn easily distinguishable when + viewing ps(1) output. -D, \--daemonize : Run daemonized in the background. The process is detached from -- cgit v1.2.3-24-ge0c7 From 3673d07250f35e88c9b57ec429a75e5a68f7ca7b Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Tue, 6 Jul 2010 12:35:45 -0700 Subject: socket_helper: move defaults to the DEFAULTS constant This is to allow Rainbows! to override the defaults. (cherry picked from commit ef8f888ba1bacc759156f7336d39ba9b947e3f9d) --- lib/unicorn/socket_helper.rb | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/lib/unicorn/socket_helper.rb b/lib/unicorn/socket_helper.rb index 8677f70..0369c92 100644 --- a/lib/unicorn/socket_helper.rb +++ b/lib/unicorn/socket_helper.rb @@ -6,19 +6,28 @@ module Unicorn module SocketHelper include Socket::Constants - # configure platform-specific options (only tested on Linux 2.6 so far) - case RUBY_PLATFORM - when /linux/ - # from /usr/include/linux/tcp.h - TCP_DEFER_ACCEPT = 9 unless defined?(TCP_DEFER_ACCEPT) - + # :stopdoc: + # internal interface, only used by Rainbows!/Zbatery + DEFAULTS = { # The semantics for TCP_DEFER_ACCEPT changed in Linux 2.6.32+ # with commit d1b99ba41d6c5aa1ed2fc634323449dd656899e9 # This change shouldn't affect Unicorn users behind nginx (a # value of 1 remains an optimization), but Rainbows! 
users may # want to use a higher value on Linux 2.6.32+ to protect against # denial-of-service attacks - TCP_DEFER_ACCEPT_DEFAULT = 1 + :tcp_defer_accept => 1, + + # FreeBSD, we need to override this to 'dataready' when we + # eventually get HTTPS support + :accept_filter => 'httpready', + } + #:startdoc: + + # configure platform-specific options (only tested on Linux 2.6 so far) + case RUBY_PLATFORM + when /linux/ + # from /usr/include/linux/tcp.h + TCP_DEFER_ACCEPT = 9 unless defined?(TCP_DEFER_ACCEPT) # do not send out partial frames (Linux) TCP_CORK = 3 unless defined?(TCP_CORK) @@ -67,12 +76,13 @@ module Unicorn if defined?(TCP_DEFER_ACCEPT) # this differs from nginx, since nginx doesn't allow us to # configure the the timeout... - tmp = { :tcp_defer_accept => true }.update(opt) + tmp = DEFAULTS.merge(opt) seconds = tmp[:tcp_defer_accept] - seconds = TCP_DEFER_ACCEPT_DEFAULT if seconds == true - seconds and sock.setsockopt(SOL_TCP, TCP_DEFER_ACCEPT, seconds) + seconds = DEFAULTS[:tcp_defer_accept] if seconds == true + seconds = 0 unless seconds # nil/false means disable this + sock.setsockopt(SOL_TCP, TCP_DEFER_ACCEPT, seconds) elsif defined?(SO_ACCEPTFILTER) && respond_to?(:accf_arg) - tmp = { :accept_filter => 'httpready' }.update(opt) + tmp = DEFAULTS.merge(opt) name = tmp[:accept_filter] and sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, accf_arg(name)) end -- cgit v1.2.3-24-ge0c7 From a96e19063d5154b87676fa435ca174f545465ec2 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Tue, 6 Jul 2010 12:39:36 -0700 Subject: configurator: documentation for new accept options The defaults should be reasonable, but there may be folks who want to experiment. 
(cherry picked from commit 686281a90a9b47bac4dfd32a72a97e6e8d26afa1) --- lib/unicorn/configurator.rb | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/lib/unicorn/configurator.rb b/lib/unicorn/configurator.rb index 533e0ed..3cf0d72 100644 --- a/lib/unicorn/configurator.rb +++ b/lib/unicorn/configurator.rb @@ -273,6 +273,41 @@ module Unicorn # This has no effect on TCP listeners. # # Default: 0 (world read/writable) + # + # +:tcp_defer_accept:+ defer accept() until data is ready (Linux-only) + # + # For Linux 2.6.32 and later, this is the number of retransmits to + # defer an accept() for if no data arrives, but the client will + # eventually be accepted after the specified number of retransmits + # regardless of whether data is ready. + # + # For Linux before 2.6.32, this is a boolean option, and + # accepts are _always_ deferred indefinitely if no data arrives. + # This is similar to :accept_filter => "dataready" + # under FreeBSD. + # + # Specifying +true+ is synonymous for the default value(s) below, + # and +false+ or +nil+ is synonymous for a value of zero. + # + # A value of +1+ is a good optimization for local networks + # and trusted clients. For Rainbows! and Zbatery users, a higher + # value (e.g. +60+) provides more protection against some + # denial-of-service attacks. There is no good reason to ever + # disable this with a +zero+ value when serving HTTP. + # + # Default: 1 retransmit for \Unicorn, 60 for Rainbows! 0.95.0\+ + # + # +:accept_filter: defer accept() until data is ready (FreeBSD-only) + # + # This enables either the "dataready" or (default) "httpready" + # accept() filter under FreeBSD. This is intended as an + # optimization to reduce context switches with common GET/HEAD + # requests. For Rainbows! and Zbatery users, this provides + # some protection against certain denial-of-service attacks, too. + # + # There is no good reason to change from the default. 
+ # + # Default: "httpready" def listen(address, opt = {}) address = expand_addr(address) if String === address -- cgit v1.2.3-24-ge0c7 From 38b3c521be7715cdbdc406266e351765b2907ac1 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Tue, 6 Jul 2010 12:49:48 -0700 Subject: configurator: cleanup RDoc, un-indent No point in redeclaring the Unicorn module in here. (cherry picked from commit e4d2c7c302e96ee504d82376885ac6b1897c666a) --- lib/unicorn/configurator.rb | 938 ++++++++++++++++++++++---------------------- 1 file changed, 466 insertions(+), 472 deletions(-) diff --git a/lib/unicorn/configurator.rb b/lib/unicorn/configurator.rb index 3cf0d72..6be6fbd 100644 --- a/lib/unicorn/configurator.rb +++ b/lib/unicorn/configurator.rb @@ -1,518 +1,512 @@ # -*- encoding: binary -*- - -require 'socket' require 'logger' -module Unicorn - - # Implements a simple DSL for configuring a Unicorn server. - # - # See http://unicorn.bogomips.org/examples/unicorn.conf.rb and - # http://unicorn.bogomips.org/examples/unicorn.conf.minimal.rb - # example configuration files. An example config file for use with - # nginx is also available at - # http://unicorn.bogomips.org/examples/nginx.conf - class Configurator < Struct.new(:set, :config_file, :after_reload) - # :stopdoc: - # used to stash stuff for deferred processing of cli options in - # config.ru after "working_directory" is bound. Do not rely on - # this being around later on... 
- RACKUP = {} - # :startdoc: - - # Default settings for Unicorn - DEFAULTS = { - :timeout => 60, - :logger => Logger.new($stderr), - :worker_processes => 1, - :after_fork => lambda { |server, worker| - server.logger.info("worker=#{worker.nr} spawned pid=#{$$}") - }, - :before_fork => lambda { |server, worker| - server.logger.info("worker=#{worker.nr} spawning...") - }, - :before_exec => lambda { |server| - server.logger.info("forked child re-executing...") - }, - :pid => nil, - :preload_app => false, - } - - def initialize(defaults = {}) #:nodoc: - self.set = Hash.new(:unset) - use_defaults = defaults.delete(:use_defaults) - self.config_file = defaults.delete(:config_file) - - # after_reload is only used by unicorn_rails, unsupported otherwise - self.after_reload = defaults.delete(:after_reload) - - set.merge!(DEFAULTS) if use_defaults - defaults.each { |key, value| self.send(key, value) } - Hash === set[:listener_opts] or - set[:listener_opts] = Hash.new { |hash,key| hash[key] = {} } - Array === set[:listeners] or set[:listeners] = [] - reload - end +# Implements a simple DSL for configuring a \Unicorn server. +# +# See http://unicorn.bogomips.org/examples/unicorn.conf.rb and +# http://unicorn.bogomips.org/examples/unicorn.conf.minimal.rb +# example configuration files. An example config file for use with +# nginx is also available at +# http://unicorn.bogomips.org/examples/nginx.conf +class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload) + # used to stash stuff for deferred processing of cli options in + # config.ru after "working_directory" is bound. Do not rely on + # this being around later on... 
+ RACKUP = {} # :nodoc: + + # Default settings for Unicorn + # :stopdoc: + DEFAULTS = { + :timeout => 60, + :logger => Logger.new($stderr), + :worker_processes => 1, + :after_fork => lambda { |server, worker| + server.logger.info("worker=#{worker.nr} spawned pid=#{$$}") + }, + :before_fork => lambda { |server, worker| + server.logger.info("worker=#{worker.nr} spawning...") + }, + :before_exec => lambda { |server| + server.logger.info("forked child re-executing...") + }, + :pid => nil, + :preload_app => false, + } + #:startdoc: + + def initialize(defaults = {}) #:nodoc: + self.set = Hash.new(:unset) + use_defaults = defaults.delete(:use_defaults) + self.config_file = defaults.delete(:config_file) + + # after_reload is only used by unicorn_rails, unsupported otherwise + self.after_reload = defaults.delete(:after_reload) + + set.merge!(DEFAULTS) if use_defaults + defaults.each { |key, value| self.send(key, value) } + Hash === set[:listener_opts] or + set[:listener_opts] = Hash.new { |hash,key| hash[key] = {} } + Array === set[:listeners] or set[:listeners] = [] + reload + end - def reload #:nodoc: - instance_eval(File.read(config_file), config_file) if config_file + def reload #:nodoc: + instance_eval(File.read(config_file), config_file) if config_file - parse_rackup_file + parse_rackup_file - # unicorn_rails creates dirs here after working_directory is bound - after_reload.call if after_reload + # unicorn_rails creates dirs here after working_directory is bound + after_reload.call if after_reload - # working_directory binds immediately (easier error checking that way), - # now ensure any paths we changed are correctly set. 
- [ :pid, :stderr_path, :stdout_path ].each do |var| - String === (path = set[var]) or next - path = File.expand_path(path) - File.writable?(path) || File.writable?(File.dirname(path)) or \ - raise ArgumentError, "directory for #{var}=#{path} not writable" - end + # working_directory binds immediately (easier error checking that way), + # now ensure any paths we changed are correctly set. + [ :pid, :stderr_path, :stdout_path ].each do |var| + String === (path = set[var]) or next + path = File.expand_path(path) + File.writable?(path) || File.writable?(File.dirname(path)) or \ + raise ArgumentError, "directory for #{var}=#{path} not writable" end + end - def commit!(server, options = {}) #:nodoc: - skip = options[:skip] || [] - if ready_pipe = RACKUP.delete(:ready_pipe) - server.ready_pipe = ready_pipe - end - set.each do |key, value| - value == :unset and next - skip.include?(key) and next - server.__send__("#{key}=", value) - end + def commit!(server, options = {}) #:nodoc: + skip = options[:skip] || [] + if ready_pipe = RACKUP.delete(:ready_pipe) + server.ready_pipe = ready_pipe end - - def [](key) # :nodoc: - set[key] + set.each do |key, value| + value == :unset and next + skip.include?(key) and next + server.__send__("#{key}=", value) end + end - # sets object to the +new+ Logger-like object. The new logger-like - # object must respond to the following methods: - # +debug+, +info+, +warn+, +error+, +fatal+ - # The default Logger will log its output to the path specified - # by +stderr_path+. If you're running Unicorn daemonized, then - # you must specify a path to prevent error messages from going - # to /dev/null. - def logger(new) - %w(debug info warn error fatal).each do |m| - new.respond_to?(m) and next - raise ArgumentError, "logger=#{new} does not respond to method=#{m}" - end + def [](key) # :nodoc: + set[key] + end - set[:logger] = new + # sets object to the +new+ Logger-like object. 
The new logger-like + # object must respond to the following methods: + # +debug+, +info+, +warn+, +error+, +fatal+ + # The default Logger will log its output to the path specified + # by +stderr_path+. If you're running Unicorn daemonized, then + # you must specify a path to prevent error messages from going + # to /dev/null. + def logger(new) + %w(debug info warn error fatal).each do |m| + new.respond_to?(m) and next + raise ArgumentError, "logger=#{new} does not respond to method=#{m}" end - # sets after_fork hook to a given block. This block will be called by - # the worker after forking. The following is an example hook which adds - # a per-process listener to every worker: - # - # after_fork do |server,worker| - # # per-process listener ports for debugging/admin: - # addr = "127.0.0.1:#{9293 + worker.nr}" - # - # # the negative :tries parameter indicates we will retry forever - # # waiting on the existing process to exit with a 5 second :delay - # # Existing options for Unicorn::Configurator#listen such as - # # :backlog, :rcvbuf, :sndbuf are available here as well. - # server.listen(addr, :tries => -1, :delay => 5, :backlog => 128) - # - # # drop permissions to "www-data" in the worker - # # generally there's no reason to start Unicorn as a priviledged user - # # as it is not recommended to expose Unicorn to public clients. - # worker.user('www-data', 'www-data') if Process.euid == 0 - # end - def after_fork(*args, &block) - set_hook(:after_fork, block_given? ? block : args[0]) - end + set[:logger] = new + end - # sets before_fork got be a given Proc object. This Proc - # object will be called by the master process before forking - # each worker. - def before_fork(*args, &block) - set_hook(:before_fork, block_given? ? block : args[0]) - end + # sets after_fork hook to a given block. This block will be called by + # the worker after forking. 
The following is an example hook which adds + # a per-process listener to every worker: + # + # after_fork do |server,worker| + # # per-process listener ports for debugging/admin: + # addr = "127.0.0.1:#{9293 + worker.nr}" + # + # # the negative :tries parameter indicates we will retry forever + # # waiting on the existing process to exit with a 5 second :delay + # # Existing options for Unicorn::Configurator#listen such as + # # :backlog, :rcvbuf, :sndbuf are available here as well. + # server.listen(addr, :tries => -1, :delay => 5, :backlog => 128) + # + # # drop permissions to "www-data" in the worker + # # generally there's no reason to start Unicorn as a priviledged user + # # as it is not recommended to expose Unicorn to public clients. + # worker.user('www-data', 'www-data') if Process.euid == 0 + # end + def after_fork(*args, &block) + set_hook(:after_fork, block_given? ? block : args[0]) + end - # sets the before_exec hook to a given Proc object. This - # Proc object will be called by the master process right - # before exec()-ing the new unicorn binary. This is useful - # for freeing certain OS resources that you do NOT wish to - # share with the reexeced child process. - # There is no corresponding after_exec hook (for obvious reasons). - def before_exec(*args, &block) - set_hook(:before_exec, block_given? ? block : args[0], 1) - end + # sets before_fork got be a given Proc object. This Proc + # object will be called by the master process before forking + # each worker. + def before_fork(*args, &block) + set_hook(:before_fork, block_given? ? block : args[0]) + end - # sets the timeout of worker processes to +seconds+. Workers - # handling the request/app.call/response cycle taking longer than - # this time period will be forcibly killed (via SIGKILL). This - # timeout is enforced by the master process itself and not subject - # to the scheduling limitations by the worker process. 
Due the - # low-complexity, low-overhead implementation, timeouts of less - # than 3.0 seconds can be considered inaccurate and unsafe. - # - # For running Unicorn behind nginx, it is recommended to set - # "fail_timeout=0" for in your nginx configuration like this - # to have nginx always retry backends that may have had workers - # SIGKILL-ed due to timeouts. - # - # # See http://wiki.nginx.org/NginxHttpUpstreamModule for more details - # # on nginx upstream configuration: - # upstream unicorn_backend { - # # for UNIX domain socket setups: - # server unix:/path/to/unicorn.sock fail_timeout=0; - # - # # for TCP setups - # server 192.168.0.7:8080 fail_timeout=0; - # server 192.168.0.8:8080 fail_timeout=0; - # server 192.168.0.9:8080 fail_timeout=0; - # } - def timeout(seconds) - Numeric === seconds or raise ArgumentError, - "not numeric: timeout=#{seconds.inspect}" - seconds >= 3 or raise ArgumentError, - "too low: timeout=#{seconds.inspect}" - set[:timeout] = seconds - end + # sets the before_exec hook to a given Proc object. This + # Proc object will be called by the master process right + # before exec()-ing the new unicorn binary. This is useful + # for freeing certain OS resources that you do NOT wish to + # share with the reexeced child process. + # There is no corresponding after_exec hook (for obvious reasons). + def before_exec(*args, &block) + set_hook(:before_exec, block_given? ? block : args[0], 1) + end - # sets the current number of worker_processes to +nr+. Each worker - # process will serve exactly one client at a time. You can - # increment or decrement this value at runtime by sending SIGTTIN - # or SIGTTOU respectively to the master process without reloading - # the rest of your Unicorn configuration. See the SIGNALS document - # for more information. 
- def worker_processes(nr) - Integer === nr or raise ArgumentError, - "not an integer: worker_processes=#{nr.inspect}" - nr >= 0 or raise ArgumentError, - "not non-negative: worker_processes=#{nr.inspect}" - set[:worker_processes] = nr - end + # sets the timeout of worker processes to +seconds+. Workers + # handling the request/app.call/response cycle taking longer than + # this time period will be forcibly killed (via SIGKILL). This + # timeout is enforced by the master process itself and not subject + # to the scheduling limitations by the worker process. Due the + # low-complexity, low-overhead implementation, timeouts of less + # than 3.0 seconds can be considered inaccurate and unsafe. + # + # For running Unicorn behind nginx, it is recommended to set + # "fail_timeout=0" for in your nginx configuration like this + # to have nginx always retry backends that may have had workers + # SIGKILL-ed due to timeouts. + # + # # See http://wiki.nginx.org/NginxHttpUpstreamModule for more details + # # on nginx upstream configuration: + # upstream unicorn_backend { + # # for UNIX domain socket setups: + # server unix:/path/to/unicorn.sock fail_timeout=0; + # + # # for TCP setups + # server 192.168.0.7:8080 fail_timeout=0; + # server 192.168.0.8:8080 fail_timeout=0; + # server 192.168.0.9:8080 fail_timeout=0; + # } + def timeout(seconds) + Numeric === seconds or raise ArgumentError, + "not numeric: timeout=#{seconds.inspect}" + seconds >= 3 or raise ArgumentError, + "too low: timeout=#{seconds.inspect}" + set[:timeout] = seconds + end - # sets listeners to the given +addresses+, replacing or augmenting the - # current set. This is for the global listener pool shared by all - # worker processes. For per-worker listeners, see the after_fork example - # This is for internal API use only, do not use it in your Unicorn - # config file. Use listen instead. - def listeners(addresses) # :nodoc: - Array === addresses or addresses = Array(addresses) - addresses.map! 
{ |addr| expand_addr(addr) } - set[:listeners] = addresses - end + # sets the current number of worker_processes to +nr+. Each worker + # process will serve exactly one client at a time. You can + # increment or decrement this value at runtime by sending SIGTTIN + # or SIGTTOU respectively to the master process without reloading + # the rest of your Unicorn configuration. See the SIGNALS document + # for more information. + def worker_processes(nr) + Integer === nr or raise ArgumentError, + "not an integer: worker_processes=#{nr.inspect}" + nr >= 0 or raise ArgumentError, + "not non-negative: worker_processes=#{nr.inspect}" + set[:worker_processes] = nr + end + + # sets listeners to the given +addresses+, replacing or augmenting the + # current set. This is for the global listener pool shared by all + # worker processes. For per-worker listeners, see the after_fork example + # This is for internal API use only, do not use it in your Unicorn + # config file. Use listen instead. + def listeners(addresses) # :nodoc: + Array === addresses or addresses = Array(addresses) + addresses.map! { |addr| expand_addr(addr) } + set[:listeners] = addresses + end - # adds an +address+ to the existing listener set. - # - # The following options may be specified (but are generally not needed): - # - # +:backlog+: this is the backlog of the listen() syscall. - # - # Some operating systems allow negative values here to specify the - # maximum allowable value. In most cases, this number is only - # recommendation and there are other OS-specific tunables and - # variables that can affect this number. See the listen(2) - # syscall documentation of your OS for the exact semantics of - # this. - # - # If you are running unicorn on multiple machines, lowering this number - # can help your load balancer detect when a machine is overloaded - # and give requests to a different machine. 
- # - # Default: 1024 - # - # +:rcvbuf+, +:sndbuf+: maximum receive and send buffer sizes of sockets - # - # These correspond to the SO_RCVBUF and SO_SNDBUF settings which - # can be set via the setsockopt(2) syscall. Some kernels - # (e.g. Linux 2.4+) have intelligent auto-tuning mechanisms and - # there is no need (and it is sometimes detrimental) to specify them. - # - # See the socket API documentation of your operating system - # to determine the exact semantics of these settings and - # other operating system-specific knobs where they can be - # specified. - # - # Defaults: operating system defaults - # - # +:tcp_nodelay+: disables Nagle's algorithm on TCP sockets - # - # This has no effect on UNIX sockets. - # - # Default: operating system defaults (usually Nagle's algorithm enabled) - # - # +:tcp_nopush+: enables TCP_CORK in Linux or TCP_NOPUSH in FreeBSD - # - # This will prevent partial TCP frames from being sent out. - # Enabling +tcp_nopush+ is generally not needed or recommended as - # controlling +tcp_nodelay+ already provides sufficient latency - # reduction whereas Unicorn does not know when the best times are - # for flushing corked sockets. - # - # This has no effect on UNIX sockets. - # - # +:tries+: times to retry binding a socket if it is already in use - # - # A negative number indicates we will retry indefinitely, this is - # useful for migrations and upgrades when individual workers - # are binding to different ports. - # - # Default: 5 - # - # +:delay+: seconds to wait between successive +tries+ - # - # Default: 0.5 seconds - # - # +:umask+: sets the file mode creation mask for UNIX sockets - # - # Typically UNIX domain sockets are created with more liberal - # file permissions than the rest of the application. By default, - # we create UNIX domain sockets to be readable and writable by - # all local users to give them the same accessibility as - # locally-bound TCP listeners. - # - # This has no effect on TCP listeners. 
- # - # Default: 0 (world read/writable) - # - # +:tcp_defer_accept:+ defer accept() until data is ready (Linux-only) - # - # For Linux 2.6.32 and later, this is the number of retransmits to - # defer an accept() for if no data arrives, but the client will - # eventually be accepted after the specified number of retransmits - # regardless of whether data is ready. - # - # For Linux before 2.6.32, this is a boolean option, and - # accepts are _always_ deferred indefinitely if no data arrives. - # This is similar to :accept_filter => "dataready" - # under FreeBSD. - # - # Specifying +true+ is synonymous for the default value(s) below, - # and +false+ or +nil+ is synonymous for a value of zero. - # - # A value of +1+ is a good optimization for local networks - # and trusted clients. For Rainbows! and Zbatery users, a higher - # value (e.g. +60+) provides more protection against some - # denial-of-service attacks. There is no good reason to ever - # disable this with a +zero+ value when serving HTTP. - # - # Default: 1 retransmit for \Unicorn, 60 for Rainbows! 0.95.0\+ - # - # +:accept_filter: defer accept() until data is ready (FreeBSD-only) - # - # This enables either the "dataready" or (default) "httpready" - # accept() filter under FreeBSD. This is intended as an - # optimization to reduce context switches with common GET/HEAD - # requests. For Rainbows! and Zbatery users, this provides - # some protection against certain denial-of-service attacks, too. - # - # There is no good reason to change from the default. - # - # Default: "httpready" - def listen(address, opt = {}) - address = expand_addr(address) - if String === address - [ :umask, :backlog, :sndbuf, :rcvbuf, :tries ].each do |key| - value = opt[key] or next - Integer === value or - raise ArgumentError, "not an integer: #{key}=#{value.inspect}" - end - [ :tcp_nodelay, :tcp_nopush ].each do |key| - (value = opt[key]).nil? 
and next - TrueClass === value || FalseClass === value or - raise ArgumentError, "not boolean: #{key}=#{value.inspect}" - end - unless (value = opt[:delay]).nil? - Numeric === value or - raise ArgumentError, "not numeric: delay=#{value.inspect}" - end - set[:listener_opts][address].merge!(opt) + # adds an +address+ to the existing listener set. + # + # The following options may be specified (but are generally not needed): + # + # +:backlog+: this is the backlog of the listen() syscall. + # + # Some operating systems allow negative values here to specify the + # maximum allowable value. In most cases, this number is only + # recommendation and there are other OS-specific tunables and + # variables that can affect this number. See the listen(2) + # syscall documentation of your OS for the exact semantics of + # this. + # + # If you are running unicorn on multiple machines, lowering this number + # can help your load balancer detect when a machine is overloaded + # and give requests to a different machine. + # + # Default: 1024 + # + # +:rcvbuf+, +:sndbuf+: maximum receive and send buffer sizes of sockets + # + # These correspond to the SO_RCVBUF and SO_SNDBUF settings which + # can be set via the setsockopt(2) syscall. Some kernels + # (e.g. Linux 2.4+) have intelligent auto-tuning mechanisms and + # there is no need (and it is sometimes detrimental) to specify them. + # + # See the socket API documentation of your operating system + # to determine the exact semantics of these settings and + # other operating system-specific knobs where they can be + # specified. + # + # Defaults: operating system defaults + # + # +:tcp_nodelay+: disables Nagle's algorithm on TCP sockets + # + # This has no effect on UNIX sockets. + # + # Default: operating system defaults (usually Nagle's algorithm enabled) + # + # +:tcp_nopush+: enables TCP_CORK in Linux or TCP_NOPUSH in FreeBSD + # + # This will prevent partial TCP frames from being sent out. 
+ # Enabling +tcp_nopush+ is generally not needed or recommended as + # controlling +tcp_nodelay+ already provides sufficient latency + # reduction whereas Unicorn does not know when the best times are + # for flushing corked sockets. + # + # This has no effect on UNIX sockets. + # + # +:tries+: times to retry binding a socket if it is already in use + # + # A negative number indicates we will retry indefinitely, this is + # useful for migrations and upgrades when individual workers + # are binding to different ports. + # + # Default: 5 + # + # +:delay+: seconds to wait between successive +tries+ + # + # Default: 0.5 seconds + # + # +:umask+: sets the file mode creation mask for UNIX sockets + # + # Typically UNIX domain sockets are created with more liberal + # file permissions than the rest of the application. By default, + # we create UNIX domain sockets to be readable and writable by + # all local users to give them the same accessibility as + # locally-bound TCP listeners. + # + # This has no effect on TCP listeners. + # + # Default: 0 (world read/writable) + # + # +:tcp_defer_accept:+ defer accept() until data is ready (Linux-only) + # + # For Linux 2.6.32 and later, this is the number of retransmits to + # defer an accept() for if no data arrives, but the client will + # eventually be accepted after the specified number of retransmits + # regardless of whether data is ready. + # + # For Linux before 2.6.32, this is a boolean option, and + # accepts are _always_ deferred indefinitely if no data arrives. + # This is similar to :accept_filter => "dataready" + # under FreeBSD. + # + # Specifying +true+ is synonymous for the default value(s) below, + # and +false+ or +nil+ is synonymous for a value of zero. + # + # A value of +1+ is a good optimization for local networks + # and trusted clients. For Rainbows! and Zbatery users, a higher + # value (e.g. +60+) provides more protection against some + # denial-of-service attacks. 
There is no good reason to ever + # disable this with a +zero+ value when serving HTTP. + # + # Default: 1 retransmit for \Unicorn, 60 for Rainbows! 0.95.0\+ + # + # +:accept_filter: defer accept() until data is ready (FreeBSD-only) + # + # This enables either the "dataready" or (default) "httpready" + # accept() filter under FreeBSD. This is intended as an + # optimization to reduce context switches with common GET/HEAD + # requests. For Rainbows! and Zbatery users, this provides + # some protection against certain denial-of-service attacks, too. + # + # There is no good reason to change from the default. + # + # Default: "httpready" + def listen(address, opt = {}) + address = expand_addr(address) + if String === address + [ :umask, :backlog, :sndbuf, :rcvbuf, :tries ].each do |key| + value = opt[key] or next + Integer === value or + raise ArgumentError, "not an integer: #{key}=#{value.inspect}" + end + [ :tcp_nodelay, :tcp_nopush ].each do |key| + (value = opt[key]).nil? and next + TrueClass === value || FalseClass === value or + raise ArgumentError, "not boolean: #{key}=#{value.inspect}" + end + unless (value = opt[:delay]).nil? + Numeric === value or + raise ArgumentError, "not numeric: delay=#{value.inspect}" end + set[:listener_opts][address].merge!(opt) + end + + set[:listeners] << address + end - set[:listeners] << address + # sets the +path+ for the PID file of the unicorn master process + def pid(path); set_path(:pid, path); end + + # Enabling this preloads an application before forking worker + # processes. This allows memory savings when using a + # copy-on-write-friendly GC but can cause bad things to happen when + # resources like sockets are opened at load time by the master + # process and shared by multiple children. People enabling this are + # highly encouraged to look at the before_fork/after_fork hooks to + # properly close/reopen sockets. 
Files opened for logging do not + # have to be reopened as (unbuffered-in-userspace) files opened with + # the File::APPEND flag are written to atomically on UNIX. + # + # In addition to reloading the unicorn-specific config settings, + # SIGHUP will reload application code in the working + # directory/symlink when workers are gracefully restarted when + # preload_app=false (the default). As reloading the application + # sometimes requires RubyGems updates, +Gem.refresh+ is always + # called before the application is loaded (for RubyGems users). + # + # During deployments, care should _always_ be taken to ensure your + # applications are properly deployed and running. Using + # preload_app=false (the default) means you _must_ check if + # your application is responding properly after a deployment. + # Improperly deployed applications can go into a spawn loop + # if the application fails to load. While your children are + # in a spawn loop, it is is possible to fix an application + # by properly deploying all required code and dependencies. + # Using preload_app=true means any application load error will + # cause the master process to exit with an error. + + def preload_app(bool) + case bool + when TrueClass, FalseClass + set[:preload_app] = bool + else + raise ArgumentError, "preload_app=#{bool.inspect} not a boolean" end + end - # sets the +path+ for the PID file of the unicorn master process - def pid(path); set_path(:pid, path); end - - # Enabling this preloads an application before forking worker - # processes. This allows memory savings when using a - # copy-on-write-friendly GC but can cause bad things to happen when - # resources like sockets are opened at load time by the master - # process and shared by multiple children. People enabling this are - # highly encouraged to look at the before_fork/after_fork hooks to - # properly close/reopen sockets. 
Files opened for logging do not - # have to be reopened as (unbuffered-in-userspace) files opened with - # the File::APPEND flag are written to atomically on UNIX. - # - # In addition to reloading the unicorn-specific config settings, - # SIGHUP will reload application code in the working - # directory/symlink when workers are gracefully restarted when - # preload_app=false (the default). As reloading the application - # sometimes requires RubyGems updates, +Gem.refresh+ is always - # called before the application is loaded (for RubyGems users). - # - # During deployments, care should _always_ be taken to ensure your - # applications are properly deployed and running. Using - # preload_app=false (the default) means you _must_ check if - # your application is responding properly after a deployment. - # Improperly deployed applications can go into a spawn loop - # if the application fails to load. While your children are - # in a spawn loop, it is is possible to fix an application - # by properly deploying all required code and dependencies. - # Using preload_app=true means any application load error will - # cause the master process to exit with an error. - - def preload_app(bool) - case bool - when TrueClass, FalseClass - set[:preload_app] = bool - else - raise ArgumentError, "preload_app=#{bool.inspect} not a boolean" - end + # Allow redirecting $stderr to a given path. Unlike doing this from + # the shell, this allows the unicorn process to know the path its + # writing to and rotate the file if it is used for logging. The + # file will be opened with the File::APPEND flag and writes + # synchronized to the kernel (but not necessarily to _disk_) so + # multiple processes can safely append to it. + # + # If you are daemonizing and using the default +logger+, it is important + # to specify this as errors will otherwise be lost to /dev/null. + # Some applications/libraries may also triggering warnings that go to + # stderr, and they will end up here. 
+ def stderr_path(path) + set_path(:stderr_path, path) + end + + # Same as stderr_path, except for $stdout. Not many Rack applications + # write to $stdout, but any that do will have their output written here. + # It is safe to point this to the same location a stderr_path. + # Like stderr_path, this defaults to /dev/null when daemonized. + def stdout_path(path) + set_path(:stdout_path, path) + end + + # sets the working directory for Unicorn. This ensures SIGUSR2 will + # start a new instance of Unicorn in this directory. This may be + # a symlink, a common scenario for Capistrano users. + def working_directory(path) + # just let chdir raise errors + path = File.expand_path(path) + if config_file && + config_file[0] != ?/ && + ! File.readable?("#{path}/#{config_file}") + raise ArgumentError, + "config_file=#{config_file} would not be accessible in" \ + " working_directory=#{path}" end + Dir.chdir(path) + Unicorn::HttpServer::START_CTX[:cwd] = ENV["PWD"] = path + end + + # Runs worker processes as the specified +user+ and +group+. + # The master process always stays running as the user who started it. + # This switch will occur after calling the after_fork hook, and only + # if the Worker#user method is not called in the after_fork hook + def user(user, group = nil) + # raises ArgumentError on invalid user/group + Etc.getpwnam(user) + Etc.getgrnam(group) if group + set[:user] = [ user, group ] + end - # Allow redirecting $stderr to a given path. Unlike doing this from - # the shell, this allows the unicorn process to know the path its - # writing to and rotate the file if it is used for logging. The - # file will be opened with the File::APPEND flag and writes - # synchronized to the kernel (but not necessarily to _disk_) so - # multiple processes can safely append to it. - # - # If you are daemonizing and using the default +logger+, it is important - # to specify this as errors will otherwise be lost to /dev/null. 
- # Some applications/libraries may also triggering warnings that go to - # stderr, and they will end up here. - def stderr_path(path) - set_path(:stderr_path, path) + # expands "unix:path/to/foo" to a socket relative to the current path + # expands pathnames of sockets if relative to "~" or "~username" + # expands "*:port and ":port" to "0.0.0.0:port" + def expand_addr(address) #:nodoc + return "0.0.0.0:#{address}" if Integer === address + return address unless String === address + + case address + when %r{\Aunix:(.*)\z} + File.expand_path($1) + when %r{\A~} + File.expand_path(address) + when %r{\A(?:\*:)?(\d+)\z} + "0.0.0.0:#$1" + when %r{\A(.*):(\d+)\z} + # canonicalize the name + packed = Socket.pack_sockaddr_in($2.to_i, $1) + Socket.unpack_sockaddr_in(packed).reverse!.join(':') + else + address end + end + +private - # Same as stderr_path, except for $stdout. Not many Rack applications - # write to $stdout, but any that do will have their output written here. - # It is safe to point this to the same location a stderr_path. - # Like stderr_path, this defaults to /dev/null when daemonized. - def stdout_path(path) - set_path(:stdout_path, path) + def set_path(var, path) #:nodoc: + case path + when NilClass, String + set[var] = path + else + raise ArgumentError end + end - # sets the working directory for Unicorn. This ensures SIGUSR2 will - # start a new instance of Unicorn in this directory. This may be - # a symlink, a common scenario for Capistrano users. - def working_directory(path) - # just let chdir raise errors - path = File.expand_path(path) - if config_file && - config_file[0] != ?/ && - ! 
File.readable?("#{path}/#{config_file}") + def set_hook(var, my_proc, req_arity = 2) #:nodoc: + case my_proc + when Proc + arity = my_proc.arity + (arity == req_arity) or \ raise ArgumentError, - "config_file=#{config_file} would not be accessible in" \ - " working_directory=#{path}" - end - Dir.chdir(path) - HttpServer::START_CTX[:cwd] = ENV["PWD"] = path + "#{var}=#{my_proc.inspect} has invalid arity: " \ + "#{arity} (need #{req_arity})" + when NilClass + my_proc = DEFAULTS[var] + else + raise ArgumentError, "invalid type: #{var}=#{my_proc.inspect}" end + set[var] = my_proc + end - # Runs worker processes as the specified +user+ and +group+. - # The master process always stays running as the user who started it. - # This switch will occur after calling the after_fork hook, and only - # if the Worker#user method is not called in the after_fork hook - def user(user, group = nil) - # raises ArgumentError on invalid user/group - Etc.getpwnam(user) - Etc.getgrnam(group) if group - set[:user] = [ user, group ] - end + # this is called _after_ working_directory is bound. 
This only + # parses the embedded switches in .ru files + # (for "rackup" compatibility) + def parse_rackup_file # :nodoc: + ru = RACKUP[:file] or return # we only return here in unit tests - # expands "unix:path/to/foo" to a socket relative to the current path - # expands pathnames of sockets if relative to "~" or "~username" - # expands "*:port and ":port" to "0.0.0.0:port" - def expand_addr(address) #:nodoc - return "0.0.0.0:#{address}" if Integer === address - return address unless String === address - - case address - when %r{\Aunix:(.*)\z} - File.expand_path($1) - when %r{\A~} - File.expand_path(address) - when %r{\A(?:\*:)?(\d+)\z} - "0.0.0.0:#$1" - when %r{\A(.*):(\d+)\z} - # canonicalize the name - packed = Socket.pack_sockaddr_in($2.to_i, $1) - Socket.unpack_sockaddr_in(packed).reverse!.join(':') - else - address - end + # :rails means use (old) Rails autodetect + if ru == :rails + File.readable?('config.ru') or return + ru = 'config.ru' end - private + File.readable?(ru) or + raise ArgumentError, "rackup file (#{ru}) not readable" - def set_path(var, path) #:nodoc: - case path - when NilClass, String - set[var] = path - else - raise ArgumentError - end - end + # it could be a .rb file, too, we don't parse those manually + ru =~ /\.ru\z/ or return - def set_hook(var, my_proc, req_arity = 2) #:nodoc: - case my_proc - when Proc - arity = my_proc.arity - (arity == req_arity) or \ - raise ArgumentError, - "#{var}=#{my_proc.inspect} has invalid arity: " \ - "#{arity} (need #{req_arity})" - when NilClass - my_proc = DEFAULTS[var] - else - raise ArgumentError, "invalid type: #{var}=#{my_proc.inspect}" - end - set[var] = my_proc - end + /^#\\(.*)/ =~ File.read(ru) or return + RACKUP[:optparse].parse!($1.split(/\s+/)) - # this is called _after_ working_directory is bound. 
This only - # parses the embedded switches in .ru files - # (for "rackup" compatibility) - def parse_rackup_file # :nodoc: - ru = RACKUP[:file] or return # we only return here in unit tests + # XXX ugly as hell, WILL FIX in 2.x (along with Rainbows!/Zbatery) + host, port, set_listener, options, daemonize = + eval("[ host, port, set_listener, options, daemonize ]", + TOPLEVEL_BINDING) - # :rails means use (old) Rails autodetect - if ru == :rails - File.readable?('config.ru') or return - ru = 'config.ru' - end + # XXX duplicate code from bin/unicorn{,_rails} + set[:listeners] << "#{host}:#{port}" if set_listener - File.readable?(ru) or - raise ArgumentError, "rackup file (#{ru}) not readable" - - # it could be a .rb file, too, we don't parse those manually - ru =~ /\.ru\z/ or return - - /^#\\(.*)/ =~ File.read(ru) or return - RACKUP[:optparse].parse!($1.split(/\s+/)) - - # XXX ugly as hell, WILL FIX in 2.x (along with Rainbows!/Zbatery) - host, port, set_listener, options, daemonize = - eval("[ host, port, set_listener, options, daemonize ]", - TOPLEVEL_BINDING) - - # XXX duplicate code from bin/unicorn{,_rails} - set[:listeners] << "#{host}:#{port}" if set_listener - - if daemonize - # unicorn_rails wants a default pid path, (not plain 'unicorn') - if after_reload - spid = set[:pid] - pid('tmp/pids/unicorn.pid') if spid.nil? || spid == :unset - end - unless RACKUP[:daemonized] - Unicorn::Launcher.daemonize!(options) - RACKUP[:ready_pipe] = options.delete(:ready_pipe) - end + if daemonize + # unicorn_rails wants a default pid path, (not plain 'unicorn') + if after_reload + spid = set[:pid] + pid('tmp/pids/unicorn.pid') if spid.nil? 
|| spid == :unset + end + unless RACKUP[:daemonized] + Unicorn::Launcher.daemonize!(options) + RACKUP[:ready_pipe] = options.delete(:ready_pipe) end end - end end -- cgit v1.2.3-24-ge0c7 From 8dbcf5390091b7aedf8c1f76ca11659c13d931b4 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Tue, 6 Jul 2010 12:51:24 -0700 Subject: socket_helper: no reason to check for logger method We only use this module in HttpServer and our unit test mocks it properly. (cherry picked from commit e0ea1e1548a807d152c0ffc175915e98addfe1f2) --- lib/unicorn/socket_helper.rb | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/lib/unicorn/socket_helper.rb b/lib/unicorn/socket_helper.rb index 0369c92..769cdf1 100644 --- a/lib/unicorn/socket_helper.rb +++ b/lib/unicorn/socket_helper.rb @@ -101,14 +101,11 @@ module Unicorn end sock.listen(opt[:backlog] || 1024) rescue => e - if respond_to?(:logger) - logger.error "error setting socket options: #{e.inspect}" - logger.error e.backtrace.join("\n") - end + logger.error "error setting socket options: #{e.inspect}" + logger.error e.backtrace.join("\n") end def log_buffer_sizes(sock, pfx = '') - respond_to?(:logger) or return rcvbuf = sock.getsockopt(SOL_SOCKET, SO_RCVBUF).unpack('i') sndbuf = sock.getsockopt(SOL_SOCKET, SO_SNDBUF).unpack('i') logger.info "#{pfx}#{sock_name(sock)} rcvbuf=#{rcvbuf} sndbuf=#{sndbuf}" @@ -123,9 +120,7 @@ module Unicorn sock = if address[0] == ?/ if File.exist?(address) if File.socket?(address) - if self.respond_to?(:logger) - logger.info "unlinking existing socket=#{address}" - end + logger.info "unlinking existing socket=#{address}" File.unlink(address) else raise ArgumentError, -- cgit v1.2.3-24-ge0c7 From da0160d1eaf9fda309939253e51d88bb20f03ff4 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Tue, 6 Jul 2010 12:59:45 -0700 Subject: socket_helper: cleanup FreeBSD accf_* detection Instead of detecting at startup if filters may be used, just try anyways and log the error. 
It is better to ask for forgiveness than permission :) (cherry picked from commit 2b4b15cf513f66dc7a5aabaae4491c17895c288c) --- lib/unicorn/socket_helper.rb | 33 ++++++++++++--------------------- 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/lib/unicorn/socket_helper.rb b/lib/unicorn/socket_helper.rb index 769cdf1..08b3d06 100644 --- a/lib/unicorn/socket_helper.rb +++ b/lib/unicorn/socket_helper.rb @@ -31,28 +31,13 @@ module Unicorn # do not send out partial frames (Linux) TCP_CORK = 3 unless defined?(TCP_CORK) - when /freebsd(([1-4]\..{1,2})|5\.[0-4])/ - # Do nothing for httpready, just closing a bug when freebsd <= 5.4 - TCP_NOPUSH = 4 unless defined?(TCP_NOPUSH) # :nodoc: when /freebsd/ # do not send out partial frames (FreeBSD) TCP_NOPUSH = 4 unless defined?(TCP_NOPUSH) - # Use the HTTP accept filter if available. - # The struct made by pack() is defined in /usr/include/sys/socket.h - # as accept_filter_arg - unless `/sbin/sysctl -nq net.inet.accf.http`.empty? - # struct accept_filter_arg { - # char af_name[16]; - # char af_arg[240]; - # }; - # - # +af_name+ is either "httpready" or "dataready", - # though other filters may be supported by FreeBSD - def accf_arg(af_name) - [ af_name, nil ].pack('a16a240') - end - end + def accf_arg(af_name) + [ af_name, nil ].pack('a16a240') + end if defined?(SO_ACCEPTFILTER) end def set_tcp_sockopt(sock, opt) @@ -81,10 +66,16 @@ module Unicorn seconds = DEFAULTS[:tcp_defer_accept] if seconds == true seconds = 0 unless seconds # nil/false means disable this sock.setsockopt(SOL_TCP, TCP_DEFER_ACCEPT, seconds) - elsif defined?(SO_ACCEPTFILTER) && respond_to?(:accf_arg) + elsif respond_to?(:accf_arg) tmp = DEFAULTS.merge(opt) - name = tmp[:accept_filter] and - sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, accf_arg(name)) + if name = tmp[:accept_filter] + begin + sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, accf_arg(name)) + rescue => e + logger.error("#{sock_name(sock)} " \ + "failed to set accept_filter=#{name} 
(#{e.inspect})") + end + end end end -- cgit v1.2.3-24-ge0c7 From a77c60a372273b24866346482255c4cf21240d60 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Tue, 6 Jul 2010 14:17:02 -0700 Subject: socket_helper: disable documentation (cherry picked from commit 98c51edf8b6f031a655a93b52808c9f9b78fb6fa) --- lib/unicorn/socket_helper.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/unicorn/socket_helper.rb b/lib/unicorn/socket_helper.rb index 08b3d06..9a155e1 100644 --- a/lib/unicorn/socket_helper.rb +++ b/lib/unicorn/socket_helper.rb @@ -1,5 +1,5 @@ # -*- encoding: binary -*- - +# :enddoc: require 'socket' module Unicorn -- cgit v1.2.3-24-ge0c7 From 4fa17dfb4adef0945d73e692147a3302b8dd9b74 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Thu, 8 Jul 2010 05:14:55 +0000 Subject: tee_input: safer record separator ($/) handling Different threads may change $/ during execution, so cache it at function entry to a local variable for safety. $/ may also be of a non-binary encoding, so rely on Rack::Utils.bytesize to portably capture the correct size. Our string slicing is always safe from 1.9 encoding: both our socket and backing temporary file are opened in binary mode, so we'll always be dealing with binary strings in this class (in accordance to the Rack spec). (cherry picked from commit 1cd698f8c7938b1f19e9ba091708cb4515187939) --- lib/unicorn/tee_input.rb | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/unicorn/tee_input.rb b/lib/unicorn/tee_input.rb index d1d273d..c0f916e 100644 --- a/lib/unicorn/tee_input.rb +++ b/lib/unicorn/tee_input.rb @@ -119,7 +119,7 @@ class Unicorn::TeeInput < Struct.new(:socket, :req, :parser, # unlike IO#gets. 
def gets socket or return tmp.gets - nil == $/ and return read + sep = $/ or return read orig_size = tmp.size if tmp.pos == orig_size @@ -127,8 +127,9 @@ class Unicorn::TeeInput < Struct.new(:socket, :req, :parser, tmp.seek(orig_size) end + sep_size = Rack::Utils.bytesize(sep) line = tmp.gets # cannot be nil here since size > pos - $/ == line[-$/.size, $/.size] and return line + sep == line[-sep_size, sep_size] and return line # unlikely, if we got here, then tmp is at EOF begin @@ -136,7 +137,7 @@ class Unicorn::TeeInput < Struct.new(:socket, :req, :parser, tee(@@io_chunk_size, buf2) or break tmp.seek(orig_size) line << tmp.gets - $/ == line[-$/.size, $/.size] and return line + sep == line[-sep_size, sep_size] and return line # tmp is at EOF again here, retry the loop end while true -- cgit v1.2.3-24-ge0c7 From 6ea71159e2557eca51d2388f07ab552c69a44dd7 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Thu, 8 Jul 2010 05:33:49 +0000 Subject: prefer "[]" to "first"/"last" where possible "[]" is slightly faster under Ruby 1.9 (but slightly slower under 1.8). 
(cherry picked from commit 5ece8c1c33f10e6496dfe5ae1d0d368293278d2d) --- lib/unicorn.rb | 12 ++++++------ lib/unicorn/http_request.rb | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/unicorn.rb b/lib/unicorn.rb index a7b0646..e60dd61 100644 --- a/lib/unicorn.rb +++ b/lib/unicorn.rb @@ -482,8 +482,8 @@ module Unicorn # Wake up every second anyways to run murder_lazy_workers def master_sleep(sec) begin - IO.select([ SELF_PIPE.first ], nil, nil, sec) or return - SELF_PIPE.first.read_nonblock(Const::CHUNK_SIZE, HttpRequest::BUF) + IO.select([ SELF_PIPE[0] ], nil, nil, sec) or return + SELF_PIPE[0].read_nonblock(Const::CHUNK_SIZE, HttpRequest::BUF) rescue Errno::EAGAIN, Errno::EINTR break end while true @@ -491,7 +491,7 @@ module Unicorn def awaken_master begin - SELF_PIPE.last.write_nonblock('.') # wakeup master process from select + SELF_PIPE[1].write_nonblock('.') # wakeup master process from select rescue Errno::EAGAIN, Errno::EINTR # pipe is full, master should wake up anyways retry @@ -635,7 +635,7 @@ module Unicorn client.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) response = app.call(env = REQUEST.read(client)) - if 100 == response.first.to_i + if 100 == response[0].to_i client.write(Const::EXPECT_100_RESPONSE) env.delete(Const::HTTP_EXPECT) response = app.call(env) @@ -684,7 +684,7 @@ module Unicorn ready = LISTENERS # closing anything we IO.select on will raise EBADF - trap(:USR1) { nr = -65536; SELF_PIPE.first.close rescue nil } + trap(:USR1) { nr = -65536; SELF_PIPE[0].close rescue nil } trap(:QUIT) { alive = nil; LISTENERS.each { |s| s.close rescue nil } } [:TERM, :INT].each { |sig| trap(sig) { exit!(0) } } # instant shutdown logger.info "worker=#{worker.nr} ready" @@ -725,7 +725,7 @@ module Unicorn begin # timeout used so we can detect parent death: ret = IO.select(LISTENERS, nil, SELF_PIPE, timeout) or redo - ready = ret.first + ready = ret[0] rescue Errno::EINTR ready = LISTENERS rescue Errno::EBADF diff --git 
a/lib/unicorn/http_request.rb b/lib/unicorn/http_request.rb index 65b09fa..8c369cf 100644 --- a/lib/unicorn/http_request.rb +++ b/lib/unicorn/http_request.rb @@ -53,7 +53,7 @@ module Unicorn # that client may be a proxy, gateway, or other intermediary # acting on behalf of the actual source client." REQ[Const::REMOTE_ADDR] = - TCPSocket === socket ? socket.peeraddr.last : LOCALHOST + TCPSocket === socket ? socket.peeraddr[-1] : LOCALHOST # short circuit the common case with small GET requests first if PARSER.headers(REQ, socket.readpartial(Const::CHUNK_SIZE, BUF)).nil? -- cgit v1.2.3-24-ge0c7 From aad1fdfc17e2fe1a6308690daf74456877796f51 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Thu, 8 Jul 2010 05:45:22 +0000 Subject: tee_input: safer record separator ($/) handling Different threads may change $/ during execution, so cache it at function entry to a local variable for safety. $/ may also be of a non-binary encoding, so rely on Rack::Utils.bytesize to portably capture the correct size. Our string slicing is always safe from 1.9 encoding: both our socket and backing temporary file are opened in binary mode, so we'll always be dealing with binary strings in this class (in accordance to the Rack spec). (cherry picked from commit 1cd698f8c7938b1f19e9ba091708cb4515187939) --- lib/unicorn/tee_input.rb | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/unicorn/tee_input.rb b/lib/unicorn/tee_input.rb index 8ff7258..563747c 100644 --- a/lib/unicorn/tee_input.rb +++ b/lib/unicorn/tee_input.rb @@ -109,7 +109,7 @@ module Unicorn # unlike IO#gets. 
def gets socket or return tmp.gets - nil == $/ and return read + sep = $/ or return read orig_size = tmp.size if tmp.pos == orig_size @@ -117,8 +117,9 @@ module Unicorn tmp.seek(orig_size) end + sep_size = Rack::Utils.bytesize(sep) line = tmp.gets # cannot be nil here since size > pos - $/ == line[-$/.size, $/.size] and return line + sep == line[-sep_size, sep_size] and return line # unlikely, if we got here, then tmp is at EOF begin @@ -126,7 +127,7 @@ module Unicorn tee(Const::CHUNK_SIZE, buf2) or break tmp.seek(orig_size) line << tmp.gets - $/ == line[-$/.size, $/.size] and return line + sep == line[-sep_size, sep_size] and return line # tmp is at EOF again here, retry the loop end while true -- cgit v1.2.3-24-ge0c7 From aa8262b71a207e396cb984a39f1cc917146bcb4b Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Thu, 8 Jul 2010 05:54:25 +0000 Subject: cleanup "stringio" require "stringio" is part of the Ruby distro and we use it in multiple places, so avoid re-requiring it. (cherry picked from commit 0fea004ab093ec4f59d919915a505a136326bd8a) --- lib/unicorn.rb | 1 + lib/unicorn/http_request.rb | 1 - lib/unicorn/tee_input.rb | 1 - test/test_helper.rb | 1 - 4 files changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/unicorn.rb b/lib/unicorn.rb index e60dd61..9e2d1c6 100644 --- a/lib/unicorn.rb +++ b/lib/unicorn.rb @@ -2,6 +2,7 @@ require 'fcntl' require 'etc' +require 'stringio' require 'rack' require 'unicorn/socket_helper' require 'unicorn/const' diff --git a/lib/unicorn/http_request.rb b/lib/unicorn/http_request.rb index 8c369cf..65870ed 100644 --- a/lib/unicorn/http_request.rb +++ b/lib/unicorn/http_request.rb @@ -1,6 +1,5 @@ # -*- encoding: binary -*- -require 'stringio' require 'unicorn_http' module Unicorn diff --git a/lib/unicorn/tee_input.rb b/lib/unicorn/tee_input.rb index c0f916e..a11420a 100644 --- a/lib/unicorn/tee_input.rb +++ b/lib/unicorn/tee_input.rb @@ -1,5 +1,4 @@ # -*- encoding: binary -*- -require 'stringio' # acts like tee(1) on an input 
input to provide a input-like stream # while providing rewindable semantics through a File/StringIO backing diff --git a/test/test_helper.rb b/test/test_helper.rb index f0da9c1..c4e56a2 100644 --- a/test/test_helper.rb +++ b/test/test_helper.rb @@ -32,7 +32,6 @@ require 'tempfile' require 'fileutils' require 'logger' require 'unicorn' -require 'unicorn_http' if ENV['DEBUG'] require 'ruby-debug' -- cgit v1.2.3-24-ge0c7 From 36ab744c7b79c47ca3d03fcf0f90eb3b8bba9b07 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Thu, 8 Jul 2010 07:31:15 +0000 Subject: unicorn 1.1.0 - small changes and cleanups This is a small, incremental feature release with some internal changes to better support upcoming versions of the Rainbows! and Zbatery web servers. There is no need to upgrade if you're happy with 1.0.0, but also little danger in upgrading. There is one pedantic bugfix which shouldn't affect anyone and small documentation updates as well. --- lib/unicorn/const.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/unicorn/const.rb b/lib/unicorn/const.rb index d3ccead..f572177 100644 --- a/lib/unicorn/const.rb +++ b/lib/unicorn/const.rb @@ -8,8 +8,8 @@ module Unicorn # Symbols did not really improve things much compared to constants. 
module Const - # The current version of Unicorn, currently 1.0.0 - UNICORN_VERSION="1.0.0" + # The current version of Unicorn, currently 1.1.0 + UNICORN_VERSION="1.1.0" DEFAULT_HOST = "0.0.0.0" # default TCP listen host address DEFAULT_PORT = 8080 # default TCP listen port -- cgit v1.2.3-24-ge0c7 From 454a81378bf88dccb081b2cd7e7126a66a92f013 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Sun, 11 Jul 2010 02:05:01 +0000 Subject: tee_input: fix constant resolution for client EOF Noticed while hacking on a Zbatery-using application (cherry picked from commit ac15513bb81a345cd12c67702a81a585b8b0514e) --- lib/unicorn/tee_input.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/unicorn/tee_input.rb b/lib/unicorn/tee_input.rb index a11420a..540cfe0 100644 --- a/lib/unicorn/tee_input.rb +++ b/lib/unicorn/tee_input.rb @@ -176,8 +176,8 @@ private # _entire_ request has been sent, and those will not have # raised EOFError on us. socket.close if socket - raise ClientShutdown, "bytes_read=#{tmp.size}", [] - when HttpParserError + raise Unicorn::ClientShutdown, "bytes_read=#{tmp.size}", [] + when Unicorn::HttpParserError e.set_backtrace([]) end raise e -- cgit v1.2.3-24-ge0c7 From a055dcd9e15ed93c9bbd0d83b2f87c96a4733cfb Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Sun, 11 Jul 2010 02:10:54 +0000 Subject: unicorn 1.1.1 - fixing cleanups gone bad :x Unicorn::TeeInput constant resolution for Unicorn::ClientError got broken simplifying code for RDoc. This affects users of Rainbows! and Zbatery. 
--- GIT-VERSION-GEN | 2 +- GNUmakefile | 2 +- lib/unicorn/const.rb | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN index 88b943a..432f3c0 100755 --- a/GIT-VERSION-GEN +++ b/GIT-VERSION-GEN @@ -1,7 +1,7 @@ #!/bin/sh GVF=GIT-VERSION-FILE -DEF_VER=v1.0.0.GIT +DEF_VER=v1.1.1.GIT LF=' ' diff --git a/GNUmakefile b/GNUmakefile index b5fe9fd..3354ff1 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -169,7 +169,7 @@ NEWS: GIT-VERSION-FILE .manifest $(RAKE) -s news_rdoc > $@+ mv $@+ $@ -SINCE = 0.991.0 +SINCE = 1.0.0 ChangeLog: LOG_VERSION = \ $(shell git rev-parse -q "$(GIT_VERSION)" >/dev/null 2>&1 && \ echo $(GIT_VERSION) || git describe) diff --git a/lib/unicorn/const.rb b/lib/unicorn/const.rb index f572177..52fe201 100644 --- a/lib/unicorn/const.rb +++ b/lib/unicorn/const.rb @@ -8,8 +8,8 @@ module Unicorn # Symbols did not really improve things much compared to constants. module Const - # The current version of Unicorn, currently 1.1.0 - UNICORN_VERSION="1.1.0" + # The current version of Unicorn, currently 1.1.1 + UNICORN_VERSION="1.1.1" DEFAULT_HOST = "0.0.0.0" # default TCP listen host address DEFAULT_PORT = 8080 # default TCP listen port -- cgit v1.2.3-24-ge0c7 From 430c8cc9f8b5f1db9395531df4ebab655da5d958 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Tue, 13 Jul 2010 08:53:48 +0000 Subject: launcher: do not re-daemonize when USR2 upgrading This was accidentally enabled when ready_pipe was developed. While re-daemonizing appears harmless in most cases this makes detecting backed-out upgrades from the original master process impossible. 
(cherry picked from commit 3f0f9d6d72cf17b34c130b86eb933bbc513b24b3) --- lib/unicorn/launcher.rb | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/lib/unicorn/launcher.rb b/lib/unicorn/launcher.rb index 0d957cf..0d415dd 100644 --- a/lib/unicorn/launcher.rb +++ b/lib/unicorn/launcher.rb @@ -24,11 +24,7 @@ module Unicorn::Launcher # We only start a new process group if we're not being reexecuted # and inheriting file descriptors from our parent - if ENV['UNICORN_FD'] - exit if fork - Process.setsid - exit if fork - else + unless ENV['UNICORN_FD'] # grandparent - reads pipe, exits when master is ready # \_ parent - exits immediately ASAP # \_ unicorn master - writes to pipe when ready -- cgit v1.2.3-24-ge0c7 From 883e76b12d6034e5750d662c9e4f453888dbd154 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Tue, 13 Jul 2010 08:57:37 +0000 Subject: SIGHUP deals w/ dual master pid path scenario As described in our SIGNALS documentation, sending SIGHUP to the old master (to respawn SIGWINCH-ed children) while the new master (spawned from SIGUSR2) is active is useful for backing out of an upgrade before sending SIGQUIT to the new master. Unfortunately, the SIGHUP signal to the old master will cause the ".oldbin" pid file to be reset to the non-".oldbin" version and thus attempt to clobber the pid file in use by the to-be-terminated new master process. Thanks to the previous commit to prevent redaemonization in the new master, the old master can reliably detect if the new master is active while it is reloading the config file. Thanks to Lawrence Pit for discovering this bug. 
ref: http://mid.gmane.org/4C3BEACF.7040301@gmail.com (cherry picked from commit c13bec3449396b21795966101367838161612d61) --- lib/unicorn.rb | 5 ++ t/pid.ru | 3 ++ t/t0008-back_out_of_upgrade.sh | 110 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 118 insertions(+) create mode 100644 t/pid.ru create mode 100755 t/t0008-back_out_of_upgrade.sh diff --git a/lib/unicorn.rb b/lib/unicorn.rb index 9e2d1c6..c231a4d 100644 --- a/lib/unicorn.rb +++ b/lib/unicorn.rb @@ -313,6 +313,11 @@ module Unicorn if path if x = valid_pid?(path) return path if pid && path == pid && x == $$ + if x == reexec_pid && pid =~ /\.oldbin\z/ + logger.warn("will not set pid=#{path} while reexec-ed "\ + "child is running PID:#{x}") + return + end raise ArgumentError, "Already running on PID:#{x} " \ "(or pid=#{path} is stale)" end diff --git a/t/pid.ru b/t/pid.ru new file mode 100644 index 0000000..f5fd31f --- /dev/null +++ b/t/pid.ru @@ -0,0 +1,3 @@ +use Rack::ContentLength +use Rack::ContentType, "text/plain" +run lambda { |env| [ 200, {}, [ "#$$\n" ] ] } diff --git a/t/t0008-back_out_of_upgrade.sh b/t/t0008-back_out_of_upgrade.sh new file mode 100755 index 0000000..96d4057 --- /dev/null +++ b/t/t0008-back_out_of_upgrade.sh @@ -0,0 +1,110 @@ +#!/bin/sh +. 
./test-lib.sh +t_plan 13 "backout of USR2 upgrade" + +worker_wait_start () { + test xSTART = x"$(cat $fifo)" + unicorn_pid=$(cat $pid) +} + +t_begin "setup and start" && { + unicorn_setup + rm -f $pid.oldbin + +cat >> $unicorn_config </dev/null + do + i=$(( $i + 1 )) + test $i -lt 600 || die "timed out" + sleep 1 + done +} + +t_begin "capture pid of new worker" && { + new_worker_pid=$(curl -sSf http://$listen/) +} + +t_begin "reload old master process" && { + kill -HUP $orig_master_pid + worker_wait_start +} + +t_begin "gracefully kill new master and ensure it dies" && { + kill -QUIT $new_master_pid + i=0 + while kill -0 $new_worker_pid 2>/dev/null + do + i=$(( $i + 1 )) + test $i -lt 600 || die "timed out" + sleep 1 + done +} + +t_begin "ensure $pid.oldbin does not exist" && { + i=0 + while test -s $pid.oldbin + do + i=$(( $i + 1 )) + test $i -lt 600 || die "timed out" + sleep 1 + done + while ! test -s $pid + do + i=$(( $i + 1 )) + test $i -lt 600 || die "timed out" + sleep 1 + done +} + +t_begin "ensure $pid is correct" && { + cur_master_pid=$(cat $pid) + test $orig_master_pid -eq $cur_master_pid +} + +t_begin "killing succeeds" && { + kill $orig_master_pid +} + +dbgcat r_err + +t_done -- cgit v1.2.3-24-ge0c7 From ae1f5e2d331d1714dd1b71d4905b296abf7780d0 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Tue, 13 Jul 2010 08:53:48 +0000 Subject: launcher: do not re-daemonize when USR2 upgrading This was accidentally enabled when ready_pipe was developed. While re-daemonizing appears harmless in most cases this makes detecting backed-out upgrades from the original master process impossible. 
(cherry picked from commit 3f0f9d6d72cf17b34c130b86eb933bbc513b24b3) --- lib/unicorn/launcher.rb | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/lib/unicorn/launcher.rb b/lib/unicorn/launcher.rb index 0d957cf..0d415dd 100644 --- a/lib/unicorn/launcher.rb +++ b/lib/unicorn/launcher.rb @@ -24,11 +24,7 @@ module Unicorn::Launcher # We only start a new process group if we're not being reexecuted # and inheriting file descriptors from our parent - if ENV['UNICORN_FD'] - exit if fork - Process.setsid - exit if fork - else + unless ENV['UNICORN_FD'] # grandparent - reads pipe, exits when master is ready # \_ parent - exits immediately ASAP # \_ unicorn master - writes to pipe when ready -- cgit v1.2.3-24-ge0c7 From 2a8c4bea2c39d0a551feb79cb471171cf96a55db Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Tue, 13 Jul 2010 08:57:37 +0000 Subject: SIGHUP deals w/ dual master pid path scenario As described in our SIGNALS documentation, sending SIGHUP to the old master (to respawn SIGWINCH-ed children) while the new master (spawned from SIGUSR2) is active is useful for backing out of an upgrade before sending SIGQUIT to the new master. Unfortunately, the SIGHUP signal to the old master will cause the ".oldbin" pid file to be reset to the non-".oldbin" version and thus attempt to clobber the pid file in use by the to-be-terminated new master process. Thanks to the previous commit to prevent redaemonization in the new master, the old master can reliably detect if the new master is active while it is reloading the config file. Thanks to Lawrence Pit for discovering this bug. 
ref: http://mid.gmane.org/4C3BEACF.7040301@gmail.com (cherry picked from commit c13bec3449396b21795966101367838161612d61) --- lib/unicorn.rb | 5 ++ t/pid.ru | 3 ++ t/t0008-back_out_of_upgrade.sh | 110 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 118 insertions(+) create mode 100644 t/pid.ru create mode 100755 t/t0008-back_out_of_upgrade.sh diff --git a/lib/unicorn.rb b/lib/unicorn.rb index a7b0646..cbb5520 100644 --- a/lib/unicorn.rb +++ b/lib/unicorn.rb @@ -312,6 +312,11 @@ module Unicorn if path if x = valid_pid?(path) return path if pid && path == pid && x == $$ + if x == reexec_pid && pid =~ /\.oldbin\z/ + logger.warn("will not set pid=#{path} while reexec-ed "\ + "child is running PID:#{x}") + return + end raise ArgumentError, "Already running on PID:#{x} " \ "(or pid=#{path} is stale)" end diff --git a/t/pid.ru b/t/pid.ru new file mode 100644 index 0000000..f5fd31f --- /dev/null +++ b/t/pid.ru @@ -0,0 +1,3 @@ +use Rack::ContentLength +use Rack::ContentType, "text/plain" +run lambda { |env| [ 200, {}, [ "#$$\n" ] ] } diff --git a/t/t0008-back_out_of_upgrade.sh b/t/t0008-back_out_of_upgrade.sh new file mode 100755 index 0000000..96d4057 --- /dev/null +++ b/t/t0008-back_out_of_upgrade.sh @@ -0,0 +1,110 @@ +#!/bin/sh +. 
./test-lib.sh +t_plan 13 "backout of USR2 upgrade" + +worker_wait_start () { + test xSTART = x"$(cat $fifo)" + unicorn_pid=$(cat $pid) +} + +t_begin "setup and start" && { + unicorn_setup + rm -f $pid.oldbin + +cat >> $unicorn_config </dev/null + do + i=$(( $i + 1 )) + test $i -lt 600 || die "timed out" + sleep 1 + done +} + +t_begin "capture pid of new worker" && { + new_worker_pid=$(curl -sSf http://$listen/) +} + +t_begin "reload old master process" && { + kill -HUP $orig_master_pid + worker_wait_start +} + +t_begin "gracefully kill new master and ensure it dies" && { + kill -QUIT $new_master_pid + i=0 + while kill -0 $new_worker_pid 2>/dev/null + do + i=$(( $i + 1 )) + test $i -lt 600 || die "timed out" + sleep 1 + done +} + +t_begin "ensure $pid.oldbin does not exist" && { + i=0 + while test -s $pid.oldbin + do + i=$(( $i + 1 )) + test $i -lt 600 || die "timed out" + sleep 1 + done + while ! test -s $pid + do + i=$(( $i + 1 )) + test $i -lt 600 || die "timed out" + sleep 1 + done +} + +t_begin "ensure $pid is correct" && { + cur_master_pid=$(cat $pid) + test $orig_master_pid -eq $cur_master_pid +} + +t_begin "killing succeeds" && { + kill $orig_master_pid +} + +dbgcat r_err + +t_done -- cgit v1.2.3-24-ge0c7 From a965c0bb48d5b92373f939865212641d810c97d7 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Tue, 13 Jul 2010 12:54:26 -0700 Subject: unicorn 1.0.1 - bugfixes only The first maintenance release of 1.0.x, this release is primarily to fix a long-standing bug where the original PID file is not restored when rolling back from a USR2 upgrade. Presumably most upgrades aren't rolled back, so it took over a year to notice this issue. Thanks to Lawrence Pit for discovering and reporting this issue. There is also a pedantic TeeInput bugfix which shouldn't affect real apps from the 1.1.x series and a test case fix for OSX, too. 
--- GIT-VERSION-GEN | 2 +- lib/unicorn/const.rb | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN index 88b943a..7d0c7ed 100755 --- a/GIT-VERSION-GEN +++ b/GIT-VERSION-GEN @@ -1,7 +1,7 @@ #!/bin/sh GVF=GIT-VERSION-FILE -DEF_VER=v1.0.0.GIT +DEF_VER=v1.0.1.GIT LF=' ' diff --git a/lib/unicorn/const.rb b/lib/unicorn/const.rb index d3ccead..51a8a3b 100644 --- a/lib/unicorn/const.rb +++ b/lib/unicorn/const.rb @@ -8,8 +8,8 @@ module Unicorn # Symbols did not really improve things much compared to constants. module Const - # The current version of Unicorn, currently 1.0.0 - UNICORN_VERSION="1.0.0" + # The current version of Unicorn, currently 1.0.1 + UNICORN_VERSION="1.0.1" DEFAULT_HOST = "0.0.0.0" # default TCP listen host address DEFAULT_PORT = 8080 # default TCP listen port -- cgit v1.2.3-24-ge0c7 From d1818d26b0ca491dac861f3c2d9e249b665269a4 Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Tue, 13 Jul 2010 13:01:55 -0700 Subject: unicorn 1.1.2 - fixing upgrade rollbacks This release fixes a long-standing bug where the original PID file is not restored when rolling back from a USR2 upgrade. Presumably most upgrades aren't rolled back, so it took over a year to notice this issue. Thanks to Lawrence Pit for discovering and reporting this issue. --- GIT-VERSION-GEN | 2 +- lib/unicorn/const.rb | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN index 432f3c0..d9eeb42 100755 --- a/GIT-VERSION-GEN +++ b/GIT-VERSION-GEN @@ -1,7 +1,7 @@ #!/bin/sh GVF=GIT-VERSION-FILE -DEF_VER=v1.1.1.GIT +DEF_VER=v1.1.2.GIT LF=' ' diff --git a/lib/unicorn/const.rb b/lib/unicorn/const.rb index 52fe201..f36884c 100644 --- a/lib/unicorn/const.rb +++ b/lib/unicorn/const.rb @@ -8,8 +8,8 @@ module Unicorn # Symbols did not really improve things much compared to constants. 
module Const - # The current version of Unicorn, currently 1.1.1 - UNICORN_VERSION="1.1.1" + # The current version of Unicorn, currently 1.1.2 + UNICORN_VERSION="1.1.2" DEFAULT_HOST = "0.0.0.0" # default TCP listen host address DEFAULT_PORT = 8080 # default TCP listen port -- cgit v1.2.3-24-ge0c7