-rw-r--r--  .CHANGELOG.old (renamed from CHANGELOG)  | 5
-rw-r--r--  .document  | 14
-rw-r--r--  .gitignore  | 9
-rw-r--r--  .mailmap  | 26
-rw-r--r--  CONTRIBUTORS  | 5
-rw-r--r--  COPYING  | 339
-rw-r--r--  DESIGN  | 22
-rw-r--r--  Documentation/.gitignore  | 5
-rw-r--r--  Documentation/GNUmakefile  | 30
-rw-r--r--  Documentation/unicorn.1.txt  | 171
-rw-r--r--  Documentation/unicorn_rails.1.txt  | 167
-rw-r--r--  FAQ  | 45
-rwxr-xr-x  GIT-VERSION-GEN  | 40
-rw-r--r--  GNUmakefile  | 220
-rw-r--r--  HACKING  | 116
-rw-r--r--  ISSUES  | 36
-rw-r--r--  KNOWN_ISSUES  | 43
-rw-r--r--  LICENSE  | 80
-rw-r--r--  Manifest  | 131
-rw-r--r--  PHILOSOPHY  | 7
-rw-r--r--  README  | 71
-rw-r--r--  Rakefile  | 225
-rw-r--r--  SIGNALS  | 25
-rw-r--r--  TODO  | 19
-rw-r--r--  TUNING  | 9
-rwxr-xr-x  bin/unicorn  | 23
-rwxr-xr-x  bin/unicorn_rails  | 31
-rw-r--r--  examples/echo.ru  | 27
-rw-r--r--  examples/git.ru  | 13
-rw-r--r--  examples/init.sh  | 1
-rw-r--r--  examples/nginx.conf  | 139
-rw-r--r--  examples/unicorn.conf.rb  | 78
-rw-r--r--  ext/unicorn/http11/ext_help.h  | 12
-rw-r--r--  ext/unicorn/http11/extconf.rb  | 5
-rw-r--r--  ext/unicorn/http11/http11.c  | 450
-rw-r--r--  ext/unicorn/http11/http11_parser.h  | 1289
-rw-r--r--  ext/unicorn/http11/http11_parser.rl  | 158
-rw-r--r--  ext/unicorn_http/c_util.h  | 107
-rw-r--r--  ext/unicorn_http/common_field_optimization.h  | 111
-rw-r--r--  ext/unicorn_http/ext_help.h  | 77
-rw-r--r--  ext/unicorn_http/extconf.rb  | 14
-rw-r--r--  ext/unicorn_http/global_variables.h  | 91
-rw-r--r--  ext/unicorn_http/unicorn_http.rl  | 716
-rw-r--r--  ext/unicorn_http/unicorn_http_common.rl (renamed from ext/unicorn/http11/http11_parser_common.rl)  | 33
-rw-r--r--  lib/unicorn.rb  | 452
-rw-r--r--  lib/unicorn/app/exec_cgi.rb  | 63
-rw-r--r--  lib/unicorn/app/inetd.rb  | 109
-rw-r--r--  lib/unicorn/app/old_rails.rb  | 6
-rw-r--r--  lib/unicorn/app/old_rails/static.rb  | 40
-rw-r--r--  lib/unicorn/cgi_wrapper.rb  | 18
-rw-r--r--  lib/unicorn/configurator.rb  | 237
-rw-r--r--  lib/unicorn/const.rb  | 18
-rw-r--r--  lib/unicorn/http_request.rb  | 124
-rw-r--r--  lib/unicorn/http_response.rb  | 53
-rw-r--r--  lib/unicorn/launcher.rb  | 48
-rw-r--r--  lib/unicorn/socket_helper.rb  | 36
-rw-r--r--  lib/unicorn/tee_input.rb  | 217
-rw-r--r--  lib/unicorn/util.rb  | 53
-rw-r--r--  local.mk.sample  | 50
-rw-r--r--  setup.rb  | 1
-rwxr-xr-x  test/aggregate.rb  | 2
-rw-r--r--  test/benchmark/README  | 5
-rw-r--r--  test/benchmark/big_request.rb  | 44
-rw-r--r--  test/benchmark/request.rb  | 56
-rw-r--r--  test/benchmark/response.rb  | 30
-rw-r--r--  test/exec/test_exec.rb  | 364
-rw-r--r--  test/rails/app-1.2.3/app/controllers/application.rb  | 2
-rw-r--r--  test/rails/app-1.2.3/app/controllers/foo_controller.rb  | 2
-rw-r--r--  test/rails/app-1.2.3/app/helpers/application_helper.rb  | 2
-rw-r--r--  test/rails/app-1.2.3/config/boot.rb  | 2
-rw-r--r--  test/rails/app-1.2.3/config/environment.rb  | 2
-rw-r--r--  test/rails/app-1.2.3/config/environments/development.rb  | 2
-rw-r--r--  test/rails/app-1.2.3/config/environments/production.rb  | 2
-rw-r--r--  test/rails/app-1.2.3/config/routes.rb  | 2
-rw-r--r--  test/rails/app-2.0.2/app/controllers/application.rb  | 2
-rw-r--r--  test/rails/app-2.0.2/app/controllers/foo_controller.rb  | 2
-rw-r--r--  test/rails/app-2.0.2/app/helpers/application_helper.rb  | 2
-rw-r--r--  test/rails/app-2.0.2/config/boot.rb  | 2
-rw-r--r--  test/rails/app-2.0.2/config/environment.rb  | 2
-rw-r--r--  test/rails/app-2.0.2/config/environments/development.rb  | 2
-rw-r--r--  test/rails/app-2.0.2/config/environments/production.rb  | 2
-rw-r--r--  test/rails/app-2.0.2/config/routes.rb  | 2
-rw-r--r--  test/rails/app-2.1.2/app/controllers/application.rb  | 2
-rw-r--r--  test/rails/app-2.1.2/app/controllers/foo_controller.rb  | 2
-rw-r--r--  test/rails/app-2.1.2/app/helpers/application_helper.rb  | 2
-rw-r--r--  test/rails/app-2.1.2/config/boot.rb  | 2
-rw-r--r--  test/rails/app-2.1.2/config/environment.rb  | 2
-rw-r--r--  test/rails/app-2.1.2/config/environments/development.rb  | 2
-rw-r--r--  test/rails/app-2.1.2/config/environments/production.rb  | 2
-rw-r--r--  test/rails/app-2.1.2/config/routes.rb  | 2
-rw-r--r--  test/rails/app-2.2.2/app/controllers/application.rb  | 2
-rw-r--r--  test/rails/app-2.2.2/app/controllers/foo_controller.rb  | 2
-rw-r--r--  test/rails/app-2.2.2/app/helpers/application_helper.rb  | 2
-rw-r--r--  test/rails/app-2.2.2/config/boot.rb  | 2
-rw-r--r--  test/rails/app-2.2.2/config/environment.rb  | 2
-rw-r--r--  test/rails/app-2.2.2/config/environments/development.rb  | 2
-rw-r--r--  test/rails/app-2.2.2/config/environments/production.rb  | 2
-rw-r--r--  test/rails/app-2.2.2/config/routes.rb  | 2
-rw-r--r--  test/rails/app-2.3.5/.gitignore (renamed from test/rails/app-2.3.2.1/.gitignore)  | 0
-rw-r--r--  test/rails/app-2.3.5/Rakefile (renamed from test/rails/app-2.3.2.1/Rakefile)  | 0
-rw-r--r--  test/rails/app-2.3.5/app/controllers/application_controller.rb (renamed from test/rails/app-2.3.2.1/app/controllers/application_controller.rb)  | 2
-rw-r--r--  test/rails/app-2.3.5/app/controllers/foo_controller.rb (renamed from test/rails/app-2.3.2.1/app/controllers/foo_controller.rb)  | 2
-rw-r--r--  test/rails/app-2.3.5/app/helpers/application_helper.rb (renamed from test/rails/app-2.3.2.1/app/helpers/application_helper.rb)  | 2
-rw-r--r--  test/rails/app-2.3.5/config/boot.rb (renamed from test/rails/app-2.3.2.1/config/boot.rb)  | 2
-rw-r--r--  test/rails/app-2.3.5/config/database.yml (renamed from test/rails/app-2.3.2.1/config/database.yml)  | 0
-rw-r--r--  test/rails/app-2.3.5/config/environment.rb (renamed from test/rails/app-2.3.2.1/config/environment.rb)  | 2
-rw-r--r--  test/rails/app-2.3.5/config/environments/development.rb (renamed from test/rails/app-2.3.2.1/config/environments/development.rb)  | 2
-rw-r--r--  test/rails/app-2.3.5/config/environments/production.rb (renamed from test/rails/app-2.3.2.1/config/environments/production.rb)  | 2
-rw-r--r--  test/rails/app-2.3.5/config/routes.rb (renamed from test/rails/app-2.3.2.1/config/routes.rb)  | 2
-rw-r--r--  test/rails/app-2.3.5/db/.gitignore (renamed from test/rails/app-2.3.2.1/db/.gitignore)  | 0
-rw-r--r--  test/rails/app-2.3.5/log/.gitignore (renamed from test/rails/app-2.3.2.1/log/.gitignore)  | 0
-rw-r--r--  test/rails/app-2.3.5/public/404.html (renamed from test/rails/app-2.3.2.1/public/404.html)  | 0
-rw-r--r--  test/rails/app-2.3.5/public/500.html (renamed from test/rails/app-2.3.2.1/public/500.html)  | 0
-rw-r--r--  test/rails/app-2.3.5/public/x.txt  | 1
-rw-r--r--  test/rails/test_rails.rb  | 55
-rw-r--r--  test/test_helper.rb  | 36
-rw-r--r--  test/unit/test_configurator.rb  | 65
-rw-r--r--  test/unit/test_http_parser.rb  | 304
-rw-r--r--  test/unit/test_http_parser_ng.rb  | 420
-rw-r--r--  test/unit/test_request.rb  | 43
-rw-r--r--  test/unit/test_response.rb  | 13
-rw-r--r--  test/unit/test_server.rb  | 132
-rw-r--r--  test/unit/test_signals.rb  | 31
-rw-r--r--  test/unit/test_socket_helper.rb  | 16
-rw-r--r--  test/unit/test_tee_input.rb  | 229
-rw-r--r--  test/unit/test_upload.rb  | 236
-rw-r--r--  test/unit/test_util.rb  | 7
-rw-r--r--  unicorn.gemspec  | 53
128 files changed, 5895 insertions, 3087 deletions
diff --git a/CHANGELOG b/.CHANGELOG.old
index 54b02ff..8326f53 100644
--- a/CHANGELOG
+++ b/.CHANGELOG.old
@@ -1,3 +1,8 @@
+v0.91.0 - HTTP/0.9 support, multiline header support, small fixes
+v0.90.0 - switch chunking+trailer handling to Ragel, v0.8.4 fixes
+v0.9.2 - Ruby 1.9.2 preview1 compatibility
+v0.9.1 - FD_CLOEXEC portability fix (v0.8.2 port)
+v0.9.0 - bodies: "Transfer-Encoding: chunked", rewindable streaming
 v0.8.4 - pass through unknown HTTP status codes
 v0.8.3 - Ruby 1.9.2 preview1 compatibility
 v0.8.2 - socket handling bugfixes and usability tweaks
diff --git a/.document b/.document
index e8ef088..2156af2 100644
--- a/.document
+++ b/.document
@@ -1,14 +1,18 @@
+FAQ
 README
 TUNING
 PHILOSOPHY
+HACKING
 DESIGN
 CONTRIBUTORS
 LICENSE
 SIGNALS
+KNOWN_ISSUES
 TODO
-bin/unicorn
-bin/unicorn_rails
+NEWS
+ChangeLog
 lib
-ext/**/*.c
-ext/**/*.rl
-ext/**/*.h
+ext/unicorn_http/unicorn_http.c
+unicorn.1
+unicorn_rails.1
+ISSUES
diff --git a/.gitignore b/.gitignore
index 8588a5c..ee48676 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,13 +1,20 @@
 *.bundle
 *.log
 *.so
+*.rbc
 .DS_Store
 /.config
 /InstalledFiles
 /doc
 /local.mk
 /test/install-*
-ext/unicorn/http11/Makefile
+ext/unicorn_http/Makefile
+ext/unicorn_http/unicorn_http.c
 log/
 pkg/
 /vendor
+/NEWS
+/ChangeLog
+/.manifest
+/GIT-VERSION-FILE
+/man
diff --git a/.mailmap b/.mailmap
new file mode 100644
index 0000000..7b8974f
--- /dev/null
+++ b/.mailmap
@@ -0,0 +1,26 @@
+# This list is used by "git shortlog" to fixup the ugly faux email addresses
+# "<username@UGLY-UUID>" that the "git svn" tool creates by default.
+
+# Eric Wong started this .mailmap file (and is the maintainer of it...)
+Eric Wong <normalperson@yhbt.net> normalperson <normalperson@19e92222-5c0b-0410-8929-a290d50e31e9>
+
+# This also includes all the Mongrel contributors that committed to the
+# Rubyforge SVN repo.  Some real names were looked up on rubyforge.org
+# (http://rubyforge.org/users/$user), but we're not going to expose any email
+# addresses here without their permission.
+
+Austin Godber godber <godber> godber <godber@19e92222-5c0b-0410-8929-a290d50e31e9>
+Bradley Taylor <bktaylor> bktaylor <bktaylor@19e92222-5c0b-0410-8929-a290d50e31e9>
+Ezra Zygmuntowicz <ezmobius> ezmobius <ezmobius@19e92222-5c0b-0410-8929-a290d50e31e9>
+Filipe Lautert <filipe> filipe <filipe@19e92222-5c0b-0410-8929-a290d50e31e9>
+Luis Lavena <luislavena> luislavena <luislavena@19e92222-5c0b-0410-8929-a290d50e31e9>
+Matt Pelletier <bricolage> bricolage <bricolage@19e92222-5c0b-0410-8929-a290d50e31e9>
+MenTaLguY <mental> mental <mental@19e92222-5c0b-0410-8929-a290d50e31e9>
+Nick Sieger <nicksieger> nicksieger <nicksieger@19e92222-5c0b-0410-8929-a290d50e31e9>
+Rick Olson <technoweenie> technoweenie <technoweenie@19e92222-5c0b-0410-8929-a290d50e31e9>
+Wayne E. Seguin <wayneeseguin> wayneeseguin <wayneeseguin@19e92222-5c0b-0410-8929-a290d50e31e9>
+Zed A. Shaw <zedshaw> <zedshaw@19e92222-5c0b-0410-8929-a290d50e31e9>
+why the lucky stiff <whytheluckystiff> <why@19e92222-5c0b-0410-8929-a290d50e31e9>
+
+# Evan had his email address in the git history we branched from anyways
+Evan Weaver <eweaver@twitter.com> evanweaver <evanweaver@19e92222-5c0b-0410-8929-a290d50e31e9>
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
index 5a6fa4d..97b891c 100644
--- a/CONTRIBUTORS
+++ b/CONTRIBUTORS
@@ -1,5 +1,8 @@
 Unicorn developers:
 * Eric Wong
+* Suraj N. Kurapati
+* Andrey Stikheev
+* Wayne Larsen
 * ... (help wanted)
 
 We would like to thank following folks for helping make Unicorn possible:
@@ -14,7 +17,7 @@ The original Mongrel contributors:
 
 * Luis Lavena
 * Wilson Bilkovich
-* Why the Lucky Stiff
+* why the lucky stiff
 * Dan Kubb
 * MenTaLguY
 * Filipe Lautert
diff --git a/COPYING b/COPYING
new file mode 100644
index 0000000..d511905
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,339 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+                            NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/DESIGN b/DESIGN
index 3543d9e..1d195c7 100644
--- a/DESIGN
+++ b/DESIGN
@@ -7,12 +7,12 @@
   all clients down, just one.  Only UNIX-like systems supporting
   fork() and file descriptor inheritance are supported.
 
-* The Ragel->C HTTP parser is taken from Mongrel.  This is the
+* The Ragel+C HTTP parser is taken from Mongrel.  This is the
   only non-Ruby part and there are no plans to add any more
   non-Ruby components.
 
-* All HTTP protocol parsing and I/O is done just like Mongrel:
-    1. read/parse HTTP request in full
+* All HTTP protocol parsing and I/O is done much like Mongrel:
+    1. read/parse HTTP request headers in full
     2. call Rack application
     3. write HTTP response back to the client
 
@@ -55,6 +55,22 @@
   applications that are running all the time since worker processes
   will only select()/accept() outside of the application dispatch.
 
+* Additionally, thundering herds are much smaller than with
+  configurations using existing prefork servers.  Process counts should
+  only be scaled to backend resources, _never_ to the number of expected
+  clients, as is typical with blocking prefork servers.  So while we've
+  seen instances of popular prefork servers configured to run many
+  hundreds of worker processes, Unicorn deployments are typically only
+  2-4 processes per-core.
+
+* On-demand scaling of worker processes never happens automatically.
+  Again, Unicorn is concerned about scaling to backend limits and should
+  never be configured in a fashion where it could be waiting on slow
+  clients.  For extremely rare circumstances, we provide TTIN and TTOU
+  signal handlers to increment/decrement your process counts without
+  reloading.  Think of it as driving a car with manual transmission:
+  you have a lot more control if you know what you're doing.
+
 * Blocking I/O is used for clients.  This allows a simpler code path
   to be followed within the Ruby interpreter and fewer syscalls.
   Applications that use threads continue to work if Unicorn
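[Editor's note: the worker-sizing and TTIN/TTOU points above map directly onto the
Unicorn::Configurator DSL.  A minimal, illustrative config sketch; the file path and
all numbers are assumptions, not values taken from this patch:

    # config/unicorn.conf.rb -- illustrative values only
    worker_processes 4                          # size to backend limits (DB, RAM), never to client count
    listen "/tmp/unicorn.sock", :backlog => 64  # a modest backlog keeps failover to another box practical
    timeout 30                                  # master kills workers stuck longer than 30s
    preload_app true                            # load the app once in the master, then fork workers

Worker counts can still be nudged at runtime with TTIN/TTOU as described above; the
config file only sets the starting point.]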
diff --git a/Documentation/.gitignore b/Documentation/.gitignore
new file mode 100644
index 0000000..46679d6
--- /dev/null
+++ b/Documentation/.gitignore
@@ -0,0 +1,5 @@
+*.1
+*.5
+*.7
+*.gz
+*.html
diff --git a/Documentation/GNUmakefile b/Documentation/GNUmakefile
new file mode 100644
index 0000000..fcda3c4
--- /dev/null
+++ b/Documentation/GNUmakefile
@@ -0,0 +1,30 @@
+all::
+
+PANDOC = pandoc
+PANDOC_OPTS = -f markdown --email-obfuscation=none --sanitize-html
+pandoc = $(PANDOC) $(PANDOC_OPTS)
+pandoc_html = $(pandoc) --toc -t html --no-wrap
+
+man1 := $(addsuffix .1,unicorn unicorn_rails)
+html1 := $(addsuffix .html,$(man1))
+
+all:: html man
+
+html: $(html1)
+man: $(man1)
+
+install-html: html
+        mkdir -p ../doc/man1
+        install -m 644 $(html1) ../doc/man1
+
+install-man: man
+        mkdir -p ../man/man1
+        install -m 644 $(man1) ../man/man1
+
+%.1: %.1.txt
+        $(pandoc) -s -t man < $< > $@+ && mv $@+ $@
+%.1.html: %.1.txt
+        $(pandoc_html) < $< > $@+ && mv $@+ $@
+
+clean::
+        $(RM) $(man1) $(html1)
diff --git a/Documentation/unicorn.1.txt b/Documentation/unicorn.1.txt
new file mode 100644
index 0000000..e05a916
--- /dev/null
+++ b/Documentation/unicorn.1.txt
@@ -0,0 +1,171 @@
+% UNICORN(1) Unicorn User Manual
+% The Unicorn Community <mongrel-unicorn@rubyforge.org>
+% September 15, 2009
+
+# NAME
+
+unicorn - a rackup-like command to launch the Unicorn HTTP server
+
+# SYNOPSIS
+
+unicorn [-c CONFIG_FILE] [-E RACK_ENV] [-D] [RACKUP_FILE]
+
+# DESCRIPTION
+
+A rackup(1)-like command to launch Rack applications using Unicorn.
+It is expected to be started in your application root (APP_ROOT),
+but the "working_directory" directive may be used in the CONFIG_FILE.
+
+While Unicorn takes a myriad of command-line options for
+compatibility with ruby(1) and rackup(1), it is recommended to stick
+to the few command-line options specified in the SYNOPSIS and use
+the CONFIG_FILE as much as possible.
+
+# RACKUP FILE
+
+This defaults to \"config.ru\" in APP_ROOT.  It should be the same
+file used by rackup(1) and other Rack launchers, it uses the
+*Rack::Builder* DSL.
+
+Embedded command-line options are mostly parsed for compatibility
+with rackup(1) but strongly discouraged.
+
+# UNICORN OPTIONS
+-c, \--config-file CONFIG_FILE
+:   Path to the Unicorn-specific config file.  The config file is
+    implemented as a Ruby DSL, so Ruby code may be executed.
+    See the RDoc/ri for the *Unicorn::Configurator* class for the full
+    list of directives available from the DSL.
+
+-D, \--daemonize
+:   Run daemonized in the background.  The process is detached from
+    the controlling terminal and stdin is redirected to "/dev/null".
+    Unlike many common UNIX daemons, we do not chdir to \"/\"
+    upon daemonization to allow more control over the startup/upgrade
+    process.
+    Unless specified in the CONFIG_FILE, stderr and stdout will
+    also be redirected to "/dev/null".
+
+-E, \--env RACK_ENV
+:   Run under the given RACK_ENV.  See the RACK ENVIRONMENT section
+    for more details.
+
+-l, \--listen ADDRESS
+:   Listens on a given ADDRESS.  ADDRESS may be in the form of
+    HOST:PORT or PATH, HOST:PORT is taken to mean a TCP socket
+    and PATH is meant to be a path to a UNIX domain socket.
+    Defaults to "0.0.0.0:8080" (all addresses on TCP port 8080).
+    For production deployments, specifying the "listen" directive in
+    CONFIG_FILE is recommended as it allows fine-tuning of socket
+    options.
+
+# RACKUP COMPATIBILITY OPTIONS
+-o, \--host HOST
+:   Listen on a TCP socket belonging to HOST, default is
+    "0.0.0.0" (all addresses).
+    If specified multiple times on the command-line, only the
+    last-specified value takes effect.
+    This option only exists for compatibility with the rackup(1) command,
+    use of "-l"/"\--listen" switch is recommended instead.
+
+-p, \--port PORT
+:   Listen on the specified TCP PORT, default is 8080.
+    If specified multiple times on the command-line, only the last-specified
+    value takes effect.
+    This option only exists for compatibility with the rackup(1) command,
+    use of "-l"/"\--listen" switch is recommended instead.
+
+-s, \--server SERVER
+:   No-op, this exists only for compatibility with rackup(1).
+
+# RUBY OPTIONS
+-e, \--eval LINE
+:   Evaluate a LINE of Ruby code.  This evaluation happens
+    immediately as the command-line is being parsed.
+
+-d, \--debug
+:   Turn on debug mode, the $DEBUG variable is set to true.
+
+-w, \--warn
+:   Turn on verbose warnings, the $VERBOSE variable is set to true.
+
+-I, \--include PATH
+:   specify $LOAD_PATH.  PATH will be prepended to $LOAD_PATH.
+    The \':\' character may be used to delimit multiple directories.
+    This directive may be used more than once.  Modifications to
+    $LOAD_PATH take place immediately and in the order they were
+    specified on the command-line.
+
+-r, \--require LIBRARY
+:   require a specified LIBRARY before executing the application.  The
+    \"require\" statement will be executed immediately and in the order
+    they were specified on the command-line.
+
+# SIGNALS
+
+The following UNIX signals may be sent to the master process:
+
+* HUP - reload config file, app, and gracefully restart all workers
+* INT/TERM - quick shutdown, kills all workers immediately
+* QUIT - graceful shutdown, waits for workers to finish their
+  current request before exiting.
+* USR1 - reopen all logs owned by the master and all workers
+  See Unicorn::Util.reopen_logs for what is considered a log.
+* USR2 - reexecute the running binary.  A separate QUIT
+  should be sent to the original process once the child is verified to
+  be up and running.
+* WINCH - gracefully stops workers but keeps the master running.
+  This will only work for daemonized processes.
+* TTIN - increment the number of worker processes by one
+* TTOU - decrement the number of worker processes by one
+
+See the [SIGNALS][4] document for full description of all signals
+used by Unicorn.
+
+#  RACK ENVIRONMENT
+
+Accepted values of RACK_ENV and the middleware they automatically load
+(outside of RACKUP_FILE) are exactly as those in rackup(1):
+
+* development - loads Rack::CommonLogger, Rack::ShowExceptions, and
+                Rack::Lint middleware
+* deployment  - loads Rack::CommonLogger middleware
+* none        - loads no middleware at all, relying
+                entirely on RACKUP_FILE
+
+All unrecognized values for RACK_ENV are assumed to be
+"none".  Production deployments are strongly encouraged to use
+"deployment" or "none" for maximum performance.
+
+As of Unicorn 0.94.0, RACK_ENV is exported as a process-wide environment
+variable as well.  While not currently a part of the Rack specification as
+of Rack 1.0.1, this has become a de facto standard in the Rack world.
+
+Note that the Rack::ContentLength and Rack::Chunked middlewares
+are never loaded by default.  If needed, they should be
+individually specified in the RACKUP_FILE; some frameworks do
+not require them.
+
+# ENVIRONMENT VARIABLES
+
+The RACK_ENV variable is set by the aforementioned \-E switch.
+All application or library-specific environment variables (e.g. TMPDIR)
+may always be set in the Unicorn CONFIG_FILE in addition to the spawning
+shell.  When transparently upgrading Unicorn, all environment variables
+set in the old master process are inherited by the new master process.
+Unicorn only uses (and will overwrite) the UNICORN_FD environment
+variable internally when doing transparent upgrades.
+
+# SEE ALSO
+
+* unicorn_rails(1)
+* *Rack::Builder* ri/RDoc
+* *Unicorn::Configurator* ri/RDoc
+* [Unicorn RDoc][1]
+* [Rack RDoc][2]
+* [Rackup HowTo][3]
+
+[1]: http://unicorn.bogomips.org/
+[2]: http://rack.rubyforge.org/doc/
+[3]: http://wiki.github.com/rack/rack/tutorial-rackup-howto
+[4]: http://unicorn.bogomips.org/SIGNALS.html
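[Editor's note: the USR2/QUIT sequence described under SIGNALS is easy to drive from a
small script.  A rough Ruby sketch; the PID file path and the fixed sleep are assumptions
for illustration only:

    # upgrade.rb -- naive sketch of the USR2 -> QUIT upgrade dance
    old_pid = File.read("/tmp/unicorn.pid").to_i
    Process.kill("USR2", old_pid)   # re-execute the running binary; a new master is forked
    sleep 5                         # crude wait; verify the new master is serving before proceeding
    Process.kill("QUIT", old_pid)   # gracefully retire the old master and its workers

As the SIGNALS list notes, QUIT should only be sent once the new process has been
verified to be up and running.]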
diff --git a/Documentation/unicorn_rails.1.txt b/Documentation/unicorn_rails.1.txt
new file mode 100644
index 0000000..d2d8190
--- /dev/null
+++ b/Documentation/unicorn_rails.1.txt
@@ -0,0 +1,167 @@
+% UNICORN_RAILS(1) Unicorn User Manual
+% The Unicorn Community <mongrel-unicorn@rubyforge.org>
+% September 17, 2009
+
+# NAME
+
+unicorn_rails - a rackup-like command to launch the Unicorn HTTP server
+
+# SYNOPSIS
+
+unicorn_rails [-c CONFIG_FILE] [-E RAILS_ENV] [-D] [RACKUP_FILE]
+
+# DESCRIPTION
+
+A rackup(1)-like command to launch Rails applications using Unicorn.  It
+is expected to be started in your Rails application root (RAILS_ROOT),
+but the "working_directory" directive may be used in the CONFIG_FILE.
+
+The outward interface resembles rackup(1), the internals and default
+middleware loading is designed like the `script/server` command
+distributed with Rails.
+
+While Unicorn takes a myriad of command-line options for compatibility
+with ruby(1) and rackup(1), it is recommended to stick to the few
+command-line options specified in the SYNOPSIS and use the CONFIG_FILE
+as much as possible.
+
+# UNICORN OPTIONS
+-c, \--config-file CONFIG_FILE
+:   Path to the Unicorn-specific config file.  The config file is
+    implemented as a Ruby DSL, so Ruby code may be executed.
+    See the RDoc/ri for the *Unicorn::Configurator* class for the
+    full list of directives available from the DSL.
+
+-D, \--daemonize
+:   Run daemonized in the background.  The process is detached from
+    the controlling terminal and stdin is redirected to "/dev/null".
+    Unlike many common UNIX daemons, we do not chdir to \"/\"
+    upon daemonization to allow more control over the startup/upgrade
+    process.
+    Unless specified in the CONFIG_FILE, stderr and stdout will
+    also be redirected to "/dev/null".
+    Daemonization will _skip_ loading of the *Rails::Rack::LogTailer*
+    middleware under Rails \>\= 2.3.x.
+    By default, unicorn\_rails(1) will create a PID file in
+    _\"RAILS\_ROOT/tmp/pids/unicorn.pid\"_.  You may override this
+    by specifying the "pid" directive to override this Unicorn config file.
+
+-E, \--env RAILS_ENV
+:   Run under the given RAILS_ENV.  This sets the RAILS_ENV environment
+    variable.  Acceptable values are exactly those you expect in your Rails
+    application, typically "development" or "production".
+
+-l, \--listen ADDRESS
+:   Listens on a given ADDRESS.  ADDRESS may be in the form of
+    HOST:PORT or PATH, HOST:PORT is taken to mean a TCP socket
+    and PATH is meant to be a path to a UNIX domain socket.
+    Defaults to "0.0.0.0:8080" (all addresses on TCP port 8080).
+    For production deployments, specifying the "listen" directive in
+    CONFIG_FILE is recommended as it allows fine-tuning of socket
+    options.
+
+# RACKUP COMPATIBILITY OPTIONS
+-o, \--host HOST
+:   Listen on a TCP socket belonging to HOST, default is
+    "0.0.0.0" (all addresses).
+    If specified multiple times on the command-line, only the
+    last-specified value takes effect.
+    This option only exists for compatibility with the rackup(1) command,
+    use of "-l"/"\--listen" switch is recommended instead.
+
+-p, \--port PORT
+:   Listen on the specified TCP PORT, default is 8080.
+    If specified multiple times on the command-line, only the last-specified
+    value takes effect.
+    This option only exists for compatibility with the rackup(1) command,
+    use of "-l"/"\--listen" switch is recommended instead.
+
+\--path PATH
+:   Mounts the Rails application at the given PATH (instead of "/").
+    This is equivalent to setting the RAILS_RELATIVE_URL_ROOT
+    environment variable.  This is only supported under Rails 2.3
+    or later at the moment.
+
+# RUBY OPTIONS
+-e, \--eval LINE
+:   Evaluate a LINE of Ruby code.  This evaluation happens
+    immediately as the command-line is being parsed.
+
+-d, \--debug
+:   Turn on debug mode, the $DEBUG variable is set to true.
+    For Rails \>\= 2.3.x, this loads the *Rails::Rack::Debugger*
+    middleware.
+
+-w, \--warn
+:   Turn on verbose warnings, the $VERBOSE variable is set to true.
+
+-I, \--include PATH
+:   specify $LOAD_PATH.  PATH will be prepended to $LOAD_PATH.
+    The \':\' character may be used to delimit multiple directories.
+    This directive may be used more than once.  Modifications to
+    $LOAD_PATH take place immediately and in the order they were
+    specified on the command-line.
+
+-r, \--require LIBRARY
+:   require a specified LIBRARY before executing the application.  The
+    \"require\" statement will be executed immediately and in the order
+    they were specified on the command-line.
+
+# RACKUP FILE
+
+This defaults to \"config.ru\" in RAILS_ROOT.  It should be the same
+file used by rackup(1) and other Rack launchers; it uses the
+*Rack::Builder* DSL.  Unlike many other Rack applications, RACKUP_FILE
+is completely _optional_ for Rails, but may be used to disable some
+of the default middleware for performance.
+
+Embedded command-line options are mostly parsed for compatibility
+with rackup(1) but strongly discouraged.
+
+# ENVIRONMENT VARIABLES
+
+The RAILS_ENV variable is set by the aforementioned \-E switch.  The
+RAILS_RELATIVE_URL_ROOT is set by the aforementioned \--path switch.
+Either of these variables may also be set in the shell or the Unicorn
+CONFIG_FILE.  All application or library-specific environment variables
+(e.g. TMPDIR, RAILS_ASSET_ID) may always be set in the Unicorn
+CONFIG_FILE in addition to the spawning shell.  When transparently
+upgrading Unicorn, all environment variables set in the old master
+process are inherited by the new master process.  Unicorn only uses (and
+will overwrite) the UNICORN_FD environment variable internally when
+doing transparent upgrades.
+
+# SIGNALS
+
+The following UNIX signals may be sent to the master process:
+
+* HUP - reload config file, app, and gracefully restart all workers
+* INT/TERM - quick shutdown, kills all workers immediately
+* QUIT - graceful shutdown, waits for workers to finish their
+  current request before exiting.
+* USR1 - reopen all logs owned by the master and all workers
+  See Unicorn::Util.reopen_logs for what is considered a log.
+* USR2 - reexecute the running binary.  A separate QUIT
+  should be sent to the original process once the child is verified to
+  be up and running.
+* WINCH - gracefully stops workers but keeps the master running.
+  This will only work for daemonized processes.
+* TTIN - increment the number of worker processes by one
+* TTOU - decrement the number of worker processes by one
+
+See the [SIGNALS][4] document for full description of all signals
+used by Unicorn.
+
+# SEE ALSO
+
+* unicorn(1)
+* *Rack::Builder* ri/RDoc
+* *Unicorn::Configurator* ri/RDoc
+* [Unicorn RDoc][1]
+* [Rack RDoc][2]
+* [Rackup HowTo][3]
+
+[1]: http://unicorn.bogomips.org/
+[2]: http://rack.rubyforge.org/doc/
+[3]: http://wiki.github.com/rack/rack/tutorial-rackup-howto
+[4]: http://unicorn.bogomips.org/SIGNALS.html
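[Editor's note: since the CONFIG_FILE is plain Ruby, the environment variables mentioned
above can be set there instead of (or in addition to) the spawning shell.  A hedged
sketch; the mount point and PID path are made-up examples:

    # In the Unicorn CONFIG_FILE (plain Ruby)
    ENV["RAILS_RELATIVE_URL_ROOT"] = "/myapp"   # same effect as the --path switch
    pid "tmp/pids/unicorn.pid"                  # matches the unicorn_rails default, shown for clarity]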
diff --git a/FAQ b/FAQ
new file mode 100644
index 0000000..4bb6b98
--- /dev/null
+++ b/FAQ
@@ -0,0 +1,45 @@
+= Frequently Asked Questions about Unicorn
+
+=== Why are my redirects going to "http" URLs when my site uses https?
+
+If your site is entirely behind https, then Rack applications that use
+"rack.url_scheme" can set the following in the Unicorn config file:
+
+  HttpRequest::DEFAULTS["rack.url_scheme"] = "https"
+
+For frameworks that do not use "rack.url_scheme", you can also
+try setting one or both of the following:
+
+  HttpRequest::DEFAULTS["HTTPS"] = "on"
+  HttpRequest::DEFAULTS["HTTP_X_FORWARDED_PROTO"] = "https"
+
+Otherwise, you can configure your proxy (nginx) to send the
+"X-Forwarded-Proto: https" header only for parts of the site that use
+https.  For nginx, you can do it with the following line in appropriate
+"location" blocks of your nginx config file:
+
+  proxy_set_header X-Forwarded-Proto https;
+
+=== Why are log messages from Unicorn unformatted when using Rails?
+
+Current versions of Rails unfortunately override the default Logger
+formatter.
+
+You can undo this behavior with the default logger in your Unicorn
+config file:
+
+  Configurator::DEFAULTS[:logger].formatter = Logger::Formatter.new
+
+Of course you can specify an entirely different logger as well
+with the "logger" directive described by Unicorn::Configurator.
+
+=== Why am I getting "connection refused"/502 errors under high load?
+
+Short answer: your application cannot keep up.
+
+You can increase the size of the :backlog parameter if your kernel
+supports a larger listen() queue, but keep in mind having a large listen
+queue makes failover to a different machine more difficult.
+
+See the TUNING and Unicorn::Configurator documents for more information
+on :backlog-related topics.
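[Editor's note: the :backlog mentioned in the last answer is passed through the "listen"
directive of the Unicorn config file.  A small sketch with an illustrative value; the
kernel's own limit (e.g. net.core.somaxconn on Linux) still caps what takes effect:

    # In the Unicorn CONFIG_FILE; 1024 is only an example value
    listen "0.0.0.0:8080", :backlog => 1024]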
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
new file mode 100755
index 0000000..ccb2fe3
--- /dev/null
+++ b/GIT-VERSION-GEN
@@ -0,0 +1,40 @@
+#!/bin/sh
+
+GVF=GIT-VERSION-FILE
+DEF_VER=v0.96.0.GIT
+
+LF='
+'
+
+# First see if there is a version file (included in release tarballs),
+# then try git-describe, then default.
+if test -f version
+then
+        VN=$(cat version) || VN="$DEF_VER"
+elif test -d .git -o -f .git &&
+        VN=$(git describe --abbrev=4 HEAD 2>/dev/null) &&
+        case "$VN" in
+        *$LF*) (exit 1) ;;
+        v[0-9]*)
+                git update-index -q --refresh
+                test -z "$(git diff-index --name-only HEAD --)" ||
+                VN="$VN-dirty" ;;
+        esac
+then
+        VN=$(echo "$VN" | sed -e 's/-/./g');
+else
+        VN="$DEF_VER"
+fi
+
+VN=$(expr "$VN" : v*'\(.*\)')
+
+if test -r $GVF
+then
+        VC=$(sed -e 's/^GIT_VERSION = //' <$GVF)
+else
+        VC=unset
+fi
+test "$VN" = "$VC" || {
+        echo >&2 "GIT_VERSION = $VN"
+        echo "GIT_VERSION = $VN" >$GVF
+}
diff --git a/GNUmakefile b/GNUmakefile
index 47c6b5f..bc67b99 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -1,15 +1,28 @@
-# use GNU Make to run tests in parallel, and without depending on Rubygems
+# use GNU Make to run tests in parallel, and without depending on RubyGems
 all:: test
+
 ruby = ruby
+rake = rake
 ragel = ragel
+
+GIT_URL = git://git.bogomips.org/unicorn.git
 RLFLAGS = -G2
+
+# lower-case vars are deprecated
+RUBY = $(ruby)
+RAKE = $(rake)
+RAGEL = $(ragel)
+
+GIT-VERSION-FILE: .FORCE-GIT-VERSION-FILE
+        @./GIT-VERSION-GEN
+-include GIT-VERSION-FILE
 -include local.mk
-ruby_bin := $(shell which $(ruby))
+ruby_bin := $(shell which $(RUBY))
 ifeq ($(DLEXT),) # "so" for Linux
-  DLEXT := $(shell $(ruby) -rrbconfig -e 'puts Config::CONFIG["DLEXT"]')
+  DLEXT := $(shell $(RUBY) -rrbconfig -e 'puts Config::CONFIG["DLEXT"]')
 endif
 ifeq ($(RUBY_VERSION),)
-  RUBY_VERSION := $(shell $(ruby) -e 'puts RUBY_VERSION')
+  RUBY_VERSION := $(shell $(RUBY) -e 'puts RUBY_VERSION')
 endif
 
 # dunno how to implement this as concisely in Ruby, and hell, I love awk
@@ -27,41 +40,43 @@ T_n_log := $(subst .n,$(log_suffix),$(T_n))
 T_r_log := $(subst .r,$(log_suffix),$(T_r))
 test_prefix = $(CURDIR)/test/install-$(RUBY_VERSION)
 
-ext := ext/unicorn/http11
-c_files := $(addprefix $(ext)/,ext_help.h http11.c http11_parser.h)
-rl_files := $(addprefix $(ext)/,http11_parser.rl http11_parser_common.rl)
-rb_files := $(shell grep '^\(bin\|lib\)' Manifest)
-inst_deps := $(c_files) $(rb_files)
+ext := ext/unicorn_http
+c_files := $(ext)/unicorn_http.c $(wildcard $(ext)/*.h)
+rl_files := $(wildcard $(ext)/*.rl)
+base_bins := unicorn unicorn_rails
+bins := $(addprefix bin/, $(base_bins))
+man1_bins := $(addsuffix .1, $(base_bins))
+man1_paths := $(addprefix man/man1/, $(man1_bins))
+rb_files := $(bins) $(shell find lib ext -type f -name '*.rb')
+inst_deps := $(c_files) $(rb_files) GNUmakefile test/test_helper.rb
 
-ragel: $(ext)/http11_parser.h
-$(ext)/http11_parser.h: $(rl_files)
-        cd $(@D) && $(ragel) http11_parser.rl -C $(RLFLAGS) -o $(@F)
-        $(ruby) -i -p -e '$$_.gsub!(%r{[ \t]*$$},"")' $@
+ragel: $(ext)/unicorn_http.c
+$(ext)/unicorn_http.c: $(rl_files)
+        cd $(@D) && $(RAGEL) unicorn_http.rl -C $(RLFLAGS) -o $(@F)
 $(ext)/Makefile: $(ext)/extconf.rb $(c_files)
-        cd $(@D) && $(ruby) extconf.rb
-$(ext)/http11.$(DLEXT): $(ext)/Makefile
+        cd $(@D) && $(RUBY) extconf.rb
+$(ext)/unicorn_http.$(DLEXT): $(ext)/Makefile
         $(MAKE) -C $(@D)
-lib/unicorn/http11.$(DLEXT): $(ext)/http11.$(DLEXT)
+lib/unicorn_http.$(DLEXT): $(ext)/unicorn_http.$(DLEXT)
         @mkdir -p lib
         install -m644 $< $@
-http11: lib/unicorn/http11.$(DLEXT)
+http: lib/unicorn_http.$(DLEXT)
 
 $(test_prefix)/.stamp: $(inst_deps)
         mkdir -p $(test_prefix)/.ccache
-        tar c `cat Manifest` | (cd $(test_prefix) && tar x)
+        tar cf - $(inst_deps) GIT-VERSION-GEN | \
+          (cd $(test_prefix) && tar xf -)
         $(MAKE) -C $(test_prefix) clean
-        $(MAKE) -C $(test_prefix) http11 shebang
+        $(MAKE) -C $(test_prefix) http shebang
         > $@
 
-bins := $(wildcard bin/*)
-
 # this is only intended to be run within $(test_prefix)
 shebang: $(bins)
-        $(ruby) -i -p -e '$$_.gsub!(%r{^#!.*$$},"#!$(ruby_bin)")' $^
+        $(RUBY) -i -p -e '$$_.gsub!(%r{^#!.*$$},"#!$(ruby_bin)")' $^
 
 t_log := $(T_log) $(T_n_log)
 test: $(T) $(T_n)
-        @cat $(t_log) | $(ruby) test/aggregate.rb
+        @cat $(t_log) | $(RUBY) test/aggregate.rb
         @$(RM) $(t_log)
 
 test-exec: $(wildcard test/exec/test_*.rb)
@@ -70,19 +85,27 @@ $(slow_tests): $(test_prefix)/.stamp
         @$(MAKE) $(shell $(awk_slow) $@)
 
 TEST_OPTS = -v
-TEST_OPTS = -v
+check_test = grep '0 failures, 0 errors' $(t) >/dev/null
 ifndef V
        quiet_pre = @echo '* $(arg)$(extra)';
-       quiet_post = >$(t) 2>&1
+       quiet_post = >$(t) 2>&1 && $(check_test)
 else
        # we can't rely on -o pipefail outside of bash 3+,
        # so we use a stamp file to indicate success and
        # have rm fail if the stamp didn't get created
        stamp = $@$(log_suffix).ok
-       quiet_pre = @echo $(ruby) $(arg) $(TEST_OPTS); ! test -f $(stamp) && (
-       quiet_post = && > $(stamp) )>&2 | tee $(t); rm $(stamp) 2>/dev/null
+       quiet_pre = @echo $(RUBY) $(arg) $(TEST_OPTS); ! test -f $(stamp) && (
+       quiet_post = && > $(stamp) )2>&1 | tee $(t); \
+         rm $(stamp) 2>/dev/null && $(check_test)
 endif
-run_test = $(quiet_pre) setsid $(ruby) -w $(arg) $(TEST_OPTS) $(quiet_post) || \
+
+# not all systems have setsid(8), we need it because we spam signals
+# stupidly in some tests...
+rb_setsid := $(RUBY) -e 'Process.setsid' -e 'exec *ARGV'
+
+# TRACER='strace -f -o $(t).strace -s 100000'
+run_test = $(quiet_pre) \
+  $(rb_setsid) $(TRACER) $(RUBY) -w $(arg) $(TEST_OPTS) $(quiet_post) || \
   (sed "s,^,$(extra): ," >&2 < $(t); exit 1)
 
 %.n: arg = $(subst .n,,$(subst --, -n ,$@))
@@ -99,14 +122,15 @@ $(T): export RUBYLIB := $(test_prefix):$(test_prefix)/lib:$(RUBYLIB)
 $(T): $(test_prefix)/.stamp
         $(run_test)
 
-install: $(bins)
+install: $(bins) $(ext)/unicorn_http.c
         $(prep_setup_rb)
+        $(RM) lib/unicorn_http.$(DLEXT)
         $(RM) -r .install-tmp
         mkdir .install-tmp
-        cp -p $^ .install-tmp
-        $(ruby) setup.rb all
+        cp -p bin/* .install-tmp
+        $(RUBY) setup.rb all
         $(RM) $^
-        mv $(addprefix .install-tmp/,$(^F)) bin/
+        mv .install-tmp/* bin/
         $(RM) -r .install-tmp
         $(prep_setup_rb)
 
@@ -115,18 +139,67 @@ prep_setup_rb := @-$(RM) $(setup_rb_files);$(MAKE) -C $(ext) clean
 
 clean:
         -$(MAKE) -C $(ext) clean
-        $(RM) $(ext)/Makefile lib/unicorn/http11.$(DLEXT)
+        -$(MAKE) -C Documentation clean
+        $(RM) $(ext)/Makefile lib/unicorn_http.$(DLEXT)
         $(RM) $(setup_rb_files) $(t_log)
-        $(RM) -r $(test_prefix)
+        $(RM) -r $(test_prefix) man
+
+man:
+        $(MAKE) -C Documentation install-man
+
+pkg_extra := GIT-VERSION-FILE NEWS ChangeLog $(ext)/unicorn_http.c
+manifest: $(pkg_extra) man
+        $(RM) .manifest
+        $(MAKE) .manifest
 
-Manifest:
-        git ls-files > $@+
+.manifest:
+        (git ls-files && \
+         for i in $@ $(pkg_extra) $(man1_paths); \
+         do echo $$i; done) | LC_ALL=C sort > $@+
         cmp $@+ $@ || mv $@+ $@
-        $(RM) -f $@+
+        $(RM) $@+
+
+NEWS: GIT-VERSION-FILE .manifest
+        $(RAKE) -s news_rdoc > $@+
+        mv $@+ $@
+
+SINCE = 0.95.0
+ChangeLog: LOG_VERSION = \
+  $(shell git rev-parse -q "$(GIT_VERSION)" >/dev/null 2>&1 && \
+          echo $(GIT_VERSION) || git describe)
+ChangeLog: log_range = v$(SINCE)..$(LOG_VERSION)
+ChangeLog: GIT-VERSION-FILE
+        @echo "ChangeLog from $(GIT_URL) ($(log_range))" > $@+
+        @echo >> $@+
+        git log $(log_range) | sed -e 's/^/    /' >> $@+
+        mv $@+ $@
+
+news_atom := http://unicorn.bogomips.org/NEWS.atom.xml
+cgit_atom := http://git.bogomips.org/cgit/unicorn.git/atom/?h=master
+atom = <link rel="alternate" title="Atom feed" href="$(1)" \
+             type="application/atom+xml"/>
 
-# using rdoc 2.4.1
-doc: .document
-        rdoc -Na -m README -t "$(shell sed -ne '1s/^= //p' README)"
+# using rdoc 2.4.1+
+doc: .document $(ext)/unicorn_http.c NEWS ChangeLog
+        for i in $(man1_bins); do > $$i; done
+        find bin lib -type f -name '*.rbc' -exec rm -f '{}' ';'
+        rdoc -Na -t "$(shell sed -ne '1s/^= //p' README)"
+        install -m644 COPYING doc/COPYING
+        install -m644 $(shell grep '^[A-Z]' .document)  doc/
+        $(MAKE) -C Documentation install-html install-man
+        install -m644 $(man1_paths) doc/
+        cd doc && for i in $(base_bins); do \
+          sed -e '/"documentation">/r man1/'$$i'.1.html' \
+                < $${i}_1.html > tmp && mv tmp $${i}_1.html; done
+        $(RUBY) -i -p -e \
+          '$$_.gsub!("</title>",%q{\&$(call atom,$(cgit_atom))})' \
+          doc/ChangeLog.html
+        $(RUBY) -i -p -e \
+          '$$_.gsub!("</title>",%q{\&$(call atom,$(news_atom))})' \
+          doc/NEWS.html doc/README.html
+        $(RAKE) -s news_atom > doc/NEWS.atom.xml
+        cd doc && ln README.html tmp && mv tmp index.html
+        $(RM) $(man1_bins)
 
 rails_git_url = git://github.com/rails/rails.git
 rails_git := vendor/rails.git
@@ -147,4 +220,69 @@ $(T_r).%.r: export RAILS_GIT_REPO = $(CURDIR)/$(rails_git)
 $(T_r).%.r: $(test_prefix)/.stamp $(rails_git)/info/cloned-stamp
         $(run_test)
 
-.PHONY: doc $(T) $(slow_tests) Manifest
+ifneq ($(VERSION),)
+rfproject := mongrel
+rfpackage := unicorn
+pkggem := pkg/$(rfpackage)-$(VERSION).gem
+pkgtgz := pkg/$(rfpackage)-$(VERSION).tgz
+release_notes := release_notes-$(VERSION)
+release_changes := release_changes-$(VERSION)
+
+release-notes: $(release_notes)
+release-changes: $(release_changes)
+$(release_changes):
+        $(RAKE) -s release_changes > $@+
+        $(VISUAL) $@+ && test -s $@+ && mv $@+ $@
+$(release_notes):
+        GIT_URL=$(GIT_URL) $(RAKE) -s release_notes > $@+
+        $(VISUAL) $@+ && test -s $@+ && mv $@+ $@
+
+# ensures we're actually on the tagged $(VERSION), only used for release
+verify:
+        test x"$(shell umask)" = x0022
+        git rev-parse --verify refs/tags/v$(VERSION)^{}
+        git diff-index --quiet HEAD^0
+        test `git rev-parse --verify HEAD^0` = \
+             `git rev-parse --verify refs/tags/v$(VERSION)^{}`
+
+fix-perms:
+        git ls-tree -r HEAD | awk '/^100644 / {print $$NF}' | xargs chmod 644
+        git ls-tree -r HEAD | awk '/^100755 / {print $$NF}' | xargs chmod 755
+
+gem: $(pkggem)
+
+install-gem: $(pkggem)
+        gem install $(CURDIR)/$<
+
+$(pkggem): manifest fix-perms
+        gem build $(rfpackage).gemspec
+        mkdir -p pkg
+        mv $(@F) $@
+
+$(pkgtgz): distdir = $(basename $@)
+$(pkgtgz): HEAD = v$(VERSION)
+$(pkgtgz): manifest fix-perms
+        @test -n "$(distdir)"
+        $(RM) -r $(distdir)
+        mkdir -p $(distdir)
+        tar cf - `cat .manifest` | (cd $(distdir) && tar xf -)
+        cd pkg && tar cf - $(basename $(@F)) | gzip -9 > $(@F)+
+        mv $@+ $@
+
+package: $(pkgtgz) $(pkggem)
+
+release: verify package $(release_notes) $(release_changes)
+        # make tgz release on RubyForge
+        rubyforge add_release -f -n $(release_notes) -a $(release_changes) \
+          $(rfproject) $(rfpackage) $(VERSION) $(pkgtgz)
+        # push gem to Gemcutter
+        gem push $(pkggem)
+        # in case of gem downloads from RubyForge releases page
+        -rubyforge add_file \
+          $(rfproject) $(rfpackage) $(VERSION) $(pkggem)
+else
+gem install-gem: GIT-VERSION-FILE
+        $(MAKE) $@ VERSION=$(GIT_VERSION)
+endif
+
+.PHONY: .FORCE-GIT-VERSION-FILE doc $(T) $(slow_tests) manifest man
diff --git a/HACKING b/HACKING
new file mode 100644
index 0000000..6a05d4b
--- /dev/null
+++ b/HACKING
@@ -0,0 +1,116 @@
+= Unicorn Hacker's Guide
+
+== Polyglot Infrastructure
+
+Like Mongrel, we use Ruby where it makes sense, and Ragel with C where
+it helps performance.  All of the code that actually runs your Rack
+application is written in Ruby, Ragel or C.
+
+As far as tests and documentation go, we're not afraid to embrace Unix
+and use traditional Unix tools where they make sense and get the job
+done.
+
+=== Tests
+
+Tests are good, but slow tests make development slow, so we make tests
+faster by running them in parallel with GNU make (instead of Rake) and
+by avoiding RubyGems.
+
+Users of GNU-based systems (such as GNU/Linux) usually have GNU make installed
+as "make" instead of "gmake".
+
+Since we don't load RubyGems by default, loading Rack properly requires
+setting up RUBYLIB to point to where Rack is located.  Not loading
+RubyGems drastically lowers the time to run the full test suite.  You
+may set up a "local.mk" file in the top-level working directory to set
+your RUBYLIB and any other environment variables.  A "local.mk.sample"
+file is provided for reference.
+
+Running the entire test suite with 4 tests in parallel:
+
+  gmake -j4 test
+
+Running just one unit test:
+
+  gmake test/unit/test_http_parser.rb
+
+Running just one test case in a unit test:
+
+  gmake test/unit/test_http_parser.rb--test_parse_simple.n
+
+=== HttpServer
+
+We strive to write as little code as possible while still maintaining
+readability.  However, readability and flexibility may be sacrificed for
+performance in hot code paths.  For Ruby, less code generally means
+faster code.
+
+Memory allocation should be minimized as much as practically possible.
+Buffers for IO#readpartial are preallocated in the hot paths to avoid
+building up garbage.  Hash assignments use frozen strings to avoid
+behind-the-scenes duplication.
+
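+A minimal, hypothetical sketch of both patterns (not actual Unicorn
+code; the names below are only for illustration):
+
+  # reuse one buffer per worker instead of building a new string
+  # for every IO#readpartial call
+  BUF = ' ' * 16384 # preallocated once
+
+  def read_chunk(io)
+    io.readpartial(16384, BUF) # fills BUF in place, no new garbage
+  end
+
+  # a frozen hash key avoids the String#dup that Hash#[]= performs
+  # for unfrozen string keys
+  REMOTE_ADDR = 'REMOTE_ADDR'.freeze
+
+  def fill_env(env, addr)
+    env[REMOTE_ADDR] = addr
+  end
+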
+We spend as little time as possible inside signal handlers and instead
+defer handling them for predictability and robustness.  Most of the
+Unix-specific things are in the Unicorn::HttpServer class.  Unix systems
+programming experience will come in handy (or be learned) here.
+
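+The deferral pattern roughly looks like the following (a simplified
+sketch, not the actual HttpServer code): trap handlers only record the
+signal and wake the main loop through a pipe.
+
+  SIG_QUEUE = []
+  reader, writer = IO.pipe # self-pipe used to wake the main loop
+
+  %w(QUIT TERM USR1).each do |sig|
+    trap(sig) do
+      SIG_QUEUE << sig # remember the signal, do nothing else here
+      writer.write_nonblock('.') rescue nil
+    end
+  end
+
+  loop do
+    IO.select([reader]) # sleep until a signal (or other event) arrives
+    reader.read_nonblock(16) rescue nil
+    while (sig = SIG_QUEUE.shift)
+      puts "handling SIG#{sig} in the main loop, outside the trap"
+    end
+  end
+
+In the real server, a similar wakeup mechanism lets a pending signal
+interrupt any sleep in the main loop.
+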
+=== Documentation
+
+We use RDoc 2.4.x with Darkfish for documentation as much as possible;
+if you're on Ruby 1.8 you'll want to install the latest "rdoc" gem.  Due to
+the lack of RDoc-to-manpage converters we know about, we're writing
+manpages in Markdown and converting to troff/HTML with Pandoc.
+
+=== Ruby/C Compatibility
+
+We target Ruby 1.8.6+, 1.9 and will target Rubinius as it becomes
+production-ready.  We need the Ruby implementation to support fork,
+exec, pipe, UNIX signals, access to integer file descriptors and the
+ability to use unlinked files.
+
+All of our C code is OS-independent and should run on compilers
+supported by the versions of Ruby we target.
+
+=== Ragel Compatibility
+
+We target the latest released version of Ragel and will update our code
+to keep up with new releases.  Packaged tarballs and gems include the
+generated source code so they will remain usable if compatibility is
+broken.
+
+== Contributing
+
+Contributions are welcome in the form of patches, pull requests, code
+review, testing, documentation, user support or any other feedback.
+The mailing list is the central coordination point for all user and
+developer feedback and bug reports.
+
+=== Submitting Patches
+
+Follow conventions already established in the code and do not exceed 80
+characters per line.
+
+Inline patches (from "git format-patch -M") to the mailing list are
+preferred because they allow code review and comments in the reply to
+the patch.
+
+We will adhere to mostly the same conventions for patch submissions as
+git itself.  See the Documentation/SubmittingPatches document
+distributed with git for the patch submission guidelines to follow.  Just
+don't email the git mailing list or maintainer with Unicorn patches :)
+
+== Running Development Versions
+
+It is easy to install the contents of your git working directory:
+
+Via RubyGems (RubyGems 1.3.5+ recommended for prerelease versions):
+
+  gmake install-gem
+
+Without RubyGems (via setup.rb):
+
+  gmake install
+
+However, mixing a RubyGems installation with an installation done
+without RubyGems is not recommended at all.
diff --git a/ISSUES b/ISSUES
new file mode 100644
index 0000000..a0d620b
--- /dev/null
+++ b/ISSUES
@@ -0,0 +1,36 @@
+= Issues
+
+The {mailing list}[mailto:mongrel-unicorn@rubyforge.org] is the best
+place to report bugs, submit patches and/or obtain support after you
+have searched the mailing list archives and
+{documentation}[http://unicorn.bogomips.org].
+
+* No subscription is needed to post to the mailing list, but let us
+  know if you're unsubscribed so we can Cc: replies to you.
+* Do not {top post}[http://catb.org/jargon/html/T/top-post.html] in replies
+* Quote only the relevant portions of the message you're replying to
+* Do not send HTML mail
+
+If your issue is of a sensitive nature or you're just shy in public,
+then feel free to email us privately at mailto:unicorn@bogomips.org
+instead and your issue will be handled discreetly.
+
+If you don't get a response within a few days, we may have forgotten
+about it so feel free to ask again.
+
+== Submitting Patches
+
+See the HACKING document (and additionally, the
+Documentation/SubmittingPatches document distributed with git) on
+guidelines for patch submission.
+
+== Mailing List Info
+
+* subscribe: http://rubyforge.org/mailman/listinfo/mongrel-unicorn
+* post: mailto:mongrel-unicorn@rubyforge.org
+* private: mailto:unicorn@bogomips.org
+
+== Mailing List Archives
+
+* nntp://news.gmane.org/gmane.comp.lang.ruby.unicorn.general
+* http://rubyforge.org/pipermail/mongrel-unicorn
diff --git a/KNOWN_ISSUES b/KNOWN_ISSUES
new file mode 100644
index 0000000..83b3584
--- /dev/null
+++ b/KNOWN_ISSUES
@@ -0,0 +1,43 @@
+= Known Issues
+
+Occasionally odd {issues}[link:ISSUES.html] arise without a transparent or
+acceptable solution.  Those issues are documented here.
+
+* Rails 2.3.2 bundles its own version of Rack.  This may cause subtle
+  bugs when simultaneously loaded with the system-wide Rack Rubygem
+  which Unicorn depends on.  Upgrading to Rails 2.3.4 (or later) is
+  strongly recommended for all Rails 2.3.x users for this (and for
+  security) reasons.  The Rails 2.2.x series (and earlier) did not
+  bundle Rack and should be unaffected.  If something forces your
+  application to use Rails 2.3.2 and you have no other choice, then
+  you may edit your Unicorn gemspec and remove the Rack dependency.
+
+  ref: http://mid.gmane.org/20091014221552.GA30624@dcvr.yhbt.net
+  Note: the workaround described in the article above only made
+  the issue more subtle and we didn't notice it immediately.
+
+* Installing "unicorn" as a system-wide Rubygem and using the
+  {isolate}[http://github.com/jbarnette/isolate] gem may cause issues if
+  you're using any of the bundled application-level libraries in
+  unicorn/app/* (for compatibility with CGI-based applications, Rails <=
+  2.2.2, or ExecCgi).  For now, workarounds include:
+
+  * installing the same version of unicorn as a system-wide Rubygem
+    _and_ isolating unicorn as well.
+  * explicitly setting RUBYLIB or $LOAD_PATH to include any gem path
+    where the unicorn gem is installed (e.g.
+    /usr/lib/ruby/gems/1.8/gems/unicorn-VERSION/lib)
+
+* WONTFIX: code reloading and restarts are broken for Sinatra 0.3.x
+  (and likely older) apps.  The workaround is to force production
+  mode to disable code reloading as well as disabling "run" in your
+  Sinatra application:
+    set :env, :production
+    set :run, false
+  Since this is no longer an issue with Sinatra 0.9.x apps, this will not be
+  fixed on our end.  Since Unicorn is itself the application launcher, the
+  at_exit handler used in old Sinatra always caused Mongrel to be launched
+  whenever a Unicorn worker was about to exit.
+
+  Also remember we're capable of replacing the running binary without dropping
+  any connections regardless of framework :)
diff --git a/LICENSE b/LICENSE
index c02e2c5..d48c2d4 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,53 +1,55 @@
-Unicorn Web Server (unicorn) is copyrighted free software by Eric Wong
-(normalperson@yhbt.net) and contributors. You can redistribute it
-and/or modify it under either the terms of the GPL2 or the conditions below:
+Unicorn is copyrighted free software by all contributors, see logs in
+revision control for names and email addresses of all of them.  You can
+redistribute it and/or modify it under either the terms of the
+{GPL2}[http://www.gnu.org/licenses/gpl-2.0.txt] (see link:COPYING) or
+the conditions below:
 
-1. You may make and give away verbatim copies of the source form of the
-   software without restriction, provided that you duplicate all of the
-   original copyright notices and associated disclaimers.
+  1. You may make and give away verbatim copies of the source form of the
+     software without restriction, provided that you duplicate all of the
+     original copyright notices and associated disclaimers.
 
-2. You may modify your copy of the software in any way, provided that
-   you do at least ONE of the following:
+  2. You may modify your copy of the software in any way, provided that
+     you do at least ONE of the following:
 
-     a) place your modifications in the Public Domain or otherwise make them
-     Freely Available, such as by posting said modifications to Usenet or an
-     equivalent medium, or by allowing the author to include your
-     modifications in the software.
+       a) place your modifications in the Public Domain or otherwise make them
+       Freely Available, such as by posting said modifications to Usenet or an
+       equivalent medium, or by allowing the author to include your
+       modifications in the software.
 
-     b) use the modified software only within your corporation or
-        organization.
+       b) use the modified software only within your corporation or
+          organization.
 
-     c) rename any non-standard executables so the names do not conflict with
-     standard executables, which must also be provided.
+       c) rename any non-standard executables so the names do not conflict with
+       standard executables, which must also be provided.
 
-     d) make other distribution arrangements with the author.
+       d) make other distribution arrangements with the author.
 
-3. You may distribute the software in object code or executable
-   form, provided that you do at least ONE of the following:
+  3. You may distribute the software in object code or executable
+     form, provided that you do at least ONE of the following:
 
-     a) distribute the executables and library files of the software,
-     together with instructions (in the manual page or equivalent) on where
-     to get the original distribution.
+       a) distribute the executables and library files of the software,
+       together with instructions (in the manual page or equivalent) on where
+       to get the original distribution.
 
-     b) accompany the distribution with the machine-readable source of the
-     software.
+       b) accompany the distribution with the machine-readable source of the
+       software.
 
-     c) give non-standard executables non-standard names, with
-        instructions on where to get the original software distribution.
+       c) give non-standard executables non-standard names, with
+          instructions on where to get the original software distribution.
 
-     d) make other distribution arrangements with the author.
+       d) make other distribution arrangements with the author.
 
-4. You may modify and include the part of the software into any other
-   software (possibly commercial).  But some files in the distribution
-   are not written by the author, so that they are not under this terms.
+  4. You may modify and include the part of the software into any other
+     software (possibly commercial).  But some files in the distribution
+     are not written by the author, so that they are not under this terms.
 
-5. The scripts and library files supplied as input to or produced as
-   output from the software do not automatically fall under the
-   copyright of the software, but belong to whomever generated them,
-   and may be sold commercially, and may be aggregated with this
-   software.
+  5. The scripts and library files supplied as input to or produced as
+     output from the software do not automatically fall under the
+     copyright of the software, but belong to whomever generated them,
+     and may be sold commercially, and may be aggregated with this
+     software.
 
-6. THIS SOFTWARE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR
-   IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
-   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-   PURPOSE.
+  6. THIS SOFTWARE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR
+     IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+     WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+     PURPOSE.
diff --git a/Manifest b/Manifest
deleted file mode 100644
index cac021a..0000000
--- a/Manifest
+++ /dev/null
@@ -1,131 +0,0 @@
-.document
-.gitignore
-CHANGELOG
-CONTRIBUTORS
-DESIGN
-GNUmakefile
-LICENSE
-Manifest
-PHILOSOPHY
-README
-Rakefile
-SIGNALS
-TODO
-TUNING
-bin/unicorn
-bin/unicorn_rails
-examples/init.sh
-ext/unicorn/http11/ext_help.h
-ext/unicorn/http11/extconf.rb
-ext/unicorn/http11/http11.c
-ext/unicorn/http11/http11_parser.h
-ext/unicorn/http11/http11_parser.rl
-ext/unicorn/http11/http11_parser_common.rl
-lib/unicorn.rb
-lib/unicorn/app/exec_cgi.rb
-lib/unicorn/app/old_rails.rb
-lib/unicorn/app/old_rails/static.rb
-lib/unicorn/cgi_wrapper.rb
-lib/unicorn/configurator.rb
-lib/unicorn/const.rb
-lib/unicorn/http_request.rb
-lib/unicorn/http_response.rb
-lib/unicorn/launcher.rb
-lib/unicorn/socket_helper.rb
-lib/unicorn/util.rb
-local.mk.sample
-setup.rb
-test/aggregate.rb
-test/benchmark/README
-test/benchmark/big_request.rb
-test/benchmark/dd.ru
-test/benchmark/request.rb
-test/benchmark/response.rb
-test/exec/README
-test/exec/test_exec.rb
-test/rails/app-1.2.3/.gitignore
-test/rails/app-1.2.3/Rakefile
-test/rails/app-1.2.3/app/controllers/application.rb
-test/rails/app-1.2.3/app/controllers/foo_controller.rb
-test/rails/app-1.2.3/app/helpers/application_helper.rb
-test/rails/app-1.2.3/config/boot.rb
-test/rails/app-1.2.3/config/database.yml
-test/rails/app-1.2.3/config/environment.rb
-test/rails/app-1.2.3/config/environments/development.rb
-test/rails/app-1.2.3/config/environments/production.rb
-test/rails/app-1.2.3/config/routes.rb
-test/rails/app-1.2.3/db/.gitignore
-test/rails/app-1.2.3/log/.gitignore
-test/rails/app-1.2.3/public/404.html
-test/rails/app-1.2.3/public/500.html
-test/rails/app-2.0.2/.gitignore
-test/rails/app-2.0.2/Rakefile
-test/rails/app-2.0.2/app/controllers/application.rb
-test/rails/app-2.0.2/app/controllers/foo_controller.rb
-test/rails/app-2.0.2/app/helpers/application_helper.rb
-test/rails/app-2.0.2/config/boot.rb
-test/rails/app-2.0.2/config/database.yml
-test/rails/app-2.0.2/config/environment.rb
-test/rails/app-2.0.2/config/environments/development.rb
-test/rails/app-2.0.2/config/environments/production.rb
-test/rails/app-2.0.2/config/routes.rb
-test/rails/app-2.0.2/db/.gitignore
-test/rails/app-2.0.2/log/.gitignore
-test/rails/app-2.0.2/public/404.html
-test/rails/app-2.0.2/public/500.html
-test/rails/app-2.1.2/.gitignore
-test/rails/app-2.1.2/Rakefile
-test/rails/app-2.1.2/app/controllers/application.rb
-test/rails/app-2.1.2/app/controllers/foo_controller.rb
-test/rails/app-2.1.2/app/helpers/application_helper.rb
-test/rails/app-2.1.2/config/boot.rb
-test/rails/app-2.1.2/config/database.yml
-test/rails/app-2.1.2/config/environment.rb
-test/rails/app-2.1.2/config/environments/development.rb
-test/rails/app-2.1.2/config/environments/production.rb
-test/rails/app-2.1.2/config/routes.rb
-test/rails/app-2.1.2/db/.gitignore
-test/rails/app-2.1.2/log/.gitignore
-test/rails/app-2.1.2/public/404.html
-test/rails/app-2.1.2/public/500.html
-test/rails/app-2.2.2/.gitignore
-test/rails/app-2.2.2/Rakefile
-test/rails/app-2.2.2/app/controllers/application.rb
-test/rails/app-2.2.2/app/controllers/foo_controller.rb
-test/rails/app-2.2.2/app/helpers/application_helper.rb
-test/rails/app-2.2.2/config/boot.rb
-test/rails/app-2.2.2/config/database.yml
-test/rails/app-2.2.2/config/environment.rb
-test/rails/app-2.2.2/config/environments/development.rb
-test/rails/app-2.2.2/config/environments/production.rb
-test/rails/app-2.2.2/config/routes.rb
-test/rails/app-2.2.2/db/.gitignore
-test/rails/app-2.2.2/log/.gitignore
-test/rails/app-2.2.2/public/404.html
-test/rails/app-2.2.2/public/500.html
-test/rails/app-2.3.2.1/.gitignore
-test/rails/app-2.3.2.1/Rakefile
-test/rails/app-2.3.2.1/app/controllers/application_controller.rb
-test/rails/app-2.3.2.1/app/controllers/foo_controller.rb
-test/rails/app-2.3.2.1/app/helpers/application_helper.rb
-test/rails/app-2.3.2.1/config/boot.rb
-test/rails/app-2.3.2.1/config/database.yml
-test/rails/app-2.3.2.1/config/environment.rb
-test/rails/app-2.3.2.1/config/environments/development.rb
-test/rails/app-2.3.2.1/config/environments/production.rb
-test/rails/app-2.3.2.1/config/routes.rb
-test/rails/app-2.3.2.1/db/.gitignore
-test/rails/app-2.3.2.1/log/.gitignore
-test/rails/app-2.3.2.1/public/404.html
-test/rails/app-2.3.2.1/public/500.html
-test/rails/test_rails.rb
-test/test_helper.rb
-test/unit/test_configurator.rb
-test/unit/test_http_parser.rb
-test/unit/test_request.rb
-test/unit/test_response.rb
-test/unit/test_server.rb
-test/unit/test_signals.rb
-test/unit/test_socket_helper.rb
-test/unit/test_upload.rb
-test/unit/test_util.rb
diff --git a/PHILOSOPHY b/PHILOSOPHY
index ce7763a..7f30007 100644
--- a/PHILOSOPHY
+++ b/PHILOSOPHY
@@ -101,7 +101,7 @@ A reverse proxy for Unicorn should meet the following requirements:
 nginx is the only (Free) solution we know of that meets the above
 requirements.
 
-Indeed, the author of Unicorn has deployed nginx as a reverse-proxy not
+Indeed, the folks behind Unicorn have deployed nginx as a reverse-proxy not
 only for Ruby applications, but also for production applications running
 Apache/mod_perl, Apache/mod_php and Apache Tomcat.  In every single
 case, performance improved because application servers were able to use
@@ -137,3 +137,8 @@ Unicorn is highly inefficient for Comet/reverse-HTTP/push applications
 where the HTTP connection spends a large amount of time idle.
 Nevertheless, the ease of troubleshooting, debugging, and management of
 Unicorn may still outweigh the drawbacks for these applications.
+
+The {Rainbows!}[http://rainbows.rubyforge.org/] project aims to fill the
+gap for odd corner cases where the nginx + Unicorn combination is not
+enough.
+Keep in mind that Rainbows! is still very new (as of October 2009), far
+more ambitious, and far less tested than Unicorn.
diff --git a/README b/README
index 161a3e1..11a9a3c 100644
--- a/README
+++ b/README
@@ -1,18 +1,22 @@
-= Unicorn: Rack HTTP server for Unix, fast clients and nothing else
+= Unicorn: Rack HTTP server for fast clients and Unix
+
+Unicorn is an HTTP server for Rack applications designed to only serve
+fast clients on low-latency, high-bandwidth connections and take
+advantage of features in Unix/Unix-like kernels.  Slow clients should
+only be served by placing a reverse proxy capable of fully buffering
+both the request and response in between Unicorn and slow clients.
 
 == Features
 
 * Designed for Rack, Unix, fast clients, and ease-of-debugging.  We
-  cut out all things that are better-supported by nginx or Rack.
+  cut out everything that is better supported by the operating system,
+  {nginx}[http://nginx.net/] or {Rack}[http://rack.rubyforge.org/].
 
-* Mostly written in Ruby, only the HTTP parser (stolen and trimmed
-  down from Mongrel) is written in C.  Unicorn is compatible with
-  both Ruby 1.8 and 1.9.  A pure-Ruby (but still Unix-only) version
-  is planned.
+* Compatible with both Ruby 1.8 and 1.9.  Rubinius support is in-progress.
 
 * Process management: Unicorn will reap and restart workers that
   die from broken apps.  There is no need to manage multiple processes
-  or ports yourself.  Unicorn can spawn and manage any fixed number of
+  or ports yourself.  Unicorn can spawn and manage any number of
   worker processes you choose to scale to your backend.
 
 * Load balancing is done entirely by the operating system kernel.
@@ -20,7 +24,7 @@
 
 * Does not care if your application is thread-safe or not, workers
   all run within their own isolated address space and only serve one
-  client at a time.
+  client at a time for maximum robustness.
 
 * Supports all Rack applications, along with pre-Rack versions of
   Ruby on Rails via a Rack wrapper.
@@ -31,10 +35,9 @@
   Unicorn also takes steps to ensure multi-line log entries from one
   request all stay within the same file.
 
-* nginx-style binary re-execution without losing connections.
+* nginx-style binary upgrades without losing connections.
   You can upgrade Unicorn, your entire application, libraries
-  and even your Ruby interpreter as long as Unicorn is
-  installed in the same path.
+  and even your Ruby interpreter without dropping clients.
 
 * before_fork and after_fork hooks in case your application
   has special needs when dealing with forked processes.  These
@@ -48,10 +51,16 @@
   each worker process can also bind to a private port via the
   after_fork hook for easy debugging.
 
+* Simple and easy Ruby DSL for configuration.
+
+* Decodes chunked transfers on-the-fly, thus allowing upload progress
+  notification to be implemented as well as being able to tunnel
+  arbitrary stream-based protocols over HTTP.
+
 == License
 
-Unicorn is copyright 2009 Eric Wong and contributors.
-It is based on Mongrel and carries the same license:
+Unicorn is copyright 2009 by all contributors (see logs in git).
+It is based on Mongrel and carries the same license.
 
 Mongrel is copyright 2007 Zed A. Shaw and contributors. It is licensed
 under the Ruby license and the GPL2. See the included LICENSE file for
@@ -61,15 +70,15 @@ Unicorn is 100% Free Software.
 
 == Install
 
-The library consists of a C extension so you'll need a C compiler or at
-least a friend who can build it for you.
+The library consists of a C extension so you'll need a C compiler
+and Ruby development libraries/headers.
 
 You may download the tarball from the Mongrel project page on Rubyforge
 and run setup.rb after unpacking it:
 
 http://rubyforge.org/frs/?group_id=1306
 
-You may also install it via Rubygems on Rubyforge:
+You may also install it via RubyGems on Gemcutter:
 
   gem install unicorn
 
@@ -77,9 +86,7 @@ You can get the latest source via git from the following locations
 (these versions may not be stable):
 
   git://git.bogomips.org/unicorn.git
-  http://git.bogomips.org/unicorn.git
   git://repo.or.cz/unicorn.git (mirror)
-  http://repo.or.cz/r/unicorn.git (mirror)
 
 You may browse the code from the web and download the latest snapshot
 tarballs here:
@@ -87,6 +94,9 @@ tarballs here:
 * http://git.bogomips.org/cgit/unicorn.git (cgit)
 * http://repo.or.cz/w/unicorn.git (gitweb)
 
+See the HACKING guide on how to contribute and build prerelease gems
+from git.
+
 == Usage
 
 === non-Rails Rack applications
@@ -121,26 +131,19 @@ options.
 
 == Disclaimer
 
-Like the creatures themselves, production deployments of Unicorn are
-rare or even non-existent.  There is NO WARRANTY whatsoever if anything
-goes wrong, but let us know and we'll try our best to fix it.
+There is NO WARRANTY whatsoever if anything goes wrong, but
+{let us know}[link:ISSUES.html] and we'll try our best to fix it.
 
 Unicorn is designed to only serve fast clients either on the local host
 or a fast LAN.  See the PHILOSOPHY and DESIGN documents for more details
 regarding this.
 
-== Known Issues
-
-* WONTFIX: code reloading with Sinatra 0.3.2 (and likely older
-  versions) apps is broken.  The workaround is to force production
-  mode to disable code reloading in your Sinatra application:
-    set :env, :production
-  Since this is no longer an issue with Sinatra 0.9.x apps and only
-  affected non-production instances, this will not be fixed on our end.
-  Also remember we're capable of replacing the running binary without
-  dropping any connections regardless of framework :)
-
 == Contact
 
-Email Eric Wong at normalperson@yhbt.net for now.
-Newsgroup and mailing list maybe coming...
+All feedback (bug reports, user/development discussion, patches, pull
+requests) goes to the mailing list/newsgroup.  See the ISSUES document for
+information on the {mailing list}[mailto:mongrel-unicorn@rubyforge.org].
+
+For the latest on Unicorn releases, you may also finger us at
+unicorn@bogomips.org or check our NEWS page (and subscribe to our Atom
+feed).
diff --git a/Rakefile b/Rakefile
index e7b4c36..755915c 100644
--- a/Rakefile
+++ b/Rakefile
@@ -1,38 +1,193 @@
+# -*- encoding: binary -*-
 
-require 'rubygems'
-require 'echoe'
-
-Echoe.new("unicorn") do |p|
-  p.summary = "Rack HTTP server for Unix, fast clients and nothing else"
-  p.author = "Eric Wong"
-  p.email = "normalperson@yhbt.net"
-  p.clean_pattern = ['ext/unicorn/http11/*.{bundle,so,o,obj,pdb,lib,def,exp}',
-                     'lib/*.{bundle,so,o,obj,pdb,lib,def,exp}',
-                     'ext/unicorn/http11/Makefile',
-                     'pkg', 'lib/*.bundle', '*.gem',
-                     'site/output', '.config', 'coverage',
-                     'test_*.log', 'log', 'doc']
-  p.url = "http://unicorn.bogomips.org/"
-  p.ignore_pattern = /^(pkg|site|projects|doc|log)|CVS|\.log/
-  p.need_tar_gz = false
-  p.need_tgz = true
-  p.dependencies = [ 'rack' ]
-
-  p.extension_pattern = ["ext/**/extconf.rb"]
-
-  # Eric hasn't bothered to figure out running exec tests properly
-  # from Rake, but Eric prefers GNU make to Rake for tests anyways...
-  p.test_pattern = [ 'test/unit/test*.rb' ]
-end
-
-#### Ragel builder
-
-desc "Rebuild the Ragel sources"
-task :ragel do
-  Dir.chdir "ext/unicorn/http11" do
-    target = "http11_parser.c"
-    File.unlink target if File.exist? target
-    sh "ragel http11_parser.rl -C -G2 -o #{target}"
-    raise "Failed to build C source" unless File.exist? target
+# most tasks are in the GNUmakefile which offers better parallelism
+
+def old_summaries
+  @old_summaries ||= File.readlines(".CHANGELOG.old").inject({}) do |hash, line|
+    version, summary = line.split(/ - /, 2)
+    hash[version] = summary
+    hash
+  end
+end
+
+def tags
+  timefmt = '%Y-%m-%dT%H:%M:%SZ'
+  @tags ||= `git tag -l`.split(/\n/).map do |tag|
+    next if tag == "v0.0.0"
+    if %r{\Av[\d\.]+\z} =~ tag
+      header, subject, body = `git cat-file tag #{tag}`.split(/\n\n/, 3)
+      header = header.split(/\n/)
+      tagger = header.grep(/\Atagger /).first
+      body ||= "initial"
+      {
+        :time => Time.at(tagger.split(/ /)[-2].to_i).utc.strftime(timefmt),
+        :tagger_name => %r{^tagger ([^<]+)}.match(tagger)[1].strip,
+        :tagger_email => %r{<([^>]+)>}.match(tagger)[1].strip,
+        :id => `git rev-parse refs/tags/#{tag}`.chomp!,
+        :tag => tag,
+        :subject => subject,
+        :body => (old = old_summaries[tag]) ? "#{old}\n#{body}" : body,
+      }
+    end
+  end.compact.sort { |a,b| b[:time] <=> a[:time] }
+end
+
+cgit_url = "http://git.bogomips.org/cgit/unicorn.git"
+git_url = ENV['GIT_URL'] || 'git://git.bogomips.org/unicorn.git'
+
+desc 'prints news as an Atom feed'
+task :news_atom do
+  require 'nokogiri'
+  new_tags = tags[0,10]
+  puts(Nokogiri::XML::Builder.new do
+    feed :xmlns => "http://www.w3.org/2005/Atom" do
+      id! "http://unicorn.bogomips.org/NEWS.atom.xml"
+      title "Unicorn news"
+      subtitle "Rack HTTP server for Unix and fast clients"
+      link! :rel => 'alternate', :type => 'text/html',
+            :href => 'http://unicorn.bogomips.org/NEWS.html'
+      updated new_tags.first[:time]
+      new_tags.each do |tag|
+        entry do
+          title tag[:subject]
+          updated tag[:time]
+          published tag[:time]
+          author {
+            name tag[:tagger_name]
+            email tag[:tagger_email]
+          }
+          url = "#{cgit_url}/tag/?id=#{tag[:tag]}"
+          link! :rel => "alternate", :type => "text/html", :href =>url
+          id! url
+          message_only = tag[:body].split(/\n.+\(\d+\):\n {6}/s).first.strip
+          content({:type =>:text}, message_only)
+          content(:type =>:xhtml) { pre tag[:body] }
+        end
+      end
+    end
+  end.to_xml)
+end
+
+desc 'prints RDoc-formatted news'
+task :news_rdoc do
+  tags.each do |tag|
+    time = tag[:time].tr!('T', ' ').gsub!(/:\d\dZ/, ' UTC')
+    puts "=== #{tag[:tag].sub(/^v/, '')} / #{time}"
+    puts ""
+
+    body = tag[:body]
+    puts tag[:body].gsub(/^/sm, "  ").gsub(/[ \t]+$/sm, "")
+    puts ""
+  end
+end
+
+desc "print release changelog for Rubyforge"
+task :release_changes do
+  version = ENV['VERSION'] or abort "VERSION= needed"
+  version = "v#{version}"
+  vtags = tags.map { |tag| tag[:tag] =~ /\Av/ and tag[:tag] }.sort
+  prev = vtags[vtags.index(version) - 1]
+  system('git', 'diff', '--stat', prev, version) or abort $?.to_s
+  puts ""
+  system('git', 'log', "#{prev}..#{version}") or abort $?.to_s
+end
+
+desc "print release notes for Rubyforge"
+task :release_notes do
+  require 'rubygems'
+
+  spec = Gem::Specification.load('unicorn.gemspec')
+  puts spec.description.strip
+  puts ""
+  puts "* #{spec.homepage}"
+  puts "* #{spec.email}"
+  puts "* #{git_url}"
+
+  _, _, body = `git cat-file tag v#{spec.version}`.split(/\n\n/, 3)
+  print "\nChanges:\n\n"
+  puts body
+end
+
+desc "post to RAA"
+task :raa_update do
+  require 'rubygems'
+  require 'net/http'
+  require 'net/netrc'
+  rc = Net::Netrc.locate('unicorn-raa') or abort "~/.netrc not found"
+  password = rc.password
+
+  s = Gem::Specification.load('unicorn.gemspec')
+  desc = [ s.description.strip ]
+  desc << ""
+  desc << "* #{s.email}"
+  desc << "* #{git_url}"
+  desc << "* #{cgit_url}"
+  desc = desc.join("\n")
+  uri = URI.parse('http://raa.ruby-lang.org/regist.rhtml')
+  form = {
+    :name => s.name,
+    :short_description => s.summary,
+    :version => s.version.to_s,
+    :status => 'stable',
+    :owner => s.authors.first,
+    :email => s.email,
+    :category_major => 'Library',
+    :category_minor => 'Web',
+    :url => s.homepage,
+    :download => "http://rubyforge.org/frs/?group_id=1306",
+    :license => "Ruby's",
+    :description_style => 'Plain',
+    :description => desc,
+    :pass => password,
+    :submit => "Update",
+  }
+  res = Net::HTTP.post_form(uri, form)
+  p res
+  puts res.body
+end
+
+desc "post to FM"
+task :fm_update do
+  require 'tempfile'
+  require 'net/http'
+  require 'net/netrc'
+  require 'json'
+  version = ENV['VERSION'] or abort "VERSION= needed"
+  uri = URI.parse('http://freshmeat.net/projects/unicorn/releases.json')
+  rc = Net::Netrc.locate('unicorn-fm') or abort "~/.netrc not found"
+  api_token = rc.password
+  changelog = tags.find { |t| t[:tag] == "v#{version}" }[:body]
+  tmp = Tempfile.new('fm-changelog')
+  tmp.syswrite(changelog)
+  system(ENV["VISUAL"], tmp.path) or abort "#{ENV["VISUAL"]} failed: #$?"
+  changelog = File.read(tmp.path).strip
+
+  req = {
+    "auth_code" => api_token,
+    "release" => {
+      "tag_list" => "Stable",
+      "version" => version,
+      "changelog" => changelog,
+    },
+  }.to_json
+  Net::HTTP.start(uri.host, uri.port) do |http|
+    p http.post(uri.path, req, {'Content-Type'=>'application/json'})
+  end
+end
+
+# optional rake-compiler support in case somebody needs to cross compile
+begin
+  require 'rubygems'
+  spec = Gem::Specification.load('unicorn.gemspec')
+  require 'rake/extensiontask'
+  unless test ?r, "ext/unicorn_http/unicorn_http.c"
+    abort "run 'gmake ragel' or 'make ragel' to generate the Ragel source"
+  end
+  mk = "ext/unicorn_http/Makefile"
+  if test ?r, mk
+    abort "run 'gmake -C ext/unicorn_http clean' and " \
+          "remove #{mk} before using rake-compiler"
   end
+  Rake::ExtensionTask.new('unicorn_http', spec)
+rescue LoadError
 end
diff --git a/SIGNALS b/SIGNALS
index 4aacd7d..be96892 100644
--- a/SIGNALS
+++ b/SIGNALS
@@ -1,12 +1,20 @@
 == Signal handling
 
 In general, signals need only be sent to the master process.  However,
-the signals unicorn uses internally to communicate with the worker
-processes are documented here as well.
+the signals Unicorn uses internally to communicate with the worker
+processes are documented here as well.  With the exception of TTIN/TTOU,
+signal handling matches the behavior of {nginx}[http://nginx.net/] so it
+should be possible to easily share process management scripts between
+Unicorn and nginx.
 
 === Master Process
 
-* HUP - reload config file, app, and gracefully restart all workers
+* HUP - reloads config file and gracefully restarts all workers.
+  If the "preload_app" directive is false (the default), then workers
+  will also pick up any application code changes when restarted.  If
+  "preload_app" is true, then application code changes will have no
+  effect; USR2 + QUIT (see below) must be used to load newer code in
+  this case.
 
 * INT/TERM - quick shutdown, kills all workers immediately
 
@@ -47,6 +55,13 @@ automatically respawned.
   the current request, so multiple log lines for one request
   (as done by Rails) will not be split across multiple logs.
 
+  It is NOT recommended to send the USR1 signal directly to workers via
+  "killall -USR1 unicorn" if you are using user/group-switching support
+  in your workers.  You will encounter incorrect file permissions and
+  workers will need to be respawned.  Sending USR1 to the master process
+  first will ensure logs have the correct permissions before the master
+  forwards the USR1 signal to workers.
+
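+  A rough Ruby sketch of signaling the master after log rotation (the
+  pid file path here is only an example):
+
+    # signal the master only; it reopens its own logs first, then
+    # forwards USR1 to each worker
+    pid = Integer(File.read("/path/to/unicorn.pid"))
+    Process.kill("USR1", pid)
+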
 === Procedure to replace a running unicorn executable
 
 You may replace a running instance of unicorn with a new one without
@@ -54,9 +69,7 @@ losing any incoming connections.  Doing so will reload all of your
 application code, Unicorn config, Ruby executable, and all libraries.
 The only things that will not change (due to OS limitations) are:
 
-1. The listener backlog size of already-bound sockets
-
-2. The path to the unicorn executable script.  If you want to change to
+1. The path to the unicorn executable script.  If you want to change to
    a different installation of Ruby, you can modify the shebang
    line to point to your alternative interpreter.
 
diff --git a/TODO b/TODO
index 085ef70..66a9c5e 100644
--- a/TODO
+++ b/TODO
@@ -1,17 +1,10 @@
-== 1.0.0
+* Documentation improvements
 
-  * integration tests with nginx including bad client handling
+* ensure test suite passes on non-GNU/Linux systems
+  (likely that it already does)
 
-  * manpages (why do so few Ruby executables come with proper manpages?)
+* fix const-correctness in HTTP parser
 
-== 1.1.0
+* performance validation (esp. TeeInput)
 
-  * Transfer-Encoding: chunked request handling.  Testcase:
-
-      curl -T- http://host:port/path < file_from_stdin
-
-  * code cleanups (launchers)
-
-  * Pure Ruby HTTP parser
-
-  * Rubinius support?
+* improve test suite (steal from Rainbows!, probably...)
diff --git a/TUNING b/TUNING
index 6445fd4..d96529a 100644
--- a/TUNING
+++ b/TUNING
@@ -12,6 +12,12 @@ See Unicorn::Configurator for details on the config file format.
   directives can allow failover to happen more quickly if your
   cluster is configured for it.
 
+* If you're doing extremely simple benchmarks and getting connection
+  errors under high request rates, increasing your :backlog parameter
+  above the already-generous default of 1024 can help.  Keep in mind
+  this is not recommended for real traffic if you have another
+  machine to fail over to (see above).
+
 * :rcvbuf and :sndbuf parameters generally do not need to be set for TCP
   listeners under Linux 2.6 because auto-tuning is enabled.  UNIX domain
   sockets do not have auto-tuning buffer sizes; so increasing those will
@@ -45,7 +51,8 @@ WARNING: Do not change system parameters unless you know what you're doing!
 
 * For load testing/benchmarking with UNIX domain sockets, you should
   consider increasing net.core.somaxconn or else nginx will start
-  failing to connect under heavy load.
+  failing to connect under heavy load.  You may also consider setting
+  a higher :backlog to listen on as noted earlier.
 
 * If you're running out of local ports, consider lowering
   net.ipv4.tcp_fin_timeout to 20-30 (default: 60 seconds).  Also
diff --git a/bin/unicorn b/bin/unicorn
index a34d9bc..5af021d 100755
--- a/bin/unicorn
+++ b/bin/unicorn
@@ -1,8 +1,9 @@
-#!/home/ew/bin/ruby
+#!/this/will/be/overwritten/or/wrapped/anyways/do/not/worry/ruby
+# -*- encoding: binary -*-
 require 'unicorn/launcher'
 require 'optparse'
 
-env = "development"
+ENV["RACK_ENV"] ||= "development"
 daemonize = false
 listeners = []
 options = { :listeners => listeners }
@@ -57,15 +58,17 @@ opts = OptionParser.new("", 24, '  ') do |opts|
 
   opts.on("-E", "--env ENVIRONMENT",
           "use ENVIRONMENT for defaults (default: development)") do |e|
-    env = e
+    ENV["RACK_ENV"] = e
   end
 
   opts.on("-D", "--daemonize", "run daemonized in the background") do |d|
     daemonize = d ? true : false
   end
 
-  opts.on("-P", "--pid FILE", "file to store PID (default: none)") do |f|
-    options[:pid] = File.expand_path(f)
+  opts.on("-P", "--pid FILE", "DEPRECATED") do |f|
+    warn %q{Use of --pid/-P is strongly discouraged}
+    warn %q{Use the 'pid' directive in the Unicorn config file instead}
+    options[:pid] = f
   end
 
   opts.on("-s", "--server SERVER",
@@ -82,7 +85,7 @@ opts = OptionParser.new("", 24, '  ') do |opts|
   end
 
   opts.on("-c", "--config-file FILE", "Unicorn-specific config file") do |f|
-    options[:config_file] = File.expand_path(f)
+    options[:config_file] = f
   end
 
   # I'm avoiding Unicorn-specific config options on the command-line.
@@ -93,7 +96,7 @@ opts = OptionParser.new("", 24, '  ') do |opts|
   opts.separator "Common options:"
 
   opts.on_tail("-h", "--help", "Show this message") do
-    puts opts
+    puts opts.to_s.gsub(/^.*DEPRECATED.*$/s, '')
     exit
   end
 
@@ -120,17 +123,17 @@ require 'pp' if $DEBUG
 app = lambda do ||
   # require Rack as late as possible in case $LOAD_PATH is modified
   # in config.ru or command-line
-  require 'rack'
   inner_app = case config
   when /\.ru$/
     raw = File.open(config, "rb") { |fp| fp.sysread(fp.stat.size) }
+    raw.sub!(/^__END__\n.*/, '')
     eval("Rack::Builder.new {(#{raw}\n)}.to_app", nil, config)
   else
     require config
     Object.const_get(File.basename(config, '.rb').capitalize)
   end
   pp({ :inner_app => inner_app }) if $DEBUG
-  case env
+  case ENV["RACK_ENV"]
   when "development"
     Rack::Builder.new do
       use Rack::CommonLogger, $stderr
@@ -158,5 +161,5 @@ if $DEBUG
   })
 end
 
-Unicorn::Launcher.daemonize! if daemonize
+Unicorn::Launcher.daemonize!(options) if daemonize
 Unicorn.run(app, options)
diff --git a/bin/unicorn_rails b/bin/unicorn_rails
index b3fda7b..b1458fc 100755
--- a/bin/unicorn_rails
+++ b/bin/unicorn_rails
@@ -1,4 +1,5 @@
-#!/home/ew/bin/ruby
+#!/this/will/be/overwritten/or/wrapped/anyways/do/not/worry/ruby
+# -*- encoding: binary -*-
 require 'unicorn/launcher'
 require 'optparse'
 require 'fileutils'
@@ -11,7 +12,6 @@ options = { :listeners => listeners }
 host, port = Unicorn::Const::DEFAULT_HOST, Unicorn::Const::DEFAULT_PORT
 set_listener = false
 ENV['RAILS_ENV'] ||= "development"
-map_path = ENV['RAILS_RELATIVE_URL_ROOT']
 
 opts = OptionParser.new("", 24, '  ') do |opts|
   opts.banner = "Usage: #{cmd} " \
@@ -57,8 +57,8 @@ opts = OptionParser.new("", 24, '  ') do |opts|
     set_listener = true
   end
 
-  opts.on("-E", "--env ENVIRONMENT",
-          "use ENVIRONMENT for defaults (default: development)") do |e|
+  opts.on("-E", "--env RAILS_ENV",
+          "use RAILS_ENV for defaults (default: development)") do |e|
     ENV['RAILS_ENV'] = e
   end
 
@@ -75,12 +75,18 @@ opts = OptionParser.new("", 24, '  ') do |opts|
   end
 
   opts.on("-c", "--config-file FILE", "Unicorn-specific config file") do |f|
-    options[:config_file] = File.expand_path(f)
+    options[:config_file] = f
   end
 
-  opts.on("-P", "--path PATH", "Runs Rails app mounted at a specific path.",
-          "(default: /") do |v|
-    ENV['RAILS_RELATIVE_URL_ROOT'] = map_path = v
+  opts.on("-P PATH", "DEPRECATED") do |v|
+    warn %q{Use of -P is ambiguous and discouraged}
+    warn %q{Use --path or RAILS_RELATIVE_URL_ROOT instead}
+    ENV['RAILS_RELATIVE_URL_ROOT'] = v
+  end
+
+  opts.on("--path PATH", "Runs Rails app mounted at a specific path.",
+          "(default: /)") do |v|
+    ENV['RAILS_RELATIVE_URL_ROOT'] = v
   end
 
   # I'm avoiding Unicorn-specific config options on the command-line.
@@ -91,7 +97,7 @@ opts = OptionParser.new("", 24, '  ') do |opts|
   opts.separator "Common options:"
 
   opts.on_tail("-h", "--help", "Show this message") do
-    puts opts
+    puts opts.to_s.gsub(/^.*DEPRECATED.*$/s, '')
     exit
   end
 
@@ -140,7 +146,6 @@ app = lambda do ||
     end
 
     if old_rails
-      require 'rack'
       require 'unicorn/app/old_rails'
       Unicorn::App::OldRails.new
     else
@@ -148,14 +153,15 @@ app = lambda do ||
     end
   when /\.ru$/
     raw = File.open(config, "rb") { |fp| fp.sysread(fp.stat.size) }
+    raw.sub!(/^__END__\n.*/, '')
     eval("Rack::Builder.new {(#{raw}\n)}.to_app", nil, config)
   else
     require config
     Object.const_get(File.basename(config, '.rb').capitalize)
   end
 
-  map_path ||= '/'
   Rack::Builder.new do
+    map_path = ENV['RAILS_RELATIVE_URL_ROOT'] || '/'
     if inner_app.class.to_s == "Unicorn::App::OldRails"
       if map_path != '/'
         # patches + tests welcome, but I really cbf to deal with this
@@ -165,7 +171,6 @@ app = lambda do ||
       $stderr.puts "LogTailer not available for Rails < 2.3" unless daemonize
       $stderr.puts "Debugger not available" if $DEBUG
       map(map_path) do
-        require 'unicorn/app/old_rails/static'
         use Unicorn::App::OldRails::Static
         run inner_app
       end
@@ -197,6 +202,6 @@ end
 
 if daemonize
   options[:pid] = rails_pid
-  Unicorn::Launcher.daemonize!
+  Unicorn::Launcher.daemonize!(options)
 end
 Unicorn.run(app, options)
diff --git a/examples/echo.ru b/examples/echo.ru
new file mode 100644
index 0000000..14908c5
--- /dev/null
+++ b/examples/echo.ru
@@ -0,0 +1,27 @@
+#\-E none
+#
+# Example application that echoes read data back to the HTTP client.
+# This emulates the old echo protocol people used to run.
+#
+# An example of using this in a client would be to run:
+#   curl --no-buffer -T- http://host:port/
+#
+# Then type random stuff in your terminal to watch it get echoed back!
+
+class EchoBody < Struct.new(:input)
+
+  def each(&block)
+    while buf = input.read(4096)
+      yield buf
+    end
+    self
+  end
+
+end
+
+use Rack::Chunked
+run lambda { |env|
+  /\A100-continue\z/i =~ env['HTTP_EXPECT'] and return [100, {}, []]
+  [ 200, { 'Content-Type' => 'application/octet-stream' },
+    EchoBody.new(env['rack.input']) ]
+}
diff --git a/examples/git.ru b/examples/git.ru
new file mode 100644
index 0000000..59a31c9
--- /dev/null
+++ b/examples/git.ru
@@ -0,0 +1,13 @@
+#\-E none
+
+# See http://thread.gmane.org/gmane.comp.web.curl.general/10473/raw on
+# how to set up git for this.  A better version of the above patch was
+# accepted and committed on June 15, 2009, so you can pull the latest
+# curl CVS snapshot to try this out.
+require 'unicorn/app/inetd'
+
+use Rack::Lint
+use Rack::Chunked # important!
+run Unicorn::App::Inetd.new(
+ *%w(git daemon --verbose --inetd --export-all --base-path=/home/ew/unicorn)
+)
diff --git a/examples/init.sh b/examples/init.sh
index 866a644..35ec896 100644
--- a/examples/init.sh
+++ b/examples/init.sh
@@ -1,5 +1,4 @@
 #!/bin/sh
-set -u
 set -e
 # Example init script, this can be used with nginx, too,
 # since nginx and unicorn accept the same signals
diff --git a/examples/nginx.conf b/examples/nginx.conf
new file mode 100644
index 0000000..d42ade8
--- /dev/null
+++ b/examples/nginx.conf
@@ -0,0 +1,139 @@
+# This example contains the bare minimum to get nginx going with
+# Unicorn or Rainbows! servers.  Generally these configuration settings
+# are applicable to other HTTP application servers (and not just Ruby
+# ones), so if you have one working well for proxying another app
+# server, feel free to continue using it.
+#
+# The only setting we feel strongly about is the fail_timeout=0
+# directive in the "upstream" block.  max_fails=0 also has the same
+# effect as fail_timeout=0 for current versions of nginx and may be
+# used in its place.
+#
+# Users are strongly encouraged to refer to nginx documentation for more
+# details and search for other example configs.
+
+# you generally only need one nginx worker unless you're serving
+# large amounts of static files which require blocking disk reads
+worker_processes 1;
+
+# # drop privileges; root is needed on most systems for binding to port 80
+# # (or anything < 1024).  Capability-based security may be available for
+# # your system and worth checking out so you won't need to be root to
+# # start nginx to bind on 80
+user nobody nogroup; # for systems with a "nogroup"
+# user nobody nobody; # for systems with "nobody" as a group instead
+
+# Feel free to change all paths to suit your needs here, of course
+pid /tmp/nginx.pid;
+error_log /tmp/nginx.error.log;
+
+events {
+  worker_connections 1024; # increase if you have lots of clients
+  accept_mutex off; # "on" if nginx worker_processes > 1
+  # use epoll; # enable for Linux 2.6+
+  # use kqueue; # enable for FreeBSD, OSX
+}
+
+http {
+  # nginx will find this file in the config directory set at nginx build time
+  include mime.types;
+
+  # fallback in case we can't determine a type
+  default_type application/octet-stream;
+
+  # click tracking!
+  access_log /tmp/nginx.access.log combined;
+
+  # you generally want to serve static files with nginx since neither
+  # Unicorn nor Rainbows! is optimized for it at the moment
+  sendfile on;
+
+  tcp_nopush on; # off may be better for *some* Comet/long-poll stuff
+  tcp_nodelay off; # on may be better for some Comet/long-poll stuff
+
+  # we haven't checked to see if Rack::Deflate on the app server is
+  # faster or not than doing compression via nginx.  It's easier
+  # to configure it all in one place here for static files and also
+  # to disable gzip for clients who don't get gzip/deflate right.
+  # There are other gzip settings that may be needed to deal with
+  # bad clients out there, see http://wiki.nginx.org/NginxHttpGzipModule
+  gzip on;
+  gzip_http_version 1.0;
+  gzip_proxied any;
+  gzip_min_length 500;
+  gzip_disable "MSIE [1-6]\.";
+  gzip_types text/plain text/html text/xml text/css
+             text/comma-separated-values
+             text/javascript application/x-javascript
+             application/atom+xml;
+
+  # this can be any application server, not just Unicorn/Rainbows!
+  upstream app_server {
+    # fail_timeout=0 means we always retry an upstream even if it failed
+    # to return a good HTTP response (in case the Unicorn master nukes a
+    # single worker for timing out).
+
+    # for UNIX domain socket setups:
+    server unix:/tmp/.sock fail_timeout=0;
+
+    # for TCP setups, point these to your backend servers
+    # server 192.168.0.7:8080 fail_timeout=0;
+    # server 192.168.0.8:8080 fail_timeout=0;
+    # server 192.168.0.9:8080 fail_timeout=0;
+  }
+
+  server {
+    # listen 80 default deferred; # for Linux
+    # listen 80 default accept_filter=httpready; # for FreeBSD
+    listen 80 default;
+
+    client_max_body_size 4G;
+    server_name _;
+
+    # ~2 seconds is often enough for most folks to parse HTML/CSS and
+    # retrieve needed images/icons/frames; connections are cheap in
+    # nginx so increasing this is generally safe...
+    keepalive_timeout 5;
+
+    # path for static files
+    root /path/to/app/current/public;
+
+    location / {
+      # an HTTP header important enough to have its own Wikipedia entry:
+      #   http://en.wikipedia.org/wiki/X-Forwarded-For
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+
+      # enable this if and only if you use HTTPS, this helps Rack
+      # set the proper protocol for doing redirects:
+      # proxy_set_header X-Forwarded-Proto https;
+
+      # pass the Host: header from the client right along so redirects
+      # can be set properly within the Rack application
+      proxy_set_header Host $http_host;
+
+      # we don't want nginx trying to do something clever with
+      # redirects; we set the Host: header above already.
+      proxy_redirect off;
+
+      # set "proxy_buffering off" *only* for Rainbows! when doing
+      # Comet/long-poll stuff.  It's also safe to set if you're
+      # only serving fast clients with Unicorn + nginx.
+      # Otherwise you _want_ nginx to buffer responses to slow
+      # clients, really.
+      # proxy_buffering off;
+
+      # Try to serve static files from nginx, no point in making an
+      # *application* server like Unicorn/Rainbows! serve static files.
+      if (!-f $request_filename) {
+        proxy_pass http://app_server;
+        break;
+      }
+    }
+
+    # Rails error pages
+    error_page 500 502 503 504 /500.html;
+    location = /500.html {
+      root /path/to/app/current/public;
+    }
+  }
+}
diff --git a/examples/unicorn.conf.rb b/examples/unicorn.conf.rb
new file mode 100644
index 0000000..e209894
--- /dev/null
+++ b/examples/unicorn.conf.rb
@@ -0,0 +1,78 @@
+# Sample configuration file for Unicorn (not Rack)
+#
+# See http://unicorn.bogomips.org/Unicorn/Configurator.html for complete
+# documentation.
+
+# Use at least one worker per core if you're on a dedicated server;
+# more will usually help for _short_ waits on databases/caches.
+worker_processes 4
+
+# Help ensure your application will always spawn in the symlinked
+# "current" directory that Capistrano sets up.
+working_directory "/path/to/app/current" # available in 0.94.0+
+
+# listen on both a Unix domain socket and a TCP port,
+# we use a shorter backlog for quicker failover when busy
+listen "/tmp/.sock", :backlog => 64
+listen 8080, :tcp_nopush => true
+
+# nuke workers after 30 seconds instead of 60 seconds (the default)
+timeout 30
+
+# feel free to point this anywhere accessible on the filesystem
+pid "/path/to/app/shared/pids/unicorn.pid"
+
+# some applications/frameworks log to stderr or stdout, so prevent
+# them from going to /dev/null when daemonized here:
+stderr_path "/path/to/app/shared/log/unicorn.stderr.log"
+stdout_path "/path/to/app/shared/log/unicorn.stdout.log"
+
+# combine REE with "preload_app true" for memory savings
+# http://rubyenterpriseedition.com/faq.html#adapt_apps_for_cow
+preload_app true
+GC.respond_to?(:copy_on_write_friendly=) and
+  GC.copy_on_write_friendly = true
+
+before_fork do |server, worker|
+  # the following is highly recommended for Rails + "preload_app true"
+  # as there's no need for the master process to hold a connection
+  defined?(ActiveRecord::Base) and
+    ActiveRecord::Base.connection.disconnect!
+
+  # The following is only recommended for memory/DB-constrained
+  # installations.  It is not needed if your system can house
+  # twice as many worker_processes as you have configured.
+  #
+  # # This allows a new master process to incrementally
+  # # phase out the old master process with SIGTTOU to avoid a
+  # # thundering herd (especially in the "preload_app false" case)
+  # # when doing a transparent upgrade.  The last worker spawned
+  # # will then kill off the old master process with a SIGQUIT.
+  # old_pid = "#{server.config[:pid]}.oldbin"
+  # if old_pid != server.pid
+  #   begin
+  #     sig = (worker.nr + 1) >= server.worker_processes ? :QUIT : :TTOU
+  #     Process.kill(sig, File.read(old_pid).to_i)
+  #   rescue Errno::ENOENT, Errno::ESRCH
+  #   end
+  # end
+  #
+  # # *optionally* throttle the master from forking too quickly by sleeping
+  # sleep 1
+end
+
+after_fork do |server, worker|
+  # per-process listener ports for debugging/admin/migrations
+  # addr = "127.0.0.1:#{9293 + worker.nr}"
+  # server.listen(addr, :tries => -1, :delay => 5, :tcp_nopush => true)
+
+  # the following is *required* for Rails + "preload_app true",
+  defined?(ActiveRecord::Base) and
+    ActiveRecord::Base.establish_connection
+
+  # if preload_app is true, then you may also want to check and
+  # restart any other shared sockets/descriptors such as Memcached
+  # and Redis.  TokyoCabinet file handles are safe to reuse
+  # between any number of forked children (assuming your kernel
+  # correctly implements pread()/pwrite() system calls)
+end
diff --git a/ext/unicorn/http11/ext_help.h b/ext/unicorn/http11/ext_help.h
deleted file mode 100644
index 17f7b01..0000000
--- a/ext/unicorn/http11/ext_help.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef ext_help_h
-#define ext_help_h
-
-#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
-
-#ifdef DEBUG
-#define TRACE()  fprintf(stderr, "> %s:%d:%s\n", __FILE__, __LINE__, __FUNCTION__)
-#else
-#define TRACE()
-#endif
-
-#endif
diff --git a/ext/unicorn/http11/extconf.rb b/ext/unicorn/http11/extconf.rb
deleted file mode 100644
index 1b16dff..0000000
--- a/ext/unicorn/http11/extconf.rb
+++ /dev/null
@@ -1,5 +0,0 @@
-require 'mkmf'
-
-dir_config("unicorn/http11")
-have_library("c", "main")
-create_makefile("unicorn/http11")
diff --git a/ext/unicorn/http11/http11.c b/ext/unicorn/http11/http11.c
deleted file mode 100644
index cd7a8f7..0000000
--- a/ext/unicorn/http11/http11.c
+++ /dev/null
@@ -1,450 +0,0 @@
-/**
- * Copyright (c) 2009 Eric Wong (all bugs are Eric's fault)
- * Copyright (c) 2005 Zed A. Shaw
- * You can redistribute it and/or modify it under the same terms as Ruby.
- */
-#include "ruby.h"
-#include "ext_help.h"
-#include <assert.h>
-#include <string.h>
-#include "http11_parser.h"
-
-static http_parser *data_get(VALUE self)
-{
-  http_parser *http;
-
-  Data_Get_Struct(self, http_parser, http);
-  if (!http)
-    rb_raise(rb_eArgError, "NULL found for http when shouldn't be.");
-  return http;
-}
-
-#ifndef RSTRING_PTR
-#define RSTRING_PTR(s) (RSTRING(s)->ptr)
-#endif
-#ifndef RSTRING_LEN
-#define RSTRING_LEN(s) (RSTRING(s)->len)
-#endif
-
-static VALUE mUnicorn;
-static VALUE cHttpParser;
-static VALUE eHttpParserError;
-static VALUE sym_http_body;
-
-#define HTTP_PREFIX "HTTP_"
-#define HTTP_PREFIX_LEN (sizeof(HTTP_PREFIX) - 1)
-
-static VALUE global_rack_url_scheme;
-static VALUE global_request_method;
-static VALUE global_request_uri;
-static VALUE global_fragment;
-static VALUE global_query_string;
-static VALUE global_http_version;
-static VALUE global_request_path;
-static VALUE global_path_info;
-static VALUE global_server_name;
-static VALUE global_server_port;
-static VALUE global_server_protocol;
-static VALUE global_server_protocol_value;
-static VALUE global_http_host;
-static VALUE global_http_x_forwarded_proto;
-static VALUE global_port_80;
-static VALUE global_port_443;
-static VALUE global_localhost;
-static VALUE global_http;
-
-/** Defines common length and error messages for input length validation. */
-#define DEF_MAX_LENGTH(N, length) \
-  static const size_t MAX_##N##_LENGTH = length; \
-  static const char * const MAX_##N##_LENGTH_ERR = \
-    "HTTP element " # N  " is longer than the " # length " allowed length."
-
-/**
- * Validates the max length of given input and throws an HttpParserError
- * exception if over.
- */
-#define VALIDATE_MAX_LENGTH(len, N) do { \
-  if (len > MAX_##N##_LENGTH) \
-    rb_raise(eHttpParserError, MAX_##N##_LENGTH_ERR); \
-} while (0)
-
-/** Defines global strings in the init method. */
-#define DEF_GLOBAL(N, val) do { \
-  global_##N = rb_obj_freeze(rb_str_new(val, sizeof(val) - 1)); \
-  rb_global_variable(&global_##N); \
-} while (0)
-
-/* Defines the maximum allowed lengths for various input elements.*/
-DEF_MAX_LENGTH(FIELD_NAME, 256);
-DEF_MAX_LENGTH(FIELD_VALUE, 80 * 1024);
-DEF_MAX_LENGTH(REQUEST_URI, 1024 * 12);
-DEF_MAX_LENGTH(FRAGMENT, 1024); /* Don't know if this length is specified somewhere or not */
-DEF_MAX_LENGTH(REQUEST_PATH, 1024);
-DEF_MAX_LENGTH(QUERY_STRING, (1024 * 10));
-DEF_MAX_LENGTH(HEADER, (1024 * (80 + 32)));
-
-struct common_field {
-        const signed long len;
-        const char *name;
-        VALUE value;
-};
-
-/*
- * A list of common HTTP headers we expect to receive.
- * This allows us to avoid repeatedly creating identical string
- * objects to be used with rb_hash_aset().
- */
-static struct common_field common_http_fields[] = {
-# define f(N) { (sizeof(N) - 1), N, Qnil }
-        f("ACCEPT"),
-        f("ACCEPT_CHARSET"),
-        f("ACCEPT_ENCODING"),
-        f("ACCEPT_LANGUAGE"),
-        f("ALLOW"),
-        f("AUTHORIZATION"),
-        f("CACHE_CONTROL"),
-        f("CONNECTION"),
-        f("CONTENT_ENCODING"),
-        f("CONTENT_LENGTH"),
-        f("CONTENT_TYPE"),
-        f("COOKIE"),
-        f("DATE"),
-        f("EXPECT"),
-        f("FROM"),
-        f("HOST"),
-        f("IF_MATCH"),
-        f("IF_MODIFIED_SINCE"),
-        f("IF_NONE_MATCH"),
-        f("IF_RANGE"),
-        f("IF_UNMODIFIED_SINCE"),
-        f("KEEP_ALIVE"), /* Firefox sends this */
-        f("MAX_FORWARDS"),
-        f("PRAGMA"),
-        f("PROXY_AUTHORIZATION"),
-        f("RANGE"),
-        f("REFERER"),
-        f("TE"),
-        f("TRAILER"),
-        f("TRANSFER_ENCODING"),
-        f("UPGRADE"),
-        f("USER_AGENT"),
-        f("VIA"),
-        f("X_FORWARDED_FOR"), /* common for proxies */
-        f("X_FORWARDED_PROTO"), /* common for proxies */
-        f("X_REAL_IP"), /* common for proxies */
-        f("WARNING")
-# undef f
-};
-
-/* this function is not performance-critical */
-static void init_common_fields(void)
-{
-  int i;
-  struct common_field *cf = common_http_fields;
-  char tmp[256]; /* MAX_FIELD_NAME_LENGTH */
-  memcpy(tmp, HTTP_PREFIX, HTTP_PREFIX_LEN);
-
-  for(i = 0; i < ARRAY_SIZE(common_http_fields); cf++, i++) {
-    /* Rack doesn't like certain headers prefixed with "HTTP_" */
-    if (!strcmp("CONTENT_LENGTH", cf->name) ||
-        !strcmp("CONTENT_TYPE", cf->name)) {
-      cf->value = rb_str_new(cf->name, cf->len);
-    } else {
-      memcpy(tmp + HTTP_PREFIX_LEN, cf->name, cf->len + 1);
-      cf->value = rb_str_new(tmp, HTTP_PREFIX_LEN + cf->len);
-    }
-    cf->value = rb_obj_freeze(cf->value);
-    rb_global_variable(&cf->value);
-  }
-}
-
-static VALUE find_common_field_value(const char *field, size_t flen)
-{
-  int i;
-  struct common_field *cf = common_http_fields;
-  for(i = 0; i < ARRAY_SIZE(common_http_fields); i++, cf++) {
-    if (cf->len == flen && !memcmp(cf->name, field, flen))
-      return cf->value;
-  }
-  return Qnil;
-}
-
-static void http_field(void *data, const char *field,
-                       size_t flen, const char *value, size_t vlen)
-{
-  VALUE req = (VALUE)data;
-  VALUE f = Qnil;
-
-  VALIDATE_MAX_LENGTH(flen, FIELD_NAME);
-  VALIDATE_MAX_LENGTH(vlen, FIELD_VALUE);
-
-  f = find_common_field_value(field, flen);
-
-  if (f == Qnil) {
-    /*
-     * We got a strange header that we don't have a memoized value for.
-     * Fallback to creating a new string to use as a hash key.
-     *
-     * using rb_str_new(NULL, len) here is faster than rb_str_buf_new(len)
-     * in my testing, because: there's no minimum allocation length (and
-     * no check for it, either), RSTRING_LEN(f) does not need to be
-     * written twice, and and RSTRING_PTR(f) will already be
-     * null-terminated for us.
-     */
-    f = rb_str_new(NULL, HTTP_PREFIX_LEN + flen);
-    memcpy(RSTRING_PTR(f), HTTP_PREFIX, HTTP_PREFIX_LEN);
-    memcpy(RSTRING_PTR(f) + HTTP_PREFIX_LEN, field, flen);
-    assert(*(RSTRING_PTR(f) + RSTRING_LEN(f)) == '\0'); /* paranoia */
-    /* fprintf(stderr, "UNKNOWN HEADER <%s>\n", RSTRING_PTR(f)); */
-  } else if (f == global_http_host && rb_hash_aref(req, f) != Qnil) {
-    return;
-  }
-
-  rb_hash_aset(req, f, rb_str_new(value, vlen));
-}
-
-static void request_method(void *data, const char *at, size_t length)
-{
-  VALUE req = (VALUE)data;
-  VALUE val = Qnil;
-
-  val = rb_str_new(at, length);
-  rb_hash_aset(req, global_request_method, val);
-}
-
-static void scheme(void *data, const char *at, size_t length)
-{
-  rb_hash_aset((VALUE)data, global_rack_url_scheme, rb_str_new(at, length));
-}
-
-static void host(void *data, const char *at, size_t length)
-{
-  rb_hash_aset((VALUE)data, global_http_host, rb_str_new(at, length));
-}
-
-static void request_uri(void *data, const char *at, size_t length)
-{
-  VALUE req = (VALUE)data;
-  VALUE val = Qnil;
-
-  VALIDATE_MAX_LENGTH(length, REQUEST_URI);
-
-  val = rb_str_new(at, length);
-  rb_hash_aset(req, global_request_uri, val);
-
-  /* "OPTIONS * HTTP/1.1\r\n" is a valid request */
-  if (length == 1 && *at == '*') {
-    val = rb_str_new(NULL, 0);
-    rb_hash_aset(req, global_request_path, val);
-    rb_hash_aset(req, global_path_info, val);
-  }
-}
-
-static void fragment(void *data, const char *at, size_t length)
-{
-  VALUE req = (VALUE)data;
-  VALUE val = Qnil;
-
-  VALIDATE_MAX_LENGTH(length, FRAGMENT);
-
-  val = rb_str_new(at, length);
-  rb_hash_aset(req, global_fragment, val);
-}
-
-static void request_path(void *data, const char *at, size_t length)
-{
-  VALUE req = (VALUE)data;
-  VALUE val = Qnil;
-
-  VALIDATE_MAX_LENGTH(length, REQUEST_PATH);
-
-  val = rb_str_new(at, length);
-  rb_hash_aset(req, global_request_path, val);
-
-  /* rack says PATH_INFO must start with "/" or be empty */
-  if (!(length == 1 && *at == '*'))
-    rb_hash_aset(req, global_path_info, val);
-}
-
-static void query_string(void *data, const char *at, size_t length)
-{
-  VALUE req = (VALUE)data;
-  VALUE val = Qnil;
-
-  VALIDATE_MAX_LENGTH(length, QUERY_STRING);
-
-  val = rb_str_new(at, length);
-  rb_hash_aset(req, global_query_string, val);
-}
-
-static void http_version(void *data, const char *at, size_t length)
-{
-  VALUE req = (VALUE)data;
-  VALUE val = rb_str_new(at, length);
-  rb_hash_aset(req, global_http_version, val);
-}
-
-/** Finalizes the request header to have a bunch of stuff that's needed. */
-static void header_done(void *data, const char *at, size_t length)
-{
-  VALUE req = (VALUE)data;
-  VALUE server_name = global_localhost;
-  VALUE server_port = global_port_80;
-  VALUE temp;
-
-  /* rack requires QUERY_STRING */
-  if (rb_hash_aref(req, global_query_string) == Qnil)
-    rb_hash_aset(req, global_query_string, rb_str_new(NULL, 0));
-
-  /* set rack.url_scheme to "https" or "http", no others are allowed by Rack */
-  if ((temp = rb_hash_aref(req, global_rack_url_scheme)) == Qnil) {
-    if ((temp = rb_hash_aref(req, global_http_x_forwarded_proto)) != Qnil &&
-        RSTRING_LEN(temp) == 5 &&
-        !memcmp("https", RSTRING_PTR(temp), 5))
-      server_port = global_port_443;
-    else
-      temp = global_http;
-    rb_hash_aset(req, global_rack_url_scheme, temp);
-  } else if (RSTRING_LEN(temp) == 5 && !memcmp("https", RSTRING_PTR(temp), 5)) {
-    server_port = global_port_443;
-  }
-
-  /* parse and set the SERVER_NAME and SERVER_PORT variables */
-  if ((temp = rb_hash_aref(req, global_http_host)) != Qnil) {
-    char *colon = memchr(RSTRING_PTR(temp), ':', RSTRING_LEN(temp));
-    if (colon) {
-      long port_start = colon - RSTRING_PTR(temp) + 1;
-
-      server_name = rb_str_substr(temp, 0, colon - RSTRING_PTR(temp));
-      if ((RSTRING_LEN(temp) - port_start) > 0)
-        server_port = rb_str_substr(temp, port_start, RSTRING_LEN(temp));
-    } else {
-      server_name = temp;
-    }
-  }
-  rb_hash_aset(req, global_server_name, server_name);
-  rb_hash_aset(req, global_server_port, server_port);
-
-  /* grab the initial body and stuff it into the hash */
-  rb_hash_aset(req, sym_http_body, rb_str_new(at, length));
-  rb_hash_aset(req, global_server_protocol, global_server_protocol_value);
-}
-
-static void HttpParser_free(void *data) {
-  TRACE();
-
-  if(data) {
-    free(data);
-  }
-}
-
-
-static VALUE HttpParser_alloc(VALUE klass)
-{
-  VALUE obj;
-  http_parser *hp = ALLOC_N(http_parser, 1);
-  TRACE();
-  http_parser_init(hp);
-
-  obj = Data_Wrap_Struct(klass, NULL, HttpParser_free, hp);
-
-  return obj;
-}
-
-
-/**
- * call-seq:
- *    parser.new -> parser
- *
- * Creates a new parser.
- */
-static VALUE HttpParser_init(VALUE self)
-{
-  http_parser_init(data_get(self));
-
-  return self;
-}
-
-
-/**
- * call-seq:
- *    parser.reset -> nil
- *
- * Resets the parser to it's initial state so that you can reuse it
- * rather than making new ones.
- */
-static VALUE HttpParser_reset(VALUE self)
-{
-  http_parser_init(data_get(self));
-
-  return Qnil;
-}
-
-
-/**
- * call-seq:
- *    parser.execute(req_hash, data) -> true/false
- *
- * Takes a Hash and a String of data, parses the String of data filling
- * in the Hash returning a boolean to indicate whether or not parsing
- * is finished.
- *
- * This function now throws an exception when there is a parsing error.
- * This makes the logic for working with the parser much easier.  You
- * will need to wrap the parser with an exception handling block.
- */
-
-static VALUE HttpParser_execute(VALUE self, VALUE req_hash, VALUE data)
-{
-  http_parser *http = data_get(self);
-  char *dptr = RSTRING_PTR(data);
-  long dlen = RSTRING_LEN(data);
-
-  if (http->nread < dlen) {
-    http->data = (void *)req_hash;
-    http_parser_execute(http, dptr, dlen);
-
-    VALIDATE_MAX_LENGTH(http->nread, HEADER);
-
-    if (!http_parser_has_error(http))
-      return http_parser_is_finished(http) ? Qtrue : Qfalse;
-
-    rb_raise(eHttpParserError, "Invalid HTTP format, parsing fails.");
-  }
-  rb_raise(eHttpParserError, "Requested start is after data buffer end.");
-}
-
-void Init_http11(void)
-{
-  mUnicorn = rb_define_module("Unicorn");
-
-  DEF_GLOBAL(rack_url_scheme, "rack.url_scheme");
-  DEF_GLOBAL(request_method, "REQUEST_METHOD");
-  DEF_GLOBAL(request_uri, "REQUEST_URI");
-  DEF_GLOBAL(fragment, "FRAGMENT");
-  DEF_GLOBAL(query_string, "QUERY_STRING");
-  DEF_GLOBAL(http_version, "HTTP_VERSION");
-  DEF_GLOBAL(request_path, "REQUEST_PATH");
-  DEF_GLOBAL(path_info, "PATH_INFO");
-  DEF_GLOBAL(server_name, "SERVER_NAME");
-  DEF_GLOBAL(server_port, "SERVER_PORT");
-  DEF_GLOBAL(server_protocol, "SERVER_PROTOCOL");
-  DEF_GLOBAL(server_protocol_value, "HTTP/1.1");
-  DEF_GLOBAL(http_x_forwarded_proto, "HTTP_X_FORWARDED_PROTO");
-  DEF_GLOBAL(port_80, "80");
-  DEF_GLOBAL(port_443, "443");
-  DEF_GLOBAL(localhost, "localhost");
-  DEF_GLOBAL(http, "http");
-
-  eHttpParserError = rb_define_class_under(mUnicorn, "HttpParserError", rb_eIOError);
-
-  cHttpParser = rb_define_class_under(mUnicorn, "HttpParser", rb_cObject);
-  rb_define_alloc_func(cHttpParser, HttpParser_alloc);
-  rb_define_method(cHttpParser, "initialize", HttpParser_init,0);
-  rb_define_method(cHttpParser, "reset", HttpParser_reset,0);
-  rb_define_method(cHttpParser, "execute", HttpParser_execute,2);
-  sym_http_body = ID2SYM(rb_intern("http_body"));
-  init_common_fields();
-  global_http_host = find_common_field_value("HOST", 4);
-  assert(global_http_host != Qnil);
-}
diff --git a/ext/unicorn/http11/http11_parser.h b/ext/unicorn/http11/http11_parser.h
deleted file mode 100644
index 8d95c59..0000000
--- a/ext/unicorn/http11/http11_parser.h
+++ /dev/null
@@ -1,1289 +0,0 @@
-
-#line 1 "http11_parser.rl"
-/**
- * Copyright (c) 2005 Zed A. Shaw
- * You can redistribute it and/or modify it under the same terms as Ruby.
- */
-#ifndef http11_parser_h
-#define http11_parser_h
-
-#include <sys/types.h>
-
-static void http_field(void *data, const char *field,
-                       size_t flen, const char *value, size_t vlen);
-static void request_method(void *data, const char *at, size_t length);
-static void scheme(void *data, const char *at, size_t length);
-static void host(void *data, const char *at, size_t length);
-static void request_uri(void *data, const char *at, size_t length);
-static void fragment(void *data, const char *at, size_t length);
-static void request_path(void *data, const char *at, size_t length);
-static void query_string(void *data, const char *at, size_t length);
-static void http_version(void *data, const char *at, size_t length);
-static void header_done(void *data, const char *at, size_t length);
-
-typedef struct http_parser {
-  int cs;
-  size_t body_start;
-  size_t nread;
-  size_t mark;
-  size_t field_start;
-  size_t field_len;
-  size_t query_start;
-
-  void *data;
-} http_parser;
-
-static int http_parser_has_error(http_parser *parser);
-static int http_parser_is_finished(http_parser *parser);
-
-/*
- * capitalizes all lower-case ASCII characters,
- * converts dashes to underscores.
- */
-static void snake_upcase_char(char *c)
-{
-  if (*c >= 'a' && *c <= 'z')
-    *c &= ~0x20;
-  else if (*c == '-')
-    *c = '_';
-}
-
-static void downcase_char(char *c)
-{
-  if (*c >= 'A' && *c <= 'Z')
-    *c |= 0x20;
-}
-
-#define LEN(AT, FPC) (FPC - buffer - parser->AT)
-#define MARK(M,FPC) (parser->M = (FPC) - buffer)
-#define PTR_TO(F) (buffer + parser->F)
-
-/** Machine **/
-
-
-#line 109 "http11_parser.rl"
-
-
-/** Data **/
-
-#line 70 "http11_parser.h"
-static const int http_parser_start = 1;
-static const int http_parser_first_final = 63;
-static const int http_parser_error = 0;
-
-static const int http_parser_en_main = 1;
-
-
-#line 113 "http11_parser.rl"
-
-static void http_parser_init(http_parser *parser) {
-  int cs = 0;
-  memset(parser, 0, sizeof(*parser));
-
-#line 84 "http11_parser.h"
-        {
-        cs = http_parser_start;
-        }
-
-#line 118 "http11_parser.rl"
-  parser->cs = cs;
-}
-
-/** exec **/
-static void http_parser_execute(
-  http_parser *parser, const char *buffer, size_t len)
-{
-  const char *p, *pe;
-  int cs = parser->cs;
-  size_t off = parser->nread;
-
-  assert(off <= len && "offset past end of buffer");
-
-  p = buffer+off;
-  pe = buffer+len;
-
-  assert(*pe == '\0' && "pointer does not end on NUL");
-  assert(pe - p == len - off && "pointers aren't same distance");
-
-
-#line 110 "http11_parser.h"
-        {
-        if ( p == pe )
-                goto _test_eof;
-        switch ( cs )
-        {
-case 1:
-        switch( (*p) ) {
-                case 36: goto tr0;
-                case 95: goto tr0;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto tr0;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto tr0;
-        } else
-                goto tr0;
-        goto st0;
-st0:
-cs = 0;
-        goto _out;
-tr0:
-#line 64 "http11_parser.rl"
-        {MARK(mark, p); }
-        goto st2;
-st2:
-        if ( ++p == pe )
-                goto _test_eof2;
-case 2:
-#line 141 "http11_parser.h"
-        switch( (*p) ) {
-                case 32: goto tr2;
-                case 36: goto st44;
-                case 95: goto st44;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto st44;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto st44;
-        } else
-                goto st44;
-        goto st0;
-tr2:
-#line 77 "http11_parser.rl"
-        {
-    request_method(parser->data, PTR_TO(mark), LEN(mark, p));
-  }
-        goto st3;
-st3:
-        if ( ++p == pe )
-                goto _test_eof3;
-case 3:
-#line 166 "http11_parser.h"
-        switch( (*p) ) {
-                case 42: goto tr4;
-                case 47: goto tr5;
-                case 72: goto tr6;
-                case 104: goto tr6;
-        }
-        goto st0;
-tr4:
-#line 64 "http11_parser.rl"
-        {MARK(mark, p); }
-        goto st4;
-st4:
-        if ( ++p == pe )
-                goto _test_eof4;
-case 4:
-#line 182 "http11_parser.h"
-        switch( (*p) ) {
-                case 32: goto tr7;
-                case 35: goto tr8;
-        }
-        goto st0;
-tr7:
-#line 82 "http11_parser.rl"
-        {
-    request_uri(parser->data, PTR_TO(mark), LEN(mark, p));
-  }
-        goto st5;
-tr30:
-#line 64 "http11_parser.rl"
-        {MARK(mark, p); }
-#line 85 "http11_parser.rl"
-        {
-    fragment(parser->data, PTR_TO(mark), LEN(mark, p));
-  }
-        goto st5;
-tr33:
-#line 85 "http11_parser.rl"
-        {
-    fragment(parser->data, PTR_TO(mark), LEN(mark, p));
-  }
-        goto st5;
-tr37:
-#line 98 "http11_parser.rl"
-        {
-    request_path(parser->data, PTR_TO(mark), LEN(mark,p));
-  }
-#line 82 "http11_parser.rl"
-        {
-    request_uri(parser->data, PTR_TO(mark), LEN(mark, p));
-  }
-        goto st5;
-tr48:
-#line 89 "http11_parser.rl"
-        {MARK(query_start, p); }
-#line 90 "http11_parser.rl"
-        {
-    query_string(parser->data, PTR_TO(query_start), LEN(query_start, p));
-  }
-#line 82 "http11_parser.rl"
-        {
-    request_uri(parser->data, PTR_TO(mark), LEN(mark, p));
-  }
-        goto st5;
-tr52:
-#line 90 "http11_parser.rl"
-        {
-    query_string(parser->data, PTR_TO(query_start), LEN(query_start, p));
-  }
-#line 82 "http11_parser.rl"
-        {
-    request_uri(parser->data, PTR_TO(mark), LEN(mark, p));
-  }
-        goto st5;
-st5:
-        if ( ++p == pe )
-                goto _test_eof5;
-case 5:
-#line 244 "http11_parser.h"
-        if ( (*p) == 72 )
-                goto tr9;
-        goto st0;
-tr9:
-#line 64 "http11_parser.rl"
-        {MARK(mark, p); }
-        goto st6;
-st6:
-        if ( ++p == pe )
-                goto _test_eof6;
-case 6:
-#line 256 "http11_parser.h"
-        if ( (*p) == 84 )
-                goto st7;
-        goto st0;
-st7:
-        if ( ++p == pe )
-                goto _test_eof7;
-case 7:
-        if ( (*p) == 84 )
-                goto st8;
-        goto st0;
-st8:
-        if ( ++p == pe )
-                goto _test_eof8;
-case 8:
-        if ( (*p) == 80 )
-                goto st9;
-        goto st0;
-st9:
-        if ( ++p == pe )
-                goto _test_eof9;
-case 9:
-        if ( (*p) == 47 )
-                goto st10;
-        goto st0;
-st10:
-        if ( ++p == pe )
-                goto _test_eof10;
-case 10:
-        if ( 48 <= (*p) && (*p) <= 57 )
-                goto st11;
-        goto st0;
-st11:
-        if ( ++p == pe )
-                goto _test_eof11;
-case 11:
-        if ( (*p) == 46 )
-                goto st12;
-        if ( 48 <= (*p) && (*p) <= 57 )
-                goto st11;
-        goto st0;
-st12:
-        if ( ++p == pe )
-                goto _test_eof12;
-case 12:
-        if ( 48 <= (*p) && (*p) <= 57 )
-                goto st13;
-        goto st0;
-st13:
-        if ( ++p == pe )
-                goto _test_eof13;
-case 13:
-        if ( (*p) == 13 )
-                goto tr17;
-        if ( 48 <= (*p) && (*p) <= 57 )
-                goto st13;
-        goto st0;
-tr17:
-#line 94 "http11_parser.rl"
-        {
-    http_version(parser->data, PTR_TO(mark), LEN(mark, p));
-  }
-        goto st14;
-tr25:
-#line 73 "http11_parser.rl"
-        { MARK(mark, p); }
-#line 74 "http11_parser.rl"
-        {
-    http_field(parser->data, PTR_TO(field_start), parser->field_len, PTR_TO(mark), LEN(mark, p));
-  }
-        goto st14;
-tr28:
-#line 74 "http11_parser.rl"
-        {
-    http_field(parser->data, PTR_TO(field_start), parser->field_len, PTR_TO(mark), LEN(mark, p));
-  }
-        goto st14;
-st14:
-        if ( ++p == pe )
-                goto _test_eof14;
-case 14:
-#line 337 "http11_parser.h"
-        if ( (*p) == 10 )
-                goto st15;
-        goto st0;
-st15:
-        if ( ++p == pe )
-                goto _test_eof15;
-case 15:
-        switch( (*p) ) {
-                case 13: goto st16;
-                case 33: goto tr20;
-                case 124: goto tr20;
-                case 126: goto tr20;
-        }
-        if ( (*p) < 45 ) {
-                if ( (*p) > 39 ) {
-                        if ( 42 <= (*p) && (*p) <= 43 )
-                                goto tr20;
-                } else if ( (*p) >= 35 )
-                        goto tr20;
-        } else if ( (*p) > 46 ) {
-                if ( (*p) < 65 ) {
-                        if ( 48 <= (*p) && (*p) <= 57 )
-                                goto tr20;
-                } else if ( (*p) > 90 ) {
-                        if ( 94 <= (*p) && (*p) <= 122 )
-                                goto tr20;
-                } else
-                        goto tr20;
-        } else
-                goto tr20;
-        goto st0;
-st16:
-        if ( ++p == pe )
-                goto _test_eof16;
-case 16:
-        if ( (*p) == 10 )
-                goto tr21;
-        goto st0;
-tr21:
-#line 102 "http11_parser.rl"
-        {
-    parser->body_start = p - buffer + 1;
-    header_done(parser->data, p + 1, pe - p - 1);
-    {p++; cs = 63; goto _out;}
-  }
-        goto st63;
-st63:
-        if ( ++p == pe )
-                goto _test_eof63;
-case 63:
-#line 388 "http11_parser.h"
-        goto st0;
-tr20:
-#line 66 "http11_parser.rl"
-        { MARK(field_start, p); }
-#line 67 "http11_parser.rl"
-        { snake_upcase_char((char *)p); }
-        goto st17;
-tr22:
-#line 67 "http11_parser.rl"
-        { snake_upcase_char((char *)p); }
-        goto st17;
-st17:
-        if ( ++p == pe )
-                goto _test_eof17;
-case 17:
-#line 404 "http11_parser.h"
-        switch( (*p) ) {
-                case 33: goto tr22;
-                case 58: goto tr23;
-                case 124: goto tr22;
-                case 126: goto tr22;
-        }
-        if ( (*p) < 45 ) {
-                if ( (*p) > 39 ) {
-                        if ( 42 <= (*p) && (*p) <= 43 )
-                                goto tr22;
-                } else if ( (*p) >= 35 )
-                        goto tr22;
-        } else if ( (*p) > 46 ) {
-                if ( (*p) < 65 ) {
-                        if ( 48 <= (*p) && (*p) <= 57 )
-                                goto tr22;
-                } else if ( (*p) > 90 ) {
-                        if ( 94 <= (*p) && (*p) <= 122 )
-                                goto tr22;
-                } else
-                        goto tr22;
-        } else
-                goto tr22;
-        goto st0;
-tr23:
-#line 69 "http11_parser.rl"
-        {
-    parser->field_len = LEN(field_start, p);
-  }
-        goto st18;
-tr26:
-#line 73 "http11_parser.rl"
-        { MARK(mark, p); }
-        goto st18;
-st18:
-        if ( ++p == pe )
-                goto _test_eof18;
-case 18:
-#line 443 "http11_parser.h"
-        switch( (*p) ) {
-                case 13: goto tr25;
-                case 32: goto tr26;
-        }
-        goto tr24;
-tr24:
-#line 73 "http11_parser.rl"
-        { MARK(mark, p); }
-        goto st19;
-st19:
-        if ( ++p == pe )
-                goto _test_eof19;
-case 19:
-#line 457 "http11_parser.h"
-        if ( (*p) == 13 )
-                goto tr28;
-        goto st19;
-tr8:
-#line 82 "http11_parser.rl"
-        {
-    request_uri(parser->data, PTR_TO(mark), LEN(mark, p));
-  }
-        goto st20;
-tr38:
-#line 98 "http11_parser.rl"
-        {
-    request_path(parser->data, PTR_TO(mark), LEN(mark,p));
-  }
-#line 82 "http11_parser.rl"
-        {
-    request_uri(parser->data, PTR_TO(mark), LEN(mark, p));
-  }
-        goto st20;
-tr49:
-#line 89 "http11_parser.rl"
-        {MARK(query_start, p); }
-#line 90 "http11_parser.rl"
-        {
-    query_string(parser->data, PTR_TO(query_start), LEN(query_start, p));
-  }
-#line 82 "http11_parser.rl"
-        {
-    request_uri(parser->data, PTR_TO(mark), LEN(mark, p));
-  }
-        goto st20;
-tr53:
-#line 90 "http11_parser.rl"
-        {
-    query_string(parser->data, PTR_TO(query_start), LEN(query_start, p));
-  }
-#line 82 "http11_parser.rl"
-        {
-    request_uri(parser->data, PTR_TO(mark), LEN(mark, p));
-  }
-        goto st20;
-st20:
-        if ( ++p == pe )
-                goto _test_eof20;
-case 20:
-#line 503 "http11_parser.h"
-        switch( (*p) ) {
-                case 32: goto tr30;
-                case 35: goto st0;
-                case 37: goto tr31;
-                case 127: goto st0;
-        }
-        if ( 0 <= (*p) && (*p) <= 31 )
-                goto st0;
-        goto tr29;
-tr29:
-#line 64 "http11_parser.rl"
-        {MARK(mark, p); }
-        goto st21;
-st21:
-        if ( ++p == pe )
-                goto _test_eof21;
-case 21:
-#line 521 "http11_parser.h"
-        switch( (*p) ) {
-                case 32: goto tr33;
-                case 35: goto st0;
-                case 37: goto st22;
-                case 127: goto st0;
-        }
-        if ( 0 <= (*p) && (*p) <= 31 )
-                goto st0;
-        goto st21;
-tr31:
-#line 64 "http11_parser.rl"
-        {MARK(mark, p); }
-        goto st22;
-st22:
-        if ( ++p == pe )
-                goto _test_eof22;
-case 22:
-#line 539 "http11_parser.h"
-        if ( (*p) < 65 ) {
-                if ( 48 <= (*p) && (*p) <= 57 )
-                        goto st23;
-        } else if ( (*p) > 70 ) {
-                if ( 97 <= (*p) && (*p) <= 102 )
-                        goto st23;
-        } else
-                goto st23;
-        goto st0;
-st23:
-        if ( ++p == pe )
-                goto _test_eof23;
-case 23:
-        if ( (*p) < 65 ) {
-                if ( 48 <= (*p) && (*p) <= 57 )
-                        goto st21;
-        } else if ( (*p) > 70 ) {
-                if ( 97 <= (*p) && (*p) <= 102 )
-                        goto st21;
-        } else
-                goto st21;
-        goto st0;
-tr5:
-#line 64 "http11_parser.rl"
-        {MARK(mark, p); }
-        goto st24;
-tr65:
-#line 81 "http11_parser.rl"
-        { host(parser->data, PTR_TO(mark), LEN(mark, p)); }
-#line 64 "http11_parser.rl"
-        {MARK(mark, p); }
-        goto st24;
-st24:
-        if ( ++p == pe )
-                goto _test_eof24;
-case 24:
-#line 576 "http11_parser.h"
-        switch( (*p) ) {
-                case 32: goto tr37;
-                case 35: goto tr38;
-                case 37: goto st25;
-                case 59: goto tr40;
-                case 63: goto tr41;
-                case 127: goto st0;
-        }
-        if ( 0 <= (*p) && (*p) <= 31 )
-                goto st0;
-        goto st24;
-st25:
-        if ( ++p == pe )
-                goto _test_eof25;
-case 25:
-        if ( (*p) < 65 ) {
-                if ( 48 <= (*p) && (*p) <= 57 )
-                        goto st26;
-        } else if ( (*p) > 70 ) {
-                if ( 97 <= (*p) && (*p) <= 102 )
-                        goto st26;
-        } else
-                goto st26;
-        goto st0;
-st26:
-        if ( ++p == pe )
-                goto _test_eof26;
-case 26:
-        if ( (*p) < 65 ) {
-                if ( 48 <= (*p) && (*p) <= 57 )
-                        goto st24;
-        } else if ( (*p) > 70 ) {
-                if ( 97 <= (*p) && (*p) <= 102 )
-                        goto st24;
-        } else
-                goto st24;
-        goto st0;
-tr40:
-#line 98 "http11_parser.rl"
-        {
-    request_path(parser->data, PTR_TO(mark), LEN(mark,p));
-  }
-        goto st27;
-st27:
-        if ( ++p == pe )
-                goto _test_eof27;
-case 27:
-#line 624 "http11_parser.h"
-        switch( (*p) ) {
-                case 32: goto tr7;
-                case 35: goto tr8;
-                case 37: goto st28;
-                case 63: goto st30;
-                case 127: goto st0;
-        }
-        if ( 0 <= (*p) && (*p) <= 31 )
-                goto st0;
-        goto st27;
-st28:
-        if ( ++p == pe )
-                goto _test_eof28;
-case 28:
-        if ( (*p) < 65 ) {
-                if ( 48 <= (*p) && (*p) <= 57 )
-                        goto st29;
-        } else if ( (*p) > 70 ) {
-                if ( 97 <= (*p) && (*p) <= 102 )
-                        goto st29;
-        } else
-                goto st29;
-        goto st0;
-st29:
-        if ( ++p == pe )
-                goto _test_eof29;
-case 29:
-        if ( (*p) < 65 ) {
-                if ( 48 <= (*p) && (*p) <= 57 )
-                        goto st27;
-        } else if ( (*p) > 70 ) {
-                if ( 97 <= (*p) && (*p) <= 102 )
-                        goto st27;
-        } else
-                goto st27;
-        goto st0;
-tr41:
-#line 98 "http11_parser.rl"
-        {
-    request_path(parser->data, PTR_TO(mark), LEN(mark,p));
-  }
-        goto st30;
-st30:
-        if ( ++p == pe )
-                goto _test_eof30;
-case 30:
-#line 671 "http11_parser.h"
-        switch( (*p) ) {
-                case 32: goto tr48;
-                case 35: goto tr49;
-                case 37: goto tr50;
-                case 127: goto st0;
-        }
-        if ( 0 <= (*p) && (*p) <= 31 )
-                goto st0;
-        goto tr47;
-tr47:
-#line 89 "http11_parser.rl"
-        {MARK(query_start, p); }
-        goto st31;
-st31:
-        if ( ++p == pe )
-                goto _test_eof31;
-case 31:
-#line 689 "http11_parser.h"
-        switch( (*p) ) {
-                case 32: goto tr52;
-                case 35: goto tr53;
-                case 37: goto st32;
-                case 127: goto st0;
-        }
-        if ( 0 <= (*p) && (*p) <= 31 )
-                goto st0;
-        goto st31;
-tr50:
-#line 89 "http11_parser.rl"
-        {MARK(query_start, p); }
-        goto st32;
-st32:
-        if ( ++p == pe )
-                goto _test_eof32;
-case 32:
-#line 707 "http11_parser.h"
-        if ( (*p) < 65 ) {
-                if ( 48 <= (*p) && (*p) <= 57 )
-                        goto st33;
-        } else if ( (*p) > 70 ) {
-                if ( 97 <= (*p) && (*p) <= 102 )
-                        goto st33;
-        } else
-                goto st33;
-        goto st0;
-st33:
-        if ( ++p == pe )
-                goto _test_eof33;
-case 33:
-        if ( (*p) < 65 ) {
-                if ( 48 <= (*p) && (*p) <= 57 )
-                        goto st31;
-        } else if ( (*p) > 70 ) {
-                if ( 97 <= (*p) && (*p) <= 102 )
-                        goto st31;
-        } else
-                goto st31;
-        goto st0;
-tr6:
-#line 64 "http11_parser.rl"
-        {MARK(mark, p); }
-#line 68 "http11_parser.rl"
-        { downcase_char((char *)p); }
-        goto st34;
-st34:
-        if ( ++p == pe )
-                goto _test_eof34;
-case 34:
-#line 740 "http11_parser.h"
-        switch( (*p) ) {
-                case 84: goto tr56;
-                case 116: goto tr56;
-        }
-        goto st0;
-tr56:
-#line 68 "http11_parser.rl"
-        { downcase_char((char *)p); }
-        goto st35;
-st35:
-        if ( ++p == pe )
-                goto _test_eof35;
-case 35:
-#line 754 "http11_parser.h"
-        switch( (*p) ) {
-                case 84: goto tr57;
-                case 116: goto tr57;
-        }
-        goto st0;
-tr57:
-#line 68 "http11_parser.rl"
-        { downcase_char((char *)p); }
-        goto st36;
-st36:
-        if ( ++p == pe )
-                goto _test_eof36;
-case 36:
-#line 768 "http11_parser.h"
-        switch( (*p) ) {
-                case 80: goto tr58;
-                case 112: goto tr58;
-        }
-        goto st0;
-tr58:
-#line 68 "http11_parser.rl"
-        { downcase_char((char *)p); }
-        goto st37;
-st37:
-        if ( ++p == pe )
-                goto _test_eof37;
-case 37:
-#line 782 "http11_parser.h"
-        switch( (*p) ) {
-                case 58: goto tr59;
-                case 83: goto tr60;
-                case 115: goto tr60;
-        }
-        goto st0;
-tr59:
-#line 80 "http11_parser.rl"
-        { scheme(parser->data, PTR_TO(mark), LEN(mark, p)); }
-        goto st38;
-st38:
-        if ( ++p == pe )
-                goto _test_eof38;
-case 38:
-#line 797 "http11_parser.h"
-        if ( (*p) == 47 )
-                goto st39;
-        goto st0;
-st39:
-        if ( ++p == pe )
-                goto _test_eof39;
-case 39:
-        if ( (*p) == 47 )
-                goto st40;
-        goto st0;
-st40:
-        if ( ++p == pe )
-                goto _test_eof40;
-case 40:
-        if ( (*p) == 95 )
-                goto tr63;
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto tr63;
-        } else if ( (*p) > 57 ) {
-                if ( (*p) > 90 ) {
-                        if ( 97 <= (*p) && (*p) <= 122 )
-                                goto tr63;
-                } else if ( (*p) >= 65 )
-                        goto tr63;
-        } else
-                goto tr63;
-        goto st0;
-tr63:
-#line 64 "http11_parser.rl"
-        {MARK(mark, p); }
-        goto st41;
-st41:
-        if ( ++p == pe )
-                goto _test_eof41;
-case 41:
-#line 834 "http11_parser.h"
-        switch( (*p) ) {
-                case 47: goto tr65;
-                case 58: goto st42;
-                case 95: goto st41;
-        }
-        if ( (*p) < 65 ) {
-                if ( 45 <= (*p) && (*p) <= 57 )
-                        goto st41;
-        } else if ( (*p) > 90 ) {
-                if ( 97 <= (*p) && (*p) <= 122 )
-                        goto st41;
-        } else
-                goto st41;
-        goto st0;
-st42:
-        if ( ++p == pe )
-                goto _test_eof42;
-case 42:
-        if ( (*p) == 47 )
-                goto tr65;
-        if ( 48 <= (*p) && (*p) <= 57 )
-                goto st42;
-        goto st0;
-tr60:
-#line 68 "http11_parser.rl"
-        { downcase_char((char *)p); }
-        goto st43;
-st43:
-        if ( ++p == pe )
-                goto _test_eof43;
-case 43:
-#line 866 "http11_parser.h"
-        if ( (*p) == 58 )
-                goto tr59;
-        goto st0;
-st44:
-        if ( ++p == pe )
-                goto _test_eof44;
-case 44:
-        switch( (*p) ) {
-                case 32: goto tr2;
-                case 36: goto st45;
-                case 95: goto st45;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto st45;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto st45;
-        } else
-                goto st45;
-        goto st0;
-st45:
-        if ( ++p == pe )
-                goto _test_eof45;
-case 45:
-        switch( (*p) ) {
-                case 32: goto tr2;
-                case 36: goto st46;
-                case 95: goto st46;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto st46;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto st46;
-        } else
-                goto st46;
-        goto st0;
-st46:
-        if ( ++p == pe )
-                goto _test_eof46;
-case 46:
-        switch( (*p) ) {
-                case 32: goto tr2;
-                case 36: goto st47;
-                case 95: goto st47;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto st47;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto st47;
-        } else
-                goto st47;
-        goto st0;
-st47:
-        if ( ++p == pe )
-                goto _test_eof47;
-case 47:
-        switch( (*p) ) {
-                case 32: goto tr2;
-                case 36: goto st48;
-                case 95: goto st48;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto st48;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto st48;
-        } else
-                goto st48;
-        goto st0;
-st48:
-        if ( ++p == pe )
-                goto _test_eof48;
-case 48:
-        switch( (*p) ) {
-                case 32: goto tr2;
-                case 36: goto st49;
-                case 95: goto st49;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto st49;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto st49;
-        } else
-                goto st49;
-        goto st0;
-st49:
-        if ( ++p == pe )
-                goto _test_eof49;
-case 49:
-        switch( (*p) ) {
-                case 32: goto tr2;
-                case 36: goto st50;
-                case 95: goto st50;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto st50;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto st50;
-        } else
-                goto st50;
-        goto st0;
-st50:
-        if ( ++p == pe )
-                goto _test_eof50;
-case 50:
-        switch( (*p) ) {
-                case 32: goto tr2;
-                case 36: goto st51;
-                case 95: goto st51;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto st51;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto st51;
-        } else
-                goto st51;
-        goto st0;
-st51:
-        if ( ++p == pe )
-                goto _test_eof51;
-case 51:
-        switch( (*p) ) {
-                case 32: goto tr2;
-                case 36: goto st52;
-                case 95: goto st52;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto st52;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto st52;
-        } else
-                goto st52;
-        goto st0;
-st52:
-        if ( ++p == pe )
-                goto _test_eof52;
-case 52:
-        switch( (*p) ) {
-                case 32: goto tr2;
-                case 36: goto st53;
-                case 95: goto st53;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto st53;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto st53;
-        } else
-                goto st53;
-        goto st0;
-st53:
-        if ( ++p == pe )
-                goto _test_eof53;
-case 53:
-        switch( (*p) ) {
-                case 32: goto tr2;
-                case 36: goto st54;
-                case 95: goto st54;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto st54;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto st54;
-        } else
-                goto st54;
-        goto st0;
-st54:
-        if ( ++p == pe )
-                goto _test_eof54;
-case 54:
-        switch( (*p) ) {
-                case 32: goto tr2;
-                case 36: goto st55;
-                case 95: goto st55;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto st55;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto st55;
-        } else
-                goto st55;
-        goto st0;
-st55:
-        if ( ++p == pe )
-                goto _test_eof55;
-case 55:
-        switch( (*p) ) {
-                case 32: goto tr2;
-                case 36: goto st56;
-                case 95: goto st56;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto st56;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto st56;
-        } else
-                goto st56;
-        goto st0;
-st56:
-        if ( ++p == pe )
-                goto _test_eof56;
-case 56:
-        switch( (*p) ) {
-                case 32: goto tr2;
-                case 36: goto st57;
-                case 95: goto st57;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto st57;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto st57;
-        } else
-                goto st57;
-        goto st0;
-st57:
-        if ( ++p == pe )
-                goto _test_eof57;
-case 57:
-        switch( (*p) ) {
-                case 32: goto tr2;
-                case 36: goto st58;
-                case 95: goto st58;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto st58;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto st58;
-        } else
-                goto st58;
-        goto st0;
-st58:
-        if ( ++p == pe )
-                goto _test_eof58;
-case 58:
-        switch( (*p) ) {
-                case 32: goto tr2;
-                case 36: goto st59;
-                case 95: goto st59;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto st59;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto st59;
-        } else
-                goto st59;
-        goto st0;
-st59:
-        if ( ++p == pe )
-                goto _test_eof59;
-case 59:
-        switch( (*p) ) {
-                case 32: goto tr2;
-                case 36: goto st60;
-                case 95: goto st60;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto st60;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto st60;
-        } else
-                goto st60;
-        goto st0;
-st60:
-        if ( ++p == pe )
-                goto _test_eof60;
-case 60:
-        switch( (*p) ) {
-                case 32: goto tr2;
-                case 36: goto st61;
-                case 95: goto st61;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto st61;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto st61;
-        } else
-                goto st61;
-        goto st0;
-st61:
-        if ( ++p == pe )
-                goto _test_eof61;
-case 61:
-        switch( (*p) ) {
-                case 32: goto tr2;
-                case 36: goto st62;
-                case 95: goto st62;
-        }
-        if ( (*p) < 48 ) {
-                if ( 45 <= (*p) && (*p) <= 46 )
-                        goto st62;
-        } else if ( (*p) > 57 ) {
-                if ( 65 <= (*p) && (*p) <= 90 )
-                        goto st62;
-        } else
-                goto st62;
-        goto st0;
-st62:
-        if ( ++p == pe )
-                goto _test_eof62;
-case 62:
-        if ( (*p) == 32 )
-                goto tr2;
-        goto st0;
-        }
-        _test_eof2: cs = 2; goto _test_eof;
-        _test_eof3: cs = 3; goto _test_eof;
-        _test_eof4: cs = 4; goto _test_eof;
-        _test_eof5: cs = 5; goto _test_eof;
-        _test_eof6: cs = 6; goto _test_eof;
-        _test_eof7: cs = 7; goto _test_eof;
-        _test_eof8: cs = 8; goto _test_eof;
-        _test_eof9: cs = 9; goto _test_eof;
-        _test_eof10: cs = 10; goto _test_eof;
-        _test_eof11: cs = 11; goto _test_eof;
-        _test_eof12: cs = 12; goto _test_eof;
-        _test_eof13: cs = 13; goto _test_eof;
-        _test_eof14: cs = 14; goto _test_eof;
-        _test_eof15: cs = 15; goto _test_eof;
-        _test_eof16: cs = 16; goto _test_eof;
-        _test_eof63: cs = 63; goto _test_eof;
-        _test_eof17: cs = 17; goto _test_eof;
-        _test_eof18: cs = 18; goto _test_eof;
-        _test_eof19: cs = 19; goto _test_eof;
-        _test_eof20: cs = 20; goto _test_eof;
-        _test_eof21: cs = 21; goto _test_eof;
-        _test_eof22: cs = 22; goto _test_eof;
-        _test_eof23: cs = 23; goto _test_eof;
-        _test_eof24: cs = 24; goto _test_eof;
-        _test_eof25: cs = 25; goto _test_eof;
-        _test_eof26: cs = 26; goto _test_eof;
-        _test_eof27: cs = 27; goto _test_eof;
-        _test_eof28: cs = 28; goto _test_eof;
-        _test_eof29: cs = 29; goto _test_eof;
-        _test_eof30: cs = 30; goto _test_eof;
-        _test_eof31: cs = 31; goto _test_eof;
-        _test_eof32: cs = 32; goto _test_eof;
-        _test_eof33: cs = 33; goto _test_eof;
-        _test_eof34: cs = 34; goto _test_eof;
-        _test_eof35: cs = 35; goto _test_eof;
-        _test_eof36: cs = 36; goto _test_eof;
-        _test_eof37: cs = 37; goto _test_eof;
-        _test_eof38: cs = 38; goto _test_eof;
-        _test_eof39: cs = 39; goto _test_eof;
-        _test_eof40: cs = 40; goto _test_eof;
-        _test_eof41: cs = 41; goto _test_eof;
-        _test_eof42: cs = 42; goto _test_eof;
-        _test_eof43: cs = 43; goto _test_eof;
-        _test_eof44: cs = 44; goto _test_eof;
-        _test_eof45: cs = 45; goto _test_eof;
-        _test_eof46: cs = 46; goto _test_eof;
-        _test_eof47: cs = 47; goto _test_eof;
-        _test_eof48: cs = 48; goto _test_eof;
-        _test_eof49: cs = 49; goto _test_eof;
-        _test_eof50: cs = 50; goto _test_eof;
-        _test_eof51: cs = 51; goto _test_eof;
-        _test_eof52: cs = 52; goto _test_eof;
-        _test_eof53: cs = 53; goto _test_eof;
-        _test_eof54: cs = 54; goto _test_eof;
-        _test_eof55: cs = 55; goto _test_eof;
-        _test_eof56: cs = 56; goto _test_eof;
-        _test_eof57: cs = 57; goto _test_eof;
-        _test_eof58: cs = 58; goto _test_eof;
-        _test_eof59: cs = 59; goto _test_eof;
-        _test_eof60: cs = 60; goto _test_eof;
-        _test_eof61: cs = 61; goto _test_eof;
-        _test_eof62: cs = 62; goto _test_eof;
-
-        _test_eof: {}
-        _out: {}
-        }
-
-#line 138 "http11_parser.rl"
-
-  if (!http_parser_has_error(parser))
-    parser->cs = cs;
-  parser->nread += p - (buffer + off);
-
-  assert(p <= pe && "buffer overflow after parsing execute");
-  assert(parser->nread <= len && "nread longer than length");
-  assert(parser->body_start <= len && "body starts after buffer end");
-  assert(parser->mark < len && "mark is after buffer end");
-  assert(parser->field_len <= len && "field has length longer than whole buffer");
-  assert(parser->field_start < len && "field starts after buffer end");
-}
-
-static int http_parser_has_error(http_parser *parser) {
-  return parser->cs == http_parser_error;
-}
-
-static int http_parser_is_finished(http_parser *parser) {
-  return parser->cs == http_parser_first_final;
-}
-#endif /* http11_parser_h */
diff --git a/ext/unicorn/http11/http11_parser.rl b/ext/unicorn/http11/http11_parser.rl
deleted file mode 100644
index 9894276..0000000
--- a/ext/unicorn/http11/http11_parser.rl
+++ /dev/null
@@ -1,158 +0,0 @@
-/**
- * Copyright (c) 2005 Zed A. Shaw
- * You can redistribute it and/or modify it under the same terms as Ruby.
- */
-#ifndef http11_parser_h
-#define http11_parser_h
-
-#include <sys/types.h>
-
-static void http_field(void *data, const char *field,
-                       size_t flen, const char *value, size_t vlen);
-static void request_method(void *data, const char *at, size_t length);
-static void scheme(void *data, const char *at, size_t length);
-static void host(void *data, const char *at, size_t length);
-static void request_uri(void *data, const char *at, size_t length);
-static void fragment(void *data, const char *at, size_t length);
-static void request_path(void *data, const char *at, size_t length);
-static void query_string(void *data, const char *at, size_t length);
-static void http_version(void *data, const char *at, size_t length);
-static void header_done(void *data, const char *at, size_t length);
-
-typedef struct http_parser {
-  int cs;
-  size_t body_start;
-  size_t nread;
-  size_t mark;
-  size_t field_start;
-  size_t field_len;
-  size_t query_start;
-
-  void *data;
-} http_parser;
-
-static int http_parser_has_error(http_parser *parser);
-static int http_parser_is_finished(http_parser *parser);
-
-/*
- * capitalizes all lower-case ASCII characters,
- * converts dashes to underscores.
- */
-static void snake_upcase_char(char *c)
-{
-  if (*c >= 'a' && *c <= 'z')
-    *c &= ~0x20;
-  else if (*c == '-')
-    *c = '_';
-}
-
-static void downcase_char(char *c)
-{
-  if (*c >= 'A' && *c <= 'Z')
-    *c |= 0x20;
-}
-
-#define LEN(AT, FPC) (FPC - buffer - parser->AT)
-#define MARK(M,FPC) (parser->M = (FPC) - buffer)
-#define PTR_TO(F) (buffer + parser->F)
-
-/** Machine **/
-
-%%{
-  machine http_parser;
-
-  action mark {MARK(mark, fpc); }
-
-  action start_field { MARK(field_start, fpc); }
-  action snake_upcase_field { snake_upcase_char((char *)fpc); }
-  action downcase_char { downcase_char((char *)fpc); }
-  action write_field {
-    parser->field_len = LEN(field_start, fpc);
-  }
-
-  action start_value { MARK(mark, fpc); }
-  action write_value {
-    http_field(parser->data, PTR_TO(field_start), parser->field_len, PTR_TO(mark), LEN(mark, fpc));
-  }
-  action request_method {
-    request_method(parser->data, PTR_TO(mark), LEN(mark, fpc));
-  }
-  action scheme { scheme(parser->data, PTR_TO(mark), LEN(mark, fpc)); }
-  action host { host(parser->data, PTR_TO(mark), LEN(mark, fpc)); }
-  action request_uri {
-    request_uri(parser->data, PTR_TO(mark), LEN(mark, fpc));
-  }
-  action fragment {
-    fragment(parser->data, PTR_TO(mark), LEN(mark, fpc));
-  }
-
-  action start_query {MARK(query_start, fpc); }
-  action query_string {
-    query_string(parser->data, PTR_TO(query_start), LEN(query_start, fpc));
-  }
-
-  action http_version {
-    http_version(parser->data, PTR_TO(mark), LEN(mark, fpc));
-  }
-
-  action request_path {
-    request_path(parser->data, PTR_TO(mark), LEN(mark,fpc));
-  }
-
-  action done {
-    parser->body_start = fpc - buffer + 1;
-    header_done(parser->data, fpc + 1, pe - fpc - 1);
-    fbreak;
-  }
-
-  include http_parser_common "http11_parser_common.rl";
-}%%
-
-/** Data **/
-%% write data;
-
-static void http_parser_init(http_parser *parser) {
-  int cs = 0;
-  memset(parser, 0, sizeof(*parser));
-  %% write init;
-  parser->cs = cs;
-}
-
-/** exec **/
-static void http_parser_execute(
-  http_parser *parser, const char *buffer, size_t len)
-{
-  const char *p, *pe;
-  int cs = parser->cs;
-  size_t off = parser->nread;
-
-  assert(off <= len && "offset past end of buffer");
-
-  p = buffer+off;
-  pe = buffer+len;
-
-  assert(*pe == '\0' && "pointer does not end on NUL");
-  assert(pe - p == len - off && "pointers aren't same distance");
-
-  %% write exec;
-
-  if (!http_parser_has_error(parser))
-    parser->cs = cs;
-  parser->nread += p - (buffer + off);
-
-  assert(p <= pe && "buffer overflow after parsing execute");
-  assert(parser->nread <= len && "nread longer than length");
-  assert(parser->body_start <= len && "body starts after buffer end");
-  assert(parser->mark < len && "mark is after buffer end");
-  assert(parser->field_len <= len && "field has length longer than whole buffer");
-  assert(parser->field_start < len && "field starts after buffer end");
-}
-
-static int http_parser_has_error(http_parser *parser) {
-  return parser->cs == http_parser_error;
-}
-
-static int http_parser_is_finished(http_parser *parser) {
-  return parser->cs == http_parser_first_final;
-}
-#endif /* http11_parser_h */
diff --git a/ext/unicorn_http/c_util.h b/ext/unicorn_http/c_util.h
new file mode 100644
index 0000000..9e674fa
--- /dev/null
+++ b/ext/unicorn_http/c_util.h
@@ -0,0 +1,107 @@
+/*
+ * Generic C functions and macros go here; there are no dependencies
+ * on Unicorn internal structures or the Ruby C API in here.
+ */
+
+#ifndef UH_util_h
+#define UH_util_h
+
+#include <unistd.h>
+#include <assert.h>
+
+#define MIN(a,b) (a < b ? a : b)
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
+
+#ifndef SIZEOF_OFF_T
+#  define SIZEOF_OFF_T 4
+#  warning SIZEOF_OFF_T not defined, guessing 4.  Did you run extconf.rb?
+#endif
+
+#if SIZEOF_OFF_T == 4
+#  define UH_OFF_T_MAX 0x7fffffff
+#elif SIZEOF_OFF_T == 8
+#  if SIZEOF_LONG == 4
+#    define UH_OFF_T_MAX 0x7fffffffffffffffLL
+#  else
+#    define UH_OFF_T_MAX 0x7fffffffffffffff
+#  endif
+#else
+#  error off_t size unknown for this platform!
+#endif /* SIZEOF_OFF_T check */
+
+/*
+ * capitalizes all lower-case ASCII characters and converts dashes
+ * to underscores for HTTP headers.  Locale-agnostic.
+ */
+static void snake_upcase_char(char *c)
+{
+  if (*c >= 'a' && *c <= 'z')
+    *c &= ~0x20;
+  else if (*c == '-')
+    *c = '_';
+}
+
+/* Downcases a single ASCII character.  Locale-agnostic. */
+static void downcase_char(char *c)
+{
+  if (*c >= 'A' && *c <= 'Z')
+    *c |= 0x20;
+}
+
+static int hexchar2int(int xdigit)
+{
+  if (xdigit >= 'A' && xdigit <= 'F')
+    return xdigit - 'A' + 10;
+  if (xdigit >= 'a' && xdigit <= 'f')
+    return xdigit - 'a' + 10;
+
+  /* Ragel already does runtime range checking for us in Unicorn: */
+  assert(xdigit >= '0' && xdigit <= '9' && "invalid digit character");
+
+  return xdigit - '0';
+}
+
+/*
+ * multiplies +i+ by +base+ and increments the result by the parsed
+ * integer value of +xdigit+.  +xdigit+ is a character byte
+ * representing a number in the range of 0..(base-1).
+ * returns the new value of +i+ on success,
+ * returns -1 on errors (including overflow)
+ */
+static off_t step_incr(off_t i, int xdigit, const int base)
+{
+  static const off_t max = UH_OFF_T_MAX;
+  const off_t next_max = (max - (max % base)) / base;
+  off_t offset = hexchar2int(xdigit);
+
+  if (offset > (base - 1))
+    return -1;
+  if (i > next_max)
+    return -1;
+  i *= base;
+
+  if ((offset > (base - 1)) || ((max - i) < offset))
+    return -1;
+
+  return i + offset;
+}
+
+/*
+ * parses a non-negative length according to base-10 and
+ * returns it as an off_t value.  Returns -1 on errors
+ * (including overflow).
+ */
+static off_t parse_length(const char *value, size_t length)
+{
+  off_t rv;
+
+  for (rv = 0; length-- && rv >= 0; ++value)
+    rv = step_incr(rv, *value, 10);
+
+  return rv;
+}
+
+#define CONST_MEM_EQ(const_p, buf, len) \
+  ((sizeof(const_p) - 1) == len && !memcmp(const_p, buf, sizeof(const_p) - 1))
+
+#endif /* UH_util_h */
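
For illustration, the overflow guard in step_incr()/parse_length() can be read as: refuse the multiply if the accumulator already exceeds (max - max % base) / base, then refuse the add if the digit would push the result past max.  A rough Ruby sketch of the same logic (illustrative only, not part of this change; a 64-bit off_t limit is assumed):

  OFF_T_MAX = 0x7fffffffffffffff  # assumed 64-bit off_t limit (UH_OFF_T_MAX)

  # mirrors step_incr(): multiply-then-add with overflow checks, -1 on error
  def step_incr(i, digit, base)
    next_max = (OFF_T_MAX - (OFF_T_MAX % base)) / base
    return -1 if digit < 0 || digit >= base || i > next_max
    i *= base
    (OFF_T_MAX - i) < digit ? -1 : i + digit
  end

  # mirrors parse_length() for base-10 Content-Length values
  def parse_length(str)
    rv = 0
    str.each_byte do |b|
      rv = step_incr(rv, b - 48, 10)  # 48 == '0'
      return -1 if rv < 0
    end
    rv
  end

  parse_length("1024")                 # => 1024
  parse_length("9223372036854775808")  # => -1 (overflows a 64-bit off_t)
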
diff --git a/ext/unicorn_http/common_field_optimization.h b/ext/unicorn_http/common_field_optimization.h
new file mode 100644
index 0000000..850fb90
--- /dev/null
+++ b/ext/unicorn_http/common_field_optimization.h
@@ -0,0 +1,111 @@
+#ifndef common_field_optimization
+#define common_field_optimization
+#include "ruby.h"
+#include "c_util.h"
+
+struct common_field {
+  const signed long len;
+  const char *name;
+  VALUE value;
+};
+
+/*
+ * A list of common HTTP headers we expect to receive.
+ * This allows us to avoid repeatedly creating identical string
+ * objects to be used with rb_hash_aset().
+ */
+static struct common_field common_http_fields[] = {
+# define f(N) { (sizeof(N) - 1), N, Qnil }
+  f("ACCEPT"),
+  f("ACCEPT_CHARSET"),
+  f("ACCEPT_ENCODING"),
+  f("ACCEPT_LANGUAGE"),
+  f("ALLOW"),
+  f("AUTHORIZATION"),
+  f("CACHE_CONTROL"),
+  f("CONNECTION"),
+  f("CONTENT_ENCODING"),
+  f("CONTENT_LENGTH"),
+  f("CONTENT_TYPE"),
+  f("COOKIE"),
+  f("DATE"),
+  f("EXPECT"),
+  f("FROM"),
+  f("HOST"),
+  f("IF_MATCH"),
+  f("IF_MODIFIED_SINCE"),
+  f("IF_NONE_MATCH"),
+  f("IF_RANGE"),
+  f("IF_UNMODIFIED_SINCE"),
+  f("KEEP_ALIVE"), /* Firefox sends this */
+  f("MAX_FORWARDS"),
+  f("PRAGMA"),
+  f("PROXY_AUTHORIZATION"),
+  f("RANGE"),
+  f("REFERER"),
+  f("TE"),
+  f("TRAILER"),
+  f("TRANSFER_ENCODING"),
+  f("UPGRADE"),
+  f("USER_AGENT"),
+  f("VIA"),
+  f("X_FORWARDED_FOR"), /* common for proxies */
+  f("X_FORWARDED_PROTO"), /* common for proxies */
+  f("X_REAL_IP"), /* common for proxies */
+  f("WARNING")
+# undef f
+};
+
+#define HTTP_PREFIX "HTTP_"
+#define HTTP_PREFIX_LEN (sizeof(HTTP_PREFIX) - 1)
+
+/* this function is not performance-critical, called only at load time */
+static void init_common_fields(void)
+{
+  int i;
+  struct common_field *cf = common_http_fields;
+  char tmp[64];
+  memcpy(tmp, HTTP_PREFIX, HTTP_PREFIX_LEN);
+
+  for(i = 0; i < ARRAY_SIZE(common_http_fields); cf++, i++) {
+    /* Rack doesn't like certain headers prefixed with "HTTP_" */
+    if (!strcmp("CONTENT_LENGTH", cf->name) ||
+        !strcmp("CONTENT_TYPE", cf->name)) {
+      cf->value = rb_str_new(cf->name, cf->len);
+    } else {
+      memcpy(tmp + HTTP_PREFIX_LEN, cf->name, cf->len + 1);
+      cf->value = rb_str_new(tmp, HTTP_PREFIX_LEN + cf->len);
+    }
+    cf->value = rb_obj_freeze(cf->value);
+    rb_global_variable(&cf->value);
+  }
+}
+
+/* this function is called for every header set */
+static VALUE find_common_field(const char *field, size_t flen)
+{
+  int i;
+  struct common_field *cf = common_http_fields;
+
+  for(i = 0; i < ARRAY_SIZE(common_http_fields); i++, cf++) {
+    if (cf->len == flen && !memcmp(cf->name, field, flen))
+      return cf->value;
+  }
+  return Qnil;
+}
+
+/*
+ * We got a strange header that we don't have a memoized value for.
+ * Fall back to creating a new string to use as a hash key.
+ */
+static VALUE uncommon_field(const char *field, size_t flen)
+{
+  VALUE f = rb_str_new(NULL, HTTP_PREFIX_LEN + flen);
+  memcpy(RSTRING_PTR(f), HTTP_PREFIX, HTTP_PREFIX_LEN);
+  memcpy(RSTRING_PTR(f) + HTTP_PREFIX_LEN, field, flen);
+  assert(*(RSTRING_PTR(f) + RSTRING_LEN(f)) == '\0' &&
+         "string didn't end with \\0"); /* paranoia */
+  return rb_obj_freeze(f);
+}
+
+#endif /* common_field_optimization */
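
In effect, init_common_fields() builds a one-time table mapping each expected header to a single frozen env-key string ("ACCEPT" -> "HTTP_ACCEPT", with CONTENT_LENGTH and CONTENT_TYPE left unprefixed for Rack), and find_common_field()/uncommon_field() choose from it for every header parsed.  A rough Ruby equivalent of that lookup (illustrative only; the C code operates on the already upcased, underscored field bytes):

  COMMON_FIELDS = %w(ACCEPT ACCEPT_ENCODING CONNECTION CONTENT_LENGTH
                     CONTENT_TYPE HOST USER_AGENT).inject({}) do |h, name|
    key = %w(CONTENT_LENGTH CONTENT_TYPE).include?(name) ? name : "HTTP_#{name}"
    h.merge!(name => key.freeze)
  end

  # memoized frozen key for common fields, a fresh frozen string otherwise
  def field_to_env_key(field)
    COMMON_FIELDS[field] || "HTTP_#{field}".freeze
  end

  field_to_env_key("CONTENT_TYPE")     # => "CONTENT_TYPE"
  field_to_env_key("ACCEPT_ENCODING")  # => "HTTP_ACCEPT_ENCODING"
  field_to_env_key("X_RANDOM_HEADER")  # => "HTTP_X_RANDOM_HEADER"
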
diff --git a/ext/unicorn_http/ext_help.h b/ext/unicorn_http/ext_help.h
new file mode 100644
index 0000000..7df73f7
--- /dev/null
+++ b/ext/unicorn_http/ext_help.h
@@ -0,0 +1,77 @@
+#ifndef ext_help_h
+#define ext_help_h
+
+#ifndef RSTRING_PTR
+#define RSTRING_PTR(s) (RSTRING(s)->ptr)
+#endif /* !defined(RSTRING_PTR) */
+#ifndef RSTRING_LEN
+#define RSTRING_LEN(s) (RSTRING(s)->len)
+#endif /* !defined(RSTRING_LEN) */
+
+#ifndef RUBINIUS
+#  define rb_str_update(x) do {} while (0)
+#  define rb_str_flush(x) do {} while (0)
+#endif /* !RUBINIUS */
+
+#ifndef HAVE_RB_STR_SET_LEN
+#  ifdef RUBINIUS
+#    define rb_str_set_len(str,len) rb_str_resize(str,len)
+#  else /* 1.8.6 optimized version */
+/* this is taken from Ruby 1.8.7, 1.8.6 may not have it */
+static void rb_18_str_set_len(VALUE str, long len)
+{
+  RSTRING(str)->len = len;
+  RSTRING(str)->ptr[len] = '\0';
+  rb_str_flush(str);
+}
+#    define rb_str_set_len(str,len) rb_18_str_set_len(str,len)
+#  endif /* ! RUBINIUS */
+#endif /* !defined(HAVE_RB_STR_SET_LEN) */
+
+/* not all Ruby implementations support frozen objects (Rubinius does not) */
+#if defined(OBJ_FROZEN)
+#  define assert_frozen(f) assert(OBJ_FROZEN(f) && "unfrozen object")
+#else
+#  define assert_frozen(f) do {} while (0)
+#endif /* !defined(OBJ_FROZEN) */
+
+#if !defined(OFFT2NUM)
+#  if SIZEOF_OFF_T == SIZEOF_LONG
+#    define OFFT2NUM(n) LONG2NUM(n)
+#  else
+#    define OFFT2NUM(n) LL2NUM(n)
+#  endif
+#endif /* ! defined(OFFT2NUM) */
+
+#ifndef HAVE_RB_STR_MODIFY
+#  define rb_str_modify(x) do {} while (0)
+#endif /* ! defined(HAVE_RB_STR_MODIFY) */
+
+static inline int str_cstr_eq(VALUE val, const char *ptr, size_t len)
+{
+  return (RSTRING_LEN(val) == len && !memcmp(ptr, RSTRING_PTR(val), len));
+}
+
+#define STR_CSTR_EQ(val, const_str) \
+  str_cstr_eq(val, const_str, sizeof(const_str) - 1)
+
+/* strcasecmp isn't locale independent */
+static int str_cstr_case_eq(VALUE val, const char *ptr, size_t len)
+{
+  if (RSTRING_LEN(val) == len) {
+    const char *v = RSTRING_PTR(val);
+
+    for (; len--; ++ptr, ++v) {
+      if ((*ptr == *v) || (*v >= 'A' && *v <= 'Z' && (*v | 0x20) == *ptr))
+        continue;
+      return 0;
+    }
+    return 1;
+  }
+  return 0;
+}
+
+#define STR_CSTR_CASE_EQ(val, const_str) \
+  str_cstr_case_eq(val, const_str, sizeof(const_str) - 1)
+
+#endif /* ext_help_h */
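
str_cstr_case_eq() only folds the ASCII A-Z range, so it behaves identically under any locale; the parser uses it to match values such as "keep-alive", "close" and "chunked".  A rough Ruby sketch of the same comparison (illustrative only; the second argument is assumed to be a lowercase literal):

  # ASCII-only case-insensitive comparison against a known lowercase literal
  def str_cstr_case_eq(val, lit)
    return false unless val.length == lit.length
    val.length.times do |i|
      v, p = val[i].ord, lit[i].ord
      return false unless v == p || (v >= 65 && v <= 90 && (v | 0x20) == p)
    end
    true
  end

  str_cstr_case_eq("Keep-Alive", "keep-alive")  # => true
  str_cstr_case_eq("CHUNKED", "chunked")        # => true
  str_cstr_case_eq("close", "keep-alive")       # => false
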
diff --git a/ext/unicorn_http/extconf.rb b/ext/unicorn_http/extconf.rb
new file mode 100644
index 0000000..0c6a186
--- /dev/null
+++ b/ext/unicorn_http/extconf.rb
@@ -0,0 +1,14 @@
+# -*- encoding: binary -*-
+require 'mkmf'
+
+dir_config("unicorn_http")
+
+have_macro("SIZEOF_OFF_T", "ruby.h") or check_sizeof("off_t", "sys/types.h")
+have_macro("SIZEOF_LONG", "ruby.h") or check_sizeof("long", "sys/types.h")
+have_func("rb_str_set_len", "ruby.h")
+have_func("rb_str_modify", "ruby.h")
+
+# -fPIC is needed for Rubinius, MRI already uses it regardless
+with_cflags($CFLAGS + " -fPIC ") do
+  create_makefile("unicorn_http")
+end
diff --git a/ext/unicorn_http/global_variables.h b/ext/unicorn_http/global_variables.h
new file mode 100644
index 0000000..e593cf6
--- /dev/null
+++ b/ext/unicorn_http/global_variables.h
@@ -0,0 +1,91 @@
+#ifndef global_variables_h
+#define global_variables_h
+static VALUE mUnicorn;
+static VALUE cHttpParser;
+static VALUE eHttpParserError;
+
+static VALUE g_rack_url_scheme;
+static VALUE g_request_method;
+static VALUE g_request_uri;
+static VALUE g_fragment;
+static VALUE g_query_string;
+static VALUE g_http_version;
+static VALUE g_request_path;
+static VALUE g_path_info;
+static VALUE g_server_name;
+static VALUE g_server_port;
+static VALUE g_server_protocol;
+static VALUE g_http_host;
+static VALUE g_http_x_forwarded_proto;
+static VALUE g_http_transfer_encoding;
+static VALUE g_content_length;
+static VALUE g_http_trailer;
+static VALUE g_http_connection;
+static VALUE g_port_80;
+static VALUE g_port_443;
+static VALUE g_localhost;
+static VALUE g_http;
+static VALUE g_http_09;
+static VALUE g_http_10;
+static VALUE g_http_11;
+static VALUE g_GET;
+static VALUE g_HEAD;
+
+/** Defines the maximum length constant and error message for input length validation. */
+#define DEF_MAX_LENGTH(N, length) \
+  static const size_t MAX_##N##_LENGTH = length; \
+  static const char * const MAX_##N##_LENGTH_ERR = \
+    "HTTP element " # N  " is longer than the " # length " allowed length."
+
+/**
+ * Validates the maximum length of the given input and raises an
+ * HttpParserError exception if it is exceeded.
+ */
+#define VALIDATE_MAX_LENGTH(len, N) do { \
+  if (len > MAX_##N##_LENGTH) \
+    rb_raise(eHttpParserError, MAX_##N##_LENGTH_ERR); \
+} while (0)
+
+/** Defines global strings in the init method. */
+#define DEF_GLOBAL(N, val) do { \
+  g_##N = rb_obj_freeze(rb_str_new(val, sizeof(val) - 1)); \
+  rb_global_variable(&g_##N); \
+} while (0)
+
+/* Defines the maximum allowed lengths for various input elements.*/
+DEF_MAX_LENGTH(FIELD_NAME, 256);
+DEF_MAX_LENGTH(FIELD_VALUE, 80 * 1024);
+DEF_MAX_LENGTH(REQUEST_URI, 1024 * 12);
+DEF_MAX_LENGTH(FRAGMENT, 1024); /* Don't know if this length is specified somewhere or not */
+DEF_MAX_LENGTH(REQUEST_PATH, 1024);
+DEF_MAX_LENGTH(QUERY_STRING, (1024 * 10));
+DEF_MAX_LENGTH(HEADER, (1024 * (80 + 32)));
+
+void init_globals(void)
+{
+  DEF_GLOBAL(rack_url_scheme, "rack.url_scheme");
+  DEF_GLOBAL(request_method, "REQUEST_METHOD");
+  DEF_GLOBAL(request_uri, "REQUEST_URI");
+  DEF_GLOBAL(fragment, "FRAGMENT");
+  DEF_GLOBAL(query_string, "QUERY_STRING");
+  DEF_GLOBAL(http_version, "HTTP_VERSION");
+  DEF_GLOBAL(request_path, "REQUEST_PATH");
+  DEF_GLOBAL(path_info, "PATH_INFO");
+  DEF_GLOBAL(server_name, "SERVER_NAME");
+  DEF_GLOBAL(server_port, "SERVER_PORT");
+  DEF_GLOBAL(server_protocol, "SERVER_PROTOCOL");
+  DEF_GLOBAL(http_x_forwarded_proto, "HTTP_X_FORWARDED_PROTO");
+  DEF_GLOBAL(port_80, "80");
+  DEF_GLOBAL(port_443, "443");
+  DEF_GLOBAL(localhost, "localhost");
+  DEF_GLOBAL(http, "http");
+  DEF_GLOBAL(http_11, "HTTP/1.1");
+  DEF_GLOBAL(http_10, "HTTP/1.0");
+  DEF_GLOBAL(http_09, "HTTP/0.9");
+  DEF_GLOBAL(GET, "GET");
+  DEF_GLOBAL(HEAD, "HEAD");
+}
+
+#undef DEF_GLOBAL
+
+#endif /* global_variables_h */
diff --git a/ext/unicorn_http/unicorn_http.rl b/ext/unicorn_http/unicorn_http.rl
new file mode 100644
index 0000000..6232e2c
--- /dev/null
+++ b/ext/unicorn_http/unicorn_http.rl
@@ -0,0 +1,716 @@
+/**
+ * Copyright (c) 2009 Eric Wong (all bugs are Eric's fault)
+ * Copyright (c) 2005 Zed A. Shaw
+ * You can redistribute it and/or modify it under the same terms as Ruby.
+ */
+#include "ruby.h"
+#include "ext_help.h"
+#include <assert.h>
+#include <string.h>
+#include <sys/types.h>
+#include "common_field_optimization.h"
+#include "global_variables.h"
+#include "c_util.h"
+
+#define UH_FL_CHUNKED  0x1
+#define UH_FL_HASBODY  0x2
+#define UH_FL_INBODY   0x4
+#define UH_FL_HASTRAILER 0x8
+#define UH_FL_INTRAILER 0x10
+#define UH_FL_INCHUNK  0x20
+#define UH_FL_KAMETHOD 0x40
+#define UH_FL_KAVERSION 0x80
+#define UH_FL_HASHEADER 0x100
+
+/* both of these flags need to be set for keepalive to be supported */
+#define UH_FL_KEEPALIVE (UH_FL_KAMETHOD | UH_FL_KAVERSION)
+
+/* keep this small for Rainbows! since every client has one */
+struct http_parser {
+  int cs; /* Ragel internal state */
+  unsigned int flags;
+  size_t mark;
+  size_t offset;
+  union { /* these 2 fields don't nest */
+    size_t field;
+    size_t query;
+  } start;
+  union {
+    size_t field_len; /* only used during header processing */
+    size_t dest_offset; /* only used during body processing */
+  } s;
+  VALUE cont; /* Qfalse: unset, Qnil: ignored header, T_STRING: append */
+  union {
+    off_t content;
+    off_t chunk;
+  } len;
+};
+
+static void finalize_header(struct http_parser *hp, VALUE req);
+
+#define REMAINING (unsigned long)(pe - p)
+#define LEN(AT, FPC) (FPC - buffer - hp->AT)
+#define MARK(M,FPC) (hp->M = (FPC) - buffer)
+#define PTR_TO(F) (buffer + hp->F)
+#define STR_NEW(M,FPC) rb_str_new(PTR_TO(M), LEN(M, FPC))
+
+#define HP_FL_TEST(hp,fl) ((hp)->flags & (UH_FL_##fl))
+#define HP_FL_SET(hp,fl) ((hp)->flags |= (UH_FL_##fl))
+#define HP_FL_UNSET(hp,fl) ((hp)->flags &= ~(UH_FL_##fl))
+#define HP_FL_ALL(hp,fl) (HP_FL_TEST(hp, fl) == (UH_FL_##fl))
+
+/*
+ * handles values of the "Connection:" header, keepalive is implied
+ * for HTTP/1.1 but needs to be explicitly enabled with HTTP/1.0
+ * Additionally, we require GET/HEAD requests to support keepalive.
+ */
+static void hp_keepalive_connection(struct http_parser *hp, VALUE val)
+{
+  /* REQUEST_METHOD is always set before any headers */
+  if (HP_FL_TEST(hp, KAMETHOD)) {
+    if (STR_CSTR_CASE_EQ(val, "keep-alive")) {
+      /* basically have HTTP/1.0 masquerade as HTTP/1.1+ */
+      HP_FL_SET(hp, KAVERSION);
+    } else if (STR_CSTR_CASE_EQ(val, "close")) {
+      /*
+       * it doesn't matter what HTTP version or request method we have,
+       * if a client says "Connection: close", we disable keepalive
+       */
+      HP_FL_UNSET(hp, KEEPALIVE);
+    } else {
+      /*
+       * client could've sent anything, ignore it for now.  Maybe
+       * "HP_FL_UNSET(hp, KEEPALIVE);" just in case?
+       * Raising an exception might be too mean...
+       */
+    }
+  }
+}
+
+static void
+request_method(struct http_parser *hp, VALUE req, const char *ptr, size_t len)
+{
+  VALUE v;
+
+  /*
+   * we only support keepalive for GET and HEAD requests for now; other
+   * methods are too rarely seen to be worth optimizing.  POST is unsafe
+   * since some clients send extra bytes after POST bodies.
+   */
+  if (CONST_MEM_EQ("GET", ptr, len)) {
+    HP_FL_SET(hp, KAMETHOD);
+    v = g_GET;
+  } else if (CONST_MEM_EQ("HEAD", ptr, len)) {
+    HP_FL_SET(hp, KAMETHOD);
+    v = g_HEAD;
+  } else {
+    v = rb_str_new(ptr, len);
+  }
+  rb_hash_aset(req, g_request_method, v);
+}
+
+static void
+http_version(struct http_parser *hp, VALUE req, const char *ptr, size_t len)
+{
+  VALUE v;
+
+  HP_FL_SET(hp, HASHEADER);
+
+  if (CONST_MEM_EQ("HTTP/1.1", ptr, len)) {
+    /* HTTP/1.1 implies keepalive unless "Connection: close" is set */
+    HP_FL_SET(hp, KAVERSION);
+    v = g_http_11;
+  } else if (CONST_MEM_EQ("HTTP/1.0", ptr, len)) {
+    v = g_http_10;
+  } else {
+    v = rb_str_new(ptr, len);
+  }
+  rb_hash_aset(req, g_server_protocol, v);
+  rb_hash_aset(req, g_http_version, v);
+}
+
+static inline void hp_invalid_if_trailer(struct http_parser *hp)
+{
+  if (HP_FL_TEST(hp, INTRAILER))
+    rb_raise(eHttpParserError, "invalid Trailer");
+}
+
+static void write_cont_value(struct http_parser *hp,
+                             const char *buffer, const char *p)
+{
+  char *vptr;
+
+  if (hp->cont == Qfalse)
+     rb_raise(eHttpParserError, "invalid continuation line");
+  if (NIL_P(hp->cont))
+     return; /* we're ignoring this header (probably Host:) */
+
+  assert(TYPE(hp->cont) == T_STRING && "continuation line is not a string");
+  assert(hp->mark > 0 && "impossible continuation line offset");
+
+  if (LEN(mark, p) == 0)
+    return;
+
+  if (RSTRING_LEN(hp->cont) > 0)
+    --hp->mark;
+
+  vptr = (char *)PTR_TO(mark);
+
+  if (RSTRING_LEN(hp->cont) > 0) {
+    assert((' ' == *vptr || '\t' == *vptr) && "invalid leading white space");
+    *vptr = ' ';
+  }
+  rb_str_buf_cat(hp->cont, vptr, LEN(mark, p));
+}
+
+static void write_value(VALUE req, struct http_parser *hp,
+                        const char *buffer, const char *p)
+{
+  VALUE f = find_common_field(PTR_TO(start.field), hp->s.field_len);
+  VALUE v;
+  VALUE e;
+
+  VALIDATE_MAX_LENGTH(LEN(mark, p), FIELD_VALUE);
+  v = LEN(mark, p) == 0 ? rb_str_buf_new(128) : STR_NEW(mark, p);
+  if (NIL_P(f)) {
+    VALIDATE_MAX_LENGTH(hp->s.field_len, FIELD_NAME);
+    f = uncommon_field(PTR_TO(start.field), hp->s.field_len);
+  } else if (f == g_http_connection) {
+    hp_keepalive_connection(hp, v);
+  } else if (f == g_content_length) {
+    hp->len.content = parse_length(RSTRING_PTR(v), RSTRING_LEN(v));
+    if (hp->len.content < 0)
+      rb_raise(eHttpParserError, "invalid Content-Length");
+    HP_FL_SET(hp, HASBODY);
+    hp_invalid_if_trailer(hp);
+  } else if (f == g_http_transfer_encoding) {
+    if (STR_CSTR_CASE_EQ(v, "chunked")) {
+      HP_FL_SET(hp, CHUNKED);
+      HP_FL_SET(hp, HASBODY);
+    }
+    hp_invalid_if_trailer(hp);
+  } else if (f == g_http_trailer) {
+    HP_FL_SET(hp, HASTRAILER);
+    hp_invalid_if_trailer(hp);
+  } else {
+    assert(TYPE(f) == T_STRING && "memoized object is not a string");
+    assert_frozen(f);
+  }
+
+  e = rb_hash_aref(req, f);
+  if (NIL_P(e)) {
+    hp->cont = rb_hash_aset(req, f, v);
+  } else if (f == g_http_host) {
+    /*
+     * ignored, absolute URLs in REQUEST_URI take precedence over
+     * the Host: header (ref: rfc 2616, section 5.2.1)
+     */
+     hp->cont = Qnil;
+  } else {
+    rb_str_buf_cat(e, ",", 1);
+    hp->cont = rb_str_buf_append(e, v);
+  }
+}
+
+/** Machine **/
+
+%%{
+  machine http_parser;
+
+  action mark {MARK(mark, fpc); }
+
+  action start_field { MARK(start.field, fpc); }
+  action snake_upcase_field { snake_upcase_char((char *)fpc); }
+  action downcase_char { downcase_char((char *)fpc); }
+  action write_field { hp->s.field_len = LEN(start.field, fpc); }
+  action start_value { MARK(mark, fpc); }
+  action write_value { write_value(req, hp, buffer, fpc); }
+  action write_cont_value { write_cont_value(hp, buffer, fpc); }
+  action request_method {
+    request_method(hp, req, PTR_TO(mark), LEN(mark, fpc));
+  }
+  action scheme {
+    rb_hash_aset(req, g_rack_url_scheme, STR_NEW(mark, fpc));
+  }
+  action host {
+    rb_hash_aset(req, g_http_host, STR_NEW(mark, fpc));
+  }
+  action request_uri {
+    size_t len = LEN(mark, fpc);
+    VALUE str;
+
+    VALIDATE_MAX_LENGTH(len, REQUEST_URI);
+    str = rb_hash_aset(req, g_request_uri, STR_NEW(mark, fpc));
+    /*
+     * "OPTIONS * HTTP/1.1\r\n" is a valid request, but we can't have '*'
+     * in REQUEST_PATH or PATH_INFO or else Rack::Lint will complain
+     */
+    if (STR_CSTR_EQ(str, "*")) {
+      str = rb_str_new(NULL, 0);
+      rb_hash_aset(req, g_path_info, str);
+      rb_hash_aset(req, g_request_path, str);
+    }
+  }
+  action fragment {
+    VALIDATE_MAX_LENGTH(LEN(mark, fpc), FRAGMENT);
+    rb_hash_aset(req, g_fragment, STR_NEW(mark, fpc));
+  }
+  action start_query {MARK(start.query, fpc); }
+  action query_string {
+    VALIDATE_MAX_LENGTH(LEN(start.query, fpc), QUERY_STRING);
+    rb_hash_aset(req, g_query_string, STR_NEW(start.query, fpc));
+  }
+  action http_version { http_version(hp, req, PTR_TO(mark), LEN(mark, fpc)); }
+  action request_path {
+    VALUE val;
+    size_t len = LEN(mark, fpc);
+
+    VALIDATE_MAX_LENGTH(len, REQUEST_PATH);
+    val = rb_hash_aset(req, g_request_path, STR_NEW(mark, fpc));
+
+    /* rack says PATH_INFO must start with "/" or be empty */
+    if (!STR_CSTR_EQ(val, "*"))
+      rb_hash_aset(req, g_path_info, val);
+  }
+  action add_to_chunk_size {
+    hp->len.chunk = step_incr(hp->len.chunk, fc, 16);
+    if (hp->len.chunk < 0)
+      rb_raise(eHttpParserError, "invalid chunk size");
+  }
+  action header_done {
+    finalize_header(hp, req);
+
+    cs = http_parser_first_final;
+    if (HP_FL_TEST(hp, HASBODY)) {
+      HP_FL_SET(hp, INBODY);
+      if (HP_FL_TEST(hp, CHUNKED))
+        cs = http_parser_en_ChunkedBody;
+    } else {
+      assert(!HP_FL_TEST(hp, CHUNKED) && "chunked encoding without body!");
+    }
+    /*
+     * go back to Ruby so we can call the Rack application; we'll reenter
+     * the parser iff the body needs to be processed.
+     */
+    goto post_exec;
+  }
+
+  action end_trailers {
+    cs = http_parser_first_final;
+    goto post_exec;
+  }
+
+  action end_chunked_body {
+    if (HP_FL_TEST(hp, HASTRAILER)) {
+      HP_FL_SET(hp, INTRAILER);
+      cs = http_parser_en_Trailers;
+    } else {
+      cs = http_parser_first_final;
+    }
+    ++p;
+    assert(p <= pe && "buffer overflow after chunked body");
+    goto post_exec;
+  }
+
+  action skip_chunk_data {
+  skip_chunk_data_hack: {
+    size_t nr = MIN(hp->len.chunk, REMAINING);
+    memcpy(RSTRING_PTR(req) + hp->s.dest_offset, fpc, nr);
+    hp->s.dest_offset += nr;
+    hp->len.chunk -= nr;
+    p += nr;
+    assert(hp->len.chunk >= 0 && "negative chunk length");
+    if (hp->len.chunk > REMAINING) {
+      HP_FL_SET(hp, INCHUNK);
+      goto post_exec;
+    } else {
+      fhold;
+      fgoto chunk_end;
+    }
+  }}
+
+  include unicorn_http_common "unicorn_http_common.rl";
+}%%
+
+/** Data **/
+%% write data;
+
+static void http_parser_init(struct http_parser *hp)
+{
+  int cs = 0;
+  memset(hp, 0, sizeof(struct http_parser));
+  hp->cont = Qfalse; /* zero on MRI, should be optimized away by above */
+  %% write init;
+  hp->cs = cs;
+}
+
+/** exec **/
+static void http_parser_execute(struct http_parser *hp,
+  VALUE req, const char *buffer, size_t len)
+{
+  const char *p, *pe;
+  int cs = hp->cs;
+  size_t off = hp->offset;
+
+  if (cs == http_parser_first_final)
+    return;
+
+  assert(off <= len && "offset past end of buffer");
+
+  p = buffer+off;
+  pe = buffer+len;
+
+  assert(pe - p == len - off && "pointers aren't same distance");
+
+  if (HP_FL_TEST(hp, INCHUNK)) {
+    HP_FL_UNSET(hp, INCHUNK);
+    goto skip_chunk_data_hack;
+  }
+  %% write exec;
+post_exec: /* "_out:" also goes here */
+  if (hp->cs != http_parser_error)
+    hp->cs = cs;
+  hp->offset = p - buffer;
+
+  assert(p <= pe && "buffer overflow after parsing execute");
+  assert(hp->offset <= len && "offset longer than length");
+}
+
+static struct http_parser *data_get(VALUE self)
+{
+  struct http_parser *hp;
+
+  Data_Get_Struct(self, struct http_parser, hp);
+  assert(hp && "failed to extract http_parser struct");
+  return hp;
+}
+
+static void finalize_header(struct http_parser *hp, VALUE req)
+{
+  VALUE temp = rb_hash_aref(req, g_rack_url_scheme);
+  VALUE server_name = g_localhost;
+  VALUE server_port = g_port_80;
+
+  /* set rack.url_scheme to "https" or "http"; no others are allowed by Rack */
+  if (NIL_P(temp)) {
+    temp = rb_hash_aref(req, g_http_x_forwarded_proto);
+    if (!NIL_P(temp) && STR_CSTR_EQ(temp, "https"))
+      server_port = g_port_443;
+    else
+      temp = g_http;
+    rb_hash_aset(req, g_rack_url_scheme, temp);
+  } else if (STR_CSTR_EQ(temp, "https")) {
+    server_port = g_port_443;
+  } else {
+    assert(server_port == g_port_80 && "server_port not set");
+  }
+
+  /* parse and set the SERVER_NAME and SERVER_PORT variables */
+  temp = rb_hash_aref(req, g_http_host);
+  if (!NIL_P(temp)) {
+    char *colon = memchr(RSTRING_PTR(temp), ':', RSTRING_LEN(temp));
+    if (colon) {
+      long port_start = colon - RSTRING_PTR(temp) + 1;
+
+      server_name = rb_str_substr(temp, 0, colon - RSTRING_PTR(temp));
+      if ((RSTRING_LEN(temp) - port_start) > 0)
+        server_port = rb_str_substr(temp, port_start, RSTRING_LEN(temp));
+    } else {
+      server_name = temp;
+    }
+  }
+  rb_hash_aset(req, g_server_name, server_name);
+  rb_hash_aset(req, g_server_port, server_port);
+  if (!HP_FL_TEST(hp, HASHEADER))
+    rb_hash_aset(req, g_server_protocol, g_http_09);
+
+  /* rack requires QUERY_STRING */
+  if (NIL_P(rb_hash_aref(req, g_query_string)))
+    rb_hash_aset(req, g_query_string, rb_str_new(NULL, 0));
+}
+
+static void hp_mark(void *ptr)
+{
+  struct http_parser *hp = ptr;
+
+  rb_gc_mark(hp->cont);
+}
+
+static VALUE HttpParser_alloc(VALUE klass)
+{
+  struct http_parser *hp;
+  return Data_Make_Struct(klass, struct http_parser, hp_mark, NULL, hp);
+}
+
+
+/**
+ * call-seq:
+ *    parser.new => parser
+ *
+ * Creates a new parser.
+ */
+static VALUE HttpParser_init(VALUE self)
+{
+  http_parser_init(data_get(self));
+
+  return self;
+}
+
+/**
+ * call-seq:
+ *    parser.reset => nil
+ *
+ * Resets the parser to its initial state so that you can reuse it
+ * rather than making new ones.
+ */
+static VALUE HttpParser_reset(VALUE self)
+{
+  http_parser_init(data_get(self));
+
+  return Qnil;
+}
+
+static void advance_str(VALUE str, off_t nr)
+{
+  long len = RSTRING_LEN(str);
+
+  if (len == 0)
+    return;
+
+  rb_str_modify(str);
+
+  assert(nr <= len && "trying to advance past end of buffer");
+  len -= nr;
+  if (len > 0) /* unlikely, len is usually 0 */
+    memmove(RSTRING_PTR(str), RSTRING_PTR(str) + nr, len);
+  rb_str_set_len(str, len);
+}
+
+/**
+ * call-seq:
+ *   parser.content_length => nil or Integer
+ *
+ * Returns the number of bytes left to run through HttpParser#filter_body.
+ * This will initially be the value of the "Content-Length" HTTP header
+ * after header parsing is complete and will decrease in value as
+ * HttpParser#filter_body is called for each chunk.  This should return
+ * zero for requests with no body.
+ *
+ * This will return nil on "Transfer-Encoding: chunked" requests.
+ */
+static VALUE HttpParser_content_length(VALUE self)
+{
+  struct http_parser *hp = data_get(self);
+
+  return HP_FL_TEST(hp, CHUNKED) ? Qnil : OFFT2NUM(hp->len.content);
+}
+
+/**
+ * Document-method: trailers
+ * call-seq:
+ *    parser.trailers(req, data) => req or nil
+ *
+ * This is an alias for HttpParser#headers
+ */
+
+/**
+ * Document-method: headers
+ * call-seq:
+ *    parser.headers(req, data) => req or nil
+ *
+ * Takes a Hash and a String of data, parses the String of data, filling
+ * in the Hash and returning the Hash if parsing is finished, nil otherwise.
+ * When returning the req Hash, it may modify data to point to where
+ * body processing should begin.
+ *
+ * Raises HttpParserError if there are parsing errors.
+ */
+static VALUE HttpParser_headers(VALUE self, VALUE req, VALUE data)
+{
+  struct http_parser *hp = data_get(self);
+
+  rb_str_update(data);
+
+  http_parser_execute(hp, req, RSTRING_PTR(data), RSTRING_LEN(data));
+  VALIDATE_MAX_LENGTH(hp->offset, HEADER);
+
+  if (hp->cs == http_parser_first_final ||
+      hp->cs == http_parser_en_ChunkedBody) {
+    advance_str(data, hp->offset + 1);
+    hp->offset = 0;
+
+    return req;
+  }
+
+  if (hp->cs == http_parser_error)
+    rb_raise(eHttpParserError, "Invalid HTTP format, parsing fails.");
+
+  return Qnil;
+}
+
+static int chunked_eof(struct http_parser *hp)
+{
+  return ((hp->cs == http_parser_first_final) || HP_FL_TEST(hp, INTRAILER));
+}
+
+/**
+ * call-seq:
+ *    parser.body_eof? => true or false
+ *
+ * Detects if we're done filtering the body or not.  This can be used
+ * to detect when to stop calling HttpParser#filter_body.
+ */
+static VALUE HttpParser_body_eof(VALUE self)
+{
+  struct http_parser *hp = data_get(self);
+
+  if (HP_FL_TEST(hp, CHUNKED))
+    return chunked_eof(hp) ? Qtrue : Qfalse;
+
+  return hp->len.content == 0 ? Qtrue : Qfalse;
+}
+
+/**
+ * call-seq:
+ *    parser.keepalive? => true or false
+ *
+ * This should be used to detect if a request can really handle
+ * keepalives and pipelining.  Currently, the rules are:
+ *
+ * 1. MUST be a GET or HEAD request
+ * 2. MUST be HTTP/1.1 +or+ HTTP/1.0 with "Connection: keep-alive"
+ * 3. MUST NOT have "Connection: close" set
+ */
+static VALUE HttpParser_keepalive(VALUE self)
+{
+  struct http_parser *hp = data_get(self);
+
+  return HP_FL_ALL(hp, KEEPALIVE) ? Qtrue : Qfalse;
+}
+
+/**
+ * call-seq:
+ *    parser.headers? => true or false
+ *
+ * This should be used to detect if a request has headers (and if
+ * the response will have headers as well).  HTTP/0.9 requests
+ * should return false; all subsequent HTTP versions will return true.
+ */
+static VALUE HttpParser_has_headers(VALUE self)
+{
+  struct http_parser *hp = data_get(self);
+
+  return HP_FL_TEST(hp, HASHEADER) ? Qtrue : Qfalse;
+}
+
+/**
+ * call-seq:
+ *    parser.filter_body(buf, data) => nil/data
+ *
+ * Takes a String of +data+ and will modify +data+ if dechunking is done.
+ * Returns +nil+ if there is more data left to process.  Returns
+ * +data+ if body processing is complete. When returning +data+,
+ * it may modify +data+ so the start of the string points to where
+ * the body ended so that trailer processing can begin.
+ *
+ * Raises HttpParserError if there are dechunking errors.
+ * Basically this is a glorified memcpy(3) that copies +data+
+ * into +buf+ while filtering it through the dechunker.
+ */
+static VALUE HttpParser_filter_body(VALUE self, VALUE buf, VALUE data)
+{
+  struct http_parser *hp = data_get(self);
+  char *dptr;
+  long dlen;
+
+  rb_str_update(data);
+  dptr = RSTRING_PTR(data);
+  dlen = RSTRING_LEN(data);
+
+  StringValue(buf);
+  rb_str_resize(buf, dlen); /* we can never copy more than dlen bytes */
+  OBJ_TAINT(buf); /* keep weirdo $SAFE users happy */
+
+  if (HP_FL_TEST(hp, CHUNKED)) {
+    if (!chunked_eof(hp)) {
+      hp->s.dest_offset = 0;
+      http_parser_execute(hp, buf, dptr, dlen);
+      if (hp->cs == http_parser_error)
+        rb_raise(eHttpParserError, "Invalid HTTP format, parsing fails.");
+
+      assert(hp->s.dest_offset <= hp->offset &&
+             "destination buffer overflow");
+      advance_str(data, hp->offset);
+      rb_str_set_len(buf, hp->s.dest_offset);
+
+      if (RSTRING_LEN(buf) == 0 && chunked_eof(hp)) {
+        assert(hp->len.chunk == 0 && "chunk at EOF but more to parse");
+      } else {
+        data = Qnil;
+      }
+    }
+  } else {
+    /* no need to enter the Ragel machine for unchunked transfers */
+    assert(hp->len.content >= 0 && "negative Content-Length");
+    if (hp->len.content > 0) {
+      long nr = MIN(dlen, hp->len.content);
+
+      memcpy(RSTRING_PTR(buf), dptr, nr);
+      hp->len.content -= nr;
+      if (hp->len.content == 0)
+        hp->cs = http_parser_first_final;
+      advance_str(data, nr);
+      rb_str_set_len(buf, nr);
+      data = Qnil;
+    }
+  }
+  hp->offset = 0; /* for trailer parsing */
+  return data;
+}
+
+#define SET_GLOBAL(var,str) do { \
+  var = find_common_field(str, sizeof(str) - 1); \
+  assert(!NIL_P(var) && "missed global field"); \
+} while (0)
+
+void Init_unicorn_http(void)
+{
+  mUnicorn = rb_define_module("Unicorn");
+  eHttpParserError =
+         rb_define_class_under(mUnicorn, "HttpParserError", rb_eIOError);
+  cHttpParser = rb_define_class_under(mUnicorn, "HttpParser", rb_cObject);
+  init_globals();
+  rb_define_alloc_func(cHttpParser, HttpParser_alloc);
+  rb_define_method(cHttpParser, "initialize", HttpParser_init,0);
+  rb_define_method(cHttpParser, "reset", HttpParser_reset,0);
+  rb_define_method(cHttpParser, "headers", HttpParser_headers, 2);
+  rb_define_method(cHttpParser, "filter_body", HttpParser_filter_body, 2);
+  rb_define_method(cHttpParser, "trailers", HttpParser_headers, 2);
+  rb_define_method(cHttpParser, "content_length", HttpParser_content_length, 0);
+  rb_define_method(cHttpParser, "body_eof?", HttpParser_body_eof, 0);
+  rb_define_method(cHttpParser, "keepalive?", HttpParser_keepalive, 0);
+  rb_define_method(cHttpParser, "headers?", HttpParser_has_headers, 0);
+
+  /*
+   * The maximum size of a single chunk when using chunked transfer encoding.
+   * This is only a theoretical maximum used to detect errors in clients;
+   * it is highly unlikely to encounter clients that send more than
+   * several kilobytes at once.
+   */
+  rb_define_const(cHttpParser, "CHUNK_MAX", OFFT2NUM(UH_OFF_T_MAX));
+
+  /*
+   * The maximum size of the body as specified by Content-Length.
+   * This is only a theoretical maximum, the actual limit is subject
+   * to the limits of the file system used for +Dir.tmpdir+.
+   */
+  rb_define_const(cHttpParser, "LENGTH_MAX", OFFT2NUM(UH_OFF_T_MAX));
+
+  init_common_fields();
+  SET_GLOBAL(g_http_host, "HOST");
+  SET_GLOBAL(g_http_trailer, "TRAILER");
+  SET_GLOBAL(g_http_transfer_encoding, "TRANSFER_ENCODING");
+  SET_GLOBAL(g_content_length, "CONTENT_LENGTH");
+  SET_GLOBAL(g_http_connection, "CONNECTION");
+}
+#undef SET_GLOBAL
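
Putting the Ruby-visible API together, the parser is driven roughly as in the sketch below (hedged: based on the RDoc above, with a literal request string standing in for the socket read loop used by Unicorn::HttpRequest and Unicorn::TeeInput):

  require 'unicorn_http'  # the extension built by the extconf.rb above

  parser = Unicorn::HttpParser.new
  env = {}
  data = "GET /path?q=1 HTTP/1.1\r\nHost: example.com\r\n\r\n"

  # headers() returns env once the request head is complete, nil if more
  # data is needed; data is advanced to where the body (if any) begins
  if parser.headers(env, data)
    env['REQUEST_METHOD']  # => "GET"
    env['QUERY_STRING']    # => "q=1"
    parser.keepalive?      # => true (GET + HTTP/1.1, no "Connection: close")
    parser.content_length  # => 0 (no body; nil for chunked requests)
  end

  # for requests with a body, copy/dechunk it through filter_body until EOF,
  # reading more data from the socket whenever filter_body returns nil:
  #   parser.filter_body(buf, data) until parser.body_eof?
  #   parser.trailers(env, data) if a Trailer header was sent

  parser.reset  # reuse the parser for the next request on a keepalive socket
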
diff --git a/ext/unicorn/http11/http11_parser_common.rl b/ext/unicorn_http/unicorn_http_common.rl
index ae01a55..6fca604 100644
--- a/ext/unicorn/http11/http11_parser_common.rl
+++ b/ext/unicorn_http/unicorn_http_common.rl
@@ -1,6 +1,6 @@
 %%{
-  
-  machine http_parser_common;
+
+  machine unicorn_http_common;
 
 #### HTTP PROTOCOL GRAMMAR
 # line endings
@@ -19,6 +19,7 @@
   uchar = (unreserved | escape | sorta_safe);
   pchar = (uchar | ":" | "@" | "&" | "=" | "+");
   tspecials = ("(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\\" | "\"" | "/" | "[" | "]" | "?" | "=" | "{" | "}" | " " | "\t");
+  lws = (" " | "\t");
 
 # elements
   token = (ascii -- (CTL | tspecials));
@@ -27,19 +28,21 @@
   scheme = ( "http"i ("s"i)? ) $downcase_char >mark %scheme;
   hostname = (alnum | "-" | "." | "_")+;
   host_with_port = (hostname (":" digit*)?) >mark %host;
+  userinfo = ((unreserved | escape | ";" | ":" | "&" | "=" | "+")+ "@")*;
 
   path = ( pchar+ ( "/" pchar* )* ) ;
   query = ( uchar | reserved )* %query_string ;
   param = ( pchar | "/" )* ;
   params = ( param ( ";" param )* ) ;
-  rel_path = ( path? %request_path (";" params)? ) ("?" %start_query query)?;
+  rel_path = (path? (";" params)? %request_path) ("?" %start_query query)?;
   absolute_path = ( "/"+ rel_path );
   path_uri = absolute_path > mark %request_uri;
-  Absolute_URI = (scheme "://" host_with_port path_uri);
+  Absolute_URI = (scheme "://" userinfo host_with_port path_uri);
 
   Request_URI = ((absolute_path | "*") >mark %request_uri) | Absolute_URI;
   Fragment = ( uchar | reserved )* >mark %fragment;
-  Method = ( upper | digit | safe ){1,20} >mark %request_method;
+  Method = (token){1,20} >mark %request_method;
+  GetOnly = "GET" >mark %request_method;
 
   http_number = ( digit+ "." digit+ ) ;
   HTTP_Version = ( "HTTP/" http_number ) >mark %http_version ;
@@ -49,10 +52,24 @@
 
   field_value = any* >start_value %write_value;
 
-  message_header = field_name ":" " "* field_value :> CRLF;
+  value_cont = lws+ any* >start_value %write_cont_value;
+
+  message_header = ((field_name ":" " "* field_value)|value_cont) :> CRLF;
+  chunk_ext_val = token*;
+  chunk_ext_name = token*;
+  chunk_extension = ( ";" " "* chunk_ext_name ("=" chunk_ext_val)? )*;
+  last_chunk = "0"+ chunk_extension CRLF;
+  chunk_size = (xdigit* [1-9a-fA-F] xdigit*) $add_to_chunk_size;
+  chunk_end = CRLF;
+  chunk_body = any >skip_chunk_data;
+  chunk_begin = chunk_size chunk_extension CRLF;
+  chunk = chunk_begin chunk_body chunk_end;
+  ChunkedBody := chunk* last_chunk @end_chunked_body;
+  Trailers := (message_header)* CRLF @end_trailers;
 
-  Request = Request_Line ( message_header )* ( CRLF @done );
+  FullRequest = Request_Line (message_header)* CRLF @header_done;
+  SimpleRequest = GetOnly " " Request_URI ("#"Fragment){0,1} CRLF @header_done;
 
-main := Request;
+main := FullRequest | SimpleRequest;
 
 }%%
diff --git a/lib/unicorn.rb b/lib/unicorn.rb
index 49435d8..7a1ef34 100644
--- a/lib/unicorn.rb
+++ b/lib/unicorn.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 require 'fcntl'
 require 'unicorn/socket_helper'
 autoload :Rack, 'rack'
@@ -6,10 +8,19 @@ autoload :Rack, 'rack'
 # a Unicorn web server.  It contains a minimalist HTTP server with just enough
 # functionality to service web application requests as fast as possible.
 module Unicorn
+
+  # raised inside TeeInput when a client closes the socket inside the
+  # application dispatch.  This is always raised with an empty backtrace
+  # since there is nothing in the application stack that is responsible
+  # for client shutdowns/disconnects.
+  class ClientShutdown < EOFError
+  end
+
   autoload :Const, 'unicorn/const'
   autoload :HttpRequest, 'unicorn/http_request'
   autoload :HttpResponse, 'unicorn/http_response'
   autoload :Configurator, 'unicorn/configurator'
+  autoload :TeeInput, 'unicorn/tee_input'
   autoload :Util, 'unicorn/util'
 
   class << self
@@ -22,8 +33,12 @@ module Unicorn
   # processes which in turn handle the I/O and application process.
   # Listener sockets are started in the master process and shared with
   # forked worker children.
-  class HttpServer
-    attr_reader :logger
+
+  class HttpServer < Struct.new(:listener_opts, :timeout, :worker_processes,
+                                :before_fork, :after_fork, :before_exec,
+                                :logger, :pid, :app, :preload_app,
+                                :reexec_pid, :orig_app, :init_listeners,
+                                :master_pid, :config, :ready_pipe)
     include ::Unicorn::SocketHelper
 
     # prevents IO objects in here from being GC-ed
@@ -35,7 +50,23 @@ module Unicorn
     # This hash maps PIDs to Workers
     WORKERS = {}
 
-    # See: http://cr.yp.to/docs/selfpipe.html
+    # We use SELF_PIPE differently in the master and worker processes:
+    #
+    # * The master process never closes or reinitializes this once
+    # initialized.  Signal handlers in the master process will write to
+    # it to wake up the master from IO.select in exactly the same manner
+    # djb describes in http://cr.yp.to/docs/selfpipe.html
+    #
+    # * The workers immediately close the pipe they inherit from the
+    # master and replace it with a new pipe after forking.  This new
+    # pipe is also used to wakeup from IO.select from inside (worker)
+    # signal handlers.  However, workers *close* the pipe descriptors in
+    # the signal handlers to raise EBADF in IO.select instead of writing
+    # like we do in the master.  We cannot easily use the reader set for
+    # IO.select because LISTENERS is already that set, and it's extra
+    # work (and cycles) to distinguish the pipe FD from the reader set
+    # once IO.select returns.  So we're lazy and just close the pipe when
+    # a (rare) signal arrives in the worker and reinitialize the pipe later.
     SELF_PIPE = []
 
     # signal queue used for self-piping
@@ -46,19 +77,82 @@ module Unicorn
 
     # We populate this at startup so we can figure out how to reexecute
     # and upgrade the currently running instance of Unicorn
+    # This Hash is considered a stable interface and changing its contents
+    # will allow you to switch between different installations of Unicorn
+    # or even different installations of the same applications without
+    # downtime.  Keys of this constant Hash are described as follows:
+    #
+    # * 0 - the path to the unicorn/unicorn_rails executable
+    # * :argv - a deep copy of the ARGV array the executable originally saw
+    # * :cwd - the working directory of the application, this is where
+    # you originally started Unicorn.
+    #
+    # The following example may be used in your Unicorn config file to
+    # change your working directory during a config reload (HUP) without
+    # upgrading or restarting:
+    #
+    #   Dir.chdir(Unicorn::HttpServer::START_CTX[:cwd] = path)
+    #
+    # To change your unicorn executable to a different path without downtime,
+    # you can set the following in your Unicorn config file, HUP and then
+    # continue with the traditional USR2 + QUIT upgrade steps:
+    #
+    #   Unicorn::HttpServer::START_CTX[0] = "/home/bofh/1.9.2/bin/unicorn"
     START_CTX = {
       :argv => ARGV.map { |arg| arg.dup },
-      # don't rely on Dir.pwd here since it's not symlink-aware, and
-      # symlink dirs are the default with Capistrano...
-      :cwd => `/bin/sh -c pwd`.chomp("\n"),
+      :cwd => lambda {
+          # favor ENV['PWD'] since it is (usually) symlink aware for
+          # Capistrano and like systems
+          begin
+            a = File.stat(pwd = ENV['PWD'])
+            b = File.stat(Dir.pwd)
+            a.ino == b.ino && a.dev == b.dev ? pwd : Dir.pwd
+          rescue
+            Dir.pwd
+          end
+        }.call,
       0 => $0.dup,
     }
 
+    # This class and its members can be considered a stable interface
+    # and will not change in a backwards-incompatible fashion between
+    # releases of Unicorn.  You may need to access it in the
+    # before_fork/after_fork hooks.  See the Unicorn::Configurator RDoc
+    # for examples.
     class Worker < Struct.new(:nr, :tmp)
+
+      autoload :Etc, 'etc'
+
       # worker objects may be compared to just plain numbers
       def ==(other_nr)
         self.nr == other_nr
       end
+
+      # Changes the worker process to the specified +user+ and +group+.
+      # This is only intended to be called from within the worker
+      # process from the +after_fork+ hook, after any privileged
+      # functions that need to run (e.g. to set per-worker CPU
+      # affinity, niceness, etc.) have been called.
+      #
+      # Any and all errors raised within this method will be propagated
+      # directly back to the caller (usually the +after_fork+ hook).
+      # These errors commonly include ArgumentError for specifying an
+      # invalid user/group and Errno::EPERM for insufficient privileges.
+      def user(user, group = nil)
+        # we do not protect the caller, checking Process.euid == 0 is
+        # insufficient because modern systems have fine-grained
+        # capabilities.  Let the caller handle any and all errors.
+        uid = Etc.getpwnam(user).uid
+        gid = Etc.getgrnam(group).gid if group
+        Unicorn::Util.chown_logs(uid, gid)
+        tmp.chown(uid, gid)
+        if gid && Process.egid != gid
+          Process.initgroups(user, gid)
+          Process::GID.change_privilege(gid)
+        end
+        Process.euid != uid and Process::UID.change_privilege(uid)
+      end
+
     end
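
The new Worker#user method above is meant to be driven from the after_fork hook in a Unicorn config file, along these lines (a sketch only; the user/group names are placeholders):

  # unicorn.conf.rb
  after_fork do |server, worker|
    # do any privileged per-worker setup first (CPU affinity, niceness, ...),
    # then drop privileges for the rest of the worker's life
    worker.user('www-data', 'www-data') if Process.euid == 0
  end
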
 
     # Creates a working server on host:port (strange things happen if
@@ -66,14 +160,25 @@ module Unicorn
     # HttpServer.run.join to join the thread that's processing
     # incoming requests on the socket.
     def initialize(app, options = {})
-      @app = app
-      @pid = nil
-      @reexec_pid = 0
-      @init_listeners = options[:listeners] ? options[:listeners].dup : []
-      @config = Configurator.new(options.merge(:use_defaults => true))
-      @listener_opts = {}
-      @config.commit!(self, :skip => [:listeners, :pid])
-      @orig_app = app
+      self.app = app
+      self.reexec_pid = 0
+      self.ready_pipe = options.delete(:ready_pipe)
+      self.init_listeners = options[:listeners] ? options[:listeners].dup : []
+      self.config = Configurator.new(options.merge(:use_defaults => true))
+      self.listener_opts = {}
+
+      # we try inheriting listeners first, so we bind them later.
+      # we don't write the pid file until we've bound listeners in case
+      # unicorn was started twice by mistake.  Even though our #pid= method
+      # checks for stale/existing pid files, race conditions are still
+      # possible (and difficult/non-portable to avoid) and are likely
+      # to clobber the pid if the second start was in quick succession
+      # after the first, so we rely on the listener binding to fail in
+      # that case.  Some tests (in and outside of this source tree) and
+      # monitoring tools may also rely on pid files existing before we
+      # attempt to connect to the listener(s)
+      config.commit!(self, :skip => [:listeners, :pid])
+      self.orig_app = app
     end
 
     # Runs the thing.  Returns self so you can run join on it
@@ -84,13 +189,13 @@ module Unicorn
       # before they become UNIXServer or TCPServer
       inherited = ENV['UNICORN_FD'].to_s.split(/,/).map do |fd|
         io = Socket.for_fd(fd.to_i)
-        set_server_sockopt(io, @listener_opts[sock_name(io)])
+        set_server_sockopt(io, listener_opts[sock_name(io)])
         IO_PURGATORY << io
         logger.info "inherited addr=#{sock_name(io)} fd=#{fd}"
         server_cast(io)
       end
 
-      config_listeners = @config[:listeners].dup
+      config_listeners = config[:listeners].dup
       LISTENERS.replace(inherited)
 
       # we start out with generic Socket objects that get cast to either
@@ -100,11 +205,14 @@ module Unicorn
       config_listeners -= listener_names
       if config_listeners.empty? && LISTENERS.empty?
         config_listeners << Unicorn::Const::DEFAULT_LISTEN
+        init_listeners << Unicorn::Const::DEFAULT_LISTEN
+        START_CTX[:argv] << "-l#{Unicorn::Const::DEFAULT_LISTEN}"
       end
       config_listeners.each { |addr| listen(addr) }
       raise ArgumentError, "no listeners" if LISTENERS.empty?
-      self.pid = @config[:pid]
-      build_app! if @preload_app
+      self.pid = config[:pid]
+      self.master_pid = $$
+      build_app! if preload_app
       maintain_worker_count
       self
     end
@@ -114,7 +222,7 @@ module Unicorn
     def listeners=(listeners)
       cur_names, dead_names = [], []
       listener_names.each do |name|
-        if "/" == name[0..0]
+        if ?/ == name[0]
           # mark unlinked sockets as dead so we can rebind them
           (File.socket?(name) ? cur_names : dead_names) << name
         else
@@ -122,8 +230,7 @@ module Unicorn
         end
       end
       set_names = listener_names(listeners)
-      dead_names += cur_names - set_names
-      dead_names.uniq!
+      dead_names.concat(cur_names - set_names).uniq!
 
       LISTENERS.delete_if do |io|
         if dead_names.include?(sock_name(io))
@@ -132,7 +239,7 @@ module Unicorn
           end
           (io.close rescue nil).nil? # true
         else
-          set_server_sockopt(io, @listener_opts[sock_name(io)])
+          set_server_sockopt(io, listener_opts[sock_name(io)])
           false
         end
       end
@@ -144,30 +251,49 @@ module Unicorn
     def stderr_path=(path); redirect_io($stderr, path); end
 
     def logger=(obj)
-      REQUEST.logger = @logger = obj
+      HttpRequest::DEFAULTS["rack.logger"] = super
     end
 
     # sets the path for the PID file of the master process
     def pid=(path)
       if path
         if x = valid_pid?(path)
-          return path if @pid && path == @pid && x == $$
+          return path if pid && path == pid && x == $$
           raise ArgumentError, "Already running on PID:#{x} " \
                                "(or pid=#{path} is stale)"
         end
       end
-      unlink_pid_safe(@pid) if @pid
-      File.open(path, 'wb') { |fp| fp.syswrite("#$$\n") } if path
-      @pid = path
+      unlink_pid_safe(pid) if pid
+
+      if path
+        fp = begin
+          tmp = "#{File.dirname(path)}/#{rand}.#$$"
+          File.open(tmp, File::RDWR|File::CREAT|File::EXCL, 0644)
+        rescue Errno::EEXIST
+          retry
+        end
+        fp.syswrite("#$$\n")
+        File.rename(fp.path, path)
+        fp.close
+      end
+      super(path)
     end
 
     # add a given address to the +listeners+ set, idempotently
     # Allows workers to add a private, per-process listener via the
-    # @after_fork hook.  Very useful for debugging and testing.
-    def listen(address, opt = {}.merge(@listener_opts[address] || {}))
+    # after_fork hook.  Very useful for debugging and testing.
+    # +:tries+ may be specified as an option for the number of times
+    # to retry, and +:delay+ may be specified as the time in seconds
+    # to delay between retries.
+    # A negative value for +:tries+ indicates the listen will be
+    # retried indefinitely; this is useful when workers belonging to
+    # different masters are spawned during a transparent upgrade.
+    def listen(address, opt = {}.merge(listener_opts[address] || {}))
+      address = config.expand_addr(address)
       return if String === address && listener_names.include?(address)
 
-      delay, tries = 0.5, 5
+      delay = opt[:delay] || 0.5
+      tries = opt[:tries] || 5
       begin
         io = bind_listen(address, opt)
         unless TCPServer === io || UNIXServer === io
@@ -176,14 +302,18 @@ module Unicorn
         end
         logger.info "listening on addr=#{sock_name(io)} fd=#{io.fileno}"
         LISTENERS << io
-        return io
+        io
       rescue Errno::EADDRINUSE => err
         logger.error "adding listener failed addr=#{address} (in use)"
         raise err if tries == 0
         tries -= 1
-        logger.error "retrying in #{delay} seconds (#{tries} tries left)"
+        logger.error "retrying in #{delay} seconds " \
+                     "(#{tries < 0 ? 'infinite' : tries} tries left)"
         sleep(delay)
         retry
+      rescue => err
+        logger.fatal "error adding listener addr=#{address}"
+        raise err
       end
     end
 
@@ -196,17 +326,27 @@ module Unicorn
       # are trapped.  See trap_deferred
       init_self_pipe!
       respawn = true
+      last_check = Time.now
 
       QUEUE_SIGS.each { |sig| trap_deferred(sig) }
       trap(:CHLD) { |sig_nr| awaken_master }
       proc_name 'master'
       logger.info "master process ready" # test_exec.rb relies on this message
+      if ready_pipe
+        ready_pipe.syswrite($$.to_s)
+        ready_pipe.close rescue nil
+        self.ready_pipe = nil
+      end
       begin
         loop do
           reap_all_workers
           case SIG_QUEUE.shift
           when nil
-            murder_lazy_workers
+            # avoid murdering workers after our master process (or the
+            # machine) comes out of suspend/hibernation
+            if (last_check + timeout) >= (last_check = Time.now)
+              murder_lazy_workers
+            end
             maintain_worker_count if respawn
             master_sleep
           when :QUIT # graceful shutdown
@@ -230,12 +370,12 @@ module Unicorn
               logger.info "SIGWINCH ignored because we're not daemonized"
             end
           when :TTIN
-            @worker_processes += 1
+            self.worker_processes += 1
           when :TTOU
-            @worker_processes -= 1 if @worker_processes > 0
+            self.worker_processes -= 1 if self.worker_processes > 0
           when :HUP
             respawn = true
-            if @config.config_file
+            if config.config_file
               load_config!
               redo # immediate reaping since we may have QUIT workers
             else # exec binary and exit if there's no config file
@@ -247,36 +387,33 @@ module Unicorn
         end
       rescue Errno::EINTR
         retry
-      rescue Object => e
+      rescue => e
         logger.error "Unhandled master loop exception #{e.inspect}."
         logger.error e.backtrace.join("\n")
         retry
       end
       stop # gracefully shutdown all workers on our way out
       logger.info "master complete"
-      unlink_pid_safe(@pid) if @pid
+      unlink_pid_safe(pid) if pid
     end
 
     # Terminates all workers, but does not exit master process
     def stop(graceful = true)
       self.listeners = []
-      kill_each_worker(graceful ? :QUIT : :TERM)
-      timeleft = @timeout
-      step = 0.2
-      reap_all_workers
-      until WORKERS.empty?
-        sleep(step)
+      limit = Time.now + timeout
+      until WORKERS.empty? || Time.now > limit
+        kill_each_worker(graceful ? :QUIT : :TERM)
+        sleep(0.1)
         reap_all_workers
-        (timeleft -= step) > 0 and next
-        kill_each_worker(:KILL)
       end
+      kill_each_worker(:KILL)
     end
 
     private
 
     # list of signals we care about and trap in master.
     QUEUE_SIGS = [ :WINCH, :QUIT, :INT, :TERM, :USR1, :USR2, :HUP,
-                   :TTIN, :TTOU ].freeze
+                   :TTIN, :TTOU ]
 
     # defer a signal for later processing in #join (master process)
     def trap_deferred(signal)
@@ -314,15 +451,15 @@ module Unicorn
     def reap_all_workers
       begin
         loop do
-          pid, status = Process.waitpid2(-1, Process::WNOHANG)
-          pid or break
-          if @reexec_pid == pid
+          wpid, status = Process.waitpid2(-1, Process::WNOHANG)
+          wpid or break
+          if reexec_pid == wpid
             logger.error "reaped #{status.inspect} exec()-ed"
-            @reexec_pid = 0
-            self.pid = @pid.chomp('.oldbin') if @pid
+            self.reexec_pid = 0
+            self.pid = pid.chomp('.oldbin') if pid
             proc_name 'master'
           else
-            worker = WORKERS.delete(pid) and worker.tmp.close rescue nil
+            worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
             logger.info "reaped #{status.inspect} " \
                         "worker=#{worker.nr rescue 'unknown'}"
           end
@@ -333,32 +470,32 @@ module Unicorn
 
     # reexecutes the START_CTX with a new binary
     def reexec
-      if @reexec_pid > 0
+      if reexec_pid > 0
         begin
-          Process.kill(0, @reexec_pid)
-          logger.error "reexec-ed child already running PID:#{@reexec_pid}"
+          Process.kill(0, reexec_pid)
+          logger.error "reexec-ed child already running PID:#{reexec_pid}"
           return
         rescue Errno::ESRCH
-          @reexec_pid = 0
+          self.reexec_pid = 0
         end
       end
 
-      if @pid
-        old_pid = "#{@pid}.oldbin"
-        prev_pid = @pid.dup
+      if pid
+        old_pid = "#{pid}.oldbin"
+        prev_pid = pid.dup
         begin
           self.pid = old_pid  # clear the path for a new pid file
         rescue ArgumentError
           logger.error "old PID:#{valid_pid?(old_pid)} running with " \
                        "existing pid=#{old_pid}, refusing rexec"
           return
-        rescue Object => e
+        rescue => e
           logger.error "error writing pid=#{old_pid} #{e.class} #{e.message}"
           return
         end
       end
 
-      @reexec_pid = fork do
+      self.reexec_pid = fork do
         listener_fds = LISTENERS.map { |sock| sock.fileno }
         ENV['UNICORN_FD'] = listener_fds.join(',')
         Dir.chdir(START_CTX[:cwd])
@@ -375,86 +512,91 @@ module Unicorn
           io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
         end
         logger.info "executing #{cmd.inspect} (in #{Dir.pwd})"
-        @before_exec.call(self)
+        before_exec.call(self)
         exec(*cmd)
       end
       proc_name 'master (old)'
     end
 
-    # forcibly terminate all workers that haven't checked in in @timeout
+    # forcibly terminate all workers that haven't checked in in timeout
     # seconds.  The timeout is implemented using an unlinked File
     # shared between the parent process and each worker.  The worker
     # runs File#chmod to modify the ctime of the File.  If the ctime
-    # is stale for >@timeout seconds, then we'll kill the corresponding
+    # is stale for >timeout seconds, then we'll kill the corresponding
     # worker.
     def murder_lazy_workers
-      diff = stat = nil
-      WORKERS.dup.each_pair do |pid, worker|
-        stat = begin
-          worker.tmp.stat
-        rescue => e
-          logger.warn "worker=#{worker.nr} PID:#{pid} stat error: #{e.inspect}"
-          kill_worker(:QUIT, pid)
-          next
-        end
-        stat.mode == 0100000 and next
-        (diff = (Time.now - stat.ctime)) <= @timeout and next
-        logger.error "worker=#{worker.nr} PID:#{pid} timeout " \
-                     "(#{diff}s > #{@timeout}s), killing"
-        kill_worker(:KILL, pid) # take no prisoners for @timeout violations
+      WORKERS.dup.each_pair do |wpid, worker|
+        stat = worker.tmp.stat
+        # skip workers that disable fchmod or have never fchmod-ed
+        stat.mode == 0100600 and next
+        (diff = (Time.now - stat.ctime)) <= timeout and next
+        logger.error "worker=#{worker.nr} PID:#{wpid} timeout " \
+                     "(#{diff}s > #{timeout}s), killing"
+        kill_worker(:KILL, wpid) # take no prisoners for timeout violations
       end
     end
 
     def spawn_missing_workers
-      (0...@worker_processes).each do |worker_nr|
+      (0...worker_processes).each do |worker_nr|
         WORKERS.values.include?(worker_nr) and next
-        begin
-          Dir.chdir(START_CTX[:cwd])
-        rescue Errno::ENOENT => err
-          logger.fatal "#{err.inspect} (#{START_CTX[:cwd]})"
-          SIG_QUEUE << :QUIT # forcibly emulate SIGQUIT
-          return
-        end
         worker = Worker.new(worker_nr, Unicorn::Util.tmpio)
-        @before_fork.call(self, worker)
-        pid = fork { worker_loop(worker) }
-        WORKERS[pid] = worker
+        before_fork.call(self, worker)
+        WORKERS[fork {
+          ready_pipe.close if ready_pipe
+          self.ready_pipe = nil
+          worker_loop(worker)
+        }] = worker
       end
     end
 
     def maintain_worker_count
-      (off = WORKERS.size - @worker_processes) == 0 and return
+      (off = WORKERS.size - worker_processes) == 0 and return
       off < 0 and return spawn_missing_workers
-      WORKERS.dup.each_pair { |pid,w|
-        w.nr >= @worker_processes and kill_worker(:QUIT, pid) rescue nil
+      WORKERS.dup.each_pair { |wpid,w|
+        w.nr >= worker_processes and kill_worker(:QUIT, wpid) rescue nil
       }
     end
 
-    # once a client is accepted, it is processed in its entirety here
-    # in 3 easy steps: read request, call app, write app response
-    def process_client(app, client)
-      client.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
-      HttpResponse.write(client, app.call(REQUEST.read(client)))
     # if we get any error, try to write something back to the client
     # assuming we haven't closed the socket, but don't get hung up
     # if the socket is already closed or broken.  We'll always ensure
     # the socket is closed at the end of this function
-    rescue EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
-      client.write_nonblock(Const::ERROR_500_RESPONSE) rescue nil
-      client.close rescue nil
-    rescue HttpParserError # try to tell the client they're bad
-      client.write_nonblock(Const::ERROR_400_RESPONSE) rescue nil
-      client.close rescue nil
-    rescue Object => e
-      client.write_nonblock(Const::ERROR_500_RESPONSE) rescue nil
-      client.close rescue nil
-      logger.error "Read error: #{e.inspect}"
-      logger.error e.backtrace.join("\n")
+    def handle_error(client, e)
+      msg = case e
+      when EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
+        Const::ERROR_500_RESPONSE
+      when HttpParserError # try to tell the client they're bad
+        Const::ERROR_400_RESPONSE
+      else
+        logger.error "Read error: #{e.inspect}"
+        logger.error e.backtrace.join("\n")
+        Const::ERROR_500_RESPONSE
+      end
+      client.write_nonblock(msg)
+      client.close
+      rescue
+        nil
+    end
+
+    # once a client is accepted, it is processed in its entirety here
+    # in 3 easy steps: read request, call app, write app response
+    def process_client(client)
+      client.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
+      response = app.call(env = REQUEST.read(client))
+
+      if 100 == response.first.to_i
+        client.write(Const::EXPECT_100_RESPONSE)
+        env.delete(Const::HTTP_EXPECT)
+        response = app.call(env)
+      end
+      HttpResponse.write(client, response, HttpRequest::PARSER.headers?)
+    rescue => e
+      handle_error(client, e)
     end
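
A Rack application can opt into the Expect: 100-continue handling above by returning a 100 status on its first call; Unicorn then writes the interim response, removes HTTP_EXPECT from the env and calls the app again.  A hypothetical config.ru demonstrating the round trip:

    app = lambda do |env|
      if /\A100-continue\z/i =~ env['HTTP_EXPECT']
        [ 100, {}, [] ]   # ask Unicorn to send "HTTP/1.1 100 Continue"
      else
        body = env['rack.input'].read
        msg = "got #{body.size} bytes\n"
        [ 200, { 'Content-Type' => 'text/plain',
                 'Content-Length' => msg.size.to_s }, [ msg ] ]
      end
    end
    run app
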
 
     # gets rid of stuff the worker has no business keeping track of
     # to free some resources and drops all sig handlers.
-    # traps for USR1, USR2, and HUP may be set in the @after_fork Proc
+    # traps for USR1, USR2, and HUP may be set in the after_fork Proc
     # by the user.
     def init_worker_process(worker)
       QUEUE_SIGS.each { |sig| trap(sig, nil) }
@@ -467,15 +609,15 @@ module Unicorn
       WORKERS.clear
       LISTENERS.each { |sock| sock.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
       worker.tmp.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
-      @after_fork.call(self, worker) # can drop perms
-      @timeout /= 2.0 # halve it for select()
-      build_app! unless @preload_app
+      after_fork.call(self, worker) # can drop perms
+      self.timeout /= 2.0 # halve it for select()
+      build_app! unless preload_app
     end
 
     def reopen_worker_logs(worker_nr)
-      @logger.info "worker=#{worker_nr} reopening logs..."
+      logger.info "worker=#{worker_nr} reopening logs..."
       Unicorn::Util.reopen_logs
-      @logger.info "worker=#{worker_nr} done reopening logs"
+      logger.info "worker=#{worker_nr} done reopening logs"
       init_self_pipe!
     end
 
@@ -483,25 +625,24 @@ module Unicorn
     # for connections and doesn't die until the parent dies (or is
     # given a INT, QUIT, or TERM signal)
     def worker_loop(worker)
-      master_pid = Process.ppid # slightly racy, but less memory usage
+      ppid = master_pid
       init_worker_process(worker)
       nr = 0 # this becomes negative if we need to reopen logs
       alive = worker.tmp # tmp is our lifeline to the master process
       ready = LISTENERS
-      t = ti = 0
 
       # closing anything we IO.select on will raise EBADF
       trap(:USR1) { nr = -65536; SELF_PIPE.first.close rescue nil }
       trap(:QUIT) { alive = nil; LISTENERS.each { |s| s.close rescue nil } }
       [:TERM, :INT].each { |sig| trap(sig) { exit!(0) } } # instant shutdown
-      @logger.info "worker=#{worker.nr} ready"
-      app = @app
+      logger.info "worker=#{worker.nr} ready"
+      m = 0
 
       begin
         nr < 0 and reopen_worker_logs(worker.nr)
         nr = 0
 
-        # we're a goner in @timeout seconds anyways if alive.chmod
+        # we're a goner in timeout seconds anyways if alive.chmod
         # breaks, so don't trap the exception.  Using fchmod() since
         # futimes() is not available in base Ruby and I very strongly
         # prefer temporary files to be unlinked for security,
@@ -509,13 +650,13 @@ module Unicorn
         # changes with chmod doesn't update ctime on all filesystems; so
         # we change our counter each and every time (after process_client
         # and before IO.select).
-        t == (ti = Time.now.to_i) or alive.chmod(t = ti)
+        alive.chmod(m = 0 == m ? 1 : 0)
 
         ready.each do |sock|
           begin
-            process_client(app, sock.accept_nonblock)
+            process_client(sock.accept_nonblock)
             nr += 1
-            t == (ti = Time.now.to_i) or alive.chmod(t = ti)
+            alive.chmod(m = 0 == m ? 1 : 0)
           rescue Errno::EAGAIN, Errno::ECONNABORTED
           end
           break if nr < 0
@@ -523,22 +664,22 @@ module Unicorn
 
         # make the following bet: if we accepted clients this round,
         # we're probably reasonably busy, so avoid calling select()
-        # and do a speculative accept_nonblock on every listener
+        # and do a speculative accept_nonblock on ready listeners
         # before we sleep again in select().
         redo unless nr == 0 # (nr < 0) => reopen logs
 
-        master_pid == Process.ppid or return
-        alive.chmod(t = 0)
+        ppid == Process.ppid or return
+        alive.chmod(m = 0 == m ? 1 : 0)
         begin
           # timeout used so we can detect parent death:
-          ret = IO.select(LISTENERS, nil, SELF_PIPE, @timeout) or redo
+          ret = IO.select(LISTENERS, nil, SELF_PIPE, timeout) or redo
           ready = ret.first
         rescue Errno::EINTR
           ready = LISTENERS
         rescue Errno::EBADF
           nr < 0 or return
         end
-      rescue Object => e
+      rescue => e
         if alive
           logger.error "Unhandled listen loop exception #{e.inspect}."
           logger.error e.backtrace.join("\n")
@@ -548,21 +689,23 @@ module Unicorn
 
     # delivers a signal to a worker and fails gracefully if the worker
     # is no longer running.
-    def kill_worker(signal, pid)
+    def kill_worker(signal, wpid)
       begin
-        Process.kill(signal, pid)
+        Process.kill(signal, wpid)
       rescue Errno::ESRCH
-        worker = WORKERS.delete(pid) and worker.tmp.close rescue nil
+        worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
       end
     end
 
     # delivers a signal to each worker
     def kill_each_worker(signal)
-      WORKERS.keys.each { |pid| kill_worker(signal, pid) }
+      WORKERS.keys.each { |wpid| kill_worker(signal, wpid) }
     end
 
     # unlinks a PID file at given +path+ if it contains the current PID
-    # useful as an at_exit handler.
+    # still potentially racy without locking the directory (which is
+    # non-portable and may interact badly with other programs), but the
+    # window for hitting the race condition is small
     def unlink_pid_safe(path)
       (File.read(path).to_i == $$ and File.unlink(path)) rescue nil
     end
@@ -570,29 +713,30 @@ module Unicorn
     # returns a PID if a given path contains a non-stale PID file,
     # nil otherwise.
     def valid_pid?(path)
-      if File.exist?(path) && (pid = File.read(path).to_i) > 1
-        begin
-          Process.kill(0, pid)
-          return pid
-        rescue Errno::ESRCH
-        end
+      wpid = File.read(path).to_i
+      wpid <= 0 and return nil
+      begin
+        Process.kill(0, wpid)
+        wpid
+      rescue Errno::ESRCH
+        # don't unlink stale pid files, racy without non-portable locking...
       end
-      nil
+      rescue Errno::ENOENT
     end
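
The probe behind valid_pid? is plain kill(2) with signal 0, which checks that the process exists without delivering anything.  A minimal, hypothetical illustration:

    def alive?(pid)
      Process.kill(0, pid)
      true
    rescue Errno::ESRCH # no such process, so any pid file naming it is stale
      false
    end

    puts alive?(Process.pid) # => true for our own PID
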
 
     def load_config!
       begin
-        logger.info "reloading config_file=#{@config.config_file}"
-        @config[:listeners].replace(@init_listeners)
-        @config.reload
-        @config.commit!(self)
+        logger.info "reloading config_file=#{config.config_file}"
+        config[:listeners].replace(init_listeners)
+        config.reload
+        config.commit!(self)
         kill_each_worker(:QUIT)
         Unicorn::Util.reopen_logs
-        @app = @orig_app
-        build_app! if @preload_app
-        logger.info "done reloading config_file=#{@config.config_file}"
-      rescue Object => e
-        logger.error "error reloading config_file=#{@config.config_file}: " \
+        self.app = orig_app
+        build_app! if preload_app
+        logger.info "done reloading config_file=#{config.config_file}"
+      rescue => e
+        logger.error "error reloading config_file=#{config.config_file}: " \
                      "#{e.class} #{e.message}"
       end
     end
@@ -603,12 +747,16 @@ module Unicorn
     end
 
     def build_app!
-      if @app.respond_to?(:arity) && @app.arity == 0
+      if app.respond_to?(:arity) && app.arity == 0
+        # exploit COW in case of preload_app.  Also avoids race
+        # conditions in Rainbows! since load/require are not thread-safe
+        Unicorn.constants.each { |x| Unicorn.const_get(x) }
+
         if defined?(Gem) && Gem.respond_to?(:refresh)
           logger.info "Refreshing Gem list"
           Gem.refresh
         end
-        @app = @app.call
+        self.app = app.call
       end
     end
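
build_app! keys off a simple convention: when the application is handed to the server as a zero-arity lambda, nothing is built until app.call runs here, which (with preload_app false) happens after fork.  A hedged sketch of that convention; the builder below is purely illustrative and not how this patch constructs its apps:

    builder = lambda do
      # expensive setup (requires, framework boot) would happen here
      lambda { |env| [ 200, { 'Content-Type' => 'text/plain' }, [ "hi\n" ] ] }
    end

    app = builder   # deferred, nothing loaded yet
    app = app.call if app.respond_to?(:arity) && app.arity == 0
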
 
@@ -618,7 +766,7 @@ module Unicorn
     end
 
     def redirect_io(io, path)
-      File.open(path, 'a') { |fp| io.reopen(fp) } if path
+      File.open(path, 'ab') { |fp| io.reopen(fp) } if path
       io.sync = true
     end
 
diff --git a/lib/unicorn/app/exec_cgi.rb b/lib/unicorn/app/exec_cgi.rb
index 861d5e6..ef2a18e 100644
--- a/lib/unicorn/app/exec_cgi.rb
+++ b/lib/unicorn/app/exec_cgi.rb
@@ -1,11 +1,12 @@
+# -*- encoding: binary -*-
+
 require 'unicorn'
-require 'rack'
 
 module Unicorn::App
 
   # This class is highly experimental (even more so than the rest of Unicorn)
   # and has never run anything other than cgit.
-  class ExecCgi
+  class ExecCgi < Struct.new(:args)
 
     CHUNK_SIZE = 16384
     PASS_VARS = %w(
@@ -25,19 +26,19 @@ module Unicorn::App
       SERVER_PORT
       SERVER_PROTOCOL
       SERVER_SOFTWARE
-    ).map { |x| x.freeze }.freeze # frozen strings are faster for Hash lookups
+    ).map { |x| x.freeze } # frozen strings are faster for Hash assignments
 
     # Initializes the app; example of usage in a config.ru
     #   map "/cgit" do
     #     run Unicorn::App::ExecCgi.new("/path/to/cgit.cgi")
     #   end
     def initialize(*args)
-      @args = args.dup
-      first = @args[0] or
+      self.args = args
+      first = args[0] or
         raise ArgumentError, "need path to executable"
-      first[0..0] == "/" or @args[0] = ::File.expand_path(first)
-      File.executable?(@args[0]) or
-        raise ArgumentError, "#{@args[0]} is not executable"
+      first[0] == ?/ or args[0] = ::File.expand_path(first)
+      File.executable?(args[0]) or
+        raise ArgumentError, "#{args[0]} is not executable"
     end
 
     # Calls the app
@@ -62,14 +63,14 @@ module Unicorn::App
         val = env[key] or next
         ENV[key] = val
       end
-      ENV['SCRIPT_NAME'] = @args[0]
+      ENV['SCRIPT_NAME'] = args[0]
       ENV['GATEWAY_INTERFACE'] = 'CGI/1.1'
       env.keys.grep(/^HTTP_/) { |key| ENV[key] = env[key] }
 
       a = IO.new(0).reopen(inp)
       b = IO.new(1).reopen(out)
       c = IO.new(2).reopen(err)
-      exec(*@args)
+      exec(*args)
     end
 
     # Extracts headers from CGI out, will change the offset of out.
@@ -86,23 +87,24 @@ module Unicorn::App
         offset = 4
       end
       offset += head.length
-      out.instance_variable_set('@unicorn_app_exec_cgi_offset', offset)
-      size -= offset
 
       # Allows +out+ to be used as a Rack body.
-      def out.each
-        sysseek(@unicorn_app_exec_cgi_offset)
-
-        # don't use a preallocated buffer for sysread since we can't
-        # guarantee an actual socket is consuming the yielded string
-        # (or if somebody is pushing to an array for eventual concatenation
-        begin
-          yield(sysread(CHUNK_SIZE))
-        rescue EOFError
-          return
-        end while true
-      end
+      out.instance_eval { class << self; self; end }.instance_eval {
+        define_method(:each) { |&blk|
+          sysseek(offset)
+
+          # don't use a preallocated buffer for sysread since we can't
+          # guarantee an actual socket is consuming the yielded string
+          # (or if somebody is pushing to an array for eventual concatenation)
+          begin
+            blk.call(sysread(CHUNK_SIZE))
+          rescue EOFError
+            break
+          end while true
+        }
+      }
 
+      size -= offset
       prev = nil
       headers = Rack::Utils::HeaderHash.new
       head.split(/\r?\n/).each do |line|
@@ -118,18 +120,15 @@ module Unicorn::App
     # ensures rack.input is a file handle that we can redirect stdin to
     def force_file_input(env)
       inp = env['rack.input']
-      if inp.respond_to?(:fileno) && Integer === inp.fileno
-        inp
-      elsif inp.size == 0 # inp could be a StringIO or StringIO-like object
+      if inp.size == 0 # inp could be a StringIO or StringIO-like object
         ::File.open('/dev/null', 'rb')
       else
         tmp = Unicorn::Util.tmpio
 
-        # Rack::Lint::InputWrapper doesn't allow sysread :(
-        buf = Unicorn::Z.dup
-        while inp.read(CHUNK_SIZE, buf)
+        buf = inp.read(CHUNK_SIZE)
+        begin
           tmp.syswrite(buf)
-        end
+        end while inp.read(CHUNK_SIZE, buf)
         tmp.sysseek(0)
         tmp
       end
@@ -141,7 +140,7 @@ module Unicorn::App
       err.seek(0)
       dst = env['rack.errors']
       pid = status.pid
-      dst.write("#{pid}: #{@args.inspect} status=#{status} stderr:\n")
+      dst.write("#{pid}: #{args.inspect} status=#{status} stderr:\n")
       err.each_line { |line| dst.write("#{pid}: #{line}") }
       dst.flush
     end
diff --git a/lib/unicorn/app/inetd.rb b/lib/unicorn/app/inetd.rb
new file mode 100644
index 0000000..9bfa7cb
--- /dev/null
+++ b/lib/unicorn/app/inetd.rb
@@ -0,0 +1,109 @@
+# -*- encoding: binary -*-
+
+# Copyright (c) 2009 Eric Wong
+# You can redistribute it and/or modify it under the same terms as Ruby.
+
+# this class *must* be used with Rack::Chunked
+
+module Unicorn::App
+  class Inetd < Struct.new(:cmd)
+
+    class CatBody < Struct.new(:errors, :err_rd, :out_rd, :pid_map)
+      def initialize(env, cmd)
+        self.errors = env['rack.errors']
+        in_rd, in_wr = IO.pipe
+        self.err_rd, err_wr = IO.pipe
+        self.out_rd, out_wr = IO.pipe
+
+        cmd_pid = fork {
+          inp, out, err = (0..2).map { |i| IO.new(i) }
+          inp.reopen(in_rd)
+          out.reopen(out_wr)
+          err.reopen(err_wr)
+          [ in_rd, in_wr, err_rd, err_wr, out_rd, out_wr ].each { |i| i.close }
+          exec(*cmd)
+        }
+        [ in_rd, err_wr, out_wr ].each { |io| io.close }
+        [ in_wr, err_rd, out_rd ].each { |io| io.binmode }
+        in_wr.sync = true
+
+        # Unfortunately, input here must be processed inside a separate
+        # thread/process using blocking I/O since env['rack.input'] is not
+        # IO.select-able and attempting to make it so would trip Rack::Lint
+        inp_pid = fork {
+          input = env['rack.input']
+          [ err_rd, out_rd ].each { |io| io.close }
+
+          # this is dependent on input.read having readpartial semantics:
+          buf = input.read(16384)
+          begin
+            in_wr.write(buf)
+          end while input.read(16384, buf)
+        }
+        in_wr.close
+        self.pid_map = {
+          inp_pid => 'input streamer',
+          cmd_pid => cmd.inspect,
+        }
+      end
+
+      def each(&block)
+        begin
+          rd, = IO.select([err_rd, out_rd])
+          rd && rd.first or next
+
+          if rd.include?(err_rd)
+            begin
+              errors.write(err_rd.read_nonblock(16384))
+            rescue Errno::EINTR
+            rescue Errno::EAGAIN
+              break
+            end while true
+          end
+
+          rd.include?(out_rd) or next
+
+          begin
+            yield out_rd.read_nonblock(16384)
+          rescue Errno::EINTR
+          rescue Errno::EAGAIN
+            break
+          end while true
+        rescue EOFError,Errno::EPIPE,Errno::EBADF,Errno::EINVAL
+          break
+        end while true
+
+        self
+      end
+
+      def close
+        pid_map.each { |pid, str|
+          begin
+            pid, status = Process.waitpid2(pid)
+            status.success? or
+              errors.write("#{str}: #{status.inspect} (PID:#{pid})\n")
+          rescue Errno::ECHILD
+            errors.write("Failed to reap #{str} (PID:#{pid})\n")
+          end
+        }
+        out_rd.close
+        err_rd.close
+      end
+
+    end
+
+    def initialize(*cmd)
+      self.cmd = cmd
+    end
+
+    def call(env)
+      /\A100-continue\z/i =~ env[Unicorn::Const::HTTP_EXPECT] and
+          return [ 100, {} , [] ]
+
+      [ 200, { 'Content-Type' => 'application/octet-stream' },
+       CatBody.new(env, cmd) ]
+    end
+
+  end
+
+end
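
A hypothetical config.ru for the new inetd emulation; the wrapped command here is only an example, but Rack::Chunked is genuinely required (per the comment at the top of the file) because CatBody streams output with no Content-Length:

    require 'unicorn/app/inetd'

    use Rack::Chunked
    run Unicorn::App::Inetd.new('cat')
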
diff --git a/lib/unicorn/app/old_rails.rb b/lib/unicorn/app/old_rails.rb
index 9b3a3b1..e674d78 100644
--- a/lib/unicorn/app/old_rails.rb
+++ b/lib/unicorn/app/old_rails.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 # This code is based on the original Rails handler in Mongrel
 # Copyright (c) 2005 Zed A. Shaw
 # Copyright (c) 2009 Eric Wong
@@ -11,13 +13,15 @@ module Unicorn; module App; end; end
 # Implements a handler that can run Rails.
 class Unicorn::App::OldRails
 
+  autoload :Static, "unicorn/app/old_rails/static"
+
   def call(env)
     cgi = Unicorn::CGIWrapper.new(env)
     begin
       Dispatcher.dispatch(cgi,
           ActionController::CgiRequest::DEFAULT_SESSION_OPTIONS,
           cgi.body)
-    rescue Object => e
+    rescue => e
       err = env['rack.errors']
       err.write("#{e} #{e.message}\n")
       e.backtrace.each { |line| err.write("#{line}\n") }
diff --git a/lib/unicorn/app/old_rails/static.rb b/lib/unicorn/app/old_rails/static.rb
index 17c007c..13a435e 100644
--- a/lib/unicorn/app/old_rails/static.rb
+++ b/lib/unicorn/app/old_rails/static.rb
@@ -1,10 +1,10 @@
+# -*- encoding: binary -*-
+
 # This code is based on the original Rails handler in Mongrel
 # Copyright (c) 2005 Zed A. Shaw
 # Copyright (c) 2009 Eric Wong
 # You can redistribute it and/or modify it under the same terms as Ruby.
 
-require 'rack/file'
-
 # Static file handler for Rails < 2.3.  This handler is only provided
 # as a convenience for developers.  Performance-minded deployments should
 # use nginx (or similar) for serving static files.
@@ -19,42 +19,40 @@ require 'rack/file'
 # This means that if you are using page caching it will actually work
 # with Unicorn and you should see a decent speed boost (but not as
 # fast as if you use a static server like nginx).
-class Unicorn::App::OldRails::Static
-  FILE_METHODS = { 'GET' => true, 'HEAD' => true }.freeze
-  REQUEST_METHOD = 'REQUEST_METHOD'.freeze
-  REQUEST_URI = 'REQUEST_URI'.freeze
-  PATH_INFO = 'PATH_INFO'.freeze
+class Unicorn::App::OldRails::Static < Struct.new(:app, :root, :file_server)
+  FILE_METHODS = { 'GET' => true, 'HEAD' => true }
+
+  # avoid allocating new strings for hash lookups
+  REQUEST_METHOD = 'REQUEST_METHOD'
+  REQUEST_URI = 'REQUEST_URI'
+  PATH_INFO = 'PATH_INFO'
 
   def initialize(app)
-    @app = app
-    @root = "#{::RAILS_ROOT}/public"
-    @file_server = ::Rack::File.new(@root)
+    self.app = app
+    self.root = "#{::RAILS_ROOT}/public"
+    self.file_server = ::Rack::File.new(root)
   end
 
   def call(env)
     # short circuit this ASAP if serving non-file methods
-    FILE_METHODS.include?(env[REQUEST_METHOD]) or return @app.call(env)
+    FILE_METHODS.include?(env[REQUEST_METHOD]) or return app.call(env)
 
     # first try the path as-is
     path_info = env[PATH_INFO].chomp("/")
-    if File.file?("#@root/#{::Rack::Utils.unescape(path_info)}")
+    if File.file?("#{root}/#{::Rack::Utils.unescape(path_info)}")
       # File exists as-is so serve it up
       env[PATH_INFO] = path_info
-      return @file_server.call(env)
+      return file_server.call(env)
     end
 
     # then try the cached version:
+    path_info << ActionController::Base.page_cache_extension
 
-    # grab the semi-colon REST operator used by old versions of Rails
-    # this is the reason we didn't just copy the new Rails::Rack::Static
-    env[REQUEST_URI] =~ /^#{Regexp.escape(path_info)}(;[^\?]+)/
-    path_info << "#$1#{ActionController::Base.page_cache_extension}"
-
-    if File.file?("#@root/#{::Rack::Utils.unescape(path_info)}")
+    if File.file?("#{root}/#{::Rack::Utils.unescape(path_info)}")
       env[PATH_INFO] = path_info
-      return @file_server.call(env)
+      return file_server.call(env)
     end
 
-    @app.call(env) # call OldRails
+    app.call(env) # call OldRails
   end
 end if defined?(Unicorn::App::OldRails)
diff --git a/lib/unicorn/cgi_wrapper.rb b/lib/unicorn/cgi_wrapper.rb
index bc622ea..b6eeb33 100644
--- a/lib/unicorn/cgi_wrapper.rb
+++ b/lib/unicorn/cgi_wrapper.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 # This code is based on the original CGIWrapper from Mongrel
 # Copyright (c) 2005 Zed A. Shaw
 # Copyright (c) 2009 Eric Wong
@@ -44,7 +46,7 @@ class Unicorn::CGIWrapper < ::CGI
     'language' => 'Content-Language'.freeze,
     'expires' => 'Expires'.freeze,
     'length' => CONTENT_LENGTH,
-  }.freeze
+  }
 
   # Takes a Rackable environment, plus any additional CGI.new
   # arguments.  These are used internally to create a wrapper around the
@@ -57,21 +59,20 @@ class Unicorn::CGIWrapper < ::CGI
     @status = nil
     @head = {}
     @headv = Hash.new { |hash,key| hash[key] = [] }
-    @body = StringIO.new
+    @body = StringIO.new("")
     super(*args)
   end
 
   # finalizes the response in a way Rack applications would expect
   def rack_response
     # @head[CONTENT_LENGTH] ||= @body.size
-    @headv[SET_COOKIE] += @output_cookies if @output_cookies
+    @headv[SET_COOKIE].concat(@output_cookies) if @output_cookies
     @headv.each_pair do |key,value|
       @head[key] ||= value.join("\n") unless value.empty?
     end
 
     # Capitalized "Status:", with human-readable status code (e.g. "200 OK")
-    parseable_status = @head.delete(Status)
-    @status ||= parseable_status.split(/ /)[0].to_i rescue 500
+    @status ||= @head.delete(Status)
 
     [ @status || 500, @head, [ @body.string ] ]
   end
@@ -136,13 +137,8 @@ class Unicorn::CGIWrapper < ::CGI
     @env_table[RACK_INPUT]
   end
 
-  # The stdoutput should be completely bypassed but we'll drop a
-  # warning just in case
+  # return a pointer to the StringIO body since it's STDOUT-like
   def stdoutput
-    err = @env_table[RACK_ERRORS]
-    err.puts "WARNING: Your program is doing something not expected."
-    err.puts "Please tell Eric that stdoutput was used and what software " \
-             "you are running.  Thanks."
     @body
   end
 
diff --git a/lib/unicorn/configurator.rb b/lib/unicorn/configurator.rb
index 860962a..f6d13ab 100644
--- a/lib/unicorn/configurator.rb
+++ b/lib/unicorn/configurator.rb
@@ -1,38 +1,24 @@
+# -*- encoding: binary -*-
+
 require 'socket'
 require 'logger'
 
 module Unicorn
 
-  # Implements a simple DSL for configuring a unicorn server.
+  # Implements a simple DSL for configuring a Unicorn server.
   #
-  # Example (when used with the unicorn config file):
-  #   worker_processes 4
-  #   listen '/tmp/my_app.sock', :backlog => 1
-  #   listen '0.0.0.0:9292'
-  #   timeout 10
-  #   pid "/tmp/my_app.pid"
-  #   after_fork do |server,worker|
-  #     server.listen("127.0.0.1:#{9293 + worker.nr}") rescue nil
-  #   end
-  class Configurator
-    # The default logger writes its output to $stderr
-    DEFAULT_LOGGER = Logger.new($stderr) unless defined?(DEFAULT_LOGGER)
+  # See http://unicorn.bogomips.org/examples/unicorn.conf.rb for an
+  # example config file.  An example config file for use with nginx is
+  # also available at http://unicorn.bogomips.org/examples/nginx.conf
+  class Configurator < Struct.new(:set, :config_file)
 
     # Default settings for Unicorn
     DEFAULTS = {
       :timeout => 60,
-      :listeners => [],
-      :logger => DEFAULT_LOGGER,
+      :logger => Logger.new($stderr),
       :worker_processes => 1,
       :after_fork => lambda { |server, worker|
           server.logger.info("worker=#{worker.nr} spawned pid=#{$$}")
-
-          # per-process listener ports for debugging/admin:
-          # "rescue nil" statement is needed because USR2 will
-          # cause the master process to reexecute itself and the
-          # per-worker ports can be taken, necessitating another
-          # HUP after QUIT-ing the original master:
-          # server.listen("127.0.0.1:#{8081 + worker.nr}") rescue nil
         },
       :before_fork => lambda { |server, worker|
           server.logger.info("worker=#{worker.nr} spawning...")
@@ -42,42 +28,44 @@ module Unicorn
         },
       :pid => nil,
       :preload_app => false,
-      :stderr_path => nil,
-      :stdout_path => nil,
     }
 
-    attr_reader :config_file #:nodoc:
-
     def initialize(defaults = {}) #:nodoc:
-      @set = Hash.new(:unset)
+      self.set = Hash.new(:unset)
       use_defaults = defaults.delete(:use_defaults)
-      @config_file = defaults.delete(:config_file)
-      @config_file.freeze
-      @set.merge!(DEFAULTS) if use_defaults
+      self.config_file = defaults.delete(:config_file)
+      set.merge!(DEFAULTS) if use_defaults
       defaults.each { |key, value| self.send(key, value) }
+      Hash === set[:listener_opts] or
+          set[:listener_opts] = Hash.new { |hash,key| hash[key] = {} }
+      Array === set[:listeners] or set[:listeners] = []
       reload
     end
 
     def reload #:nodoc:
-      instance_eval(File.read(@config_file)) if @config_file
+      instance_eval(File.read(config_file), config_file) if config_file
+
+      # working_directory binds immediately (easier error checking that way),
+      # now ensure any paths we changed are correctly set.
+      [ :pid, :stderr_path, :stdout_path ].each do |var|
+        String === (path = set[var]) or next
+        path = File.expand_path(path)
+        test(?w, path) || test(?w, File.dirname(path)) or \
+              raise ArgumentError, "directory for #{var}=#{path} not writable"
+      end
     end
 
     def commit!(server, options = {}) #:nodoc:
       skip = options[:skip] || []
-      @set.each do |key, value|
-        (Symbol === value && value == :unset) and next
+      set.each do |key, value|
+        value == :unset and next
         skip.include?(key) and next
-        setter = "#{key}="
-        if server.respond_to?(setter)
-          server.send(setter, value)
-        else
-          server.instance_variable_set("@#{key}", value)
-        end
+        server.__send__("#{key}=", value)
       end
     end
 
     def [](key) # :nodoc:
-      @set[key]
+      set[key]
     end
 
     # sets object to the +new+ Logger-like object.  The new logger-like
@@ -89,7 +77,7 @@ module Unicorn
         raise ArgumentError, "logger=#{new} does not respond to method=#{m}"
       end
 
-      @set[:logger] = new
+      set[:logger] = new
     end
 
     # sets after_fork hook to a given block.  This block will be called by
@@ -98,25 +86,18 @@ module Unicorn
     #
     #  after_fork do |server,worker|
     #    # per-process listener ports for debugging/admin:
-    #    # "rescue nil" statement is needed because USR2 will
-    #    # cause the master process to reexecute itself and the
-    #    # per-worker ports can be taken, necessitating another
-    #    # HUP after QUIT-ing the original master:
-    #    server.listen("127.0.0.1:#{9293 + worker.nr}") rescue nil
+    #    addr = "127.0.0.1:#{9293 + worker.nr}"
+    #
+    #    # the negative :tries parameter indicates we will retry forever
+    #    # waiting on the existing process to exit with a 5 second :delay
+    #    # Existing options for Unicorn::Configurator#listen such as
+    #    # :backlog, :rcvbuf, :sndbuf are available here as well.
+    #    server.listen(addr, :tries => -1, :delay => 5, :backlog => 128)
     #
     #    # drop permissions to "www-data" in the worker
     #    # generally there's no reason to start Unicorn as a privileged user
     #    # as it is not recommended to expose Unicorn to public clients.
-    #    uid, gid = Process.euid, Process.egid
-    #    user, group = 'www-data', 'www-data'
-    #    target_uid = Etc.getpwnam(user).uid
-    #    target_gid = Etc.getgrnam(group).gid
-    #    worker.tempfile.chown(target_uid, target_gid)
-    #    if uid != target_uid || gid != target_gid
-    #      Process.initgroups(user, target_gid)
-    #      Process::GID.change_privilege(target_gid)
-    #      Process::UID.change_privilege(target_uid)
-    #    end
+    #    worker.user('www-data', 'www-data') if Process.euid == 0
     #  end
     def after_fork(*args, &block)
       set_hook(:after_fork, block_given? ? block : args[0])
@@ -146,22 +127,43 @@ module Unicorn
     # to the scheduling limitations by the worker process.  Due to the
     # low-complexity, low-overhead implementation, timeouts of less
     # than 3.0 seconds can be considered inaccurate and unsafe.
+    #
+    # For running Unicorn behind nginx, it is recommended to set
+    # "fail_timeout=0" for in your nginx configuration like this
+    # to have nginx always retry backends that may have had workers
+    # SIGKILL-ed due to timeouts.
+    #
+    #    # See http://wiki.nginx.org/NginxHttpUpstreamModule for more details
+    #    # on nginx upstream configuration:
+    #    upstream unicorn_backend {
+    #      # for UNIX domain socket setups:
+    #      server unix:/path/to/unicorn.sock fail_timeout=0;
+    #
+    #      # for TCP setups
+    #      server 192.168.0.7:8080 fail_timeout=0;
+    #      server 192.168.0.8:8080 fail_timeout=0;
+    #      server 192.168.0.9:8080 fail_timeout=0;
+    #    }
     def timeout(seconds)
       Numeric === seconds or raise ArgumentError,
                                   "not numeric: timeout=#{seconds.inspect}"
       seconds >= 3 or raise ArgumentError,
                                   "too low: timeout=#{seconds.inspect}"
-      @set[:timeout] = seconds
+      set[:timeout] = seconds
     end
 
     # sets the current number of worker_processes to +nr+.  Each worker
-    # process will serve exactly one client at a time.
+    # process will serve exactly one client at a time.  You can
+    # increment or decrement this value at runtime by sending SIGTTIN
+    # or SIGTTOU respectively to the master process without reloading
+    # the rest of your Unicorn configuration.  See the SIGNALS document
+    # for more information.
     def worker_processes(nr)
       Integer === nr or raise ArgumentError,
                              "not an integer: worker_processes=#{nr.inspect}"
       nr >= 0 or raise ArgumentError,
                              "not non-negative: worker_processes=#{nr.inspect}"
-      @set[:worker_processes] = nr
+      set[:worker_processes] = nr
     end
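
The runtime adjustment mentioned above needs no config reload; a hedged example of driving it from another Ruby process, assuming the master PID was written to the path given to the pid directive:

    master_pid = File.read('/path/to/unicorn.pid').to_i
    Process.kill(:TTIN, master_pid) # one more worker
    Process.kill(:TTOU, master_pid) # one less worker
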
 
     # sets listeners to the given +addresses+, replacing or augmenting the
@@ -172,14 +174,14 @@ module Unicorn
     def listeners(addresses) # :nodoc:
       Array === addresses or addresses = Array(addresses)
       addresses.map! { |addr| expand_addr(addr) }
-      @set[:listeners] = addresses
+      set[:listeners] = addresses
     end
 
     # adds an +address+ to the existing listener set.
     #
     # The following options may be specified (but are generally not needed):
     #
-    # +backlog+: this is the backlog of the listen() syscall.
+    # +:backlog+: this is the backlog of the listen() syscall.
     #
     # Some operating systems allow negative values here to specify the
     # maximum allowable value.  In most cases, this number is only
@@ -194,7 +196,7 @@ module Unicorn
     #
     # Default: 1024
     #
-    # +rcvbuf+, +sndbuf+: maximum send and receive buffer sizes of sockets
+    # +:rcvbuf+, +:sndbuf+: maximum receive and send buffer sizes of sockets
     #
     # These correspond to the SO_RCVBUF and SO_SNDBUF settings which
     # can be set via the setsockopt(2) syscall.  Some kernels
@@ -208,13 +210,13 @@ module Unicorn
     #
     # Defaults: operating system defaults
     #
-    # +tcp_nodelay+: disables Nagle's algorithm on TCP sockets
+    # +:tcp_nodelay+: disables Nagle's algorithm on TCP sockets
     #
     # This has no effect on UNIX sockets.
     #
     # Default: operating system defaults (usually Nagle's algorithm enabled)
     #
-    # +tcp_nopush+: enables TCP_CORK in Linux or TCP_NOPUSH in FreeBSD
+    # +:tcp_nopush+: enables TCP_CORK in Linux or TCP_NOPUSH in FreeBSD
     #
     # This will prevent partial TCP frames from being sent out.
     # Enabling +tcp_nopush+ is generally not needed or recommended as
@@ -224,12 +226,33 @@ module Unicorn
     #
     # This has no effect on UNIX sockets.
     #
+    # +:tries+: times to retry binding a socket if it is already in use
+    #
+    # A negative number indicates we will retry indefinitely; this is
+    # useful for migrations and upgrades when individual workers
+    # are binding to different ports.
+    #
+    # Default: 5
+    #
+    # +:delay+: seconds to wait between successive +tries+
+    #
+    # Default: 0.5 seconds
+    #
+    # +:umask+: sets the file mode creation mask for UNIX sockets
+    #
+    # Typically UNIX domain sockets are created with more liberal
+    # file permissions than the rest of the application.  By default,
+    # we create UNIX domain sockets to be readable and writable by
+    # all local users to give them the same accessibility as
+    # locally-bound TCP listeners.
+    #
+    # This has no effect on TCP listeners.
+    #
+    # Default: 0 (world read/writable)
     def listen(address, opt = {})
       address = expand_addr(address)
       if String === address
-        Hash === @set[:listener_opts] or
-          @set[:listener_opts] = Hash.new { |hash,key| hash[key] = {} }
-        [ :backlog, :sndbuf, :rcvbuf ].each do |key|
+        [ :umask, :backlog, :sndbuf, :rcvbuf, :tries ].each do |key|
           value = opt[key] or next
           Integer === value or
             raise ArgumentError, "not an integer: #{key}=#{value.inspect}"
@@ -239,11 +262,14 @@ module Unicorn
           TrueClass === value || FalseClass === value or
             raise ArgumentError, "not boolean: #{key}=#{value.inspect}"
         end
-        @set[:listener_opts][address].merge!(opt)
+        unless (value = opt[:delay]).nil?
+          Numeric === value or
+            raise ArgumentError, "not numeric: delay=#{value.inspect}"
+        end
+        set[:listener_opts][address].merge!(opt)
       end
 
-      @set[:listeners] = [] unless Array === @set[:listeners]
-      @set[:listeners] << address
+      set[:listeners] << address
     end
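
A hypothetical unicorn config snippet exercising the listen options documented above (addresses and values are examples only):

    listen '/tmp/app.sock', :backlog => 64, :umask => 0
    listen '0.0.0.0:8080', :tcp_nodelay => true, :rcvbuf => 65536
    listen '0.0.0.0:8081', :tries => -1, :delay => 0.5
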
 
     # sets the +path+ for the PID file of the unicorn master process
@@ -265,7 +291,7 @@ module Unicorn
     def preload_app(bool)
       case bool
       when TrueClass, FalseClass
-        @set[:preload_app] = bool
+        set[:preload_app] = bool
       else
         raise ArgumentError, "preload_app=#{bool.inspect} not a boolean"
       end
@@ -286,35 +312,21 @@ module Unicorn
       set_path(:stdout_path, path)
     end
 
-    private
-
-    def set_path(var, path) #:nodoc:
-      case path
-      when NilClass
-      when String
-        path = File.expand_path(path)
-        File.writable?(File.dirname(path)) or \
-               raise ArgumentError, "directory for #{var}=#{path} not writable"
-      else
-        raise ArgumentError
+    # sets the working directory for Unicorn.  This ensures USR2 will
+    # start a new instance of Unicorn in this directory.  This may be
+    # a symlink.
+    def working_directory(path)
+      # just let chdir raise errors
+      path = File.expand_path(path)
+      if config_file &&
+         config_file[0] != ?/ &&
+         ! test(?r, "#{path}/#{config_file}")
+        raise ArgumentError,
+              "config_file=#{config_file} would not be accessible in" \
+              " working_directory=#{path}"
       end
-      @set[var] = path
-    end
-
-    def set_hook(var, my_proc, req_arity = 2) #:nodoc:
-      case my_proc
-      when Proc
-        arity = my_proc.arity
-        (arity == req_arity) or \
-          raise ArgumentError,
-                "#{var}=#{my_proc.inspect} has invalid arity: " \
-                "#{arity} (need #{req_arity})"
-      when NilClass
-        my_proc = DEFAULTS[var]
-      else
-        raise ArgumentError, "invalid type: #{var}=#{my_proc.inspect}"
-      end
-      @set[var] = my_proc
+      Dir.chdir(path)
+      HttpServer::START_CTX[:cwd] = ENV["PWD"] = path
     end
 
     # expands "unix:path/to/foo" to a socket relative to the current path
@@ -340,5 +352,32 @@ module Unicorn
       end
     end
 
+  private
+
+    def set_path(var, path) #:nodoc:
+      case path
+      when NilClass, String
+        set[var] = path
+      else
+        raise ArgumentError
+      end
+    end
+
+    def set_hook(var, my_proc, req_arity = 2) #:nodoc:
+      case my_proc
+      when Proc
+        arity = my_proc.arity
+        (arity == req_arity) or \
+          raise ArgumentError,
+                "#{var}=#{my_proc.inspect} has invalid arity: " \
+                "#{arity} (need #{req_arity})"
+      when NilClass
+        my_proc = DEFAULTS[var]
+      else
+        raise ArgumentError, "invalid type: #{var}=#{my_proc.inspect}"
+      end
+      set[var] = my_proc
+    end
+
   end
 end
diff --git a/lib/unicorn/const.rb b/lib/unicorn/const.rb
index 6730367..1b3a9cd 100644
--- a/lib/unicorn/const.rb
+++ b/lib/unicorn/const.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 module Unicorn
 
   # Frequently used constants when constructing requests or responses.  Many times
@@ -5,11 +7,11 @@ module Unicorn
   # gave about a 3% to 10% performance improvement over using the strings directly.
   # Symbols did not really improve things much compared to constants.
   module Const
-    UNICORN_VERSION="0.8.4".freeze
+    UNICORN_VERSION="0.96.0"
 
-    DEFAULT_HOST = "0.0.0.0".freeze # default TCP listen host address
-    DEFAULT_PORT = "8080".freeze    # default TCP listen port
-    DEFAULT_LISTEN = "#{DEFAULT_HOST}:#{DEFAULT_PORT}".freeze
+    DEFAULT_HOST = "0.0.0.0" # default TCP listen host address
+    DEFAULT_PORT = 8080      # default TCP listen port
+    DEFAULT_LISTEN = "#{DEFAULT_HOST}:#{DEFAULT_PORT}"
 
     # The basic max request size we'll try to read.
     CHUNK_SIZE=(16 * 1024)
@@ -22,14 +24,14 @@ module Unicorn
     MAX_BODY=MAX_HEADER
 
     # common errors we'll send back
-    ERROR_400_RESPONSE = "HTTP/1.1 400 Bad Request\r\n\r\n".freeze
-    ERROR_500_RESPONSE = "HTTP/1.1 500 Internal Server Error\r\n\r\n".freeze
+    ERROR_400_RESPONSE = "HTTP/1.1 400 Bad Request\r\n\r\n"
+    ERROR_500_RESPONSE = "HTTP/1.1 500 Internal Server Error\r\n\r\n"
+    EXPECT_100_RESPONSE = "HTTP/1.1 100 Continue\r\n\r\n"
 
     # A frozen format for this is about 15% faster
-    CONTENT_LENGTH="CONTENT_LENGTH".freeze
     REMOTE_ADDR="REMOTE_ADDR".freeze
-    HTTP_X_FORWARDED_FOR="HTTP_X_FORWARDED_FOR".freeze
     RACK_INPUT="rack.input".freeze
+    HTTP_EXPECT="HTTP_EXPECT"
   end
 
 end
diff --git a/lib/unicorn/http_request.rb b/lib/unicorn/http_request.rb
index d7078a3..99c11c2 100644
--- a/lib/unicorn/http_request.rb
+++ b/lib/unicorn/http_request.rb
@@ -1,46 +1,32 @@
-require 'tempfile'
-require 'stringio'
+# -*- encoding: binary -*-
 
-# compiled extension
-require 'unicorn/http11'
+require 'stringio'
+require 'unicorn_http'
 
 module Unicorn
-  #
-  # The HttpRequest.initialize method will convert any request that is larger than
-  # Const::MAX_BODY into a Tempfile and use that as the body.  Otherwise it uses
-  # a StringIO object.  To be safe, you should assume it works like a file.
-  #
   class HttpRequest
 
-    attr_accessor :logger
-
     # default parameters we merge into the request env for Rack handlers
     DEFAULTS = {
       "rack.errors" => $stderr,
       "rack.multiprocess" => true,
       "rack.multithread" => false,
       "rack.run_once" => false,
-      "rack.version" => [1, 0].freeze,
-      "SCRIPT_NAME" => "".freeze,
+      "rack.version" => [1, 1],
+      "SCRIPT_NAME" => "",
 
       # this is not in the Rack spec, but some apps may rely on it
-      "SERVER_SOFTWARE" => "Unicorn #{Const::UNICORN_VERSION}".freeze
+      "SERVER_SOFTWARE" => "Unicorn #{Const::UNICORN_VERSION}"
     }
 
-    # Optimize for the common case where there's no request body
-    # (GET/HEAD) requests.
-    NULL_IO = StringIO.new
-    LOCALHOST = '127.0.0.1'.freeze
+    NULL_IO = StringIO.new("")
+    LOCALHOST = '127.0.0.1'
 
     # Being explicitly single-threaded, we have certain advantages in
     # not having to worry about variables being clobbered :)
-    BUFFER = ' ' * Const::CHUNK_SIZE # initial size, may grow
+    BUF = ' ' * Const::CHUNK_SIZE # initial size, may grow
     PARSER = HttpParser.new
-    PARAMS = Hash.new
-
-    def initialize(logger = Configurator::DEFAULT_LOGGER)
-      @logger = logger
-    end
+    REQ = {}
 
     # Does the majority of the IO processing.  It has been written in
     # Ruby using about 8 different IO processing strategies.
@@ -56,12 +42,7 @@ module Unicorn
     # This does minimal exception trapping and it is up to the caller
     # to handle any socket errors (e.g. user aborted upload).
     def read(socket)
-      # reset the parser
-      unless NULL_IO == (input = PARAMS[Const::RACK_INPUT]) # unlikely
-        input.close rescue nil
-        input.close! rescue nil
-      end
-      PARAMS.clear
+      REQ.clear
       PARSER.reset
 
       # From http://www.ietf.org/rfc/rfc3875:
@@ -71,83 +52,20 @@ module Unicorn
       #  identify the client for the immediate request to the server;
       #  that client may be a proxy, gateway, or other intermediary
       #  acting on behalf of the actual source client."
-      PARAMS[Const::REMOTE_ADDR] =
+      REQ[Const::REMOTE_ADDR] =
                     TCPSocket === socket ? socket.peeraddr.last : LOCALHOST
 
       # short circuit the common case with small GET requests first
-      PARSER.execute(PARAMS, socket.readpartial(Const::CHUNK_SIZE, BUFFER)) and
-          return handle_body(socket)
-
-      data = BUFFER.dup # socket.readpartial will clobber BUFFER
-
-      # Parser is not done, queue up more data to read and continue parsing
-      # an Exception thrown from the PARSER will throw us out of the loop
-      begin
-        data << socket.readpartial(Const::CHUNK_SIZE, BUFFER)
-        PARSER.execute(PARAMS, data) and return handle_body(socket)
-      end while true
-      rescue HttpParserError => e
-        @logger.error "HTTP parse error, malformed request " \
-                      "(#{PARAMS[Const::HTTP_X_FORWARDED_FOR] ||
-                          PARAMS[Const::REMOTE_ADDR]}): #{e.inspect}"
-        @logger.error "REQUEST DATA: #{data.inspect}\n---\n" \
-                      "PARAMS: #{PARAMS.inspect}\n---\n"
-        raise e
-    end
-
-    private
-
-    # Handles dealing with the rest of the request
-    # returns a Rack environment if successful, raises an exception if not
-    def handle_body(socket)
-      http_body = PARAMS.delete(:http_body)
-      content_length = PARAMS[Const::CONTENT_LENGTH].to_i
-
-      if content_length == 0 # short circuit the common case
-        PARAMS[Const::RACK_INPUT] = NULL_IO.closed? ? NULL_IO.reopen : NULL_IO
-        return PARAMS.update(DEFAULTS)
+      if PARSER.headers(REQ, socket.readpartial(Const::CHUNK_SIZE, BUF)).nil?
+        # Parser is not done, queue up more data to read and continue parsing
+        # an Exception thrown from the PARSER will throw us out of the loop
+        begin
+          BUF << socket.readpartial(Const::CHUNK_SIZE)
+        end while PARSER.headers(REQ, BUF).nil?
       end
-
-      # must read more data to complete body
-      remain = content_length - http_body.length
-
-      body = PARAMS[Const::RACK_INPUT] = (remain < Const::MAX_BODY) ?
-          StringIO.new : Tempfile.new('unicorn')
-
-      body.binmode
-      body.write(http_body)
-
-      # Some clients (like FF1.0) report 0 for body and then send a body.
-      # This will probably truncate them but at least the request goes through
-      # usually.
-      read_body(socket, remain, body) if remain > 0
-      body.rewind
-
-      # in case read_body overread because the client tried to pipeline
-      # another request, we'll truncate it.  Again, we don't do pipelining
-      # or keepalive
-      body.truncate(content_length)
-      PARAMS.update(DEFAULTS)
-    end
-
-    # Does the heavy lifting of properly reading the larger body
-    # requests in small chunks.  It expects PARAMS['rack.input'] to be
-    # an IO object, socket to be valid, It also expects any initial part
-    # of the body that has been read to be in the PARAMS['rack.input']
-    # already.  It will return true if successful and false if not.
-    def read_body(socket, remain, body)
-      begin
-        # write always writes the requested amount on a POSIX filesystem
-        remain -= body.write(socket.readpartial(Const::CHUNK_SIZE, BUFFER))
-      end while remain > 0
-    rescue Object => e
-      @logger.error "Error reading HTTP body: #{e.inspect}"
-
-      # Any errors means we should delete the file, including if the file
-      # is dumped.  Truncate it ASAP to help avoid page flushes to disk.
-      body.truncate(0) rescue nil
-      reset
-      raise e
+      REQ[Const::RACK_INPUT] = 0 == PARSER.content_length ?
+                   NULL_IO : Unicorn::TeeInput.new(socket, REQ, PARSER, BUF)
+      REQ.update(DEFAULTS)
     end
 
   end
diff --git a/lib/unicorn/http_response.rb b/lib/unicorn/http_response.rb
index 3bf9347..96e484b 100644
--- a/lib/unicorn/http_response.rb
+++ b/lib/unicorn/http_response.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 require 'time'
 
 module Unicorn
@@ -30,40 +32,43 @@ module Unicorn
     # Rack does not set/require a Date: header.  We always override the
     # Connection: and Date: headers no matter what (if anything) our
     # Rack application sent us.
-    SKIP = { 'connection' => true, 'date' => true, 'status' => true }.freeze
-    EMPTY = ''.freeze # :nodoc
-    OUT = [] # :nodoc
+    SKIP = { 'connection' => true, 'date' => true, 'status' => true }
 
     # writes the rack_response to socket as an HTTP response
-    def self.write(socket, rack_response)
+    def self.write(socket, rack_response, have_header = true)
       status, headers, body = rack_response
-      status = CODES[status.to_i] || status
-      OUT.clear
 
-      # Don't bother enforcing duplicate supression, it's a Hash most of
-      # the time anyways so just hope our app knows what it's doing
-      headers.each do |key, value|
-        next if SKIP.include?(key.downcase)
-        if value =~ /\n/
-          value.split(/\n/).each { |v| OUT << "#{key}: #{v}\r\n" }
-        else
-          OUT << "#{key}: #{value}\r\n"
+      if have_header
+        status = CODES[status.to_i] || status
+        out = []
+
+        # Don't bother enforcing duplicate suppression; it's a Hash most of
+        # the time anyways so just hope our app knows what it's doing
+        headers.each do |key, value|
+          next if SKIP.include?(key.downcase)
+          if value =~ /\n/
+            # avoiding blank, key-only cookies with /\n+/
+            out.concat(value.split(/\n+/).map! { |v| "#{key}: #{v}\r\n" })
+          else
+            out << "#{key}: #{value}\r\n"
+          end
         end
+
+        # Rack should enforce Content-Length or chunked transfer encoding,
+        # so don't worry or care about them.
+        # Date is required by HTTP/1.1 as long as our clock can be trusted.
+        # Some broken clients require a "Status" header so we accommodate them
+        socket.write("HTTP/1.1 #{status}\r\n" \
+                     "Date: #{Time.now.httpdate}\r\n" \
+                     "Status: #{status}\r\n" \
+                     "Connection: close\r\n" \
+                     "#{out.join('')}\r\n")
       end
 
-      # Rack should enforce Content-Length or chunked transfer encoding,
-      # so don't worry or care about them.
-      # Date is required by HTTP/1.1 as long as our clock can be trusted.
-      # Some broken clients require a "Status" header so we accomodate them
-      socket.write("HTTP/1.1 #{status}\r\n" \
-                   "Date: #{Time.now.httpdate}\r\n" \
-                   "Status: #{status}\r\n" \
-                   "Connection: close\r\n" \
-                   "#{OUT.join(EMPTY)}\r\n")
       body.each { |chunk| socket.write(chunk) }
       socket.close # flushes and uncorks the socket immediately
       ensure
-        body.respond_to?(:close) and body.close rescue nil
+        body.respond_to?(:close) and body.close
     end
 
   end
diff --git a/lib/unicorn/launcher.rb b/lib/unicorn/launcher.rb
index 8c96059..e71f93b 100644
--- a/lib/unicorn/launcher.rb
+++ b/lib/unicorn/launcher.rb
@@ -1,4 +1,10 @@
-$stdin.sync = $stdout.sync = $stderr.sync = true
+# -*- encoding: binary -*-
+
+$stdout.sync = $stderr.sync = true
+$stdin.binmode
+$stdout.binmode
+$stderr.binmode
+
 require 'unicorn'
 
 class Unicorn::Launcher
@@ -13,21 +19,47 @@ class Unicorn::Launcher
   #     the directory it was started in when being re-executed
   #     to pickup code changes if the original deployment directory
   #     is a symlink or otherwise got replaced.
-  def self.daemonize!
+  def self.daemonize!(options = nil)
     $stdin.reopen("/dev/null")
 
     # We only start a new process group if we're not being reexecuted
     # and inheriting file descriptors from our parent
     unless ENV['UNICORN_FD']
-      exit if fork
-      Process.setsid
-      exit if fork
+      if options
+        # grandparent - reads pipe, exits when master is ready
+        #  \_ parent  - exits immediately ASAP
+        #      \_ unicorn master - writes to pipe when ready
+
+        rd, wr = IO.pipe
+        grandparent = $$
+        if fork
+          wr.close # grandparent does not write
+        else
+          rd.close # unicorn master does not read
+          Process.setsid
+          exit if fork # parent dies now
+        end
 
+        if grandparent == $$
+          # this will block until HttpServer#join runs (or it dies)
+          master_pid = (rd.readpartial(16) rescue nil).to_i
+          unless master_pid > 1
+            warn "master failed to start, check stderr log for details"
+            exit!(1)
+          end
+          exit 0
+        else # unicorn master process
+          options[:ready_pipe] = wr
+        end
+      else # backwards compat
+        exit if fork
+        Process.setsid
+        exit if fork
+      end
       # $stdout/$stderr can/will be redirected separately in the Unicorn config
-      $stdout.reopen("/dev/null", "a")
-      $stderr.reopen("/dev/null", "a")
+      Unicorn::Configurator::DEFAULTS[:stderr_path] = "/dev/null"
+      Unicorn::Configurator::DEFAULTS[:stdout_path] = "/dev/null"
     end
-    $stdin.sync = $stdout.sync = $stderr.sync = true
   end
 
 end
diff --git a/lib/unicorn/socket_helper.rb b/lib/unicorn/socket_helper.rb
index f8e3725..09085e5 100644
--- a/lib/unicorn/socket_helper.rb
+++ b/lib/unicorn/socket_helper.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 require 'socket'
 
 module Unicorn
@@ -9,19 +11,24 @@ module Unicorn
     when /linux/
       # from /usr/include/linux/tcp.h
       TCP_DEFER_ACCEPT = 9 unless defined?(TCP_DEFER_ACCEPT)
+
+      # do not send out partial frames (Linux)
       TCP_CORK = 3 unless defined?(TCP_CORK)
     when /freebsd(([1-4]\..{1,2})|5\.[0-4])/
       # Do nothing for httpready, just closing a bug when freebsd <= 5.4
-      TCP_NOPUSH = 4 unless defined?(TCP_NOPUSH)
+      TCP_NOPUSH = 4 unless defined?(TCP_NOPUSH) # :nodoc:
     when /freebsd/
+      # do not send out partial frames (FreeBSD)
       TCP_NOPUSH = 4 unless defined?(TCP_NOPUSH)
+
       # Use the HTTP accept filter if available.
       # The struct made by pack() is defined in /usr/include/sys/socket.h
       # as accept_filter_arg
-      # We won't be seupportin the "dataready" filter unlike nginx
-      # since we only support HTTP and no other protocols
       unless `/sbin/sysctl -nq net.inet.accf.http`.empty?
-        HTTPREADY = ['httpready', nil].pack('a16a240').freeze
+        # set the "httpready" accept filter in FreeBSD if available
+        # if other protocols are to be supported, this may be
+        # String#replace-d with "dataready" arguments instead
+        FILTER_ARG = ['httpready', nil].pack('a16a240')
       end
     end
 
@@ -29,23 +36,23 @@ module Unicorn
 
       # highly portable, but off by default because we don't do keepalive
       if defined?(TCP_NODELAY) && ! (val = opt[:tcp_nodelay]).nil?
-        sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, val ? 1 : 0) rescue nil
+        sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, val ? 1 : 0)
       end
 
       unless (val = opt[:tcp_nopush]).nil?
         val = val ? 1 : 0
         if defined?(TCP_CORK) # Linux
-          sock.setsockopt(IPPROTO_TCP, TCP_CORK, val) rescue nil
+          sock.setsockopt(IPPROTO_TCP, TCP_CORK, val)
         elsif defined?(TCP_NOPUSH) # TCP_NOPUSH is untested (FreeBSD)
-          sock.setsockopt(IPPROTO_TCP, TCP_NOPUSH, val) rescue nil
+          sock.setsockopt(IPPROTO_TCP, TCP_NOPUSH, val)
         end
       end
 
       # No good reason to ever have deferred accepts off
       if defined?(TCP_DEFER_ACCEPT)
-        sock.setsockopt(SOL_TCP, TCP_DEFER_ACCEPT, 1) rescue nil
-      elsif defined?(SO_ACCEPTFILTER) && defined?(HTTPREADY)
-        sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, HTTPREADY) rescue nil
+        sock.setsockopt(SOL_TCP, TCP_DEFER_ACCEPT, 1)
+      elsif defined?(SO_ACCEPTFILTER) && defined?(FILTER_ARG)
+        sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, FILTER_ARG)
       end
     end
 
@@ -61,6 +68,11 @@ module Unicorn
         log_buffer_sizes(sock, " after: ")
       end
       sock.listen(opt[:backlog] || 1024)
+      rescue => e
+        if respond_to?(:logger)
+          logger.error "error setting socket options: #{e.inspect}"
+          logger.error e.backtrace.join("\n")
+        end
     end
 
     def log_buffer_sizes(sock, pfx = '')
@@ -76,7 +88,7 @@ module Unicorn
     def bind_listen(address = '0.0.0.0:8080', opt = {})
       return address unless String === address
 
-      sock = if address[0..0] == "/"
+      sock = if address[0] == ?/
         if File.exist?(address)
           if File.socket?(address)
             if self.respond_to?(:logger)
@@ -88,7 +100,7 @@ module Unicorn
                   "socket=#{address} specified but it is not a socket!"
           end
         end
-        old_umask = File.umask(0)
+        old_umask = File.umask(opt[:umask] || 0)
         begin
           UNIXServer.new(address)
         ensure
diff --git a/lib/unicorn/tee_input.rb b/lib/unicorn/tee_input.rb
new file mode 100644
index 0000000..bb86c40
--- /dev/null
+++ b/lib/unicorn/tee_input.rb
@@ -0,0 +1,217 @@
+# -*- encoding: binary -*-
+
+module Unicorn
+
+  # acts like tee(1) on an input stream to provide an input-like stream
+  # while providing rewindable semantics through a File/StringIO backing
+  # store.  On the first pass, the input is only read on demand so your
+  # Rack application can use input notification (upload progress and
+  # the like).  This should fully conform to the Rack::Lint::InputWrapper
+  # specification on the public API.  This class is intended to be a
+  # strict interpretation of Rack::Lint::InputWrapper functionality and
+  # will not support any deviations from it.
+  #
+  # When processing uploads, Unicorn exposes a TeeInput object under
+  # "rack.input" of the Rack environment.
+  class TeeInput < Struct.new(:socket, :req, :parser, :buf)
+
+    # Initializes a new TeeInput object.  You normally do not have to call
+    # this unless you are writing an HTTP server.
+    def initialize(*args)
+      super(*args)
+      @size = parser.content_length
+      @tmp = @size && @size < Const::MAX_BODY ? StringIO.new("") : Util.tmpio
+      @buf2 = buf.dup
+      if buf.size > 0
+        parser.filter_body(@buf2, buf) and finalize_input
+        @tmp.write(@buf2)
+        @tmp.seek(0)
+      end
+    end
+
+    # :call-seq:
+    #   ios.size  => Integer
+    #
+    # Returns the size of the input.  For requests with a Content-Length
+    # header value, this will not read data off the socket and just return
+    # the value of the Content-Length header as an Integer.
+    #
+    # For Transfer-Encoding:chunked requests, this requires consuming
+    # all of the input stream before returning since there's no other
+    # way to determine the size of the request body beforehand.
+    def size
+      @size and return @size
+
+      if socket
+        pos = @tmp.pos
+        while tee(Const::CHUNK_SIZE, @buf2)
+        end
+        @tmp.seek(pos)
+      end
+
+      @size = @tmp.size
+    end
+
+    # :call-seq:
+    #   ios.read([length [, buffer ]]) => string, buffer, or nil
+    #
+    # Reads at most length bytes from the I/O stream, or to the end of
+    # file if length is omitted or is nil. length must be a non-negative
+    # integer or nil. If the optional buffer argument is present, it
+    # must reference a String, which will receive the data.
+    #
+    # At end of file, it returns nil or "" depending on +length+:
+    # ios.read() and ios.read(nil) return "".
+    # ios.read(length [, buffer]) returns nil.
+    #
+    # If the Content-Length of the HTTP request is known (as is the common
+    # case for POST requests), then ios.read(length [, buffer]) will block
+    # until the specified length is read (or it is the last chunk).
+    # Otherwise, for uncommon "Transfer-Encoding: chunked" requests,
+    # ios.read(length [, buffer]) will return immediately if there is
+    # any data and only block when nothing is available (providing
+    # IO#readpartial semantics).
+    def read(*args)
+      socket or return @tmp.read(*args)
+
+      length = args.shift
+      if nil == length
+        rv = @tmp.read || ""
+        while tee(Const::CHUNK_SIZE, @buf2)
+          rv << @buf2
+        end
+        rv
+      else
+        rv = args.shift || @buf2.dup
+        diff = @tmp.size - @tmp.pos
+        if 0 == diff
+          ensure_length(tee(length, rv), length)
+        else
+          ensure_length(@tmp.read(diff > length ? length : diff, rv), length)
+        end
+      end
+    end
+
+    # :call-seq:
+    #   ios.gets   => string or nil
+    #
+    # Reads the next ``line'' from the I/O stream; lines are separated
+    # by the global record separator ($/, typically "\n"). A global
+    # record separator of nil reads the entire unread contents of ios.
+    # Returns nil if called at the end of file.
+    # This takes zero arguments for strict Rack::Lint compatibility,
+    # unlike IO#gets.
+    def gets
+      socket or return @tmp.gets
+      nil == $/ and return read
+
+      orig_size = @tmp.size
+      if @tmp.pos == orig_size
+        tee(Const::CHUNK_SIZE, @buf2) or return nil
+        @tmp.seek(orig_size)
+      end
+
+      line = @tmp.gets # cannot be nil here since size > pos
+      $/ == line[-$/.size, $/.size] and return line
+
+      # unlikely, if we got here, then @tmp is at EOF
+      begin
+        orig_size = @tmp.pos
+        tee(Const::CHUNK_SIZE, @buf2) or break
+        @tmp.seek(orig_size)
+        line << @tmp.gets
+        $/ == line[-$/.size, $/.size] and return line
+        # @tmp is at EOF again here, retry the loop
+      end while true
+
+      line
+    end
+
+    # :call-seq:
+    #   ios.each { |line| block }  => ios
+    #
+    # Executes the block for every ``line'' in *ios*, where lines are
+    # separated by the global record separator ($/, typically "\n").
+    def each(&block)
+      while line = gets
+        yield line
+      end
+
+      self # Rack does not specify what the return value is here
+    end
+
+    # :call-seq:
+    #   ios.rewind    => 0
+    #
+    # Positions the *ios* pointer to the beginning of input, returns
+    # the offset (zero) of the +ios+ pointer.  Subsequent reads will
+    # start from the beginning of the previously-buffered input.
+    def rewind
+      @tmp.rewind # Rack does not specify what the return value is here
+    end
+
+  private
+
+    def client_error(e)
+      case e
+      when EOFError
+        # in case client only did a premature shutdown(SHUT_WR)
+        # we do support clients that shutdown(SHUT_WR) after the
+        # _entire_ request has been sent, and those will not have
+        # raised EOFError on us.
+        socket.close if socket
+        raise ClientShutdown, "bytes_read=#{@tmp.size}", []
+      when HttpParserError
+        e.set_backtrace([])
+      end
+      raise e
+    end
+
+    # tees off a +length+ chunk of data from the input into the IO
+    # backing store as well as returning it.  +dst+ must be specified.
+    # Returns nil if reading from the input returns nil.
+    def tee(length, dst)
+      unless parser.body_eof?
+        if parser.filter_body(dst, socket.readpartial(length, buf)).nil?
+          @tmp.write(dst)
+          @tmp.seek(0, IO::SEEK_END) # workaround FreeBSD/OSX + MRI 1.8.x bug
+          return dst
+        end
+      end
+      finalize_input
+      rescue => e
+        client_error(e)
+    end
+
+    def finalize_input
+      while parser.trailers(req, buf).nil?
+        # Don't worry about raising ClientShutdown here on EOFError: tee()
+        # will catch EOFError when the app is processing the body.  Otherwise,
+        # in initialize we never get a chance to enter the app, so the
+        # EOFError would just get trapped by Unicorn and not the Rack app.
+        buf << socket.readpartial(Const::CHUNK_SIZE)
+      end
+      self.socket = nil
+    end
+
+    # tee()s into +dst+ until it is of +length+ bytes (or until
+    # we've reached the Content-Length of the request body).
+    # Returns +dst+ (the exact object, not a duplicate).
+    # To continue supporting applications that need near-real-time
+    # streaming input bodies, this is a no-op for
+    # "Transfer-Encoding: chunked" requests.
+    def ensure_length(dst, length)
+      # @size is nil for chunked bodies, so we can't ensure length for those
+      # since they could be streaming bidirectionally and we don't want to
+      # block the caller in that case.
+      return dst if dst.nil? || @size.nil?
+
+      while dst.size < length && tee(length - dst.size, @buf2)
+        dst << @buf2
+      end
+
+      dst
+    end
+
+  end
+end
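
TeeInput is what a Rack application sees as env["rack.input"] when running under this
patch.  A minimal config.ru sketch (not from the patch) exercising the documented
read/rewind semantics above:

    # sketch only: stream an upload on the first pass, then re-read it
    run lambda { |env|
      input = env["rack.input"]       # a Unicorn::TeeInput for request bodies
      bytes = 0
      while buf = input.read(16384)   # first pass reads off the socket on demand
        bytes += buf.size
      end
      input.rewind                    # later reads come from the backing store
      [ 200, { "Content-Type" => "text/plain" }, [ "read #{bytes} bytes\n" ] ]
    }
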
diff --git a/lib/unicorn/util.rb b/lib/unicorn/util.rb
index d2214b7..3951596 100644
--- a/lib/unicorn/util.rb
+++ b/lib/unicorn/util.rb
@@ -1,30 +1,55 @@
+# -*- encoding: binary -*-
+
 require 'fcntl'
 require 'tmpdir'
 
 module Unicorn
+
+  class TmpIO < ::File
+
+    # for easier env["rack.input"] compatibility
+    def size
+      # flush if sync
+      stat.size
+    end
+  end
+
   class Util
     class << self
 
-      APPEND_FLAGS = File::WRONLY | File::APPEND
+      def is_log?(fp)
+        append_flags = File::WRONLY | File::APPEND
+
+        ! fp.closed? &&
+          fp.sync &&
+          fp.path[0] == ?/ &&
+          (fp.fcntl(Fcntl::F_GETFL) & append_flags) == append_flags
+      end
 
-      # this reopens logs that have been rotated (using logrotate(8) or
-      # similar).  It is recommended that you install
+      def chown_logs(uid, gid)
+        ObjectSpace.each_object(File) do |fp|
+          is_log?(fp) or next
+          fp.chown(uid, gid)
+        end
+      end
+
+      # This reopens ALL logfiles in the process that have been rotated
+      # using logrotate(8) (without copytruncate) or similar tools.
       # A +File+ object is considered for reopening if it is:
       #   1) opened with the O_APPEND and O_WRONLY flags
       #   2) opened with an absolute path (starts with "/")
       #   3) the current open file handle does not match its original open path
-      #   4) unbuffered (as far as userspace buffering goes)
+      #   4) unbuffered (as far as userspace buffering goes, not O_SYNC)
       # Returns the number of files reopened
       def reopen_logs
         nr = 0
-        ObjectSpace.each_object(File) do |fp|
-          next if fp.closed?
-          next unless (fp.sync && fp.path[0..0] == "/")
-          next unless (fp.fcntl(Fcntl::F_GETFL) & APPEND_FLAGS) == APPEND_FLAGS
 
+        ObjectSpace.each_object(File) do |fp|
+          is_log?(fp) or next
+          orig_st = fp.stat
           begin
-            a, b = fp.stat, File.stat(fp.path)
-            next if a.ino == b.ino && a.dev == b.dev
+            b = File.stat(fp.path)
+            next if orig_st.ino == b.ino && orig_st.dev == b.dev
           rescue Errno::ENOENT
           end
 
@@ -35,6 +60,10 @@ module Unicorn
           end
           fp.reopen(fp.path, open_arg)
           fp.sync = true
+          new_st = fp.stat
+          if orig_st.uid != new_st.uid || orig_st.gid != new_st.gid
+            fp.chown(orig_st.uid, orig_st.gid)
+          end
           nr += 1
         end # each_object
         nr
@@ -45,8 +74,8 @@ module Unicorn
       # buffering is disabled
       def tmpio
         fp = begin
-          File.open("#{Dir::tmpdir}/#{rand}",
-                    File::RDWR|File::CREAT|File::EXCL, 0600)
+          TmpIO.open("#{Dir::tmpdir}/#{rand}",
+                     File::RDWR|File::CREAT|File::EXCL, 0600)
         rescue Errno::EEXIST
           retry
         end
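
The is_log? predicate above decides which File objects reopen_logs will touch.  A hedged
illustration (the path is hypothetical) of a handle that qualifies, and of the call a
master process would make after logrotate renames the file:

    # sketch only: an append-mode, absolute-path, sync'ed handle qualifies
    log = File.open("/var/log/unicorn.stderr.log", "ab")
    log.sync = true
    # ... logrotate(8) moves the file aside, then:
    Unicorn::Util.reopen_logs  # => number of handles reopened (ownership preserved)
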
diff --git a/local.mk.sample b/local.mk.sample
index 84bcf44..5019576 100644
--- a/local.mk.sample
+++ b/local.mk.sample
@@ -5,40 +5,58 @@
 # This depends on a bunch of GNU-isms from bash, sed, touch.
 
 DLEXT := so
-rack_ver := 1.0.0
+gems := rack-1.1.0
 
 # Avoid loading rubygems to speed up tests because gmake is
 # fork+exec heavy with Ruby.
+prefix = $(HOME)
 ifeq ($(r19),)
-  ruby := $(HOME)/bin/ruby
-  RUBYLIB := $(HOME)/lib/ruby/gems/1.8/gems/rack-$(rack_ver)/lib
+  RUBY := $(prefix)/bin/ruby
+  gem_paths := $(addprefix $(prefix)/lib/ruby/gems/1.8/gems/,$(gems))
 else
-  export PATH := $(HOME)/ruby-1.9/bin:$(PATH)
-  ruby := $(HOME)/ruby-1.9/bin/ruby --disable-gems
-  RUBYLIB := $(HOME)/ruby-1.9/lib/ruby/gems/1.9.1/gems/rack-$(rack_ver)/lib
+  prefix := $(prefix)/ruby-1.9
+  export PATH := $(prefix)/bin:$(PATH)
+  RUBY := $(prefix)/bin/ruby --disable-gems
+  gem_paths := $(addprefix $(prefix)/lib/ruby/gems/1.9.1/gems/,$(gems))
 endif
 
-# pipefail is THE reason to use bash (v3+)
-SHELL := /bin/bash -e -o pipefail
+ifdef gem_paths
+  sp :=
+  sp +=
+  export RUBYLIB := $(subst $(sp),:,$(addsuffix /lib,$(gem_paths)))
+endif
+
+# pipefail is THE reason to use bash (v3+) or newer revisions of ksh93
+# SHELL := /bin/bash -e -o pipefail
+SHELL := /bin/ksh93 -e -o pipefail
 
 full-test: test-18 test-19
 test-18:
-        $(MAKE) test test-rails 2>&1 | sed -u -e 's!^!1.8 !'
+        $(MAKE) test test-rails 2>&1 | sed -e 's!^!1.8 !'
 test-19:
-        $(MAKE) test test-rails r19=1 2>&1 | sed -u -e 's!^!1.9 !'
+        $(MAKE) test test-rails r19=1 2>&1 | sed -e 's!^!1.9 !'
+
+latest: NEWS
+        @awk 'BEGIN{RS="=== ";ORS=""}NR==2{sub(/\n$$/,"");print RS""$$0 }' < $<
 
 # publishes docs to http://unicorn.bogomips.org
 publish_doc:
         -git set-file-times
-        $(MAKE) doc
+        $(RM) -r doc ChangeLog NEWS
+        $(MAKE) doc LOG_VERSION=$(shell git tag -l | tail -1)
+        $(MAKE) -s latest > doc/LATEST
+        find doc/images doc/js -type f | \
+                TZ=UTC xargs touch -d '1970-01-01 00:00:00' doc/rdoc.css
         $(MAKE) doc_gz
-        rsync -av --delete doc/ dcvr:/srv/unicorn/
+        tar cf - $$(git ls-files examples/) | (cd doc && tar xf -)
+        chmod 644 $$(find doc -type f)
+        rsync -av doc/ dcvr:/srv/unicorn/
         git ls-files | xargs touch
 
 # Create gzip variants of the same timestamp as the original so nginx
 # "gzip_static on" can serve the gzipped versions directly.
-doc_gz: suf := html js css
-doc_gz: globs := $(addprefix doc/*.,$(suf)) $(addprefix doc/*/*.,$(suf))
-doc_gz: docs := $(wildcard $(globs))
+doc_gz: docs = $(shell find doc -type f ! -regex '^.*\.\(gif\|jpg\|png\|gz\)$$')
 doc_gz:
-        for i in $(docs); do gzip < $$i > $$i.gz; touch -r $$i $$i.gz; done
+        touch doc/NEWS.atom.xml -d "$$(awk 'NR==1{print $$4,$$5,$$6}' NEWS)"
+        for i in $(docs); do \
+          gzip --rsyncable -9 < $$i > $$i.gz; touch -r $$i $$i.gz; done
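
The gem_paths/RUBYLIB dance above just builds a colon-separated list of each gem's lib
directory so the tests can require rack without loading rubygems.  A small Ruby
illustration (hypothetical paths) of the value the Makefile exports:

    # sketch only: what the exported RUBYLIB ends up looking like
    gems = %w(rack-1.1.0)
    gem_paths = gems.map { |g| "#{ENV['HOME']}/lib/ruby/gems/1.8/gems/#{g}" }
    ENV['RUBYLIB'] = gem_paths.map { |p| "#{p}/lib" }.join(':')
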
diff --git a/setup.rb b/setup.rb
index 424a5f3..cf1abd9 100644
--- a/setup.rb
+++ b/setup.rb
@@ -1,3 +1,4 @@
+# -*- encoding: binary -*-
 #
 # setup.rb
 #
diff --git a/test/aggregate.rb b/test/aggregate.rb
index 1c2cc5c..5eebbe5 100755
--- a/test/aggregate.rb
+++ b/test/aggregate.rb
@@ -1,4 +1,6 @@
 #!/usr/bin/ruby -n
+# -*- encoding: binary -*-
+
 BEGIN { $tests = $assertions = $failures = $errors = 0 }
 
 $_ =~ /(\d+) tests, (\d+) assertions, (\d+) failures, (\d+) errors/ or next
diff --git a/test/benchmark/README b/test/benchmark/README
index b63b8a3..1d3cdd0 100644
--- a/test/benchmark/README
+++ b/test/benchmark/README
@@ -42,11 +42,6 @@ The benchmark client is usually httperf.
 Another gentle reminder: performance with slow networks/clients
 is NOT our problem.  That is the job of nginx (or similar).
 
-== request.rb, response.rb, big_request.rb
-
-These are micro-benchmarks designed to test internal components
-of Unicorn.  It assumes the internal Unicorn API is mostly stable.
-
 == Contributors
 
 This directory is maintained independently in the "benchmark" branch
diff --git a/test/benchmark/big_request.rb b/test/benchmark/big_request.rb
deleted file mode 100644
index a250c62..0000000
--- a/test/benchmark/big_request.rb
+++ /dev/null
@@ -1,44 +0,0 @@
-require 'benchmark'
-require 'tempfile'
-require 'unicorn'
-nr = ENV['nr'] ? ENV['nr'].to_i : 100
-bs = ENV['bs'] ? ENV['bs'].to_i : (1024 * 1024)
-count = ENV['count'] ? ENV['count'].to_i : 4
-length = bs * count
-slice = (' ' * bs).freeze
-
-big = Tempfile.new('')
-
-def big.unicorn_peeraddr # old versions of Unicorn used this
-  '127.0.0.1'
-end
-
-big.syswrite(
-"PUT /hello/world/puturl?abcd=efg&hi#anchor HTTP/1.0\r\n" \
-"Host: localhost\r\n" \
-"Accept: */*\r\n" \
-"Content-Length: #{length}\r\n" \
-"User-Agent: test-user-agent 0.1.0 (Mozilla compatible) 5.0 asdfadfasda\r\n" \
-"\r\n")
-count.times { big.syswrite(slice) }
-big.sysseek(0)
-big.fsync
-
-include Unicorn
-request = HttpRequest.new(Logger.new($stderr))
-unless request.respond_to?(:reset)
-  def request.reset
-    # no-op
-  end
-end
-
-Benchmark.bmbm do |x|
-  x.report("big") do
-    for i in 1..nr
-      request.read(big)
-      request.reset
-      big.sysseek(0)
-    end
-  end
-end
-
diff --git a/test/benchmark/request.rb b/test/benchmark/request.rb
deleted file mode 100644
index fc7822c..0000000
--- a/test/benchmark/request.rb
+++ /dev/null
@@ -1,56 +0,0 @@
-require 'benchmark'
-require 'unicorn'
-nr = ENV['nr'] ? ENV['nr'].to_i : 100000
-
-class TestClient
-  def initialize(response)
-    @response = (response.join("\r\n") << "\r\n\r\n").freeze
-  end
-  def sysread(len, buf)
-    buf.replace(@response)
-  end
-
-  alias readpartial sysread
-
-  # old versions of Unicorn used this
-  def unicorn_peeraddr
-    '127.0.0.1'
-  end
-end
-
-small = TestClient.new([
-  'GET / HTTP/1.0',
-  'Host: localhost',
-  'Accept: */*',
-  'User-Agent: test-user-agent 0.1.0'
-])
-
-medium = TestClient.new([
-  'GET /hello/world/geturl?abcd=efg&hi#anchor HTTP/1.0',
-  'Host: localhost',
-  'Accept: */*',
-  'User-Agent: test-user-agent 0.1.0 (Mozilla compatible) 5.0 asdfadfasda'
-])
-
-include Unicorn
-request = HttpRequest.new(Logger.new($stderr))
-unless request.respond_to?(:reset)
-  def request.reset
-    # no-op
-  end
-end
-
-Benchmark.bmbm do |x|
-  x.report("small") do
-    for i in 1..nr
-      request.read(small)
-      request.reset
-    end
-  end
-  x.report("medium") do
-    for i in 1..nr
-      request.read(medium)
-      request.reset
-    end
-  end
-end
diff --git a/test/benchmark/response.rb b/test/benchmark/response.rb
deleted file mode 100644
index cb7397b..0000000
--- a/test/benchmark/response.rb
+++ /dev/null
@@ -1,30 +0,0 @@
-require 'benchmark'
-require 'unicorn'
-
-class NullWriter
-  def syswrite(buf); buf.size; end
-  alias write syswrite
-  def close; end
-end
-
-include Unicorn
-
-socket = NullWriter.new
-bs = ENV['bs'] ? ENV['bs'].to_i : 4096
-count = ENV['count'] ? ENV['count'].to_i : 1
-slice = (' ' * bs).freeze
-body = (1..count).map { slice }.freeze
-hdr = {
-  'Content-Length' => (bs * count).to_s.freeze,
-  'Content-Type' => 'text/plain'.freeze
-}.freeze
-response = [ 200, hdr, body ].freeze
-
-nr = ENV['nr'] ? ENV['nr'].to_i : 100000
-Benchmark.bmbm do |x|
-  x.report do
-    for i in 1..nr
-      HttpResponse.write(socket.dup, response)
-    end
-  end
-end
diff --git a/test/exec/test_exec.rb b/test/exec/test_exec.rb
index 014b270..24ba856 100644
--- a/test/exec/test_exec.rb
+++ b/test/exec/test_exec.rb
@@ -1,4 +1,7 @@
+# -*- encoding: binary -*-
+
 # Copyright (c) 2009 Eric Wong
+FLOCK_PATH = File.expand_path(__FILE__)
 require 'test/test_helper'
 
 do_test = true
@@ -25,6 +28,13 @@ use Rack::ContentLength
 run proc { |env| [ 200, { 'Content-Type' => 'text/plain' }, [ "HI\\n" ] ] }
   EOS
 
+  SHOW_RACK_ENV = <<-EOS
+use Rack::ContentLength
+run proc { |env|
+  [ 200, { 'Content-Type' => 'text/plain' }, [ ENV['RACK_ENV'] ] ]
+}
+  EOS
+
   HELLO = <<-EOS
 class Hello
   def call(env)
@@ -72,11 +82,148 @@ end
     end
   end
 
+  def test_working_directory_rel_path_config_file
+    other = Tempfile.new('unicorn.wd')
+    File.unlink(other.path)
+    Dir.mkdir(other.path)
+    File.open("config.ru", "wb") do |fp|
+      fp.syswrite <<EOF
+use Rack::ContentLength
+run proc { |env| [ 200, { 'Content-Type' => 'text/plain' }, [ Dir.pwd ] ] }
+EOF
+    end
+    FileUtils.cp("config.ru", other.path + "/config.ru")
+    Dir.chdir(@tmpdir)
+
+    tmp = File.open('unicorn.config', 'wb')
+    tmp.syswrite <<EOF
+working_directory '#@tmpdir'
+listen '#@addr:#@port'
+EOF
+    pid = xfork { redirect_test_io { exec($unicorn_bin, "-c#{tmp.path}") } }
+    wait_workers_ready("test_stderr.#{pid}.log", 1)
+    results = hit(["http://#@addr:#@port/"])
+    assert_equal @tmpdir, results.first
+    File.truncate("test_stderr.#{pid}.log", 0)
+
+    tmp.sysseek(0)
+    tmp.truncate(0)
+    tmp.syswrite <<EOF
+working_directory '#{other.path}'
+listen '#@addr:#@port'
+EOF
+
+    Process.kill(:HUP, pid)
+    lines = []
+    re = /config_file=(.+) would not be accessible in working_directory=(.+)/
+    until lines.grep(re)[0]
+      sleep 0.1
+      lines = File.readlines("test_stderr.#{pid}.log")
+    end
+
+    File.truncate("test_stderr.#{pid}.log", 0)
+    FileUtils.cp('unicorn.config', other.path + "/unicorn.config")
+    Process.kill(:HUP, pid)
+    wait_workers_ready("test_stderr.#{pid}.log", 1)
+    results = hit(["http://#@addr:#@port/"])
+    assert_equal other.path, results.first
+
+    Process.kill(:QUIT, pid)
+    ensure
+      FileUtils.rmtree(other.path)
+  end
+
+  def test_working_directory
+    other = Tempfile.new('unicorn.wd')
+    File.unlink(other.path)
+    Dir.mkdir(other.path)
+    File.open("config.ru", "wb") do |fp|
+      fp.syswrite <<EOF
+use Rack::ContentLength
+run proc { |env| [ 200, { 'Content-Type' => 'text/plain' }, [ Dir.pwd ] ] }
+EOF
+    end
+    FileUtils.cp("config.ru", other.path + "/config.ru")
+    tmp = Tempfile.new('unicorn.config')
+    tmp.syswrite <<EOF
+working_directory '#@tmpdir'
+listen '#@addr:#@port'
+EOF
+    pid = xfork { redirect_test_io { exec($unicorn_bin, "-c#{tmp.path}") } }
+    wait_workers_ready("test_stderr.#{pid}.log", 1)
+    results = hit(["http://#@addr:#@port/"])
+    assert_equal @tmpdir, results.first
+    File.truncate("test_stderr.#{pid}.log", 0)
+
+    tmp.sysseek(0)
+    tmp.truncate(0)
+    tmp.syswrite <<EOF
+working_directory '#{other.path}'
+listen '#@addr:#@port'
+EOF
+
+    Process.kill(:HUP, pid)
+    wait_workers_ready("test_stderr.#{pid}.log", 1)
+    results = hit(["http://#@addr:#@port/"])
+    assert_equal other.path, results.first
+
+    Process.kill(:QUIT, pid)
+    ensure
+      FileUtils.rmtree(other.path)
+  end
+
+  def test_working_directory_controls_relative_paths
+    other = Tempfile.new('unicorn.wd')
+    File.unlink(other.path)
+    Dir.mkdir(other.path)
+    File.open("config.ru", "wb") do |fp|
+      fp.syswrite <<EOF
+use Rack::ContentLength
+run proc { |env| [ 200, { 'Content-Type' => 'text/plain' }, [ Dir.pwd ] ] }
+EOF
+    end
+    FileUtils.cp("config.ru", other.path + "/config.ru")
+    system('mkfifo', "#{other.path}/fifo")
+    tmp = Tempfile.new('unicorn.config')
+    tmp.syswrite <<EOF
+pid "pid_file_here"
+stderr_path "stderr_log_here"
+stdout_path "stdout_log_here"
+working_directory '#{other.path}'
+listen '#@addr:#@port'
+after_fork do |server, worker|
+  File.open("fifo", "wb").close
+end
+EOF
+    pid = xfork { redirect_test_io { exec($unicorn_bin, "-c#{tmp.path}") } }
+    File.open("#{other.path}/fifo", "rb").close
+
+    assert ! File.exist?("stderr_log_here")
+    assert ! File.exist?("stdout_log_here")
+    assert ! File.exist?("pid_file_here")
+
+    assert ! File.exist?("#@tmpdir/stderr_log_here")
+    assert ! File.exist?("#@tmpdir/stdout_log_here")
+    assert ! File.exist?("#@tmpdir/pid_file_here")
+
+    assert File.exist?("#{other.path}/pid_file_here")
+    assert_equal "#{pid}\n", File.read("#{other.path}/pid_file_here")
+    assert File.exist?("#{other.path}/stderr_log_here")
+    assert File.exist?("#{other.path}/stdout_log_here")
+    wait_master_ready("#{other.path}/stderr_log_here")
+
+    Process.kill(:QUIT, pid)
+    ensure
+      FileUtils.rmtree(other.path)
+  end
+
+
   def test_exit_signals
     %w(INT TERM QUIT).each do |sig|
       File.open("config.ru", "wb") { |fp| fp.syswrite(HI) }
       pid = xfork { redirect_test_io { exec($unicorn_bin, "-l#@addr:#@port") } }
       wait_master_ready("test_stderr.#{pid}.log")
+      wait_workers_ready("test_stderr.#{pid}.log", 1)
       status = nil
       assert_nothing_raised do
         Process.kill(sig, pid)
@@ -98,6 +245,46 @@ end
     assert_shutdown(pid)
   end
 
+  def test_rack_env_unset
+    File.open("config.ru", "wb") { |fp| fp.syswrite(SHOW_RACK_ENV) }
+    pid = fork { redirect_test_io { exec($unicorn_bin, "-l#@addr:#@port") } }
+    results = retry_hit(["http://#{@addr}:#{@port}/"])
+    assert_equal "development", results.first
+    assert_shutdown(pid)
+  end
+
+  def test_rack_env_cli_set
+    File.open("config.ru", "wb") { |fp| fp.syswrite(SHOW_RACK_ENV) }
+    pid = fork {
+      redirect_test_io { exec($unicorn_bin, "-l#@addr:#@port", "-Easdf") }
+    }
+    results = retry_hit(["http://#{@addr}:#{@port}/"])
+    assert_equal "asdf", results.first
+    assert_shutdown(pid)
+  end
+
+  def test_rack_env_ENV_set
+    File.open("config.ru", "wb") { |fp| fp.syswrite(SHOW_RACK_ENV) }
+    pid = fork {
+      ENV["RACK_ENV"] = "foobar"
+      redirect_test_io { exec($unicorn_bin, "-l#@addr:#@port") }
+    }
+    results = retry_hit(["http://#{@addr}:#{@port}/"])
+    assert_equal "foobar", results.first
+    assert_shutdown(pid)
+  end
+
+  def test_rack_env_cli_override_ENV
+    File.open("config.ru", "wb") { |fp| fp.syswrite(SHOW_RACK_ENV) }
+    pid = fork {
+      ENV["RACK_ENV"] = "foobar"
+      redirect_test_io { exec($unicorn_bin, "-l#@addr:#@port", "-Easdf") }
+    }
+    results = retry_hit(["http://#{@addr}:#{@port}/"])
+    assert_equal "asdf", results.first
+    assert_shutdown(pid)
+  end
+
   def test_ttin_ttou
     File.open("config.ru", "wb") { |fp| fp.syswrite(HI) }
     pid = fork { redirect_test_io { exec($unicorn_bin, "-l#@addr:#@port") } }
@@ -603,6 +790,26 @@ end
     reexec_usr2_quit_test(new_pid, pid_file)
   end
 
+  def test_daemonize_redirect_fail
+    pid_file = "#{@tmpdir}/test.pid"
+    log = Tempfile.new('unicorn_test_log')
+    ucfg = Tempfile.new('unicorn_test_config')
+    ucfg.syswrite("pid #{pid_file}\"\n")
+    err = Tempfile.new('stderr')
+    out = Tempfile.new('stdout ')
+
+    File.open("config.ru", "wb") { |fp| fp.syswrite(HI) }
+    pid = xfork do
+      $stderr.reopen(err.path, "a")
+      $stdout.reopen(out.path, "a")
+      exec($unicorn_bin, "-D", "-l#{@addr}:#{@port}", "-c#{ucfg.path}")
+    end
+    pid, status = Process.waitpid2(pid)
+    assert ! status.success?, "original process exited successfully"
+    sleep 1 # can't waitpid on a daemonized process :<
+    assert err.stat.size > 0
+  end
+
   def test_reexec_fd_leak
     unless RUBY_PLATFORM =~ /linux/ # Solaris may work, too, but I forget...
       warn "FD leak test only works on Linux at the moment"
@@ -626,6 +833,7 @@ end
     end
 
     wait_master_ready(log.path)
+    wait_workers_ready(log.path, 1)
     File.truncate(log.path, 0)
     wait_for_file(pid_file)
     orig_pid = pid = File.read(pid_file).to_i
@@ -641,6 +849,7 @@ end
     wait_for_death(pid)
 
     wait_master_ready(log.path)
+    wait_workers_ready(log.path, 1)
     File.truncate(log.path, 0)
     wait_for_file(pid_file)
     pid = File.read(pid_file).to_i
@@ -660,6 +869,7 @@ end
     wait_for_death(pid)
 
     wait_master_ready(log.path)
+    wait_workers_ready(log.path, 1)
     File.truncate(log.path, 0)
     wait_for_file(pid_file)
     pid = File.read(pid_file).to_i
@@ -671,4 +881,158 @@ end
     wait_for_death(pid)
   end
 
+  def hup_test_common(preload)
+    File.open("config.ru", "wb") { |fp| fp.syswrite(HI.gsub("HI", '#$$')) }
+    pid_file = Tempfile.new('pid')
+    ucfg = Tempfile.new('unicorn_test_config')
+    ucfg.syswrite("listen '#@addr:#@port'\n")
+    ucfg.syswrite("pid '#{pid_file.path}'\n")
+    ucfg.syswrite("preload_app true\n") if preload
+    ucfg.syswrite("stderr_path 'test_stderr.#$$.log'\n")
+    ucfg.syswrite("stdout_path 'test_stdout.#$$.log'\n")
+    pid = xfork {
+      redirect_test_io { exec($unicorn_bin, "-D", "-c", ucfg.path) }
+    }
+    _, status = Process.waitpid2(pid)
+    assert status.success?
+    wait_master_ready("test_stderr.#$$.log")
+    wait_workers_ready("test_stderr.#$$.log", 1)
+    uri = URI.parse("http://#@addr:#@port/")
+    pids = Tempfile.new('worker_pids')
+    hitter = fork {
+      bodies = Hash.new(0)
+      at_exit { pids.syswrite(bodies.inspect) }
+      trap(:TERM) { exit(0) }
+      loop {
+        rv = Net::HTTP.get(uri)
+        pid = rv.to_i
+        exit!(1) if pid <= 0
+        bodies[pid] += 1
+      }
+    }
+    sleep 5 # racy
+    daemon_pid = File.read(pid_file.path).to_i
+    assert daemon_pid > 0
+    Process.kill(:HUP, daemon_pid)
+    sleep 5 # racy
+    assert_nothing_raised { Process.kill(:TERM, hitter) }
+    _, hitter_status = Process.waitpid2(hitter)
+    assert hitter_status.success?
+    pids.sysseek(0)
+    pids = eval(pids.read)
+    assert_kind_of(Hash, pids)
+    assert_equal 2, pids.size
+    pids.keys.each { |x|
+      assert_kind_of(Integer, x)
+      assert x > 0
+      assert pids[x] > 0
+    }
+    assert_nothing_raised { Process.kill(:QUIT, daemon_pid) }
+    wait_for_death(daemon_pid)
+  end
+
+  def test_preload_app_hup
+    hup_test_common(true)
+  end
+
+  def test_hup
+    hup_test_common(false)
+  end
+
+  def test_default_listen_hup_holds_listener
+    default_listen_lock do
+      res, pid_path = default_listen_setup
+      daemon_pid = File.read(pid_path).to_i
+      assert_nothing_raised { Process.kill(:HUP, daemon_pid) }
+      wait_workers_ready("test_stderr.#$$.log", 1)
+      res2 = hit(["http://#{Unicorn::Const::DEFAULT_LISTEN}/"])
+      assert_match %r{\d+}, res2.first
+      assert res2.first != res.first
+      assert_nothing_raised { Process.kill(:QUIT, daemon_pid) }
+      wait_for_death(daemon_pid)
+    end
+  end
+
+  def test_default_listen_upgrade_holds_listener
+    default_listen_lock do
+      res, pid_path = default_listen_setup
+      daemon_pid = File.read(pid_path).to_i
+      assert_nothing_raised {
+        Process.kill(:USR2, daemon_pid)
+        wait_for_file("#{pid_path}.oldbin")
+        wait_for_file(pid_path)
+        Process.kill(:QUIT, daemon_pid)
+        wait_for_death(daemon_pid)
+      }
+      daemon_pid = File.read(pid_path).to_i
+      wait_workers_ready("test_stderr.#$$.log", 1)
+      File.truncate("test_stderr.#$$.log", 0)
+
+      res2 = hit(["http://#{Unicorn::Const::DEFAULT_LISTEN}/"])
+      assert_match %r{\d+}, res2.first
+      assert res2.first != res.first
+
+      assert_nothing_raised { Process.kill(:HUP, daemon_pid) }
+      wait_workers_ready("test_stderr.#$$.log", 1)
+      File.truncate("test_stderr.#$$.log", 0)
+      res3 = hit(["http://#{Unicorn::Const::DEFAULT_LISTEN}/"])
+      assert res2.first != res3.first
+
+      assert_nothing_raised { Process.kill(:QUIT, daemon_pid) }
+      wait_for_death(daemon_pid)
+    end
+  end
+
+  def default_listen_setup
+    File.open("config.ru", "wb") { |fp| fp.syswrite(HI.gsub("HI", '#$$')) }
+    pid_path = (tmp = Tempfile.new('pid')).path
+    tmp.close!
+    ucfg = Tempfile.new('unicorn_test_config')
+    ucfg.syswrite("pid '#{pid_path}'\n")
+    ucfg.syswrite("stderr_path 'test_stderr.#$$.log'\n")
+    ucfg.syswrite("stdout_path 'test_stdout.#$$.log'\n")
+    pid = xfork {
+      redirect_test_io { exec($unicorn_bin, "-D", "-c", ucfg.path) }
+    }
+    _, status = Process.waitpid2(pid)
+    assert status.success?
+    wait_master_ready("test_stderr.#$$.log")
+    wait_workers_ready("test_stderr.#$$.log", 1)
+    File.truncate("test_stderr.#$$.log", 0)
+    res = hit(["http://#{Unicorn::Const::DEFAULT_LISTEN}/"])
+    assert_match %r{\d+}, res.first
+    [ res, pid_path ]
+  end
+
+  # we need to flock() something to prevent these tests from running concurrently
+  def default_listen_lock(&block)
+    fp = File.open(FLOCK_PATH, "rb")
+    begin
+      fp.flock(File::LOCK_EX)
+      begin
+        TCPServer.new(Unicorn::Const::DEFAULT_HOST,
+                      Unicorn::Const::DEFAULT_PORT).close
+      rescue Errno::EADDRINUSE, Errno::EACCES
+        warn "can't bind to #{Unicorn::Const::DEFAULT_LISTEN}"
+        return false
+      end
+
+      # unused_port should never take this, but we may run in an environment
+      # where tests are being run against older unicorns...
+      lock_path = "#{Dir::tmpdir}/unicorn_test." \
+                  "#{Unicorn::Const::DEFAULT_LISTEN}.lock"
+      begin
+        lock = File.open(lock_path, File::WRONLY|File::CREAT|File::EXCL, 0600)
+        yield
+      rescue Errno::EEXIST
+        lock_path = nil
+        return false
+      ensure
+        File.unlink(lock_path) if lock_path
+      end
+    ensure
+      fp.flock(File::LOCK_UN)
+    end
+  end
+
 end if do_test
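
The working_directory tests above assert that relative pid and log paths resolve against
working_directory rather than the directory unicorn was launched from.  A hedged
configuration sketch (paths are hypothetical) matching what those tests expect:

    # sketch only: relative paths below land under /srv/app, not under $PWD
    working_directory "/srv/app"    # chdir happens before config.ru is loaded
    pid "pid_file_here"             # => /srv/app/pid_file_here
    stderr_path "stderr_log_here"   # => /srv/app/stderr_log_here
    stdout_path "stdout_log_here"   # => /srv/app/stdout_log_here
    listen "127.0.0.1:8080"
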
diff --git a/test/rails/app-1.2.3/app/controllers/application.rb b/test/rails/app-1.2.3/app/controllers/application.rb
index ae8cac0..e72474f 100644
--- a/test/rails/app-1.2.3/app/controllers/application.rb
+++ b/test/rails/app-1.2.3/app/controllers/application.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 class ApplicationController < ActionController::Base
   # Pick a unique cookie name to distinguish our session data from others'
   session :session_key => "_unicorn_rails_test.#{rand}"
diff --git a/test/rails/app-1.2.3/app/controllers/foo_controller.rb b/test/rails/app-1.2.3/app/controllers/foo_controller.rb
index 8d877d1..52b7947 100644
--- a/test/rails/app-1.2.3/app/controllers/foo_controller.rb
+++ b/test/rails/app-1.2.3/app/controllers/foo_controller.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 require 'digest/sha1'
 class FooController < ApplicationController
   def index
diff --git a/test/rails/app-1.2.3/app/helpers/application_helper.rb b/test/rails/app-1.2.3/app/helpers/application_helper.rb
index de6be79..d9889b3 100644
--- a/test/rails/app-1.2.3/app/helpers/application_helper.rb
+++ b/test/rails/app-1.2.3/app/helpers/application_helper.rb
@@ -1,2 +1,4 @@
+# -*- encoding: binary -*-
+
 module ApplicationHelper
 end
diff --git a/test/rails/app-1.2.3/config/boot.rb b/test/rails/app-1.2.3/config/boot.rb
index 71c7d7c..84a5c18 100644
--- a/test/rails/app-1.2.3/config/boot.rb
+++ b/test/rails/app-1.2.3/config/boot.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 unless defined?(RAILS_ROOT)
   root_path = File.join(File.dirname(__FILE__), '..')
   RAILS_ROOT = root_path
diff --git a/test/rails/app-1.2.3/config/environment.rb b/test/rails/app-1.2.3/config/environment.rb
index 2ef6b4a..e230a66 100644
--- a/test/rails/app-1.2.3/config/environment.rb
+++ b/test/rails/app-1.2.3/config/environment.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 unless defined? RAILS_GEM_VERSION
   RAILS_GEM_VERSION = ENV['UNICORN_RAILS_VERSION'] # || '1.2.3'
 end
diff --git a/test/rails/app-1.2.3/config/environments/development.rb b/test/rails/app-1.2.3/config/environments/development.rb
index 032fb46..9d78f5e 100644
--- a/test/rails/app-1.2.3/config/environments/development.rb
+++ b/test/rails/app-1.2.3/config/environments/development.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 config.cache_classes = false
 config.whiny_nils = true
 config.breakpoint_server = true
diff --git a/test/rails/app-1.2.3/config/environments/production.rb b/test/rails/app-1.2.3/config/environments/production.rb
index c4059e3..1e049b2 100644
--- a/test/rails/app-1.2.3/config/environments/production.rb
+++ b/test/rails/app-1.2.3/config/environments/production.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 config.cache_classes = true
 config.action_controller.consider_all_requests_local = false
 config.action_controller.perform_caching             = true
diff --git a/test/rails/app-1.2.3/config/routes.rb b/test/rails/app-1.2.3/config/routes.rb
index 774028f..70816dc 100644
--- a/test/rails/app-1.2.3/config/routes.rb
+++ b/test/rails/app-1.2.3/config/routes.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 ActionController::Routing::Routes.draw do |map|
   map.connect ':controller/:action/:id.:format'
   map.connect ':controller/:action/:id'
diff --git a/test/rails/app-2.0.2/app/controllers/application.rb b/test/rails/app-2.0.2/app/controllers/application.rb
index 09705d1..e7bb740 100644
--- a/test/rails/app-2.0.2/app/controllers/application.rb
+++ b/test/rails/app-2.0.2/app/controllers/application.rb
@@ -1,2 +1,4 @@
+# -*- encoding: binary -*-
+
 class ApplicationController < ActionController::Base
 end
diff --git a/test/rails/app-2.0.2/app/controllers/foo_controller.rb b/test/rails/app-2.0.2/app/controllers/foo_controller.rb
index 8d877d1..52b7947 100644
--- a/test/rails/app-2.0.2/app/controllers/foo_controller.rb
+++ b/test/rails/app-2.0.2/app/controllers/foo_controller.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 require 'digest/sha1'
 class FooController < ApplicationController
   def index
diff --git a/test/rails/app-2.0.2/app/helpers/application_helper.rb b/test/rails/app-2.0.2/app/helpers/application_helper.rb
index de6be79..d9889b3 100644
--- a/test/rails/app-2.0.2/app/helpers/application_helper.rb
+++ b/test/rails/app-2.0.2/app/helpers/application_helper.rb
@@ -1,2 +1,4 @@
+# -*- encoding: binary -*-
+
 module ApplicationHelper
 end
diff --git a/test/rails/app-2.0.2/config/boot.rb b/test/rails/app-2.0.2/config/boot.rb
index 71c7d7c..84a5c18 100644
--- a/test/rails/app-2.0.2/config/boot.rb
+++ b/test/rails/app-2.0.2/config/boot.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 unless defined?(RAILS_ROOT)
   root_path = File.join(File.dirname(__FILE__), '..')
   RAILS_ROOT = root_path
diff --git a/test/rails/app-2.0.2/config/environment.rb b/test/rails/app-2.0.2/config/environment.rb
index 7c720f6..9961f08 100644
--- a/test/rails/app-2.0.2/config/environment.rb
+++ b/test/rails/app-2.0.2/config/environment.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 unless defined? RAILS_GEM_VERSION
   RAILS_GEM_VERSION = ENV['UNICORN_RAILS_VERSION']
 end
diff --git a/test/rails/app-2.0.2/config/environments/development.rb b/test/rails/app-2.0.2/config/environments/development.rb
index 6a613c1..5e0f1ca 100644
--- a/test/rails/app-2.0.2/config/environments/development.rb
+++ b/test/rails/app-2.0.2/config/environments/development.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 config.cache_classes = false
 config.whiny_nils = true
 config.action_controller.consider_all_requests_local = true
diff --git a/test/rails/app-2.0.2/config/environments/production.rb b/test/rails/app-2.0.2/config/environments/production.rb
index c4059e3..1e049b2 100644
--- a/test/rails/app-2.0.2/config/environments/production.rb
+++ b/test/rails/app-2.0.2/config/environments/production.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 config.cache_classes = true
 config.action_controller.consider_all_requests_local = false
 config.action_controller.perform_caching             = true
diff --git a/test/rails/app-2.0.2/config/routes.rb b/test/rails/app-2.0.2/config/routes.rb
index 774028f..70816dc 100644
--- a/test/rails/app-2.0.2/config/routes.rb
+++ b/test/rails/app-2.0.2/config/routes.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 ActionController::Routing::Routes.draw do |map|
   map.connect ':controller/:action/:id.:format'
   map.connect ':controller/:action/:id'
diff --git a/test/rails/app-2.1.2/app/controllers/application.rb b/test/rails/app-2.1.2/app/controllers/application.rb
index 09705d1..e7bb740 100644
--- a/test/rails/app-2.1.2/app/controllers/application.rb
+++ b/test/rails/app-2.1.2/app/controllers/application.rb
@@ -1,2 +1,4 @@
+# -*- encoding: binary -*-
+
 class ApplicationController < ActionController::Base
 end
diff --git a/test/rails/app-2.1.2/app/controllers/foo_controller.rb b/test/rails/app-2.1.2/app/controllers/foo_controller.rb
index 8d877d1..52b7947 100644
--- a/test/rails/app-2.1.2/app/controllers/foo_controller.rb
+++ b/test/rails/app-2.1.2/app/controllers/foo_controller.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 require 'digest/sha1'
 class FooController < ApplicationController
   def index
diff --git a/test/rails/app-2.1.2/app/helpers/application_helper.rb b/test/rails/app-2.1.2/app/helpers/application_helper.rb
index de6be79..d9889b3 100644
--- a/test/rails/app-2.1.2/app/helpers/application_helper.rb
+++ b/test/rails/app-2.1.2/app/helpers/application_helper.rb
@@ -1,2 +1,4 @@
+# -*- encoding: binary -*-
+
 module ApplicationHelper
 end
diff --git a/test/rails/app-2.1.2/config/boot.rb b/test/rails/app-2.1.2/config/boot.rb
index 0a51688..e357f0a 100644
--- a/test/rails/app-2.1.2/config/boot.rb
+++ b/test/rails/app-2.1.2/config/boot.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 # Don't change this file!
 # Configure your app in config/environment.rb and config/environments/*.rb
 
diff --git a/test/rails/app-2.1.2/config/environment.rb b/test/rails/app-2.1.2/config/environment.rb
index 7c720f6..9961f08 100644
--- a/test/rails/app-2.1.2/config/environment.rb
+++ b/test/rails/app-2.1.2/config/environment.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 unless defined? RAILS_GEM_VERSION
   RAILS_GEM_VERSION = ENV['UNICORN_RAILS_VERSION']
 end
diff --git a/test/rails/app-2.1.2/config/environments/development.rb b/test/rails/app-2.1.2/config/environments/development.rb
index 7f49032..37f523f 100644
--- a/test/rails/app-2.1.2/config/environments/development.rb
+++ b/test/rails/app-2.1.2/config/environments/development.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 config.cache_classes = false
 config.whiny_nils = true
 config.action_controller.consider_all_requests_local = true
diff --git a/test/rails/app-2.1.2/config/environments/production.rb b/test/rails/app-2.1.2/config/environments/production.rb
index c4059e3..1e049b2 100644
--- a/test/rails/app-2.1.2/config/environments/production.rb
+++ b/test/rails/app-2.1.2/config/environments/production.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 config.cache_classes = true
 config.action_controller.consider_all_requests_local = false
 config.action_controller.perform_caching             = true
diff --git a/test/rails/app-2.1.2/config/routes.rb b/test/rails/app-2.1.2/config/routes.rb
index 774028f..70816dc 100644
--- a/test/rails/app-2.1.2/config/routes.rb
+++ b/test/rails/app-2.1.2/config/routes.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 ActionController::Routing::Routes.draw do |map|
   map.connect ':controller/:action/:id.:format'
   map.connect ':controller/:action/:id'
diff --git a/test/rails/app-2.2.2/app/controllers/application.rb b/test/rails/app-2.2.2/app/controllers/application.rb
index 09705d1..e7bb740 100644
--- a/test/rails/app-2.2.2/app/controllers/application.rb
+++ b/test/rails/app-2.2.2/app/controllers/application.rb
@@ -1,2 +1,4 @@
+# -*- encoding: binary -*-
+
 class ApplicationController < ActionController::Base
 end
diff --git a/test/rails/app-2.2.2/app/controllers/foo_controller.rb b/test/rails/app-2.2.2/app/controllers/foo_controller.rb
index 8d877d1..52b7947 100644
--- a/test/rails/app-2.2.2/app/controllers/foo_controller.rb
+++ b/test/rails/app-2.2.2/app/controllers/foo_controller.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 require 'digest/sha1'
 class FooController < ApplicationController
   def index
diff --git a/test/rails/app-2.2.2/app/helpers/application_helper.rb b/test/rails/app-2.2.2/app/helpers/application_helper.rb
index de6be79..d9889b3 100644
--- a/test/rails/app-2.2.2/app/helpers/application_helper.rb
+++ b/test/rails/app-2.2.2/app/helpers/application_helper.rb
@@ -1,2 +1,4 @@
+# -*- encoding: binary -*-
+
 module ApplicationHelper
 end
diff --git a/test/rails/app-2.2.2/config/boot.rb b/test/rails/app-2.2.2/config/boot.rb
index 0a51688..e357f0a 100644
--- a/test/rails/app-2.2.2/config/boot.rb
+++ b/test/rails/app-2.2.2/config/boot.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 # Don't change this file!
 # Configure your app in config/environment.rb and config/environments/*.rb
 
diff --git a/test/rails/app-2.2.2/config/environment.rb b/test/rails/app-2.2.2/config/environment.rb
index 7c720f6..9961f08 100644
--- a/test/rails/app-2.2.2/config/environment.rb
+++ b/test/rails/app-2.2.2/config/environment.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 unless defined? RAILS_GEM_VERSION
   RAILS_GEM_VERSION = ENV['UNICORN_RAILS_VERSION']
 end
diff --git a/test/rails/app-2.2.2/config/environments/development.rb b/test/rails/app-2.2.2/config/environments/development.rb
index 7f49032..37f523f 100644
--- a/test/rails/app-2.2.2/config/environments/development.rb
+++ b/test/rails/app-2.2.2/config/environments/development.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 config.cache_classes = false
 config.whiny_nils = true
 config.action_controller.consider_all_requests_local = true
diff --git a/test/rails/app-2.2.2/config/environments/production.rb b/test/rails/app-2.2.2/config/environments/production.rb
index c4059e3..1e049b2 100644
--- a/test/rails/app-2.2.2/config/environments/production.rb
+++ b/test/rails/app-2.2.2/config/environments/production.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 config.cache_classes = true
 config.action_controller.consider_all_requests_local = false
 config.action_controller.perform_caching             = true
diff --git a/test/rails/app-2.2.2/config/routes.rb b/test/rails/app-2.2.2/config/routes.rb
index 774028f..70816dc 100644
--- a/test/rails/app-2.2.2/config/routes.rb
+++ b/test/rails/app-2.2.2/config/routes.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 ActionController::Routing::Routes.draw do |map|
   map.connect ':controller/:action/:id.:format'
   map.connect ':controller/:action/:id'
diff --git a/test/rails/app-2.3.2.1/.gitignore b/test/rails/app-2.3.5/.gitignore
index f451f91..f451f91 100644
--- a/test/rails/app-2.3.2.1/.gitignore
+++ b/test/rails/app-2.3.5/.gitignore
diff --git a/test/rails/app-2.3.2.1/Rakefile b/test/rails/app-2.3.5/Rakefile
index fbebfca..fbebfca 100644
--- a/test/rails/app-2.3.2.1/Rakefile
+++ b/test/rails/app-2.3.5/Rakefile
diff --git a/test/rails/app-2.3.2.1/app/controllers/application_controller.rb b/test/rails/app-2.3.5/app/controllers/application_controller.rb
index 6160f52..07c333e 100644
--- a/test/rails/app-2.3.2.1/app/controllers/application_controller.rb
+++ b/test/rails/app-2.3.5/app/controllers/application_controller.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 class ApplicationController < ActionController::Base
   helper :all
 end
diff --git a/test/rails/app-2.3.2.1/app/controllers/foo_controller.rb b/test/rails/app-2.3.5/app/controllers/foo_controller.rb
index 261669c..54ca1ed 100644
--- a/test/rails/app-2.3.2.1/app/controllers/foo_controller.rb
+++ b/test/rails/app-2.3.5/app/controllers/foo_controller.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 require 'digest/sha1'
 class FooController < ApplicationController
   def index
diff --git a/test/rails/app-2.3.2.1/app/helpers/application_helper.rb b/test/rails/app-2.3.5/app/helpers/application_helper.rb
index de6be79..d9889b3 100644
--- a/test/rails/app-2.3.2.1/app/helpers/application_helper.rb
+++ b/test/rails/app-2.3.5/app/helpers/application_helper.rb
@@ -1,2 +1,4 @@
+# -*- encoding: binary -*-
+
 module ApplicationHelper
 end
diff --git a/test/rails/app-2.3.2.1/config/boot.rb b/test/rails/app-2.3.5/config/boot.rb
index d22e6b0..b6c80d5 100644
--- a/test/rails/app-2.3.2.1/config/boot.rb
+++ b/test/rails/app-2.3.5/config/boot.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 RAILS_ROOT = "#{File.dirname(__FILE__)}/.." unless defined?(RAILS_ROOT)
 
 module Rails
diff --git a/test/rails/app-2.3.2.1/config/database.yml b/test/rails/app-2.3.5/config/database.yml
index 9f77843..9f77843 100644
--- a/test/rails/app-2.3.2.1/config/database.yml
+++ b/test/rails/app-2.3.5/config/database.yml
diff --git a/test/rails/app-2.3.2.1/config/environment.rb b/test/rails/app-2.3.5/config/environment.rb
index 17abdb7..6eb092c 100644
--- a/test/rails/app-2.3.2.1/config/environment.rb
+++ b/test/rails/app-2.3.5/config/environment.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 unless defined? RAILS_GEM_VERSION
   RAILS_GEM_VERSION = ENV['UNICORN_RAILS_VERSION']
 end
diff --git a/test/rails/app-2.3.2.1/config/environments/development.rb b/test/rails/app-2.3.5/config/environments/development.rb
index 55376c5..3d381d2 100644
--- a/test/rails/app-2.3.2.1/config/environments/development.rb
+++ b/test/rails/app-2.3.5/config/environments/development.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 config.cache_classes = false
 config.whiny_nils = true
 config.action_controller.consider_all_requests_local = true
diff --git a/test/rails/app-2.3.2.1/config/environments/production.rb b/test/rails/app-2.3.5/config/environments/production.rb
index 474257d..08710a4 100644
--- a/test/rails/app-2.3.2.1/config/environments/production.rb
+++ b/test/rails/app-2.3.5/config/environments/production.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 config.cache_classes = true
 config.action_controller.consider_all_requests_local = false
 config.action_controller.perform_caching             = true
diff --git a/test/rails/app-2.3.2.1/config/routes.rb b/test/rails/app-2.3.5/config/routes.rb
index 4248853..ac7877c 100644
--- a/test/rails/app-2.3.2.1/config/routes.rb
+++ b/test/rails/app-2.3.5/config/routes.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 ActionController::Routing::Routes.draw do |map|
   map.connect ':controller/:action/:id'
   map.connect ':controller/:action/:id.:format'
diff --git a/test/rails/app-2.3.2.1/db/.gitignore b/test/rails/app-2.3.5/db/.gitignore
index e69de29..e69de29 100644
--- a/test/rails/app-2.3.2.1/db/.gitignore
+++ b/test/rails/app-2.3.5/db/.gitignore
diff --git a/test/rails/app-2.3.2.1/log/.gitignore b/test/rails/app-2.3.5/log/.gitignore
index 397b4a7..397b4a7 100644
--- a/test/rails/app-2.3.2.1/log/.gitignore
+++ b/test/rails/app-2.3.5/log/.gitignore
diff --git a/test/rails/app-2.3.2.1/public/404.html b/test/rails/app-2.3.5/public/404.html
index 44d986c..44d986c 100644
--- a/test/rails/app-2.3.2.1/public/404.html
+++ b/test/rails/app-2.3.5/public/404.html
diff --git a/test/rails/app-2.3.2.1/public/500.html b/test/rails/app-2.3.5/public/500.html
index e534a49..e534a49 100644
--- a/test/rails/app-2.3.2.1/public/500.html
+++ b/test/rails/app-2.3.5/public/500.html
diff --git a/test/rails/app-2.3.5/public/x.txt b/test/rails/app-2.3.5/public/x.txt
new file mode 100644
index 0000000..e427984
--- /dev/null
+++ b/test/rails/app-2.3.5/public/x.txt
@@ -0,0 +1 @@
+HELLO
diff --git a/test/rails/test_rails.rb b/test/rails/test_rails.rb
index c7add20..9502dcb 100644
--- a/test/rails/test_rails.rb
+++ b/test/rails/test_rails.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 # Copyright (c) 2009 Eric Wong
 require 'test/test_helper'
 
@@ -142,18 +144,24 @@ logger Logger.new('#{COMMON_TMP.path}')
         end
       end
     end
-    resp = `curl -isSfN -Ffile=@#{tmp.path} http://#@addr:#@port/foo/xpost`
-    assert $?.success?
-    resp = resp.split(/\r?\n/)
-    grepped = resp.grep(/^sha1: (.{40})/)
-    assert_equal 1, grepped.size
-    assert_equal(sha1.hexdigest, /^sha1: (.{40})/.match(grepped.first)[1])
-
-    grepped = resp.grep(/^Content-Type:\s+(.+)/i)
-    assert_equal 1, grepped.size
-    assert_match %r{^text/plain}, grepped.first.split(/\s*:\s*/)[1]
 
-    assert_equal 1, resp.grep(/^Status:/i).size
+    # fixed in Rack commit 44ed4640f077504a49b7f1cabf8d6ad7a13f6441, but
+    # no released version of Rails or Rack has this fix
+    if RB_V[0] >= 1 && RB_V[1] >= 9
+      warn "multipart broken with Rack 1.0.0 and Rails 2.3.2.1 under 1.9"
+    else
+      resp = `curl -isSfN -Ffile=@#{tmp.path} http://#@addr:#@port/foo/xpost`
+      assert $?.success?
+      resp = resp.split(/\r?\n/)
+      grepped = resp.grep(/^sha1: (.{40})/)
+      assert_equal 1, grepped.size
+      assert_equal(sha1.hexdigest, /^sha1: (.{40})/.match(grepped.first)[1])
+
+      grepped = resp.grep(/^Content-Type:\s+(.+)/i)
+      assert_equal 1, grepped.size
+      assert_match %r{^text/plain}, grepped.first.split(/\s*:\s*/)[1]
+      assert_equal 1, resp.grep(/^Status:/i).size
+    end
 
     # make sure we can get 403 responses, too
     uri = URI.parse("http://#@addr:#@port/foo/xpost")
@@ -223,6 +231,31 @@ logger Logger.new('#{COMMON_TMP.path}')
     assert_equal '404 Not Found', res['Status']
   end
 
+  def test_alt_url_root_config_env
+    # cbf to actually work on this since I never use this feature (ewong)
+    return unless ROR_V[0] >= 2 && ROR_V[1] >= 3
+    tmp = Tempfile.new(nil)
+    tmp.syswrite("ENV['RAILS_RELATIVE_URL_ROOT'] = '/poo'\n")
+    redirect_test_io do
+      @pid = fork { exec 'unicorn_rails', "-l#@addr:#@port", "-c", tmp.path }
+    end
+    wait_master_ready("test_stderr.#$$.log")
+    res = Net::HTTP.get_response(URI.parse("http://#@addr:#@port/poo/foo"))
+    assert_equal "200", res.code
+    assert_equal '200 OK', res['Status']
+    assert_equal "FOO\n", res.body
+    assert_match %r{^text/html\b}, res['Content-Type']
+    assert_equal "4", res['Content-Length']
+
+    res = Net::HTTP.get_response(URI.parse("http://#@addr:#@port/foo"))
+    assert_equal "404", res.code
+    assert_equal '404 Not Found', res['Status']
+
+    res = Net::HTTP.get_response(URI.parse("http://#@addr:#@port/poo/x.txt"))
+    assert_equal "200", res.code
+    assert_equal "HELLO\n", res.body
+  end
+
   def teardown
     return if @start_pid != $$
 
diff --git a/test/test_helper.rb b/test/test_helper.rb
index 787adbf..3bdbeb1 100644
--- a/test/test_helper.rb
+++ b/test/test_helper.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 # Copyright (c) 2005 Zed A. Shaw
 # You can redistribute it and/or modify it under the same terms as Ruby.
 #
@@ -27,7 +29,7 @@ require 'tempfile'
 require 'fileutils'
 require 'logger'
 require 'unicorn'
-require 'unicorn/http11'
+require 'unicorn_http'
 
 if ENV['DEBUG']
   require 'ruby-debug'
@@ -102,6 +104,10 @@ def unused_port(addr = '127.0.0.1')
   begin
     begin
       port = base + rand(32768 - base)
+      while port == Unicorn::Const::DEFAULT_PORT
+        port = base + rand(32768 - base)
+      end
+
       sock = Socket.new(Socket::AF_INET, Socket::SOCK_STREAM, 0)
       sock.bind(Socket.pack_sockaddr_in(port, addr))
       sock.listen(5)
@@ -139,7 +145,7 @@ def retry_hit(uris = [])
   tries = DEFAULT_TRIES
   begin
     hit(uris)
-  rescue Errno::ECONNREFUSED => err
+  rescue Errno::EINVAL, Errno::ECONNREFUSED => err
     if (tries -= 1) > 0
       sleep DEFAULT_RES
       retry
@@ -262,3 +268,29 @@ def wait_for_death(pid)
   end
   raise "PID:#{pid} never died!"
 end
+
+# executes +cmd+ and chunks its STDOUT
+def chunked_spawn(stdout, *cmd)
+  fork {
+    crd, cwr = IO.pipe
+    crd.binmode
+    cwr.binmode
+    crd.sync = cwr.sync = true
+
+    pid = fork {
+      STDOUT.reopen(cwr)
+      crd.close
+      cwr.close
+      exec(*cmd)
+    }
+    cwr.close
+    begin
+      buf = crd.readpartial(16384)
+      stdout.write("#{'%x' % buf.size}\r\n#{buf}")
+    rescue EOFError
+      stdout.write("0\r\n")
+      pid, status = Process.waitpid2(pid)
+      exit status.exitstatus
+    end while true
+  }
+end
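
A usage sketch for the chunked_spawn helper above; it frames the child's stdout with
HTTP/1.1-style chunked encoding (a "%x\r\n" size prefix per chunk, terminated by "0\r\n").
The command and output path below are hypothetical:

    # sketch only: capture a command's output in chunked framing
    File.open("output.chunked", "wb") do |out|
      pid = chunked_spawn(out, "ls", "-l")
      Process.waitpid(pid)   # reap the intermediate fork
    end
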
diff --git a/test/unit/test_configurator.rb b/test/unit/test_configurator.rb
index 98f2db6..ac1efa8 100644
--- a/test/unit/test_configurator.rb
+++ b/test/unit/test_configurator.rb
@@ -1,7 +1,11 @@
+# -*- encoding: binary -*-
+
 require 'test/unit'
 require 'tempfile'
-require 'unicorn/configurator'
+require 'unicorn'
 
+TestStruct = Struct.new(
+  *(Unicorn::Configurator::DEFAULTS.keys + %w(listener_opts listeners)))
 class TestConfigurator < Test::Unit::TestCase
 
   def test_config_init
@@ -28,8 +32,10 @@ class TestConfigurator < Test::Unit::TestCase
     assert_equal "0.0.0.0:2007", meth.call('*:2007')
     assert_equal "0.0.0.0:2007", meth.call('2007')
     assert_equal "0.0.0.0:2007", meth.call(2007)
-    assert_match %r{\A\d+\.\d+\.\d+\.\d+:2007\z}, meth.call('1:2007')
-    assert_match %r{\A\d+\.\d+\.\d+\.\d+:2007\z}, meth.call('2:2007')
+
+    # the next two aren't portable, consider them unsupported for now
+    # assert_match %r{\A\d+\.\d+\.\d+\.\d+:2007\z}, meth.call('1:2007')
+    # assert_match %r{\A\d+\.\d+\.\d+\.\d+:2007\z}, meth.call('2:2007')
   end
 
   def test_config_invalid
@@ -51,22 +57,23 @@ class TestConfigurator < Test::Unit::TestCase
 
   def test_config_defaults
     cfg = Unicorn::Configurator.new(:use_defaults => true)
-    assert_nothing_raised { cfg.commit!(self) }
+    test_struct = TestStruct.new
+    assert_nothing_raised { cfg.commit!(test_struct) }
     Unicorn::Configurator::DEFAULTS.each do |key,value|
-      assert_equal value, instance_variable_get("@#{key.to_s}")
+      assert_equal value, test_struct.__send__(key)
     end
   end
 
   def test_config_defaults_skip
     cfg = Unicorn::Configurator.new(:use_defaults => true)
     skip = [ :logger ]
-    assert_nothing_raised { cfg.commit!(self, :skip => skip) }
-    @logger = nil
+    test_struct = TestStruct.new
+    assert_nothing_raised { cfg.commit!(test_struct, :skip => skip) }
     Unicorn::Configurator::DEFAULTS.each do |key,value|
       next if skip.include?(key)
-      assert_equal value, instance_variable_get("@#{key.to_s}")
+      assert_equal value, test_struct.__send__(key)
     end
-    assert_nil @logger
+    assert_nil test_struct.logger
   end
 
   def test_listen_options
@@ -78,8 +85,9 @@ class TestConfigurator < Test::Unit::TestCase
     assert_nothing_raised do
       cfg = Unicorn::Configurator.new(:config_file => tmp.path)
     end
-    assert_nothing_raised { cfg.commit!(self) }
-    assert(listener_opts = instance_variable_get("@listener_opts"))
+    test_struct = TestStruct.new
+    assert_nothing_raised { cfg.commit!(test_struct) }
+    assert(listener_opts = test_struct.listener_opts)
     assert_equal expect, listener_opts[listener]
   end
 
@@ -93,10 +101,41 @@ class TestConfigurator < Test::Unit::TestCase
     end
   end
 
+  def test_listen_option_bad_delay
+    tmp = Tempfile.new('unicorn_config')
+    expect = { :delay => "five" }
+    listener = "127.0.0.1:12345"
+    tmp.syswrite("listen '#{listener}', #{expect.inspect}\n")
+    assert_raises(ArgumentError) do
+      Unicorn::Configurator.new(:config_file => tmp.path)
+    end
+  end
+
+  def test_listen_option_float_delay
+    tmp = Tempfile.new('unicorn_config')
+    expect = { :delay => 0.5 }
+    listener = "127.0.0.1:12345"
+    tmp.syswrite("listen '#{listener}', #{expect.inspect}\n")
+    assert_nothing_raised do
+      Unicorn::Configurator.new(:config_file => tmp.path)
+    end
+  end
+
+  def test_listen_option_int_delay
+    tmp = Tempfile.new('unicorn_config')
+    expect = { :delay => 5 }
+    listener = "127.0.0.1:12345"
+    tmp.syswrite("listen '#{listener}', #{expect.inspect}\n")
+    assert_nothing_raised do
+      Unicorn::Configurator.new(:config_file => tmp.path)
+    end
+  end
+
   def test_after_fork_proc
+    test_struct = TestStruct.new
     [ proc { |a,b| }, Proc.new { |a,b| }, lambda { |a,b| } ].each do |my_proc|
-      Unicorn::Configurator.new(:after_fork => my_proc).commit!(self)
-      assert_equal my_proc, @after_fork
+      Unicorn::Configurator.new(:after_fork => my_proc).commit!(test_struct)
+      assert_equal my_proc, test_struct.after_fork
     end
   end
 
diff --git a/test/unit/test_http_parser.rb b/test/unit/test_http_parser.rb
index a158ebb..0443b46 100644
--- a/test/unit/test_http_parser.rb
+++ b/test/unit/test_http_parser.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 # Copyright (c) 2005 Zed A. Shaw
 # You can redistribute it and/or modify it under the same terms as Ruby.
 #
@@ -9,12 +11,13 @@ require 'test/test_helper'
 include Unicorn
 
 class HttpParserTest < Test::Unit::TestCase
-    
+
   def test_parse_simple
     parser = HttpParser.new
     req = {}
     http = "GET / HTTP/1.1\r\n\r\n"
-    assert parser.execute(req, http)
+    assert_equal req, parser.headers(req, http)
+    assert_equal '', http
 
     assert_equal 'HTTP/1.1', req['SERVER_PROTOCOL']
     assert_equal '/', req['REQUEST_PATH']
@@ -24,15 +27,18 @@ class HttpParserTest < Test::Unit::TestCase
     assert_nil req['FRAGMENT']
     assert_equal '', req['QUERY_STRING']
 
+    assert parser.keepalive?
     parser.reset
     req.clear
 
-    assert ! parser.execute(req, "G")
+    http = "G"
+    assert_nil parser.headers(req, http)
+    assert_equal "G", http
     assert req.empty?
 
     # try parsing again to ensure we were reset correctly
     http = "GET /hello-world HTTP/1.1\r\n\r\n"
-    assert parser.execute(req, http)
+    assert parser.headers(req, http)
 
     assert_equal 'HTTP/1.1', req['SERVER_PROTOCOL']
     assert_equal '/hello-world', req['REQUEST_PATH']
@@ -41,55 +47,184 @@ class HttpParserTest < Test::Unit::TestCase
     assert_equal 'GET', req['REQUEST_METHOD']
     assert_nil req['FRAGMENT']
     assert_equal '', req['QUERY_STRING']
+    assert_equal '', http
+    assert parser.keepalive?
+  end
+
+  def test_connection_close_no_ka
+    parser = HttpParser.new
+    req = {}
+    tmp = "GET / HTTP/1.1\r\nConnection: close\r\n\r\n"
+    assert_equal req.object_id, parser.headers(req, tmp).object_id
+    assert_equal "GET", req['REQUEST_METHOD']
+    assert ! parser.keepalive?
+  end
+
+  def test_connection_keep_alive_ka
+    parser = HttpParser.new
+    req = {}
+    tmp = "HEAD / HTTP/1.1\r\nConnection: keep-alive\r\n\r\n"
+    assert_equal req.object_id, parser.headers(req, tmp).object_id
+    assert parser.keepalive?
+  end
+
+  def test_connection_keep_alive_ka_bad_method
+    parser = HttpParser.new
+    req = {}
+    tmp = "POST / HTTP/1.1\r\nConnection: keep-alive\r\n\r\n"
+    assert_equal req.object_id, parser.headers(req, tmp).object_id
+    assert ! parser.keepalive?
+  end
+
+  def test_connection_keep_alive_ka_bad_version
+    parser = HttpParser.new
+    req = {}
+    tmp = "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n"
+    assert_equal req.object_id, parser.headers(req, tmp).object_id
+    assert parser.keepalive?
   end
 
   def test_parse_server_host_default_port
     parser = HttpParser.new
     req = {}
-    assert parser.execute(req, "GET / HTTP/1.1\r\nHost: foo\r\n\r\n")
+    tmp = "GET / HTTP/1.1\r\nHost: foo\r\n\r\n"
+    assert_equal req, parser.headers(req, tmp)
     assert_equal 'foo', req['SERVER_NAME']
     assert_equal '80', req['SERVER_PORT']
+    assert_equal '', tmp
+    assert parser.keepalive?
   end
 
   def test_parse_server_host_alt_port
     parser = HttpParser.new
     req = {}
-    assert parser.execute(req, "GET / HTTP/1.1\r\nHost: foo:999\r\n\r\n")
+    tmp = "GET / HTTP/1.1\r\nHost: foo:999\r\n\r\n"
+    assert_equal req, parser.headers(req, tmp)
     assert_equal 'foo', req['SERVER_NAME']
     assert_equal '999', req['SERVER_PORT']
+    assert_equal '', tmp
+    assert parser.keepalive?
   end
 
   def test_parse_server_host_empty_port
     parser = HttpParser.new
     req = {}
-    assert parser.execute(req, "GET / HTTP/1.1\r\nHost: foo:\r\n\r\n")
+    tmp = "GET / HTTP/1.1\r\nHost: foo:\r\n\r\n"
+    assert_equal req, parser.headers(req, tmp)
     assert_equal 'foo', req['SERVER_NAME']
     assert_equal '80', req['SERVER_PORT']
+    assert_equal '', tmp
+    assert parser.keepalive?
   end
 
   def test_parse_server_host_xfp_https
     parser = HttpParser.new
     req = {}
-    assert parser.execute(req, "GET / HTTP/1.1\r\nHost: foo:\r\n" \
-                          "X-Forwarded-Proto: https\r\n\r\n")
+    tmp = "GET / HTTP/1.1\r\nHost: foo:\r\n" \
+          "X-Forwarded-Proto: https\r\n\r\n"
+    assert_equal req, parser.headers(req, tmp)
     assert_equal 'foo', req['SERVER_NAME']
     assert_equal '443', req['SERVER_PORT']
+    assert_equal '', tmp
+    assert parser.keepalive?
   end
 
   def test_parse_strange_headers
     parser = HttpParser.new
     req = {}
     should_be_good = "GET / HTTP/1.1\r\naaaaaaaaaaaaa:++++++++++\r\n\r\n"
-    assert parser.execute(req, should_be_good)
+    assert_equal req, parser.headers(req, should_be_good)
+    assert_equal '', should_be_good
+    assert parser.keepalive?
+  end
+
+  # legacy test case from Mongrel that we never supported before...
+  # I still consider Pound irrelevant; unfortunately, stupid clients that
+  # send extremely big headers do exist and they've managed to find Unicorn...
+  def test_nasty_pound_header
+    parser = HttpParser.new
+    nasty_pound_header = "GET / HTTP/1.1\r\nX-SSL-Bullshit:   -----BEGIN CERTIFICATE-----\r\n\tMIIFbTCCBFWgAwIBAgICH4cwDQYJKoZIhvcNAQEFBQAwcDELMAkGA1UEBhMCVUsx\r\n\tETAPBgNVBAoTCGVTY2llbmNlMRIwEAYDVQQLEwlBdXRob3JpdHkxCzAJBgNVBAMT\r\n\tAkNBMS0wKwYJKoZIhvcNAQkBFh5jYS1vcGVyYXRvckBncmlkLXN1cHBvcnQuYWMu\r\n\tdWswHhcNMDYwNzI3MTQxMzI4WhcNMDcwNzI3MTQxMzI4WjBbMQswCQYDVQQGEwJV\r\n\tSzERMA8GA1UEChMIZVNjaWVuY2UxEzARBgNVBAsTCk1hbmNoZXN0ZXIxCzAJBgNV\r\n\tBAcTmrsogriqMWLAk1DMRcwFQYDVQQDEw5taWNoYWVsIHBhcmQYJKoZIhvcNAQEB\r\n\tBQADggEPADCCAQoCggEBANPEQBgl1IaKdSS1TbhF3hEXSl72G9J+WC/1R64fAcEF\r\n\tW51rEyFYiIeZGx/BVzwXbeBoNUK41OK65sxGuflMo5gLflbwJtHBRIEKAfVVp3YR\r\n\tgW7cMA/s/XKgL1GEC7rQw8lIZT8RApukCGqOVHSi/F1SiFlPDxuDfmdiNzL31+sL\r\n\t0iwHDdNkGjy5pyBSB8Y79dsSJtCW/iaLB0/n8Sj7HgvvZJ7x0fr+RQjYOUUfrePP\r\n\tu2MSpFyf+9BbC/aXgaZuiCvSR+8Snv3xApQY+fULK/xY8h8Ua51iXoQ5jrgu2SqR\r\n\twgA7BUi3G8LFzMBl8FRCDYGUDy7M6QaHXx1ZWIPWNKsCAwEAAaOCAiQwggIgMAwG\r\n\tA1UdEwEB/wQCMAAwEQYJYIZIAYb4QgEBBAQDAgWgMA4GA1UdDwEB/wQEAwID6DAs\r\n\tBglghkgBhvhCAQ0EHxYdVUsgZS1TY2llbmNlIFVzZXIgQ2VydGlmaWNhdGUwHQYD\r\n\tVR0OBBYEFDTt/sf9PeMaZDHkUIldrDYMNTBZMIGaBgNVHSMEgZIwgY+AFAI4qxGj\r\n\tloCLDdMVKwiljjDastqooXSkcjBwMQswCQYDVQQGEwJVSzERMA8GA1UEChMIZVNj\r\n\taWVuY2UxEjAQBgNVBAsTCUF1dGhvcml0eTELMAkGA1UEAxMCQ0ExLTArBgkqhkiG\r\n\t9w0BCQEWHmNhLW9wZXJhdG9yQGdyaWQtc3VwcG9ydC5hYy51a4IBADApBgNVHRIE\r\n\tIjAggR5jYS1vcGVyYXRvckBncmlkLXN1cHBvcnQuYWMudWswGQYDVR0gBBIwEDAO\r\n\tBgwrBgEEAdkvAQEBAQYwPQYJYIZIAYb4QgEEBDAWLmh0dHA6Ly9jYS5ncmlkLXN1\r\n\tcHBvcnQuYWMudmT4sopwqlBWsvcHViL2NybC9jYWNybC5jcmwwPQYJYIZIAYb4QgEDBDAWLmh0\r\n\tdHA6Ly9jYS5ncmlkLXN1cHBvcnQuYWMudWsvcHViL2NybC9jYWNybC5jcmwwPwYD\r\n\tVR0fBDgwNjA0oDKgMIYuaHR0cDovL2NhLmdyaWQt5hYy51ay9wdWIv\r\n\tY3JsL2NhY3JsLmNybDANBgkqhkiG9w0BAQUFAAOCAQEAS/U4iiooBENGW/Hwmmd3\r\n\tXCy6Zrt08YjKCzGNjorT98g8uGsqYjSxv/hmi0qlnlHs+k/3Iobc3LjS5AMYr5L8\r\n\tUO7OSkgFFlLHQyC9JzPfmLCAugvzEbyv4Olnsr8hbxF1MbKZoQxUZtMVu29wjfXk\r\n\thTeApBv7eaKCWpSp7MCbvgzm74izKhu3vlDk9w6qVrxePfGgpKPqfHiOoGhFnbTK\r\n\twTC6o2xq5y0qZ03JonF7OJspEd3I5zKY3E+ov7/ZhW6DqT8UFvsAdjvQbXyhV8Eu\r\n\tYhixw1aKEPzNjNowuIseVogKOLXxWI5vAi5HgXdS0/ES5gDGsABo4fqovUKlgop3\r\n\tRA==\r\n\t-----END CERTIFICATE-----\r\n\r\n"
+    req = {}
+    buf = nasty_pound_header.dup
+
+    assert nasty_pound_header =~ /(-----BEGIN .*--END CERTIFICATE-----)/m
+    expect = $1.dup
+    expect.gsub!(/\r\n\t/, ' ')
+    assert_equal req, parser.headers(req, buf)
+    assert_equal '', buf
+    assert_equal expect, req['HTTP_X_SSL_BULLSHIT']
+  end
+
+  def test_continuation_eats_leading_spaces
+    parser = HttpParser.new
+    header = "GET / HTTP/1.1\r\n" \
+             "X-ASDF:      \r\n" \
+             "\t\r\n" \
+             "    \r\n" \
+             "  ASDF\r\n\r\n"
+    req = {}
+    assert_equal req, parser.headers(req, header)
+    assert_equal '', header
+    assert_equal 'ASDF', req['HTTP_X_ASDF']
+  end
+
+  def test_continuation_eats_scattered_leading_spaces
+    parser = HttpParser.new
+    header = "GET / HTTP/1.1\r\n" \
+             "X-ASDF:   hi\r\n" \
+             "    y\r\n" \
+             "\t\r\n" \
+             "       x\r\n" \
+             "  ASDF\r\n\r\n"
+    req = {}
+    assert_equal req, parser.headers(req, header)
+    assert_equal '', header
+    assert_equal 'hi y x ASDF', req['HTTP_X_ASDF']
+  end
+
+  def test_continuation_with_absolute_uri_and_ignored_host_header
+    parser = HttpParser.new
+    header = "GET http://example.com/ HTTP/1.1\r\n" \
+             "Host: \r\n" \
+             "    YHBT.net\r\n" \
+             "\r\n"
+    req = {}
+    assert_equal req, parser.headers(req, header)
+    assert_equal 'example.com', req['HTTP_HOST']
+  end
 
-    # ref: http://thread.gmane.org/gmane.comp.lang.ruby.mongrel.devel/37/focus=45
-    # (note we got 'pen' mixed up with 'pound' in that thread,
-    # but the gist of it is still relevant: these nasty headers are irrelevant
-    #
-    # nasty_pound_header = "GET / HTTP/1.1\r\nX-SSL-Bullshit:   -----BEGIN CERTIFICATE-----\r\n\tMIIFbTCCBFWgAwIBAgICH4cwDQYJKoZIhvcNAQEFBQAwcDELMAkGA1UEBhMCVUsx\r\n\tETAPBgNVBAoTCGVTY2llbmNlMRIwEAYDVQQLEwlBdXRob3JpdHkxCzAJBgNVBAMT\r\n\tAkNBMS0wKwYJKoZIhvcNAQkBFh5jYS1vcGVyYXRvckBncmlkLXN1cHBvcnQuYWMu\r\n\tdWswHhcNMDYwNzI3MTQxMzI4WhcNMDcwNzI3MTQxMzI4WjBbMQswCQYDVQQGEwJV\r\n\tSzERMA8GA1UEChMIZVNjaWVuY2UxEzARBgNVBAsTCk1hbmNoZXN0ZXIxCzAJBgNV\r\n\tBAcTmrsogriqMWLAk1DMRcwFQYDVQQDEw5taWNoYWVsIHBhcmQYJKoZIhvcNAQEB\r\n\tBQADggEPADCCAQoCggEBANPEQBgl1IaKdSS1TbhF3hEXSl72G9J+WC/1R64fAcEF\r\n\tW51rEyFYiIeZGx/BVzwXbeBoNUK41OK65sxGuflMo5gLflbwJtHBRIEKAfVVp3YR\r\n\tgW7cMA/s/XKgL1GEC7rQw8lIZT8RApukCGqOVHSi/F1SiFlPDxuDfmdiNzL31+sL\r\n\t0iwHDdNkGjy5pyBSB8Y79dsSJtCW/iaLB0/n8Sj7HgvvZJ7x0fr+RQjYOUUfrePP\r\n\tu2MSpFyf+9BbC/aXgaZuiCvSR+8Snv3xApQY+fULK/xY8h8Ua51iXoQ5jrgu2SqR\r\n\twgA7BUi3G8LFzMBl8FRCDYGUDy7M6QaHXx1ZWIPWNKsCAwEAAaOCAiQwggIgMAwG\r\n\tA1UdEwEB/wQCMAAwEQYJYIZIAYb4QgEBBAQDAgWgMA4GA1UdDwEB/wQEAwID6DAs\r\n\tBglghkgBhvhCAQ0EHxYdVUsgZS1TY2llbmNlIFVzZXIgQ2VydGlmaWNhdGUwHQYD\r\n\tVR0OBBYEFDTt/sf9PeMaZDHkUIldrDYMNTBZMIGaBgNVHSMEgZIwgY+AFAI4qxGj\r\n\tloCLDdMVKwiljjDastqooXSkcjBwMQswCQYDVQQGEwJVSzERMA8GA1UEChMIZVNj\r\n\taWVuY2UxEjAQBgNVBAsTCUF1dGhvcml0eTELMAkGA1UEAxMCQ0ExLTArBgkqhkiG\r\n\t9w0BCQEWHmNhLW9wZXJhdG9yQGdyaWQtc3VwcG9ydC5hYy51a4IBADApBgNVHRIE\r\n\tIjAggR5jYS1vcGVyYXRvckBncmlkLXN1cHBvcnQuYWMudWswGQYDVR0gBBIwEDAO\r\n\tBgwrBgEEAdkvAQEBAQYwPQYJYIZIAYb4QgEEBDAWLmh0dHA6Ly9jYS5ncmlkLXN1\r\n\tcHBvcnQuYWMudmT4sopwqlBWsvcHViL2NybC9jYWNybC5jcmwwPQYJYIZIAYb4QgEDBDAWLmh0\r\n\tdHA6Ly9jYS5ncmlkLXN1cHBvcnQuYWMudWsvcHViL2NybC9jYWNybC5jcmwwPwYD\r\n\tVR0fBDgwNjA0oDKgMIYuaHR0cDovL2NhLmdyaWQt5hYy51ay9wdWIv\r\n\tY3JsL2NhY3JsLmNybDANBgkqhkiG9w0BAQUFAAOCAQEAS/U4iiooBENGW/Hwmmd3\r\n\tXCy6Zrt08YjKCzGNjorT98g8uGsqYjSxv/hmi0qlnlHs+k/3Iobc3LjS5AMYr5L8\r\n\tUO7OSkgFFlLHQyC9JzPfmLCAugvzEbyv4Olnsr8hbxF1MbKZoQxUZtMVu29wjfXk\r\n\thTeApBv7eaKCWpSp7MCbvgzm74izKhu3vlDk9w6qVrxePfGgpKPqfHiOoGhFnbTK\r\n\twTC6o2xq5y0qZ03JonF7OJspEd3I5zKY3E+ov7/ZhW6DqT8UFvsAdjvQbXyhV8Eu\r\n\tYhixw1aKEPzNjNowuIseVogKOLXxWI5vAi5HgXdS0/ES5gDGsABo4fqovUKlgop3\r\n\tRA==\r\n\t-----END CERTIFICATE-----\r\n\r\n"
-    # parser = HttpParser.new
-    # req = {}
-    # assert parser.execute(req, nasty_pound_header, 0)
+  # this may seem to be testing more of an implementation detail, but
+  # it also helps ensure we're safe in the presence of multiple parsers
+  # in case we ever go multithreaded/evented...
+  def test_resumable_continuations
+    nr = 1000
+    req = {}
+    header = "GET / HTTP/1.1\r\n" \
+             "X-ASDF:      \r\n" \
+             "  hello\r\n"
+    tmp = []
+    nr.times { |i|
+      parser = HttpParser.new
+      assert parser.headers(req, "#{header} #{i}\r\n").nil?
+      asdf = req['HTTP_X_ASDF']
+      assert_equal "hello #{i}", asdf
+      tmp << [ parser, asdf ]
+      req.clear
+    }
+    tmp.each_with_index { |(parser, asdf), i|
+      assert_equal req, parser.headers(req, "#{header} #{i}\r\n .\r\n\r\n")
+      assert_equal "hello #{i} .", asdf
+    }
+  end
+
+  def test_invalid_continuation
+    parser = HttpParser.new
+    header = "GET / HTTP/1.1\r\n" \
+             "    y\r\n" \
+             "Host: hello\r\n" \
+             "\r\n"
+    req = {}
+    assert_raises(HttpParserError) { parser.headers(req, header) }
   end
 
   def test_parse_ie6_urls
@@ -103,7 +238,10 @@ class HttpParserTest < Test::Unit::TestCase
       parser = HttpParser.new
       req = {}
       sorta_safe = %(GET #{path} HTTP/1.1\r\n\r\n)
-      assert parser.execute(req, sorta_safe)
+      assert_equal req, parser.headers(req, sorta_safe)
+      assert_equal path, req['REQUEST_URI']
+      assert_equal '', sorta_safe
+      assert parser.keepalive?
     end
   end
   
@@ -112,28 +250,34 @@ class HttpParserTest < Test::Unit::TestCase
     req = {}
     bad_http = "GET / SsUTF/1.1"
 
-    assert_raises(HttpParserError) { parser.execute(req, bad_http) }
+    assert_raises(HttpParserError) { parser.headers(req, bad_http) }
+
+    # make sure we can recover
     parser.reset
-    assert(parser.execute({}, "GET / HTTP/1.0\r\n\r\n"))
+    req.clear
+    assert_equal req, parser.headers(req, "GET / HTTP/1.0\r\n\r\n")
+    assert ! parser.keepalive?
   end
 
   def test_piecemeal
     parser = HttpParser.new
     req = {}
     http = "GET"
-    assert ! parser.execute(req, http)
-    assert_raises(HttpParserError) { parser.execute(req, http) }
-    assert ! parser.execute(req, http << " / HTTP/1.0")
+    assert_nil parser.headers(req, http)
+    assert_nil parser.headers(req, http)
+    assert_nil parser.headers(req, http << " / HTTP/1.0")
     assert_equal '/', req['REQUEST_PATH']
     assert_equal '/', req['REQUEST_URI']
     assert_equal 'GET', req['REQUEST_METHOD']
-    assert ! parser.execute(req, http << "\r\n")
+    assert_nil parser.headers(req, http << "\r\n")
     assert_equal 'HTTP/1.0', req['HTTP_VERSION']
-    assert ! parser.execute(req, http << "\r")
-    assert parser.execute(req, http << "\n")
-    assert_equal 'HTTP/1.1', req['SERVER_PROTOCOL']
+    assert_nil parser.headers(req, http << "\r")
+    assert_equal req, parser.headers(req, http << "\n")
+    assert_equal 'HTTP/1.0', req['SERVER_PROTOCOL']
     assert_nil req['FRAGMENT']
     assert_equal '', req['QUERY_STRING']
+    assert_equal "", http
+    assert ! parser.keepalive?
   end
 
   # not common, but underscores do appear in practice
@@ -141,7 +285,7 @@ class HttpParserTest < Test::Unit::TestCase
     parser = HttpParser.new
     req = {}
     http = "GET http://under_score.example.com/foo?q=bar HTTP/1.0\r\n\r\n"
-    assert parser.execute(req, http)
+    assert_equal req, parser.headers(req, http)
     assert_equal 'http', req['rack.url_scheme']
     assert_equal '/foo?q=bar', req['REQUEST_URI']
     assert_equal '/foo', req['REQUEST_PATH']
@@ -150,13 +294,54 @@ class HttpParserTest < Test::Unit::TestCase
     assert_equal 'under_score.example.com', req['HTTP_HOST']
     assert_equal 'under_score.example.com', req['SERVER_NAME']
     assert_equal '80', req['SERVER_PORT']
+    assert_equal "", http
+    assert ! parser.keepalive?
+  end
+
+  # some dumb clients add userinfo to the URI because they're stupid
+  def test_absolute_uri_w_user
+    parser = HttpParser.new
+    req = {}
+    http = "GET http://user%20space@example.com/foo?q=bar HTTP/1.0\r\n\r\n"
+    assert_equal req, parser.headers(req, http)
+    assert_equal 'http', req['rack.url_scheme']
+    assert_equal '/foo?q=bar', req['REQUEST_URI']
+    assert_equal '/foo', req['REQUEST_PATH']
+    assert_equal 'q=bar', req['QUERY_STRING']
+
+    assert_equal 'example.com', req['HTTP_HOST']
+    assert_equal 'example.com', req['SERVER_NAME']
+    assert_equal '80', req['SERVER_PORT']
+    assert_equal "", http
+    assert ! parser.keepalive?
+  end
+
+  # since Mongrel supported anything URI.parse supported, we're stuck
+  # supporting everything URI.parse supports
+  def test_absolute_uri_uri_parse
+    "#{URI::REGEXP::PATTERN::UNRESERVED};:&=+$,".split(//).each do |char|
+      parser = HttpParser.new
+      req = {}
+      http = "GET http://#{char}@example.com/ HTTP/1.0\r\n\r\n"
+      assert_equal req, parser.headers(req, http)
+      assert_equal 'http', req['rack.url_scheme']
+      assert_equal '/', req['REQUEST_URI']
+      assert_equal '/', req['REQUEST_PATH']
+      assert_equal '', req['QUERY_STRING']
+
+      assert_equal 'example.com', req['HTTP_HOST']
+      assert_equal 'example.com', req['SERVER_NAME']
+      assert_equal '80', req['SERVER_PORT']
+      assert_equal "", http
+      assert ! parser.keepalive?
+    end
   end
 
   def test_absolute_uri
     parser = HttpParser.new
     req = {}
     http = "GET http://example.com/foo?q=bar HTTP/1.0\r\n\r\n"
-    assert parser.execute(req, http)
+    assert_equal req, parser.headers(req, http)
     assert_equal 'http', req['rack.url_scheme']
     assert_equal '/foo?q=bar', req['REQUEST_URI']
     assert_equal '/foo', req['REQUEST_PATH']
@@ -165,6 +350,8 @@ class HttpParserTest < Test::Unit::TestCase
     assert_equal 'example.com', req['HTTP_HOST']
     assert_equal 'example.com', req['SERVER_NAME']
     assert_equal '80', req['SERVER_PORT']
+    assert_equal "", http
+    assert ! parser.keepalive?
   end
 
   # X-Forwarded-Proto is not in rfc2616, absolute URIs are, however...
@@ -173,7 +360,7 @@ class HttpParserTest < Test::Unit::TestCase
     req = {}
     http = "GET https://example.com/foo?q=bar HTTP/1.1\r\n" \
            "X-Forwarded-Proto: http\r\n\r\n"
-    assert parser.execute(req, http)
+    assert_equal req, parser.headers(req, http)
     assert_equal 'https', req['rack.url_scheme']
     assert_equal '/foo?q=bar', req['REQUEST_URI']
     assert_equal '/foo', req['REQUEST_PATH']
@@ -182,6 +369,8 @@ class HttpParserTest < Test::Unit::TestCase
     assert_equal 'example.com', req['HTTP_HOST']
     assert_equal 'example.com', req['SERVER_NAME']
     assert_equal '443', req['SERVER_PORT']
+    assert_equal "", http
+    assert parser.keepalive?
   end
 
   # Host: header should be ignored for absolute URIs
@@ -190,7 +379,7 @@ class HttpParserTest < Test::Unit::TestCase
     req = {}
     http = "GET http://example.com:8080/foo?q=bar HTTP/1.2\r\n" \
            "Host: bad.example.com\r\n\r\n"
-    assert parser.execute(req, http)
+    assert_equal req, parser.headers(req, http)
     assert_equal 'http', req['rack.url_scheme']
     assert_equal '/foo?q=bar', req['REQUEST_URI']
     assert_equal '/foo', req['REQUEST_PATH']
@@ -199,6 +388,8 @@ class HttpParserTest < Test::Unit::TestCase
     assert_equal 'example.com:8080', req['HTTP_HOST']
     assert_equal 'example.com', req['SERVER_NAME']
     assert_equal '8080', req['SERVER_PORT']
+    assert_equal "", http
+    assert ! parser.keepalive? # TODO: read HTTP/1.2 when it's final
   end
 
   def test_absolute_uri_with_empty_port
@@ -206,7 +397,7 @@ class HttpParserTest < Test::Unit::TestCase
     req = {}
     http = "GET https://example.com:/foo?q=bar HTTP/1.1\r\n" \
            "Host: bad.example.com\r\n\r\n"
-    assert parser.execute(req, http)
+    assert_equal req, parser.headers(req, http)
     assert_equal 'https', req['rack.url_scheme']
     assert_equal '/foo?q=bar', req['REQUEST_URI']
     assert_equal '/foo', req['REQUEST_PATH']
@@ -215,32 +406,55 @@ class HttpParserTest < Test::Unit::TestCase
     assert_equal 'example.com:', req['HTTP_HOST']
     assert_equal 'example.com', req['SERVER_NAME']
     assert_equal '443', req['SERVER_PORT']
+    assert_equal "", http
+    assert parser.keepalive? # TODO: read HTTP/1.2 when it's final
   end
 
   def test_put_body_oneshot
     parser = HttpParser.new
     req = {}
     http = "PUT / HTTP/1.0\r\nContent-Length: 5\r\n\r\nabcde"
-    assert parser.execute(req, http)
+    assert_equal req, parser.headers(req, http)
     assert_equal '/', req['REQUEST_PATH']
     assert_equal '/', req['REQUEST_URI']
     assert_equal 'PUT', req['REQUEST_METHOD']
     assert_equal 'HTTP/1.0', req['HTTP_VERSION']
-    assert_equal 'HTTP/1.1', req['SERVER_PROTOCOL']
-    assert_equal "abcde", req[:http_body]
+    assert_equal 'HTTP/1.0', req['SERVER_PROTOCOL']
+    assert_equal "abcde", http
+    assert ! parser.keepalive? # TODO: read HTTP/1.2 when it's final
   end
 
   def test_put_body_later
     parser = HttpParser.new
     req = {}
     http = "PUT /l HTTP/1.0\r\nContent-Length: 5\r\n\r\n"
-    assert parser.execute(req, http)
+    assert_equal req, parser.headers(req, http)
     assert_equal '/l', req['REQUEST_PATH']
     assert_equal '/l', req['REQUEST_URI']
     assert_equal 'PUT', req['REQUEST_METHOD']
     assert_equal 'HTTP/1.0', req['HTTP_VERSION']
-    assert_equal 'HTTP/1.1', req['SERVER_PROTOCOL']
-    assert_equal "", req[:http_body]
+    assert_equal 'HTTP/1.0', req['SERVER_PROTOCOL']
+    assert_equal "", http
+    assert ! parser.keepalive? # TODO: read HTTP/1.2 when it's final
+  end
+
+  def test_unknown_methods
+    %w(GETT HEADR XGET XHEAD).each { |m|
+      parser = HttpParser.new
+      req = {}
+      s = "#{m} /forums/1/topics/2375?page=1#posts-17408 HTTP/1.1\r\n\r\n"
+      ok = false
+      assert_nothing_raised do
+        ok = parser.headers(req, s)
+      end
+      assert ok
+      assert_equal '/forums/1/topics/2375?page=1', req['REQUEST_URI']
+      assert_equal 'posts-17408', req['FRAGMENT']
+      assert_equal 'page=1', req['QUERY_STRING']
+      assert_equal "", s
+      assert_equal m, req['REQUEST_METHOD']
+      assert ! parser.keepalive? # TODO: read HTTP/1.2 when it's final
+    }
   end
 
   def test_fragment_in_uri
@@ -249,12 +463,14 @@ class HttpParserTest < Test::Unit::TestCase
     get = "GET /forums/1/topics/2375?page=1#posts-17408 HTTP/1.1\r\n\r\n"
     ok = false
     assert_nothing_raised do
-      ok = parser.execute(req, get)
+      ok = parser.headers(req, get)
     end
     assert ok
     assert_equal '/forums/1/topics/2375?page=1', req['REQUEST_URI']
     assert_equal 'posts-17408', req['FRAGMENT']
     assert_equal 'page=1', req['QUERY_STRING']
+    assert_equal '', get
+    assert parser.keepalive?
   end
 
   # lame random garbage maker
@@ -279,7 +495,7 @@ class HttpParserTest < Test::Unit::TestCase
     10.times do |c|
       get = "GET /#{rand_data(10,120)} HTTP/1.1\r\nX-#{rand_data(1024, 1024+(c*1024))}: Test\r\n\r\n"
       assert_raises Unicorn::HttpParserError do
-        parser.execute({}, get)
+        parser.headers({}, get)
         parser.reset
       end
     end
@@ -288,7 +504,7 @@ class HttpParserTest < Test::Unit::TestCase
     10.times do |c|
       get = "GET /#{rand_data(10,120)} HTTP/1.1\r\nX-Test: #{rand_data(1024, 1024+(c*1024), false)}\r\n\r\n"
       assert_raises Unicorn::HttpParserError do
-        parser.execute({}, get)
+        parser.headers({}, get)
         parser.reset
       end
     end
@@ -297,7 +513,7 @@ class HttpParserTest < Test::Unit::TestCase
     get = "GET /#{rand_data(10,120)} HTTP/1.1\r\n"
     get << "X-Test: test\r\n" * (80 * 1024)
     assert_raises Unicorn::HttpParserError do
-      parser.execute({}, get)
+      parser.headers({}, get)
       parser.reset
     end
 
@@ -305,7 +521,7 @@ class HttpParserTest < Test::Unit::TestCase
     10.times do |c|
       get = "GET #{rand_data(1024, 1024+(c*1024), false)} #{rand_data(1024, 1024+(c*1024), false)}\r\n\r\n"
       assert_raises Unicorn::HttpParserError do
-        parser.execute({}, get)
+        parser.headers({}, get)
         parser.reset
       end
     end
diff --git a/test/unit/test_http_parser_ng.rb b/test/unit/test_http_parser_ng.rb
new file mode 100644
index 0000000..bb61e7f
--- /dev/null
+++ b/test/unit/test_http_parser_ng.rb
@@ -0,0 +1,420 @@
+# -*- encoding: binary -*-
+
+# coding: binary
+require 'test/test_helper'
+require 'digest/md5'
+
+include Unicorn
+
+class HttpParserNgTest < Test::Unit::TestCase
+
+  def setup
+    @parser = HttpParser.new
+  end
+
+  def test_identity_byte_headers
+    req = {}
+    str = "PUT / HTTP/1.1\r\n"
+    str << "Content-Length: 123\r\n"
+    str << "\r"
+    hdr = ""
+    str.each_byte { |byte|
+      assert_nil @parser.headers(req, hdr << byte.chr)
+    }
+    hdr << "\n"
+    assert_equal req.object_id, @parser.headers(req, hdr).object_id
+    assert_equal '123', req['CONTENT_LENGTH']
+    assert_equal 0, hdr.size
+    assert ! @parser.keepalive?
+    assert @parser.headers?
+    assert_equal 123, @parser.content_length
+  end
+
+  def test_identity_step_headers
+    req = {}
+    str = "PUT / HTTP/1.1\r\n"
+    assert ! @parser.headers(req, str)
+    str << "Content-Length: 123\r\n"
+    assert ! @parser.headers(req, str)
+    str << "\r\n"
+    assert_equal req.object_id, @parser.headers(req, str).object_id
+    assert_equal '123', req['CONTENT_LENGTH']
+    assert_equal 0, str.size
+    assert ! @parser.keepalive?
+    assert @parser.headers?
+  end
+
+  def test_identity_oneshot_header
+    req = {}
+    str = "PUT / HTTP/1.1\r\nContent-Length: 123\r\n\r\n"
+    assert_equal req.object_id, @parser.headers(req, str).object_id
+    assert_equal '123', req['CONTENT_LENGTH']
+    assert_equal 0, str.size
+    assert ! @parser.keepalive?
+  end
+
+  def test_identity_oneshot_header_with_body
+    body = ('a' * 123).freeze
+    req = {}
+    str = "PUT / HTTP/1.1\r\n" \
+          "Content-Length: #{body.length}\r\n" \
+          "\r\n#{body}"
+    assert_equal req.object_id, @parser.headers(req, str).object_id
+    assert_equal '123', req['CONTENT_LENGTH']
+    assert_equal 123, str.size
+    assert_equal body, str
+    tmp = ''
+    assert_nil @parser.filter_body(tmp, str)
+    assert_equal 0, str.size
+    assert_equal tmp, body
+    assert_equal "", @parser.filter_body(tmp, str)
+    assert ! @parser.keepalive?
+  end
+
+  def test_identity_oneshot_header_with_body_partial
+    str = "PUT / HTTP/1.1\r\nContent-Length: 123\r\n\r\na"
+    assert_equal Hash, @parser.headers({}, str).class
+    assert_equal 1, str.size
+    assert_equal 'a', str
+    tmp = ''
+    assert_nil @parser.filter_body(tmp, str)
+    assert_equal "", str
+    assert_equal "a", tmp
+    str << ' ' * 122
+    rv = @parser.filter_body(tmp, str)
+    assert_equal 122, tmp.size
+    assert_nil rv
+    assert_equal "", str
+    assert_equal str.object_id, @parser.filter_body(tmp, str).object_id
+    assert ! @parser.keepalive?
+  end
+
+  def test_identity_oneshot_header_with_body_slop
+    str = "PUT / HTTP/1.1\r\nContent-Length: 1\r\n\r\naG"
+    assert_equal Hash, @parser.headers({}, str).class
+    assert_equal 2, str.size
+    assert_equal 'aG', str
+    tmp = ''
+    assert_nil @parser.filter_body(tmp, str)
+    assert_equal "G", str
+    assert_equal "G", @parser.filter_body(tmp, str)
+    assert_equal 1, tmp.size
+    assert_equal "a", tmp
+    assert ! @parser.keepalive?
+  end
+
+  def test_chunked
+    str = "PUT / HTTP/1.1\r\ntransfer-Encoding: chunked\r\n\r\n"
+    req = {}
+    assert_equal req, @parser.headers(req, str)
+    assert_equal 0, str.size
+    tmp = ""
+    assert_nil @parser.filter_body(tmp, "6")
+    assert_equal 0, tmp.size
+    assert_nil @parser.filter_body(tmp, rv = "\r\n")
+    assert_equal 0, rv.size
+    assert_equal 0, tmp.size
+    tmp = ""
+    assert_nil @parser.filter_body(tmp, "..")
+    assert_equal "..", tmp
+    assert_nil @parser.filter_body(tmp, "abcd\r\n0\r\n")
+    assert_equal "abcd", tmp
+    rv = "PUT"
+    assert_equal rv.object_id, @parser.filter_body(tmp, rv).object_id
+    assert_equal "PUT", rv
+    assert ! @parser.keepalive?
+  end
+
+  def test_two_chunks
+    str = "PUT / HTTP/1.1\r\ntransfer-Encoding: chunked\r\n\r\n"
+    req = {}
+    assert_equal req, @parser.headers(req, str)
+    assert_equal 0, str.size
+    tmp = ""
+    assert_nil @parser.filter_body(tmp, "6")
+    assert_equal 0, tmp.size
+    assert_nil @parser.filter_body(tmp, rv = "\r\n")
+    assert_equal "", rv
+    assert_equal 0, tmp.size
+    tmp = ""
+    assert_nil @parser.filter_body(tmp, "..")
+    assert_equal 2, tmp.size
+    assert_equal "..", tmp
+    assert_nil @parser.filter_body(tmp, "abcd\r\n1")
+    assert_equal "abcd", tmp
+    assert_nil @parser.filter_body(tmp, "\r")
+    assert_equal "", tmp
+    assert_nil @parser.filter_body(tmp, "\n")
+    assert_equal "", tmp
+    assert_nil @parser.filter_body(tmp, "z")
+    assert_equal "z", tmp
+    assert_nil @parser.filter_body(tmp, "\r\n")
+    assert_nil @parser.filter_body(tmp, "0")
+    assert_nil @parser.filter_body(tmp, "\r")
+    rv = @parser.filter_body(tmp, buf = "\nGET")
+    assert_equal "GET", rv
+    assert_equal buf.object_id, rv.object_id
+    assert ! @parser.keepalive?
+  end
+
+  def test_big_chunk
+    str = "PUT / HTTP/1.1\r\ntransfer-Encoding: chunked\r\n\r\n" \
+          "4000\r\nabcd"
+    req = {}
+    assert_equal req, @parser.headers(req, str)
+    tmp = ''
+    assert_nil @parser.filter_body(tmp, str)
+    assert_equal '', str
+    str = ' ' * 16300
+    assert_nil @parser.filter_body(tmp, str)
+    assert_equal '', str
+    str = ' ' * 80
+    assert_nil @parser.filter_body(tmp, str)
+    assert_equal '', str
+    assert ! @parser.body_eof?
+    assert_equal "", @parser.filter_body(tmp, "\r\n0\r\n")
+    assert @parser.body_eof?
+    assert ! @parser.keepalive?
+  end
+
+  def test_two_chunks_oneshot
+    str = "PUT / HTTP/1.1\r\ntransfer-Encoding: chunked\r\n\r\n" \
+          "1\r\na\r\n2\r\n..\r\n0\r\n"
+    req = {}
+    assert_equal req, @parser.headers(req, str)
+    tmp = ''
+    assert_nil @parser.filter_body(tmp, str)
+    assert_equal 'a..', tmp
+    rv = @parser.filter_body(tmp, str)
+    assert_equal rv.object_id, str.object_id
+    assert ! @parser.keepalive?
+  end
+
+  def test_chunks_bytewise
+    chunked = "10\r\nabcdefghijklmnop\r\n11\r\n0123456789abcdefg\r\n0\r\n"
+    str = "PUT / HTTP/1.1\r\ntransfer-Encoding: chunked\r\n\r\n#{chunked}"
+    req = {}
+    assert_equal req, @parser.headers(req, str)
+    assert_equal chunked, str
+    tmp = ''
+    buf = ''
+    body = ''
+    str = str[0..-2]
+    str.each_byte { |byte|
+      assert_nil @parser.filter_body(tmp, buf << byte.chr)
+      body << tmp
+    }
+    assert_equal 'abcdefghijklmnop0123456789abcdefg', body
+    rv = @parser.filter_body(tmp, buf << "\n")
+    assert_equal rv.object_id, buf.object_id
+    assert ! @parser.keepalive?
+  end
+
+  def test_trailers
+    str = "PUT / HTTP/1.1\r\n" \
+          "Trailer: Content-MD5\r\n" \
+          "transfer-Encoding: chunked\r\n\r\n" \
+          "1\r\na\r\n2\r\n..\r\n0\r\n"
+    req = {}
+    assert_equal req, @parser.headers(req, str)
+    assert_equal 'Content-MD5', req['HTTP_TRAILER']
+    assert_nil req['HTTP_CONTENT_MD5']
+    tmp = ''
+    assert_nil @parser.filter_body(tmp, str)
+    assert_equal 'a..', tmp
+    md5_b64 = [ Digest::MD5.digest(tmp) ].pack('m').strip.freeze
+    rv = @parser.filter_body(tmp, str)
+    assert_equal rv.object_id, str.object_id
+    assert_equal '', str
+    md5_hdr = "Content-MD5: #{md5_b64}\r\n".freeze
+    str << md5_hdr
+    assert_nil @parser.trailers(req, str)
+    assert_equal md5_b64, req['HTTP_CONTENT_MD5']
+    assert_equal "CONTENT_MD5: #{md5_b64}\r\n", str
+    assert_nil @parser.trailers(req, str << "\r")
+    assert_equal req, @parser.trailers(req, str << "\nGET / ")
+    assert_equal "GET / ", str
+    assert ! @parser.keepalive?
+  end
+
+  def test_trailers_slowly
+    str = "PUT / HTTP/1.1\r\n" \
+          "Trailer: Content-MD5\r\n" \
+          "transfer-Encoding: chunked\r\n\r\n" \
+          "1\r\na\r\n2\r\n..\r\n0\r\n"
+    req = {}
+    assert_equal req, @parser.headers(req, str)
+    assert_equal 'Content-MD5', req['HTTP_TRAILER']
+    assert_nil req['HTTP_CONTENT_MD5']
+    tmp = ''
+    assert_nil @parser.filter_body(tmp, str)
+    assert_equal 'a..', tmp
+    md5_b64 = [ Digest::MD5.digest(tmp) ].pack('m').strip.freeze
+    rv = @parser.filter_body(tmp, str)
+    assert_equal rv.object_id, str.object_id
+    assert_equal '', str
+    assert_nil @parser.trailers(req, str)
+    md5_hdr = "Content-MD5: #{md5_b64}\r\n".freeze
+    md5_hdr.each_byte { |byte|
+      str << byte.chr
+      assert_nil @parser.trailers(req, str)
+    }
+    assert_equal md5_b64, req['HTTP_CONTENT_MD5']
+    assert_equal "CONTENT_MD5: #{md5_b64}\r\n", str
+    assert_nil @parser.trailers(req, str << "\r")
+    assert_equal req, @parser.trailers(req, str << "\n")
+  end
+
+  def test_max_chunk
+    str = "PUT / HTTP/1.1\r\n" \
+          "transfer-Encoding: chunked\r\n\r\n" \
+          "#{HttpParser::CHUNK_MAX.to_s(16)}\r\na\r\n2\r\n..\r\n0\r\n"
+    req = {}
+    assert_equal req, @parser.headers(req, str)
+    assert_nil @parser.content_length
+    assert_nothing_raised { @parser.filter_body('', str) }
+    assert ! @parser.keepalive?
+  end
+
+  def test_max_body
+    n = HttpParser::LENGTH_MAX
+    str = "PUT / HTTP/1.1\r\nContent-Length: #{n}\r\n\r\n"
+    req = {}
+    assert_nothing_raised { @parser.headers(req, str) }
+    assert_equal n, req['CONTENT_LENGTH'].to_i
+    assert ! @parser.keepalive?
+  end
+
+  def test_overflow_chunk
+    n = HttpParser::CHUNK_MAX + 1
+    str = "PUT / HTTP/1.1\r\n" \
+          "transfer-Encoding: chunked\r\n\r\n" \
+          "#{n.to_s(16)}\r\na\r\n2\r\n..\r\n0\r\n"
+    req = {}
+    assert_equal req, @parser.headers(req, str)
+    assert_nil @parser.content_length
+    assert_raise(HttpParserError) { @parser.filter_body('', str) }
+    assert ! @parser.keepalive?
+  end
+
+  def test_overflow_content_length
+    n = HttpParser::LENGTH_MAX + 1
+    str = "PUT / HTTP/1.1\r\nContent-Length: #{n}\r\n\r\n"
+    assert_raise(HttpParserError) { @parser.headers({}, str) }
+    assert ! @parser.keepalive?
+  end
+
+  def test_bad_chunk
+    str = "PUT / HTTP/1.1\r\n" \
+          "transfer-Encoding: chunked\r\n\r\n" \
+          "#zzz\r\na\r\n2\r\n..\r\n0\r\n"
+    req = {}
+    assert_equal req, @parser.headers(req, str)
+    assert_nil @parser.content_length
+    assert_raise(HttpParserError) { @parser.filter_body('', str) }
+    assert ! @parser.keepalive?
+  end
+
+  def test_bad_content_length
+    str = "PUT / HTTP/1.1\r\nContent-Length: 7ff\r\n\r\n"
+    assert_raise(HttpParserError) { @parser.headers({}, str) }
+    assert ! @parser.keepalive?
+  end
+
+  def test_bad_trailers
+    str = "PUT / HTTP/1.1\r\n" \
+          "Trailer: Transfer-Encoding\r\n" \
+          "transfer-Encoding: chunked\r\n\r\n" \
+          "1\r\na\r\n2\r\n..\r\n0\r\n"
+    req = {}
+    assert_equal req, @parser.headers(req, str)
+    assert_equal 'Transfer-Encoding', req['HTTP_TRAILER']
+    tmp = ''
+    assert_nil @parser.filter_body(tmp, str)
+    assert_equal 'a..', tmp
+    assert_equal '', str
+    str << "Transfer-Encoding: identity\r\n\r\n"
+    assert_raise(HttpParserError) { @parser.trailers(req, str) }
+    assert ! @parser.keepalive?
+  end
+
+  def test_repeat_headers
+    str = "PUT / HTTP/1.1\r\n" \
+          "Trailer: Content-MD5\r\n" \
+          "Trailer: Content-SHA1\r\n" \
+          "transfer-Encoding: chunked\r\n\r\n" \
+          "1\r\na\r\n2\r\n..\r\n0\r\n"
+    req = {}
+    assert_equal req, @parser.headers(req, str)
+    assert_equal 'Content-MD5,Content-SHA1', req['HTTP_TRAILER']
+    assert ! @parser.keepalive?
+  end
+
+  def test_parse_simple_request
+    parser = HttpParser.new
+    req = {}
+    http = "GET /read-rfc1945-if-you-dont-believe-me\r\n"
+    assert_equal req, parser.headers(req, http)
+    assert_equal '', http
+    expect = {
+      "SERVER_NAME"=>"localhost",
+      "rack.url_scheme"=>"http",
+      "REQUEST_PATH"=>"/read-rfc1945-if-you-dont-believe-me",
+      "PATH_INFO"=>"/read-rfc1945-if-you-dont-believe-me",
+      "REQUEST_URI"=>"/read-rfc1945-if-you-dont-believe-me",
+      "SERVER_PORT"=>"80",
+      "SERVER_PROTOCOL"=>"HTTP/0.9",
+      "REQUEST_METHOD"=>"GET",
+      "QUERY_STRING"=>""
+    }
+    assert_equal expect, req
+    assert ! parser.headers?
+  end
+
+  def test_path_info_semicolon
+    qs = "QUERY_STRING"
+    pi = "PATH_INFO"
+    req = {}
+    str = "GET %s HTTP/1.1\r\nHost: example.com\r\n\r\n"
+    {
+      "/1;a=b?c=d&e=f" => { qs => "c=d&e=f", pi => "/1;a=b" },
+      "/1?c=d&e=f" => { qs => "c=d&e=f", pi => "/1" },
+      "/1;a=b" => { qs => "", pi => "/1;a=b" },
+      "/1;a=b?" => { qs => "", pi => "/1;a=b" },
+      "/1?a=b;c=d&e=f" => { qs => "a=b;c=d&e=f", pi => "/1" },
+      "*" => { qs => "", pi => "" },
+    }.each do |uri,expect|
+      assert_equal req, @parser.headers(req.clear, str % [ uri ])
+      @parser.reset
+      assert_equal uri, req["REQUEST_URI"], "REQUEST_URI mismatch"
+      assert_equal expect[qs], req[qs], "#{qs} mismatch"
+      assert_equal expect[pi], req[pi], "#{pi} mismatch"
+      next if uri == "*"
+      uri = URI.parse("http://example.com#{uri}")
+      assert_equal uri.query.to_s, req[qs], "#{qs} mismatch URI.parse disagrees"
+      assert_equal uri.path, req[pi], "#{pi} mismatch URI.parse disagrees"
+    end
+  end
+
+  def test_path_info_semicolon_absolute
+    qs = "QUERY_STRING"
+    pi = "PATH_INFO"
+    req = {}
+    str = "GET http://example.com%s HTTP/1.1\r\nHost: www.example.com\r\n\r\n"
+    {
+      "/1;a=b?c=d&e=f" => { qs => "c=d&e=f", pi => "/1;a=b" },
+      "/1?c=d&e=f" => { qs => "c=d&e=f", pi => "/1" },
+      "/1;a=b" => { qs => "", pi => "/1;a=b" },
+      "/1;a=b?" => { qs => "", pi => "/1;a=b" },
+      "/1?a=b;c=d&e=f" => { qs => "a=b;c=d&e=f", pi => "/1" },
+    }.each do |uri,expect|
+      assert_equal req, @parser.headers(req.clear, str % [ uri ])
+      @parser.reset
+      assert_equal uri, req["REQUEST_URI"], "REQUEST_URI mismatch"
+      assert_equal "example.com", req["HTTP_HOST"], "Host: mismatch"
+      assert_equal expect[qs], req[qs], "#{qs} mismatch"
+      assert_equal expect[pi], req[pi], "#{pi} mismatch"
+    end
+  end
+
+end
diff --git a/test/unit/test_request.rb b/test/unit/test_request.rb
index 0bfff7d..1896300 100644
--- a/test/unit/test_request.rb
+++ b/test/unit/test_request.rb
@@ -1,14 +1,9 @@
+# -*- encoding: binary -*-
+
 # Copyright (c) 2009 Eric Wong
 # You can redistribute it and/or modify it under the same terms as Ruby.
 
 require 'test/test_helper'
-begin
-  require 'rack'
-  require 'rack/lint'
-rescue LoadError
-  warn "Unable to load rack, skipping test"
-  exit 0
-end
 
 include Unicorn
 
@@ -16,10 +11,11 @@ class RequestTest < Test::Unit::TestCase
 
   class MockRequest < StringIO
     alias_method :readpartial, :sysread
+    alias_method :read_nonblock, :sysread
   end
 
   def setup
-    @request = HttpRequest.new(Logger.new($stderr))
+    @request = HttpRequest.new
     @app = lambda do |env|
       [ 200, { 'Content-Length' => '0', 'Content-Type' => 'text/plain' }, [] ]
     end
@@ -119,6 +115,31 @@ class RequestTest < Test::Unit::TestCase
     assert_nothing_raised { res = @lint.call(env) }
   end
 
+  def test_no_content_stringio
+    client = MockRequest.new("GET / HTTP/1.1\r\nHost: foo\r\n\r\n")
+    res = env = nil
+    assert_nothing_raised { env = @request.read(client) }
+    assert_equal StringIO, env['rack.input'].class
+  end
+
+  def test_zero_content_stringio
+    client = MockRequest.new("PUT / HTTP/1.1\r\n" \
+                             "Content-Length: 0\r\n" \
+                             "Host: foo\r\n\r\n")
+    res = env = nil
+    assert_nothing_raised { env = @request.read(client) }
+    assert_equal StringIO, env['rack.input'].class
+  end
+
+  def test_real_content_not_stringio
+    client = MockRequest.new("PUT / HTTP/1.1\r\n" \
+                             "Content-Length: 1\r\n" \
+                             "Host: foo\r\n\r\n")
+    res = env = nil
+    assert_nothing_raised { env = @request.read(client) }
+    assert_equal Unicorn::TeeInput, env['rack.input'].class
+  end
+
   def test_rack_lint_put
     client = MockRequest.new(
       "PUT / HTTP/1.1\r\n" \
@@ -149,7 +170,11 @@ class RequestTest < Test::Unit::TestCase
     assert_nothing_raised { env = @request.read(client) }
     assert ! env.include?(:http_body)
     assert_equal length, env['rack.input'].size
-    count.times { assert_equal buf, env['rack.input'].read(bs) }
+    count.times {
+      tmp = env['rack.input'].read(bs)
+      tmp << env['rack.input'].read(bs - tmp.size) if tmp.size != bs
+      assert_equal buf, tmp
+    }
     assert_nil env['rack.input'].read(bs)
     assert_nothing_raised { env['rack.input'].rewind }
     assert_nothing_raised { res = @lint.call(env) }
diff --git a/test/unit/test_response.rb b/test/unit/test_response.rb
index 66c2b54..f9eda8e 100644
--- a/test/unit/test_response.rb
+++ b/test/unit/test_response.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 # Copyright (c) 2005 Zed A. Shaw
 # You can redistribute it and/or modify it under the same terms as Ruby.
 #
@@ -94,4 +96,15 @@ class ResponseTest < Test::Unit::TestCase
     assert_match(expect_body, out.string.split(/\r\n/).last)
   end
 
+  def test_unknown_status_pass_through
+    out = StringIO.new
+    HttpResponse.write(out,["666 I AM THE BEAST", {}, [] ])
+    assert out.closed?
+    headers = out.string.split(/\r\n\r\n/).first.split(/\r\n/)
+    assert %r{\AHTTP/\d\.\d 666 I AM THE BEAST\z}.match(headers[0])
+    status = headers.grep(/\AStatus:/i).first
+    assert status
+    assert_equal "Status: 666 I AM THE BEAST", status
+  end
+
 end
diff --git a/test/unit/test_server.rb b/test/unit/test_server.rb
index 742b240..00705d0 100644
--- a/test/unit/test_server.rb
+++ b/test/unit/test_server.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 # Copyright (c) 2005 Zed A. Shaw
 # You can redistribute it and/or modify it under the same terms as Ruby.
 #
@@ -10,9 +12,13 @@ include Unicorn
 
 class TestHandler
 
-  def call(env)
-  #   response.socket.write("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n\r\nhello!\n")
+  def call(env)
+    while env['rack.input'].read(4096)
+    end
     [200, { 'Content-Type' => 'text/plain' }, ['hello!\n']]
+    rescue Unicorn::ClientShutdown, Unicorn::HttpParserError => e
+      $stderr.syswrite("#{e.class}: #{e.message} #{e.backtrace.empty?}\n")
+      raise e
    end
 end
 
@@ -31,6 +37,8 @@ class WebServerTest < Test::Unit::TestCase
 
   def teardown
     redirect_test_io do
+      wait_workers_ready("test_stderr.#$$.log", 1)
+      File.truncate("test_stderr.#$$.log", 0)
       @server.stop(true)
     end
   end
@@ -51,8 +59,10 @@ class WebServerTest < Test::Unit::TestCase
     end
     results = hit(["http://localhost:#@port/"])
     worker_pid = results[0].to_i
+    assert worker_pid != 0
     tmp.sysseek(0)
     loader_pid = tmp.sysread(4096).to_i
+    assert loader_pid != 0
     assert_equal worker_pid, loader_pid
     teardown
 
@@ -63,6 +73,7 @@ class WebServerTest < Test::Unit::TestCase
     end
     results = hit(["http://localhost:#@port/"])
     worker_pid = results[0].to_i
+    assert worker_pid != 0
     tmp.sysseek(0)
     loader_pid = tmp.sysread(4096).to_i
     assert_equal $$, loader_pid
@@ -94,6 +105,92 @@ class WebServerTest < Test::Unit::TestCase
     assert_equal 'hello!\n', results[0], "Handler didn't really run"
   end
 
+  def test_client_shutdown_writes
+    sock = nil
+    buf = nil
+    bs = 15609315 * rand
+    assert_nothing_raised do
+      sock = TCPSocket.new('127.0.0.1', @port)
+      sock.syswrite("PUT /hello HTTP/1.1\r\n")
+      sock.syswrite("Host: example.com\r\n")
+      sock.syswrite("Transfer-Encoding: chunked\r\n")
+      sock.syswrite("Trailer: X-Foo\r\n")
+      sock.syswrite("\r\n")
+      sock.syswrite("%x\r\n" % [ bs ])
+      sock.syswrite("F" * bs)
+      sock.syswrite("\r\n0\r\nX-")
+      "Foo: bar\r\n\r\n".each_byte do |x|
+        sock.syswrite x.chr
+        sleep 0.05
+      end
+      # we wrote the entire request before shutting down, server should
+      # continue to process our request and never hit EOFError on our sock
+      sock.shutdown(Socket::SHUT_WR)
+      buf = sock.read
+    end
+    assert_equal 'hello!\n', buf.split(/\r\n\r\n/).last
+    next_client = Net::HTTP.get(URI.parse("http://127.0.0.1:#@port/"))
+    assert_equal 'hello!\n', next_client
+    lines = File.readlines("test_stderr.#$$.log")
+    assert lines.grep(/^Unicorn::ClientShutdown: /).empty?
+    assert_nothing_raised { sock.close }
+  end
+
+  def test_client_shutdown_write_truncates
+    sock = nil
+    buf = nil
+    bs = 15609315 * rand
+    assert_nothing_raised do
+      sock = TCPSocket.new('127.0.0.1', @port)
+      sock.syswrite("PUT /hello HTTP/1.1\r\n")
+      sock.syswrite("Host: example.com\r\n")
+      sock.syswrite("Transfer-Encoding: chunked\r\n")
+      sock.syswrite("Trailer: X-Foo\r\n")
+      sock.syswrite("\r\n")
+      sock.syswrite("%x\r\n" % [ bs ])
+      sock.syswrite("F" * (bs / 2.0))
+
+      # shutdown prematurely, this will force the server to abort
+      # processing on us even during app dispatch
+      sock.shutdown(Socket::SHUT_WR)
+      IO.select([sock], nil, nil, 60) or raise "Timed out"
+      buf = sock.read
+    end
+    assert_equal "", buf
+    next_client = Net::HTTP.get(URI.parse("http://127.0.0.1:#@port/"))
+    assert_equal 'hello!\n', next_client
+    lines = File.readlines("test_stderr.#$$.log")
+    lines = lines.grep(/^Unicorn::ClientShutdown: bytes_read=\d+/)
+    assert_equal 1, lines.size
+    assert_match %r{\AUnicorn::ClientShutdown: bytes_read=\d+ true$}, lines[0]
+    assert_nothing_raised { sock.close }
+  end
+
+  def test_client_malformed_body
+    sock = nil
+    buf = nil
+    bs = 15653984
+    assert_nothing_raised do
+      sock = TCPSocket.new('127.0.0.1', @port)
+      sock.syswrite("PUT /hello HTTP/1.1\r\n")
+      sock.syswrite("Host: example.com\r\n")
+      sock.syswrite("Transfer-Encoding: chunked\r\n")
+      sock.syswrite("Trailer: X-Foo\r\n")
+      sock.syswrite("\r\n")
+      sock.syswrite("%x\r\n" % [ bs ])
+      sock.syswrite("F" * bs)
+    end
+    begin
+      File.open("/dev/urandom", "rb") { |fp| sock.syswrite(fp.sysread(16384)) }
+    rescue
+    end
+    assert_nothing_raised { sock.close }
+    next_client = Net::HTTP.get(URI.parse("http://127.0.0.1:#@port/"))
+    assert_equal 'hello!\n', next_client
+    lines = File.readlines("test_stderr.#$$.log")
+    lines = lines.grep(/^Unicorn::HttpParserError: .* true$/)
+    assert_equal 1, lines.size
+  end
 
   def do_test(string, chunk, close_after=nil, shutdown_delay=0)
     # Do not use instance variables here, because it needs to be thread safe
@@ -131,6 +228,16 @@ class WebServerTest < Test::Unit::TestCase
     end
   end
 
+  def test_logger_set
+    assert_equal @server.logger, Unicorn::HttpRequest::DEFAULTS["rack.logger"]
+  end
+
+  def test_logger_changed
+    tmp = Logger.new($stdout)
+    @server.logger = tmp
+    assert_equal tmp, Unicorn::HttpRequest::DEFAULTS["rack.logger"]
+  end
+
   def test_bad_client_400
     sock = nil
     assert_nothing_raised do
@@ -141,6 +248,16 @@ class WebServerTest < Test::Unit::TestCase
     assert_nothing_raised { sock.close }
   end
 
+  def test_http_0_9
+    sock = nil
+    assert_nothing_raised do
+      sock = TCPSocket.new('127.0.0.1', @port)
+      sock.syswrite("GET /hello\r\n")
+    end
+    assert_match 'hello!\n', sock.sysread(4096)
+    assert_nothing_raised { sock.close }
+  end
+
   def test_header_is_too_long
     redirect_test_io do
       long = "GET /test HTTP/1.1\r\n" + ("X-Big: stuff\r\n" * 15000) + "\r\n"
@@ -152,9 +269,18 @@ class WebServerTest < Test::Unit::TestCase
 
   def test_file_streamed_request
     body = "a" * (Unicorn::Const::MAX_BODY * 2)
-    long = "GET /test HTTP/1.1\r\nContent-length: #{body.length}\r\n\r\n" + body
+    long = "PUT /test HTTP/1.1\r\nContent-length: #{body.length}\r\n\r\n" + body
     do_test(long, Unicorn::Const::CHUNK_SIZE * 2 -400)
   end
 
+  def test_file_streamed_request_bad_body
+    body = "a" * (Unicorn::Const::MAX_BODY * 2)
+    long = "GET /test HTTP/1.1\r\nContent-ength: #{body.length}\r\n\r\n" + body
+    assert_raises(EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,
+                  Errno::EBADF) {
+      do_test(long, Unicorn::Const::CHUNK_SIZE * 2 -400)
+    }
+  end
+
 end
 
diff --git a/test/unit/test_signals.rb b/test/unit/test_signals.rb
index ef66ed6..eb2af0b 100644
--- a/test/unit/test_signals.rb
+++ b/test/unit/test_signals.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 # Copyright (c) 2009 Eric Wong
 # You can redistribute it and/or modify it under the same terms as Ruby.
 #
@@ -24,14 +26,15 @@ class SignalsTest < Test::Unit::TestCase
     @bs = 1 * 1024 * 1024
     @count = 100
     @port = unused_port
-    tmp = @tmp = Tempfile.new('unicorn.sock')
+    @sock = Tempfile.new('unicorn.sock')
+    @tmp = Tempfile.new('unicorn.write')
+    @tmp.sync = true
+    File.unlink(@sock.path)
     File.unlink(@tmp.path)
-    n = 0
-    tmp.chmod(0)
     @server_opts = {
-      :listeners => [ "127.0.0.1:#@port", @tmp.path ],
+      :listeners => [ "127.0.0.1:#@port", @sock.path ],
       :after_fork => lambda { |server,worker|
-        trap(:HUP) { tmp.chmod(n += 1) }
+        trap(:HUP) { @tmp.syswrite('.') }
       },
     }
     @server = nil
@@ -53,8 +56,10 @@ class SignalsTest < Test::Unit::TestCase
       buf =~ /\bX-Pid: (\d+)\b/ or raise Exception
       child = $1.to_i
       wait_master_ready("test_stderr.#{pid}.log")
+      wait_workers_ready("test_stderr.#{pid}.log", 1)
       Process.kill(:KILL, pid)
       Process.waitpid(pid)
+      File.unlink("test_stderr.#{pid}.log", "test_stdout.#{pid}.log")
       t0 = Time.now
     end
     assert child
@@ -137,8 +142,9 @@ class SignalsTest < Test::Unit::TestCase
       pid = buf[/\r\nX-Pid: (\d+)\r\n/, 1].to_i
       header_len = buf[/\A(.+?\r\n\r\n)/m, 1].size
     end
+    assert pid > 0, "pid not positive: #{pid.inspect}"
     read = buf.size
-    mode_before = @tmp.stat.mode
+    size_before = @tmp.stat.size
     assert_raises(EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,
                   Errno::EBADF) do
       loop do
@@ -151,13 +157,17 @@ class SignalsTest < Test::Unit::TestCase
 
     redirect_test_io { @server.stop(true) }
     # can't check for == since pending signals get merged
-    assert mode_before < @tmp.stat.mode
-    assert_equal(read - header_len, @bs * @count)
+    assert size_before < @tmp.stat.size
+    got = read - header_len
+    expect = @bs * @count
+    assert_equal(expect, got, "expect=#{expect} got=#{got}")
     assert_nothing_raised { sock.close }
   end
 
   def test_request_read
     app = lambda { |env|
+      while env['rack.input'].read(4096)
+      end
       [ 200, {'Content-Type'=>'text/plain', 'X-Pid'=>Process.pid.to_s}, [] ]
     }
     redirect_test_io { @server = HttpServer.new(app, @server_opts).start }
@@ -171,11 +181,12 @@ class SignalsTest < Test::Unit::TestCase
       sock.close
     end
 
+    assert pid > 0, "pid not positive: #{pid.inspect}"
     sock = TCPSocket.new('127.0.0.1', @port)
     sock.syswrite("PUT / HTTP/1.0\r\n")
     sock.syswrite("Content-Length: #{@bs * @count}\r\n\r\n")
     1000.times { Process.kill(:HUP, pid) }
-    mode_before = @tmp.stat.mode
+    size_before = @tmp.stat.size
     killer = fork { loop { Process.kill(:HUP, pid); sleep(0.0001) } }
     buf = ' ' * @bs
     @count.times { sock.syswrite(buf) }
@@ -183,7 +194,7 @@ class SignalsTest < Test::Unit::TestCase
     Process.waitpid2(killer)
     redirect_test_io { @server.stop(true) }
     # can't check for == since pending signals get merged
-    assert mode_before < @tmp.stat.mode
+    assert size_before < @tmp.stat.size
     assert_equal pid, sock.sysread(4096)[/\r\nX-Pid: (\d+)\r\n/, 1].to_i
     sock.close
   end
diff --git a/test/unit/test_socket_helper.rb b/test/unit/test_socket_helper.rb
index 75d9f7b..c35b0c2 100644
--- a/test/unit/test_socket_helper.rb
+++ b/test/unit/test_socket_helper.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 require 'test/test_helper'
 require 'tempfile'
 
@@ -61,6 +63,20 @@ class TestSocketHelper < Test::Unit::TestCase
       File.umask(old_umask)
   end
 
+  def test_bind_listen_unix_umask
+    old_umask = File.umask(0777)
+    tmp = Tempfile.new 'unix.sock'
+    @unix_listener_path = tmp.path
+    File.unlink(@unix_listener_path)
+    @unix_listener = bind_listen(@unix_listener_path, :umask => 077)
+    assert UNIXServer === @unix_listener
+    assert_equal @unix_listener_path, sock_name(@unix_listener)
+    assert_equal 0140700, File.stat(@unix_listener_path).mode
+    assert_equal 0777, File.umask
+    ensure
+      File.umask(old_umask)
+  end
+
   def test_bind_listen_unix_idempotent
     test_bind_listen_unix
     a = bind_listen(@unix_listener)
diff --git a/test/unit/test_tee_input.rb b/test/unit/test_tee_input.rb
new file mode 100644
index 0000000..403f698
--- /dev/null
+++ b/test/unit/test_tee_input.rb
@@ -0,0 +1,229 @@
+# -*- encoding: binary -*-
+
+require 'test/unit'
+require 'digest/sha1'
+require 'unicorn'
+
+class TestTeeInput < Test::Unit::TestCase
+
+  def setup
+    @rs = $/
+    @env = {}
+    @rd, @wr = IO.pipe
+    @rd.sync = @wr.sync = true
+    @start_pid = $$
+  end
+
+  def teardown
+    return if $$ != @start_pid
+    $/ = @rs
+    @rd.close rescue nil
+    @wr.close rescue nil
+    begin
+      Process.wait
+    rescue Errno::ECHILD
+      break
+    end while true
+  end
+
+  def test_gets_long
+    init_parser("hello", 5 + (4096 * 4 * 3) + "#$/foo#$/".size)
+    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    status = line = nil
+    pid = fork {
+      @rd.close
+      3.times { @wr.write("ffff" * 4096) }
+      @wr.write "#$/foo#$/"
+      @wr.close
+    }
+    @wr.close
+    assert_nothing_raised { line = ti.gets }
+    assert_equal(4096 * 4 * 3 + 5 + $/.size, line.size)
+    assert_equal("hello" << ("ffff" * 4096 * 3) << "#$/", line)
+    assert_nothing_raised { line = ti.gets }
+    assert_equal "foo#$/", line
+    assert_nil ti.gets
+    assert_nothing_raised { pid, status = Process.waitpid2(pid) }
+    assert status.success?
+  end
+
+  def test_gets_short
+    init_parser("hello", 5 + "#$/foo".size)
+    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    status = line = nil
+    pid = fork {
+      @rd.close
+      @wr.write "#$/foo"
+      @wr.close
+    }
+    @wr.close
+    assert_nothing_raised { line = ti.gets }
+    assert_equal("hello#$/", line)
+    assert_nothing_raised { line = ti.gets }
+    assert_equal "foo", line
+    assert_nil ti.gets
+    assert_nothing_raised { pid, status = Process.waitpid2(pid) }
+    assert status.success?
+  end
+
+  def test_small_body
+    init_parser('hello')
+    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    assert_equal 0, @parser.content_length
+    assert @parser.body_eof?
+    assert_equal StringIO, ti.instance_eval { @tmp.class }
+    assert_equal 0, ti.instance_eval { @tmp.pos }
+    assert_equal 5, ti.size
+    assert_equal 'hello', ti.read
+    assert_equal '', ti.read
+    assert_nil ti.read(4096)
+  end
+
+  def test_read_with_buffer
+    init_parser('hello')
+    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    buf = ''
+    rv = ti.read(4, buf)
+    assert_equal 'hell', rv
+    assert_equal 'hell', buf
+    assert_equal rv.object_id, buf.object_id
+    assert_equal 'o', ti.read
+    assert_equal nil, ti.read(5, buf)
+    assert_equal 0, ti.rewind
+    assert_equal 'hello', ti.read(5, buf)
+    assert_equal 'hello', buf
+  end
+
+  def test_big_body
+    init_parser('.' * Unicorn::Const::MAX_BODY << 'a')
+    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    assert_equal 0, @parser.content_length
+    assert @parser.body_eof?
+    assert_kind_of File, ti.instance_eval { @tmp }
+    assert_equal 0, ti.instance_eval { @tmp.pos }
+    assert_equal Unicorn::Const::MAX_BODY + 1, ti.size
+  end
+
+  def test_read_in_full_if_content_length
+    a, b = 300, 3
+    init_parser('.' * b, 300)
+    assert_equal 300, @parser.content_length
+    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    pid = fork {
+      @wr.write('.' * 197)
+      sleep 1 # still a *potential* race here that would make the test moot...
+      @wr.write('.' * 100)
+    }
+    assert_equal a, ti.read(a).size
+    _, status = Process.waitpid2(pid)
+    assert status.success?
+    @wr.close
+  end
+
+  def test_big_body_multi
+    init_parser('.', Unicorn::Const::MAX_BODY + 1)
+    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    assert_equal Unicorn::Const::MAX_BODY, @parser.content_length
+    assert ! @parser.body_eof?
+    assert_kind_of File, ti.instance_eval { @tmp }
+    assert_equal 0, ti.instance_eval { @tmp.pos }
+    assert_equal 1, ti.instance_eval { @tmp.size }
+    assert_equal Unicorn::Const::MAX_BODY + 1, ti.size
+    nr = Unicorn::Const::MAX_BODY / 4
+    pid = fork {
+      @rd.close
+      nr.times { @wr.write('....') }
+      @wr.close
+    }
+    @wr.close
+    assert_equal '.', ti.read(1)
+    assert_equal Unicorn::Const::MAX_BODY + 1, ti.size
+    nr.times {
+      assert_equal '....', ti.read(4)
+      assert_equal Unicorn::Const::MAX_BODY + 1, ti.size
+    }
+    assert_nil ti.read(1)
+    status = nil
+    assert_nothing_raised { pid, status = Process.waitpid2(pid) }
+    assert status.success?
+  end
+
+  def test_chunked
+    @parser = Unicorn::HttpParser.new
+    @buf = "POST / HTTP/1.1\r\n" \
+           "Host: localhost\r\n" \
+           "Transfer-Encoding: chunked\r\n" \
+           "\r\n"
+    assert_equal @env, @parser.headers(@env, @buf)
+    assert_equal "", @buf
+
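+    # child writes five 5-byte chunks, then the terminating zero-size chunk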
+    pid = fork {
+      @rd.close
+      5.times { @wr.write("5\r\nabcde\r\n") }
+      @wr.write("0\r\n")
+    }
+    @wr.close
+    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    assert_nil @parser.content_length
+    assert_nil ti.instance_eval { @size }
+    assert ! @parser.body_eof?
+    assert_equal 25, ti.size
+    assert @parser.body_eof?
+    assert_equal 25, ti.instance_eval { @size }
+    assert_equal 0, ti.instance_eval { @tmp.pos }
+    assert_nothing_raised { ti.rewind }
+    assert_equal 0, ti.instance_eval { @tmp.pos }
+    assert_equal 'abcdeabcdeabcdeabcde', ti.read(20)
+    assert_equal 20, ti.instance_eval { @tmp.pos }
+    assert_nothing_raised { ti.rewind }
+    assert_equal 0, ti.instance_eval { @tmp.pos }
+    assert_kind_of File, ti.instance_eval { @tmp }
+    status = nil
+    assert_nothing_raised { pid, status = Process.waitpid2(pid) }
+    assert status.success?
+  end
+
+  def test_chunked_ping_pong
+    @parser = Unicorn::HttpParser.new
+    @buf = "POST / HTTP/1.1\r\n" \
+           "Host: localhost\r\n" \
+           "Transfer-Encoding: chunked\r\n" \
+           "\r\n"
+    assert_equal @env, @parser.headers(@env, @buf)
+    assert_equal "", @buf
+    chunks = %w(aa bbb cccc dddd eeee)
+    rd, wr = IO.pipe
+
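+    # ping-pong: the child waits for a "." on the pipe before writing each
+    # chunk, so each read below is handed exactly one chunk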
+    pid = fork {
+      chunks.each do |chunk|
+        rd.read(1) == "." and
+          @wr.write("#{'%x' % [ chunk.size ]}\r\n#{chunk}\r\n")
+      end
+      @wr.write("0\r\n")
+    }
+    ti = Unicorn::TeeInput.new(@rd, @env, @parser, @buf)
+    assert_nil @parser.content_length
+    assert_nil ti.instance_eval { @size }
+    assert ! @parser.body_eof?
+    chunks.each do |chunk|
+      wr.write('.')
+      assert_equal chunk, ti.read(16384)
+    end
+    _, status = Process.waitpid2(pid)
+    assert status.success?
+  end
+
+private
+
+  def init_parser(body, size = nil)
+    @parser = Unicorn::HttpParser.new
+    body = body.to_s.freeze
+    @buf = "POST / HTTP/1.1\r\n" \
+           "Host: localhost\r\n" \
+           "Content-Length: #{size || body.size}\r\n" \
+           "\r\n#{body}"
+    assert_equal @env, @parser.headers(@env, @buf)
+    assert_equal body, @buf
+  end
+
+end
diff --git a/test/unit/test_upload.rb b/test/unit/test_upload.rb
index 9ef3ed7..7ac3c9e 100644
--- a/test/unit/test_upload.rb
+++ b/test/unit/test_upload.rb
@@ -1,5 +1,8 @@
+# -*- encoding: binary -*-
+
 # Copyright (c) 2009 Eric Wong
 require 'test/test_helper'
+require 'digest/md5'
 
 include Unicorn
 
@@ -18,29 +21,33 @@ class UploadTest < Test::Unit::TestCase
     @sha1 = Digest::SHA1.new
     @sha1_app = lambda do |env|
       input = env['rack.input']
-      resp = { :pos => input.pos, :size => input.size, :class => input.class }
+      resp = {}
 
-      # sysread
       @sha1.reset
-      begin
-        loop { @sha1.update(input.sysread(@bs)) }
-      rescue EOFError
+      while buf = input.read(@bs)
+        @sha1.update(buf)
       end
       resp[:sha1] = @sha1.hexdigest
 
-      # read
-      input.sysseek(0) if input.respond_to?(:sysseek)
+      # rewind and read again
       input.rewind
       @sha1.reset
-      loop {
-        buf = input.read(@bs) or break
+      while buf = input.read(@bs)
         @sha1.update(buf)
-      }
+      end
 
       if resp[:sha1] == @sha1.hexdigest
         resp[:sysread_read_byte_match] = true
       end
 
+      if expect_size = env['HTTP_X_EXPECT_SIZE']
+        if expect_size.to_i == input.size
+          resp[:expect_size_match] = true
+        end
+      end
+      resp[:size] = input.size
+      resp[:content_md5] = env['HTTP_CONTENT_MD5']
+
       [ 200, @hdr.merge({'X-Resp' => resp.inspect}), [] ]
     end
   end
@@ -54,7 +61,7 @@ class UploadTest < Test::Unit::TestCase
     start_server(@sha1_app)
     sock = TCPSocket.new(@addr, @port)
     sock.syswrite("PUT / HTTP/1.0\r\nContent-Length: #{length}\r\n\r\n")
-    @count.times do
+    @count.times do |i|
       buf = @random.sysread(@bs)
       @sha1.update(buf)
       sock.syswrite(buf)
@@ -63,10 +70,34 @@ class UploadTest < Test::Unit::TestCase
     assert_equal "HTTP/1.1 200 OK", read[0]
     resp = eval(read.grep(/^X-Resp: /).first.sub!(/X-Resp: /, ''))
     assert_equal length, resp[:size]
-    assert_equal 0, resp[:pos]
     assert_equal @sha1.hexdigest, resp[:sha1]
   end
 
+  def test_put_content_md5
+    md5 = Digest::MD5.new
+    start_server(@sha1_app)
+    sock = TCPSocket.new(@addr, @port)
+    sock.syswrite("PUT / HTTP/1.0\r\nTransfer-Encoding: chunked\r\n" \
+                  "Trailer: Content-MD5\r\n\r\n")
+    @count.times do
+      buf = @random.sysread(@bs)
+      @sha1.update(buf)
+      md5.update(buf)
+      sock.syswrite("#{'%x' % buf.size}\r\n")
+      sock.syswrite(buf << "\r\n")
+    end
+    sock.syswrite("0\r\n")
+
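+    # Content-MD5 is the base64 of the binary MD5 digest (RFC 1864),
+    # sent here as a trailer after the chunked body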
+    content_md5 = [ md5.digest! ].pack('m').strip.freeze
+    sock.syswrite("Content-MD5: #{content_md5}\r\n\r\n")
+    read = sock.read.split(/\r\n/)
+    assert_equal "HTTP/1.1 200 OK", read[0]
+    resp = eval(read.grep(/^X-Resp: /).first.sub!(/X-Resp: /, ''))
+    assert_equal length, resp[:size]
+    assert_equal @sha1.hexdigest, resp[:sha1]
+    assert_equal content_md5, resp[:content_md5]
+  end
+
   def test_put_trickle_small
     @count, @bs = 2, 128
     start_server(@sha1_app)
@@ -85,42 +116,7 @@ class UploadTest < Test::Unit::TestCase
     assert_equal "HTTP/1.1 200 OK", read[0]
     resp = eval(read.grep(/^X-Resp: /).first.sub!(/X-Resp: /, ''))
     assert_equal length, resp[:size]
-    assert_equal 0, resp[:pos]
     assert_equal @sha1.hexdigest, resp[:sha1]
-    assert_equal StringIO, resp[:class]
-  end
-
-  def test_tempfile_unlinked
-    spew_path = lambda do |env|
-      if orig = env['HTTP_X_OLD_PATH']
-        assert orig != env['rack.input'].path
-      end
-      assert_equal length, env['rack.input'].size
-      [ 200, @hdr.merge('X-Tempfile-Path' => env['rack.input'].path), [] ]
-    end
-    start_server(spew_path)
-    sock = TCPSocket.new(@addr, @port)
-    sock.syswrite("PUT / HTTP/1.0\r\nContent-Length: #{length}\r\n\r\n")
-    @count.times { sock.syswrite(' ' * @bs) }
-    path = sock.read[/^X-Tempfile-Path: (\S+)/, 1]
-    sock.close
-
-    # send another request to ensure we hit the next request
-    sock = TCPSocket.new(@addr, @port)
-    sock.syswrite("PUT / HTTP/1.0\r\nX-Old-Path: #{path}\r\n" \
-                  "Content-Length: #{length}\r\n\r\n")
-    @count.times { sock.syswrite(' ' * @bs) }
-    path2 = sock.read[/^X-Tempfile-Path: (\S+)/, 1]
-    sock.close
-    assert path != path2
-
-    # make sure the next request comes in so the unlink got processed
-    sock = TCPSocket.new(@addr, @port)
-    sock.syswrite("GET ?lasdf\r\n\r\n\r\n\r\n")
-    sock.sysread(4096) rescue nil
-    sock.close
-
-    assert ! File.exist?(path)
   end
 
   def test_put_keepalive_truncates_small_overwrite
@@ -136,75 +132,31 @@ class UploadTest < Test::Unit::TestCase
     sock.syswrite('12345') # write 4 bytes more than we expected
     @sha1.update('1')
 
-    read = sock.read.split(/\r\n/)
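+    # read the response in chunks until the blank line ending the headers
+    # arrives, rather than waiting for EOF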
+    buf = sock.readpartial(4096)
+    while buf !~ /\r\n\r\n/
+      buf << sock.readpartial(4096)
+    end
+    read = buf.split(/\r\n/)
     assert_equal "HTTP/1.1 200 OK", read[0]
     resp = eval(read.grep(/^X-Resp: /).first.sub!(/X-Resp: /, ''))
     assert_equal to_upload, resp[:size]
-    assert_equal 0, resp[:pos]
     assert_equal @sha1.hexdigest, resp[:sha1]
   end
 
   def test_put_excessive_overwrite_closed
-    start_server(lambda { |env| [ 200, @hdr, [] ] })
-    sock = TCPSocket.new(@addr, @port)
-    buf = ' ' * @bs
-    sock.syswrite("PUT / HTTP/1.0\r\nContent-Length: #{length}\r\n\r\n")
-    @count.times { sock.syswrite(buf) }
-    assert_raise(Errno::ECONNRESET, Errno::EPIPE) do
-      ::Unicorn::Const::CHUNK_SIZE.times { sock.syswrite(buf) }
-    end
-  end
-
-  def test_put_handler_closed_file
-    nr = '0'
     start_server(lambda { |env|
-      env['rack.input'].close
-      resp = { :nr => nr.succ! }
-      [ 200, @hdr.merge({ 'X-Resp' => resp.inspect}), [] ]
+      while env['rack.input'].read(65536); end
+      [ 200, @hdr, [] ]
     })
     sock = TCPSocket.new(@addr, @port)
     buf = ' ' * @bs
     sock.syswrite("PUT / HTTP/1.0\r\nContent-Length: #{length}\r\n\r\n")
-    @count.times { sock.syswrite(buf) }
-    read = sock.read.split(/\r\n/)
-    assert_equal "HTTP/1.1 200 OK", read[0]
-    resp = eval(read.grep(/^X-Resp: /).first.sub!(/X-Resp: /, ''))
-    assert_equal '1', resp[:nr]
 
-    # server still alive?
-    sock = TCPSocket.new(@addr, @port)
-    sock.syswrite("GET / HTTP/1.0\r\n\r\n")
-    read = sock.read.split(/\r\n/)
-    assert_equal "HTTP/1.1 200 OK", read[0]
-    resp = eval(read.grep(/^X-Resp: /).first.sub!(/X-Resp: /, ''))
-    assert_equal '2', resp[:nr]
-  end
-
-  def test_renamed_file_not_closed
-    start_server(lambda { |env|
-      new_tmp = Tempfile.new('unicorn_test')
-      input = env['rack.input']
-      File.rename(input.path, new_tmp.path)
-      resp = {
-        :inode => input.stat.ino,
-        :size => input.stat.size,
-        :new_tmp => new_tmp.path,
-        :old_tmp => input.path,
-      }
-      [ 200, @hdr.merge({ 'X-Resp' => resp.inspect}), [] ]
-    })
-    sock = TCPSocket.new(@addr, @port)
-    buf = ' ' * @bs
-    sock.syswrite("PUT / HTTP/1.0\r\nContent-Length: #{length}\r\n\r\n")
     @count.times { sock.syswrite(buf) }
-    read = sock.read.split(/\r\n/)
-    assert_equal "HTTP/1.1 200 OK", read[0]
-    resp = eval(read.grep(/^X-Resp: /).first.sub!(/X-Resp: /, ''))
-    new_tmp = File.open(resp[:new_tmp])
-    assert_equal resp[:inode], new_tmp.stat.ino
-    assert_equal length, resp[:size]
-    assert ! File.exist?(resp[:old_tmp])
-    assert_equal resp[:size], new_tmp.stat.size
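+    # writes beyond the declared Content-Length eventually fail once the
+    # server has responded and closed its end of the connection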
+    assert_raise(Errno::ECONNRESET, Errno::EPIPE) do
+      ::Unicorn::Const::CHUNK_SIZE.times { sock.syswrite(buf) }
+    end
+    assert_equal "HTTP/1.1 200 OK\r\n", sock.gets
   end
 
   # Despite reading numerous articles and inspecting the 1.9.1-p0 C
@@ -233,7 +185,6 @@ class UploadTest < Test::Unit::TestCase
     resp = `curl -isSfN -T#{tmp.path} http://#@addr:#@port/`
     assert $?.success?, 'curl ran OK'
     assert_match(%r!\b#{sha1}\b!, resp)
-    assert_match(/Tempfile/, resp)
     assert_match(/sysread_read_byte_match/, resp)
 
     # small StringIO path
@@ -249,10 +200,87 @@ class UploadTest < Test::Unit::TestCase
     resp = `curl -isSfN -T#{tmp.path} http://#@addr:#@port/`
     assert $?.success?, 'curl ran OK'
     assert_match(%r!\b#{sha1}\b!, resp)
-    assert_match(/StringIO/, resp)
     assert_match(/sysread_read_byte_match/, resp)
   end
 
+  def test_chunked_upload_via_curl
+    # POSIX doesn't require all of these to be present on a system
+    which('curl') or return
+    which('sha1sum') or return
+    which('dd') or return
+
+    start_server(@sha1_app)
+
+    tmp = Tempfile.new('dd_dest')
+    assert(system("dd", "if=#{@random.path}", "of=#{tmp.path}",
+                        "bs=#{@bs}", "count=#{@count}"),
+           "dd #@random to #{tmp}")
+    sha1_re = %r!\b([a-f0-9]{40})\b!
+    sha1_out = `sha1sum #{tmp.path}`
+    assert $?.success?, 'sha1sum ran OK'
+
+    assert_match(sha1_re, sha1_out)
+    sha1 = sha1_re.match(sha1_out)[1]
+    cmd = "curl -H 'X-Expect-Size: #{tmp.size}' --tcp-nodelay \
+           -isSf --no-buffer -T- " \
+          "http://#@addr:#@port/"
+    resp = Tempfile.new('resp')
+    resp.sync = true
+
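+    # feed curl's stdin through a pipe and trickle the body in, so curl
+    # cannot know the size up front and uses chunked transfer-encoding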
+    rd, wr = IO.pipe
+    wr.sync = rd.sync = true
+    pid = fork {
+      STDIN.reopen(rd)
+      rd.close
+      wr.close
+      STDOUT.reopen(resp)
+      exec cmd
+    }
+    rd.close
+
+    tmp.rewind
+    @count.times { |i|
+      wr.write(tmp.read(@bs))
+      sleep(rand / 10) if 0 == i % 8
+    }
+    wr.close
+    pid, status = Process.waitpid2(pid)
+
+    resp.rewind
+    resp = resp.read
+    assert status.success?, 'curl ran OK'
+    assert_match(%r!\b#{sha1}\b!, resp)
+    assert_match(/sysread_read_byte_match/, resp)
+    assert_match(/expect_size_match/, resp)
+  end
+
+  def test_curl_chunked_small
+    # POSIX doesn't require all of these to be present on a system
+    which('curl') or return
+    which('sha1sum') or return
+    which('dd') or return
+
+    start_server(@sha1_app)
+
+    tmp = Tempfile.new('dd_dest')
+    # small StringIO path
+    assert(system("dd", "if=#{@random.path}", "of=#{tmp.path}",
+                        "bs=1024", "count=1"),
+           "dd #@random to #{tmp}")
+    sha1_re = %r!\b([a-f0-9]{40})\b!
+    sha1_out = `sha1sum #{tmp.path}`
+    assert $?.success?, 'sha1sum ran OK'
+
+    assert_match(sha1_re, sha1_out)
+    sha1 = sha1_re.match(sha1_out)[1]
+    resp = `curl -H 'X-Expect-Size: #{tmp.size}' --tcp-nodelay \
+            -isSf --no-buffer -T- http://#@addr:#@port/ < #{tmp.path}`
+    assert $?.success?, 'curl ran OK'
+    assert_match(%r!\b#{sha1}\b!, resp)
+    assert_match(/sysread_read_byte_match/, resp)
+    assert_match(/expect_size_match/, resp)
+  end
+
   private
 
   def length
diff --git a/test/unit/test_util.rb b/test/unit/test_util.rb
index 032f0be..4a1e21f 100644
--- a/test/unit/test_util.rb
+++ b/test/unit/test_util.rb
@@ -1,3 +1,5 @@
+# -*- encoding: binary -*-
+
 require 'test/test_helper'
 require 'tempfile'
 
@@ -15,6 +17,7 @@ class TestUtil < Test::Unit::TestCase
     assert_equal before, File.stat(tmp.path).inspect
     assert_equal ext, (tmp.external_encoding rescue nil)
     assert_equal int, (tmp.internal_encoding rescue nil)
+    assert_nothing_raised { tmp.close! }
   end
 
   def test_reopen_logs_renamed
@@ -37,6 +40,8 @@ class TestUtil < Test::Unit::TestCase
     assert_equal int, (tmp.internal_encoding rescue nil)
     assert_equal(EXPECT_FLAGS, EXPECT_FLAGS & tmp.fcntl(Fcntl::F_GETFL))
     assert tmp.sync
+    assert_nothing_raised { tmp.close! }
+    assert_nothing_raised { to.close! }
   end
 
   def test_reopen_logs_renamed_with_encoding
@@ -59,6 +64,7 @@ class TestUtil < Test::Unit::TestCase
         assert fp.sync
       }
     }
+    assert_nothing_raised { tmp.close! }
   end if STDIN.respond_to?(:external_encoding)
 
   def test_reopen_logs_renamed_with_internal_encoding
@@ -84,6 +90,7 @@ class TestUtil < Test::Unit::TestCase
         }
       }
     }
+    assert_nothing_raised { tmp.close! }
   end if STDIN.respond_to?(:external_encoding)
 
 end
diff --git a/unicorn.gemspec b/unicorn.gemspec
new file mode 100644
index 0000000..d336c90
--- /dev/null
+++ b/unicorn.gemspec
@@ -0,0 +1,53 @@
+# -*- encoding: binary -*-
+
+ENV["VERSION"] or abort "VERSION= must be specified"
+manifest = File.readlines('.manifest').map! { |x| x.chomp }
+
+# don't bother with tests that fork, not worth our time to get working
+# with `gem check -t` ... (we still run them under GNU make, where they
+# can run in parallel)
+test_files = manifest.grep(%r{\Atest/unit/test_.*\.rb\z}).map do |f|
+  File.readlines(f).grep(/\bfork\b/).empty? ? f : nil
+end.compact
+
+Gem::Specification.new do |s|
+  s.name = %q{unicorn}
+  s.version = ENV["VERSION"]
+
+  s.authors = ["Unicorn hackers"]
+  s.date = Time.now.utc.strftime('%Y-%m-%d')
+  s.description = File.read("README").split(/\n\n/)[1]
+  s.email = %q{mongrel-unicorn@rubyforge.org}
+  s.executables = %w(unicorn unicorn_rails)
+  s.extensions = %w(ext/unicorn_http/extconf.rb)
+
+  s.extra_rdoc_files = File.readlines('.document').map! do |x|
+    x.chomp!
+    if File.directory?(x)
+      manifest.grep(%r{\A#{x}/})
+    elsif File.file?(x)
+      x
+    else
+      nil
+    end
+  end.flatten.compact
+
+  s.files = manifest
+  s.homepage = %q{http://unicorn.bogomips.org/}
+
+  summary = %q{Rack HTTP server for fast clients and Unix}
+  s.rdoc_options = [ "-Na", "-t", "Unicorn: #{summary}" ]
+  s.require_paths = %w(lib ext)
+  s.rubyforge_project = %q{mongrel}
+  s.summary = summary
+
+  s.test_files = test_files
+
+  # for people that are absolutely stuck on Rails 2.3.2 and can't
+  # up/downgrade to any other version, the Rack dependency may be
+  # commented out.  Nevertheless, upgrading to Rails 2.3.4 or later is
+  # *strongly* recommended for security reasons.
+  s.add_dependency(%q<rack>)
+
+  # s.licenses = %w(GPLv2 Ruby) # licenses= method is not in older RubyGems
+end