-rw-r--r--  .document                           |   1
-rw-r--r--  CHANGELOG                           |   2
-rw-r--r--  COPYING                             | 339
-rw-r--r--  GNUmakefile                         |  12
-rw-r--r--  LICENSE                             |   6
-rw-r--r--  Manifest                            |   9
-rw-r--r--  README                              |  28
-rw-r--r--  TODO                                |  18
-rw-r--r--  examples/echo.ru                    |  27
-rw-r--r--  examples/git.ru                     |  13
-rw-r--r--  ext/unicorn/http11/http11.c         |  11
-rw-r--r--  lib/unicorn.rb                      | 205
-rw-r--r--  lib/unicorn/app/exec_cgi.rb         |  63
-rw-r--r--  lib/unicorn/app/inetd.rb            | 106
-rw-r--r--  lib/unicorn/app/old_rails/static.rb |  20
-rw-r--r--  lib/unicorn/chunked_reader.rb       |  94
-rw-r--r--  lib/unicorn/configurator.rb         |  55
-rw-r--r--  lib/unicorn/const.rb                |   6
-rw-r--r--  lib/unicorn/http_request.rb         |  92
-rw-r--r--  lib/unicorn/http_response.rb        |   3
-rw-r--r--  lib/unicorn/tee_input.rb            | 135
-rw-r--r--  lib/unicorn/trailer_parser.rb       |  52
-rw-r--r--  local.mk.sample                     |   6
-rw-r--r--  test/rails/test_rails.rb            |  30
-rw-r--r--  test/test_helper.rb                 |  26
-rw-r--r--  test/unit/test_chunked_reader.rb    | 180
-rw-r--r--  test/unit/test_configurator.rb      |  27
-rw-r--r--  test/unit/test_http_parser.rb       |  30
-rw-r--r--  test/unit/test_request.rb           |   9
-rw-r--r--  test/unit/test_server.rb            |  13
-rw-r--r--  test/unit/test_signals.rb           |   2
-rw-r--r--  test/unit/test_trailer_parser.rb    |  52
-rw-r--r--  test/unit/test_upload.rb            | 234
33 files changed, 1497 insertions, 409 deletions
diff --git a/.document b/.document
index e8ef088..8f8ae75 100644
--- a/.document
+++ b/.document
@@ -3,6 +3,7 @@ TUNING
 PHILOSOPHY
 DESIGN
 CONTRIBUTORS
+COPYING
 LICENSE
 SIGNALS
 TODO
diff --git a/CHANGELOG b/CHANGELOG
index cc7d5fe..3fd10b8 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,5 @@
+v0.9.1 - FD_CLOEXEC portability fix (v0.8.2 port)
+v0.9.0 - bodies: "Transfer-Encoding: chunked", rewindable streaming
 v0.8.2 - socket handling bugfixes and usability tweaks
 v0.8.1 - safer timeout handling, more consistent reload behavior
 v0.8.0 - enforce Rack dependency, minor performance improvements and fixes
diff --git a/COPYING b/COPYING
new file mode 100644
index 0000000..d511905
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,339 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+                            NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/GNUmakefile b/GNUmakefile
index 1145143..6a9bd7a 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -70,19 +70,23 @@ $(slow_tests): $(test_prefix)/.stamp
         @$(MAKE) $(shell $(awk_slow) $@)
 
 TEST_OPTS = -v
-TEST_OPTS = -v
+check_test = grep '0 failures, 0 errors' $(t) >/dev/null
 ifndef V
        quiet_pre = @echo '* $(arg)$(extra)';
-       quiet_post = >$(t) 2>&1
+       quiet_post = >$(t) 2>&1 && $(check_test)
 else
        # we can't rely on -o pipefail outside of bash 3+,
        # so we use a stamp file to indicate success and
        # have rm fail if the stamp didn't get created
        stamp = $@$(log_suffix).ok
        quiet_pre = @echo $(ruby) $(arg) $(TEST_OPTS); ! test -f $(stamp) && (
-       quiet_post = && > $(stamp) )>&2 | tee $(t); rm $(stamp) 2>/dev/null
+       quiet_post = && > $(stamp) )2>&1 | tee $(t); \
+         rm $(stamp) 2>/dev/null && $(check_test)
 endif
-run_test = $(quiet_pre) setsid $(ruby) -w $(arg) $(TEST_OPTS) $(quiet_post) || \
+
+# TRACER='strace -f -o $(t).strace -s 100000'
+run_test = $(quiet_pre) \
+  setsid $(TRACER) $(ruby) -w $(arg) $(TEST_OPTS) $(quiet_post) || \
   (sed "s,^,$(extra): ," >&2 < $(t); exit 1)
 
 %.n: arg = $(subst .n,,$(subst --, -n ,$@))
diff --git a/LICENSE b/LICENSE
index c02e2c5..5d38d9e 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
-Unicorn Web Server (unicorn) is copyrighted free software by Eric Wong
-(normalperson@yhbt.net) and contributors. You can redistribute it
-and/or modify it under either the terms of the GPL2 or the conditions below:
+Unicorn is copyrighted free software by Eric Wong (normalperson@yhbt.net)
+and contributors. You can redistribute it and/or modify it under either
+the terms of the GPL2 (see COPYING file) or the conditions below:
 
 1. You may make and give away verbatim copies of the source form of the
    software without restriction, provided that you duplicate all of the
diff --git a/Manifest b/Manifest
index cac021a..72ff78d 100644
--- a/Manifest
+++ b/Manifest
@@ -2,6 +2,7 @@
 .gitignore
 CHANGELOG
 CONTRIBUTORS
+COPYING
 DESIGN
 GNUmakefile
 LICENSE
@@ -14,6 +15,8 @@ TODO
 TUNING
 bin/unicorn
 bin/unicorn_rails
+examples/echo.ru
+examples/git.ru
 examples/init.sh
 ext/unicorn/http11/ext_help.h
 ext/unicorn/http11/extconf.rb
@@ -23,15 +26,19 @@ ext/unicorn/http11/http11_parser.rl
 ext/unicorn/http11/http11_parser_common.rl
 lib/unicorn.rb
 lib/unicorn/app/exec_cgi.rb
+lib/unicorn/app/inetd.rb
 lib/unicorn/app/old_rails.rb
 lib/unicorn/app/old_rails/static.rb
 lib/unicorn/cgi_wrapper.rb
+lib/unicorn/chunked_reader.rb
 lib/unicorn/configurator.rb
 lib/unicorn/const.rb
 lib/unicorn/http_request.rb
 lib/unicorn/http_response.rb
 lib/unicorn/launcher.rb
 lib/unicorn/socket_helper.rb
+lib/unicorn/tee_input.rb
+lib/unicorn/trailer_parser.rb
 lib/unicorn/util.rb
 local.mk.sample
 setup.rb
@@ -120,6 +127,7 @@ test/rails/app-2.3.2.1/public/404.html
 test/rails/app-2.3.2.1/public/500.html
 test/rails/test_rails.rb
 test/test_helper.rb
+test/unit/test_chunked_reader.rb
 test/unit/test_configurator.rb
 test/unit/test_http_parser.rb
 test/unit/test_request.rb
@@ -127,5 +135,6 @@ test/unit/test_response.rb
 test/unit/test_server.rb
 test/unit/test_signals.rb
 test/unit/test_socket_helper.rb
+test/unit/test_trailer_parser.rb
 test/unit/test_upload.rb
 test/unit/test_util.rb
diff --git a/README b/README
index 161a3e1..96cc461 100644
--- a/README
+++ b/README
@@ -131,16 +131,28 @@ regarding this.
 
 == Known Issues
 
-* WONTFIX: code reloading with Sinatra 0.3.2 (and likely older
+* WONTFIX: code reloading and restarts with Sinatra 0.3.x (and likely older
   versions) apps is broken.  The workaround is to force production
-  mode to disable code reloading in your Sinatra application:
+  mode to disable code reloading as well as disabling "run" in your
+  Sinatra application:
     set :env, :production
-  Since this is no longer an issue with Sinatra 0.9.x apps and only
-  affected non-production instances, this will not be fixed on our end.
-  Also remember we're capable of replacing the running binary without
-  dropping any connections regardless of framework :)
+    set :run, false
+  Since this is no longer an issue with Sinatra 0.9.x apps, this will not be
+  fixed on our end.  Since Unicorn is itself the application launcher, the
+  at_exit handler used in old Sinatra always caused Mongrel to be launched
+  whenever a Unicorn worker was about to exit.
+
+  Also remember we're capable of replacing the running binary without dropping
+  any connections regardless of framework :)
 
 == Contact
 
-Email Eric Wong at normalperson@yhbt.net for now.
-Newsgroup and mailing list maybe coming...
+All feedback (bug reports, user/development discussion, patches, pull
+requests) go to the mailing list.  Patches must be sent inline
+(git format-patch -M + git send-email).  No subscription is necessary
+to post on the mailing list.  No top posting.  Address replies +To:+ (or
++Cc:+) the original sender and +Cc:+ the mailing list.
+
+* email: mongrel-unicorn@rubyforge.org
+* archives: http://rubyforge.org/pipermail/mongrel-unicorn/
+* subscribe: http://rubyforge.org/mailman/listinfo/mongrel-unicorn/
diff --git a/TODO b/TODO
index 085ef70..df0d06e 100644
--- a/TODO
+++ b/TODO
@@ -1,17 +1,9 @@
-== 1.0.0
+* integration tests with nginx including bad client handling
 
-  * integration tests with nginx including bad client handling
+* manpages (why do so few Ruby executables come with proper manpages?)
 
-  * manpages (why do so few Ruby executables come with proper manpages?)
+* code cleanups (launchers)
 
-== 1.1.0
+* Pure Ruby HTTP parser
 
-  * Transfer-Encoding: chunked request handling.  Testcase:
-
-      curl -T- http://host:port/path < file_from_stdin
-
-  * code cleanups (launchers)
-
-  * Pure Ruby HTTP parser
-
-  * Rubinius support?
+* Rubinius support
diff --git a/examples/echo.ru b/examples/echo.ru
new file mode 100644
index 0000000..c79cb7b
--- /dev/null
+++ b/examples/echo.ru
@@ -0,0 +1,27 @@
+#\-E none
+#
+# Example application that echoes read data back to the HTTP client.
+# This emulates the old echo protocol people used to run.
+#
+# An example of using this in a client would be to run:
+#   curl --no-buffer -T- http://host:port/
+#
+# Then type random stuff in your terminal to watch it get echoed back!
+
+class EchoBody < Struct.new(:input)
+
+  def each(&block)
+    while buf = input.read(4096)
+      yield buf
+    end
+    self
+  end
+
+end
+
+use Rack::Chunked
+run lambda { |env|
+  /\A100-continue\z/ =~ env['HTTP_EXPECT'] and return [100, {}, []]
+  [ 200, { 'Content-Type' => 'application/octet-stream' },
+    EchoBody.new(env['rack.input']) ]
+}
diff --git a/examples/git.ru b/examples/git.ru
new file mode 100644
index 0000000..59a31c9
--- /dev/null
+++ b/examples/git.ru
@@ -0,0 +1,13 @@
+#\-E none
+
+# See http://thread.gmane.org/gmane.comp.web.curl.general/10473/raw on
+# how to setup git for this.  A better version of the above patch was
+# accepted and committed on June 15, 2009, so you can pull the latest
+# curl CVS snapshot to try this out.
+require 'unicorn/app/inetd'
+
+use Rack::Lint
+use Rack::Chunked # important!
+run Unicorn::App::Inetd.new(
+ *%w(git daemon --verbose --inetd --export-all --base-path=/home/ew/unicorn)
+)
diff --git a/ext/unicorn/http11/http11.c b/ext/unicorn/http11/http11.c
index cd7a8f7..f640a08 100644
--- a/ext/unicorn/http11/http11.c
+++ b/ext/unicorn/http11/http11.c
@@ -324,10 +324,17 @@ static void header_done(void *data, const char *at, size_t length)
   }
   rb_hash_aset(req, global_server_name, server_name);
   rb_hash_aset(req, global_server_port, server_port);
+  rb_hash_aset(req, global_server_protocol, global_server_protocol_value);
 
   /* grab the initial body and stuff it into the hash */
-  rb_hash_aset(req, sym_http_body, rb_str_new(at, length));
-  rb_hash_aset(req, global_server_protocol, global_server_protocol_value);
+  temp = rb_hash_aref(req, global_request_method);
+  if (temp != Qnil) {
+    long len = RSTRING_LEN(temp);
+    char *ptr = RSTRING_PTR(temp);
+
+    if (memcmp(ptr, "HEAD", len) && memcmp(ptr, "GET", len))
+      rb_hash_aset(req, sym_http_body, rb_str_new(at, length));
+  }
 }
 
 static void HttpParser_free(void *data) {
diff --git a/lib/unicorn.rb b/lib/unicorn.rb
index aac530b..eb11f4d 100644
--- a/lib/unicorn.rb
+++ b/lib/unicorn.rb
@@ -1,4 +1,5 @@
 require 'fcntl'
+require 'tempfile'
 require 'unicorn/socket_helper'
 autoload :Rack, 'rack'
 
@@ -10,8 +11,15 @@ module Unicorn
   autoload :HttpRequest, 'unicorn/http_request'
   autoload :HttpResponse, 'unicorn/http_response'
   autoload :Configurator, 'unicorn/configurator'
+  autoload :TeeInput, 'unicorn/tee_input'
+  autoload :ChunkedReader, 'unicorn/chunked_reader'
+  autoload :TrailerParser, 'unicorn/trailer_parser'
   autoload :Util, 'unicorn/util'
 
+  Z = '' # the stock empty string we use everywhere...
+  Z.force_encoding(Encoding::BINARY) if Z.respond_to?(:force_encoding)
+  Z.freeze
+
   class << self
     def run(app, options = {})
       HttpServer.new(app, options).start.join
@@ -22,8 +30,12 @@ module Unicorn
   # processes which in turn handle the I/O and application process.
   # Listener sockets are started in the master process and shared with
   # forked worker children.
-  class HttpServer
-    attr_reader :logger
+
+  class HttpServer < Struct.new(:listener_opts, :timeout, :worker_processes,
+                                :before_fork, :after_fork, :before_exec,
+                                :logger, :pid, :app, :preload_app,
+                                :reexec_pid, :orig_app, :init_listeners,
+                                :master_pid, :config)
     include ::Unicorn::SocketHelper
 
     # prevents IO objects in here from being GC-ed
@@ -54,8 +66,7 @@ module Unicorn
       0 => $0.dup,
     }
 
-    Worker = Struct.new(:nr, :tempfile) unless defined?(Worker)
-    class Worker
+    class Worker < Struct.new(:nr, :tempfile)
       # worker objects may be compared to just plain numbers
       def ==(other_nr)
         self.nr == other_nr
@@ -67,14 +78,13 @@ module Unicorn
     # HttpServer.run.join to join the thread that's processing
     # incoming requests on the socket.
     def initialize(app, options = {})
-      @app = app
-      @pid = nil
-      @reexec_pid = 0
-      @init_listeners = options[:listeners] ? options[:listeners].dup : []
-      @config = Configurator.new(options.merge(:use_defaults => true))
-      @listener_opts = {}
-      @config.commit!(self, :skip => [:listeners, :pid])
-      @orig_app = app
+      self.app = app
+      self.reexec_pid = 0
+      self.init_listeners = options[:listeners] ? options[:listeners].dup : []
+      self.config = Configurator.new(options.merge(:use_defaults => true))
+      self.listener_opts = {}
+      config.commit!(self, :skip => [:listeners, :pid])
+      self.orig_app = app
     end
 
     # Runs the thing.  Returns self so you can run join on it
@@ -85,13 +95,13 @@ module Unicorn
       # before they become UNIXServer or TCPServer
       inherited = ENV['UNICORN_FD'].to_s.split(/,/).map do |fd|
         io = Socket.for_fd(fd.to_i)
-        set_server_sockopt(io, @listener_opts[sock_name(io)])
+        set_server_sockopt(io, listener_opts[sock_name(io)])
         IO_PURGATORY << io
         logger.info "inherited addr=#{sock_name(io)} fd=#{fd}"
         server_cast(io)
       end
 
-      config_listeners = @config[:listeners].dup
+      config_listeners = config[:listeners].dup
       LISTENERS.replace(inherited)
 
       # we start out with generic Socket objects that get cast to either
@@ -104,8 +114,9 @@ module Unicorn
       end
       config_listeners.each { |addr| listen(addr) }
       raise ArgumentError, "no listeners" if LISTENERS.empty?
-      self.pid = @config[:pid]
-      build_app! if @preload_app
+      self.pid = config[:pid]
+      self.master_pid = $$
+      build_app! if preload_app
       maintain_worker_count
       self
     end
@@ -123,8 +134,7 @@ module Unicorn
         end
       end
       set_names = listener_names(listeners)
-      dead_names += cur_names - set_names
-      dead_names.uniq!
+      dead_names.concat(cur_names - set_names).uniq!
 
       LISTENERS.delete_if do |io|
         if dead_names.include?(sock_name(io))
@@ -133,7 +143,7 @@ module Unicorn
           end
           (io.close rescue nil).nil? # true
         else
-          set_server_sockopt(io, @listener_opts[sock_name(io)])
+          set_server_sockopt(io, listener_opts[sock_name(io)])
           false
         end
       end
@@ -144,28 +154,27 @@ module Unicorn
     def stdout_path=(path); redirect_io($stdout, path); end
     def stderr_path=(path); redirect_io($stderr, path); end
 
-    def logger=(obj)
-      REQUEST.logger = @logger = obj
-    end
+    alias_method :set_pid, :pid=
+    undef_method :pid=
 
     # sets the path for the PID file of the master process
     def pid=(path)
       if path
         if x = valid_pid?(path)
-          return path if @pid && path == @pid && x == $$
+          return path if pid && path == pid && x == $$
           raise ArgumentError, "Already running on PID:#{x} " \
                                "(or pid=#{path} is stale)"
         end
       end
-      unlink_pid_safe(@pid) if @pid
+      unlink_pid_safe(pid) if pid
       File.open(path, 'wb') { |fp| fp.syswrite("#$$\n") } if path
-      @pid = path
+      self.set_pid(path)
     end
 
     # add a given address to the +listeners+ set, idempotently
     # Allows workers to add a private, per-process listener via the
-    # @after_fork hook.  Very useful for debugging and testing.
-    def listen(address, opt = {}.merge(@listener_opts[address] || {}))
+    # after_fork hook.  Very useful for debugging and testing.
+    def listen(address, opt = {}.merge(listener_opts[address] || {}))
       return if String === address && listener_names.include?(address)
 
       delay, tries = 0.5, 5
@@ -231,12 +240,12 @@ module Unicorn
               logger.info "SIGWINCH ignored because we're not daemonized"
             end
           when :TTIN
-            @worker_processes += 1
+            self.worker_processes += 1
           when :TTOU
-            @worker_processes -= 1 if @worker_processes > 0
+            self.worker_processes -= 1 if self.worker_processes > 0
           when :HUP
             respawn = true
-            if @config.config_file
+            if config.config_file
               load_config!
               redo # immediate reaping since we may have QUIT workers
             else # exec binary and exit if there's no config file
@@ -255,14 +264,14 @@ module Unicorn
       end
       stop # gracefully shutdown all workers on our way out
       logger.info "master complete"
-      unlink_pid_safe(@pid) if @pid
+      unlink_pid_safe(pid) if pid
     end
 
     # Terminates all workers, but does not exit master process
     def stop(graceful = true)
       self.listeners = []
       kill_each_worker(graceful ? :QUIT : :TERM)
-      timeleft = @timeout
+      timeleft = timeout
       step = 0.2
       reap_all_workers
       until WORKERS.empty?
@@ -315,15 +324,15 @@ module Unicorn
     def reap_all_workers
       begin
         loop do
-          pid, status = Process.waitpid2(-1, Process::WNOHANG)
-          pid or break
-          if @reexec_pid == pid
+          wpid, status = Process.waitpid2(-1, Process::WNOHANG)
+          wpid or break
+          if reexec_pid == wpid
             logger.error "reaped #{status.inspect} exec()-ed"
-            @reexec_pid = 0
-            self.pid = @pid.chomp('.oldbin') if @pid
+            self.reexec_pid = 0
+            self.pid = pid.chomp('.oldbin') if pid
             proc_name 'master'
           else
-            worker = WORKERS.delete(pid) and worker.tempfile.close rescue nil
+            worker = WORKERS.delete(wpid) and worker.tempfile.close rescue nil
             logger.info "reaped #{status.inspect} " \
                         "worker=#{worker.nr rescue 'unknown'}"
           end
@@ -334,19 +343,19 @@ module Unicorn
 
     # reexecutes the START_CTX with a new binary
     def reexec
-      if @reexec_pid > 0
+      if reexec_pid > 0
         begin
-          Process.kill(0, @reexec_pid)
-          logger.error "reexec-ed child already running PID:#{@reexec_pid}"
+          Process.kill(0, reexec_pid)
+          logger.error "reexec-ed child already running PID:#{reexec_pid}"
           return
         rescue Errno::ESRCH
-          @reexec_pid = 0
+          reexec_pid = 0
         end
       end
 
-      if @pid
-        old_pid = "#{@pid}.oldbin"
-        prev_pid = @pid.dup
+      if pid
+        old_pid = "#{pid}.oldbin"
+        prev_pid = pid.dup
         begin
           self.pid = old_pid  # clear the path for a new pid file
         rescue ArgumentError
@@ -359,7 +368,7 @@ module Unicorn
         end
       end
 
-      @reexec_pid = fork do
+      self.reexec_pid = fork do
         listener_fds = LISTENERS.map { |sock| sock.fileno }
         ENV['UNICORN_FD'] = listener_fds.join(',')
         Dir.chdir(START_CTX[:cwd])
@@ -376,38 +385,38 @@ module Unicorn
           io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
         end
         logger.info "executing #{cmd.inspect} (in #{Dir.pwd})"
-        @before_exec.call(self)
+        before_exec.call(self)
         exec(*cmd)
       end
       proc_name 'master (old)'
     end
 
-    # forcibly terminate all workers that haven't checked in in @timeout
+    # forcibly terminate all workers that haven't checked in in timeout
     # seconds.  The timeout is implemented using an unlinked tempfile
     # shared between the parent process and each worker.  The worker
     # runs File#chmod to modify the ctime of the tempfile.  If the ctime
-    # is stale for >@timeout seconds, then we'll kill the corresponding
+    # is stale for >timeout seconds, then we'll kill the corresponding
     # worker.
     def murder_lazy_workers
       diff = stat = nil
-      WORKERS.dup.each_pair do |pid, worker|
+      WORKERS.dup.each_pair do |wpid, worker|
         stat = begin
           worker.tempfile.stat
         rescue => e
-          logger.warn "worker=#{worker.nr} PID:#{pid} stat error: #{e.inspect}"
-          kill_worker(:QUIT, pid)
+          logger.warn "worker=#{worker.nr} PID:#{wpid} stat error: #{e.inspect}"
+          kill_worker(:QUIT, wpid)
           next
         end
         stat.mode == 0100000 and next
-        (diff = (Time.now - stat.ctime)) <= @timeout and next
-        logger.error "worker=#{worker.nr} PID:#{pid} timeout " \
-                     "(#{diff}s > #{@timeout}s), killing"
-        kill_worker(:KILL, pid) # take no prisoners for @timeout violations
+        (diff = (Time.now - stat.ctime)) <= timeout and next
+        logger.error "worker=#{worker.nr} PID:#{wpid} timeout " \
+                     "(#{diff}s > #{timeout}s), killing"
+        kill_worker(:KILL, wpid) # take no prisoners for timeout violations
       end
     end
 
     def spawn_missing_workers
-      (0...@worker_processes).each do |worker_nr|
+      (0...worker_processes).each do |worker_nr|
         WORKERS.values.include?(worker_nr) and next
         begin
           Dir.chdir(START_CTX[:cwd])
@@ -419,25 +428,32 @@ module Unicorn
         tempfile = Tempfile.new(nil) # as short as possible to save dir space
         tempfile.unlink # don't allow other processes to find or see it
         worker = Worker.new(worker_nr, tempfile)
-        @before_fork.call(self, worker)
-        pid = fork { worker_loop(worker) }
-        WORKERS[pid] = worker
+        before_fork.call(self, worker)
+        WORKERS[fork { worker_loop(worker) }] = worker
       end
     end
 
     def maintain_worker_count
-      (off = WORKERS.size - @worker_processes) == 0 and return
+      (off = WORKERS.size - worker_processes) == 0 and return
       off < 0 and return spawn_missing_workers
-      WORKERS.dup.each_pair { |pid,w|
-        w.nr >= @worker_processes and kill_worker(:QUIT, pid) rescue nil
+      WORKERS.dup.each_pair { |wpid,w|
+        w.nr >= worker_processes and kill_worker(:QUIT, wpid) rescue nil
       }
     end
 
     # once a client is accepted, it is processed in its entirety here
     # in 3 easy steps: read request, call app, write app response
-    def process_client(app, client)
+    def process_client(client)
       client.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
-      HttpResponse.write(client, app.call(REQUEST.read(client)))
+      response = app.call(env = REQUEST.read(client))
+
+      if 100 == response.first.to_i
+        client.write(Const::EXPECT_100_RESPONSE)
+        env.delete(Const::HTTP_EXPECT)
+        response = app.call(env)
+      end
+
+      HttpResponse.write(client, response)
     # if we get any error, try to write something back to the client
     # assuming we haven't closed the socket, but don't get hung up
     # if the socket is already closed or broken.  We'll always ensure
@@ -457,7 +473,7 @@ module Unicorn
 
     # gets rid of stuff the worker has no business keeping track of
     # to free some resources and drops all sig handlers.
-    # traps for USR1, USR2, and HUP may be set in the @after_fork Proc
+    # traps for USR1, USR2, and HUP may be set in the after_fork Proc
     # by the user.
     def init_worker_process(worker)
       QUEUE_SIGS.each { |sig| trap(sig, nil) }
@@ -470,15 +486,15 @@ module Unicorn
       WORKERS.clear
       LISTENERS.each { |sock| sock.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
       worker.tempfile.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
-      @after_fork.call(self, worker) # can drop perms
-      @timeout /= 2.0 # halve it for select()
-      build_app! unless @preload_app
+      after_fork.call(self, worker) # can drop perms
+      self.timeout /= 2.0 # halve it for select()
+      build_app! unless preload_app
     end
 
     def reopen_worker_logs(worker_nr)
-      @logger.info "worker=#{worker_nr} reopening logs..."
+      logger.info "worker=#{worker_nr} reopening logs..."
       Unicorn::Util.reopen_logs
-      @logger.info "worker=#{worker_nr} done reopening logs"
+      logger.info "worker=#{worker_nr} done reopening logs"
       init_self_pipe!
     end
 
@@ -486,7 +502,7 @@ module Unicorn
     # for connections and doesn't die until the parent dies (or is
     # given a INT, QUIT, or TERM signal)
     def worker_loop(worker)
-      master_pid = Process.ppid # slightly racy, but less memory usage
+      ppid = master_pid
       init_worker_process(worker)
       nr = 0 # this becomes negative if we need to reopen logs
       alive = worker.tempfile # tempfile is our lifeline to the master process
@@ -497,14 +513,13 @@ module Unicorn
       trap(:USR1) { nr = -65536; SELF_PIPE.first.close rescue nil }
       trap(:QUIT) { alive = nil; LISTENERS.each { |s| s.close rescue nil } }
       [:TERM, :INT].each { |sig| trap(sig) { exit!(0) } } # instant shutdown
-      @logger.info "worker=#{worker.nr} ready"
-      app = @app
+      logger.info "worker=#{worker.nr} ready"
 
       begin
         nr < 0 and reopen_worker_logs(worker.nr)
         nr = 0
 
-        # we're a goner in @timeout seconds anyways if alive.chmod
+        # we're a goner in timeout seconds anyways if alive.chmod
         # breaks, so don't trap the exception.  Using fchmod() since
         # futimes() is not available in base Ruby and I very strongly
         # prefer temporary files to be unlinked for security,
@@ -516,7 +531,7 @@ module Unicorn
 
         ready.each do |sock|
           begin
-            process_client(app, sock.accept_nonblock)
+            process_client(sock.accept_nonblock)
             nr += 1
             t == (ti = Time.now.to_i) or alive.chmod(t = ti)
           rescue Errno::EAGAIN, Errno::ECONNABORTED
@@ -530,11 +545,11 @@ module Unicorn
         # before we sleep again in select().
         redo unless nr == 0 # (nr < 0) => reopen logs
 
-        master_pid == Process.ppid or return
+        ppid == Process.ppid or return
         alive.chmod(t = 0)
         begin
           # timeout used so we can detect parent death:
-          ret = IO.select(LISTENERS, nil, SELF_PIPE, @timeout) or redo
+          ret = IO.select(LISTENERS, nil, SELF_PIPE, timeout) or redo
           ready = ret.first
         rescue Errno::EINTR
           ready = LISTENERS
@@ -551,17 +566,17 @@ module Unicorn
 
     # delivers a signal to a worker and fails gracefully if the worker
     # is no longer running.
-    def kill_worker(signal, pid)
+    def kill_worker(signal, wpid)
       begin
-        Process.kill(signal, pid)
+        Process.kill(signal, wpid)
       rescue Errno::ESRCH
-        worker = WORKERS.delete(pid) and worker.tempfile.close rescue nil
+        worker = WORKERS.delete(wpid) and worker.tempfile.close rescue nil
       end
     end
 
     # delivers a signal to each worker
     def kill_each_worker(signal)
-      WORKERS.keys.each { |pid| kill_worker(signal, pid) }
+      WORKERS.keys.each { |wpid| kill_worker(signal, wpid) }
     end
 
     # unlinks a PID file at given +path+ if it contains the current PID
@@ -573,10 +588,10 @@ module Unicorn
     # returns a PID if a given path contains a non-stale PID file,
     # nil otherwise.
     def valid_pid?(path)
-      if File.exist?(path) && (pid = File.read(path).to_i) > 1
+      if File.exist?(path) && (wpid = File.read(path).to_i) > 1
         begin
-          Process.kill(0, pid)
-          return pid
+          Process.kill(0, wpid)
+          return wpid
         rescue Errno::ESRCH
         end
       end
@@ -585,17 +600,17 @@ module Unicorn
 
     def load_config!
       begin
-        logger.info "reloading config_file=#{@config.config_file}"
-        @config[:listeners].replace(@init_listeners)
-        @config.reload
-        @config.commit!(self)
+        logger.info "reloading config_file=#{config.config_file}"
+        config[:listeners].replace(init_listeners)
+        config.reload
+        config.commit!(self)
         kill_each_worker(:QUIT)
         Unicorn::Util.reopen_logs
-        @app = @orig_app
-        build_app! if @preload_app
-        logger.info "done reloading config_file=#{@config.config_file}"
+        self.app = orig_app
+        build_app! if preload_app
+        logger.info "done reloading config_file=#{config.config_file}"
       rescue Object => e
-        logger.error "error reloading config_file=#{@config.config_file}: " \
+        logger.error "error reloading config_file=#{config.config_file}: " \
                      "#{e.class} #{e.message}"
       end
     end
@@ -606,12 +621,12 @@ module Unicorn
     end
 
     def build_app!
-      if @app.respond_to?(:arity) && @app.arity == 0
+      if app.respond_to?(:arity) && app.arity == 0
         if defined?(Gem) && Gem.respond_to?(:refresh)
           logger.info "Refreshing Gem list"
           Gem.refresh
         end
-        @app = @app.call
+        self.app = app.call
       end
     end
 
diff --git a/lib/unicorn/app/exec_cgi.rb b/lib/unicorn/app/exec_cgi.rb
index 8f81d78..147b279 100644
--- a/lib/unicorn/app/exec_cgi.rb
+++ b/lib/unicorn/app/exec_cgi.rb
@@ -5,7 +5,7 @@ module Unicorn::App
 
   # This class is highly experimental (even more so than the rest of Unicorn)
   # and has never run anything other than cgit.
-  class ExecCgi
+  class ExecCgi < Struct.new(:args)
 
     CHUNK_SIZE = 16384
     PASS_VARS = %w(
@@ -32,21 +32,21 @@ module Unicorn::App
     #     run Unicorn::App::ExecCgi.new("/path/to/cgit.cgi")
     #   end
     def initialize(*args)
-      @args = args.dup
-      first = @args[0] or
+      self.args = args
+      first = args[0] or
         raise ArgumentError, "need path to executable"
-      first[0..0] == "/" or @args[0] = ::File.expand_path(first)
-      File.executable?(@args[0]) or
-        raise ArgumentError, "#{@args[0]} is not executable"
+      first[0..0] == "/" or args[0] = ::File.expand_path(first)
+      File.executable?(args[0]) or
+        raise ArgumentError, "#{args[0]} is not executable"
     end
 
     # Calls the app
     def call(env)
-      out, err = Tempfile.new(''), Tempfile.new('')
+      out, err = Tempfile.new(nil), Tempfile.new(nil)
       out.unlink
       err.unlink
       inp = force_file_input(env)
-      inp.sync = out.sync = err.sync = true
+      out.sync = err.sync = true
       pid = fork { run_child(inp, out, err, env) }
       inp.close
       pid, status = Process.waitpid2(pid)
@@ -65,14 +65,14 @@ module Unicorn::App
         val = env[key] or next
         ENV[key] = val
       end
-      ENV['SCRIPT_NAME'] = @args[0]
+      ENV['SCRIPT_NAME'] = args[0]
       ENV['GATEWAY_INTERFACE'] = 'CGI/1.1'
       env.keys.grep(/^HTTP_/) { |key| ENV[key] = env[key] }
 
       a = IO.new(0).reopen(inp)
       b = IO.new(1).reopen(out)
       c = IO.new(2).reopen(err)
-      exec(*@args)
+      exec(*args)
     end
 
     # Extracts headers from CGI out, will change the offset of out.
@@ -89,23 +89,24 @@ module Unicorn::App
         offset = 4
       end
       offset += head.length
-      out.instance_variable_set('@unicorn_app_exec_cgi_offset', offset)
-      size -= offset
 
       # Allows +out+ to be used as a Rack body.
-      def out.each
-        sysseek(@unicorn_app_exec_cgi_offset)
-
-        # don't use a preallocated buffer for sysread since we can't
-        # guarantee an actual socket is consuming the yielded string
-        # (or if somebody is pushing to an array for eventual concatenation
-        begin
-          yield(sysread(CHUNK_SIZE))
-        rescue EOFError
-          return
-        end while true
-      end
+      out.instance_eval { class << self; self; end }.instance_eval {
+        define_method(:each) { |&blk|
+          sysseek(offset)
+
+          # don't use a preallocated buffer for sysread since we can't
+          # guarantee an actual socket is consuming the yielded string
+          # (or if somebody is pushing to an array for eventual concatenation
+          begin
+            blk.call(sysread(CHUNK_SIZE))
+          rescue EOFError
+            break
+          end while true
+        }
+      }
 
+      size -= offset
       prev = nil
       headers = Rack::Utils::HeaderHash.new
       head.split(/\r?\n/).each do |line|
@@ -121,17 +122,15 @@ module Unicorn::App
     # ensures rack.input is a file handle that we can redirect stdin to
     def force_file_input(env)
       inp = env['rack.input']
-      if inp.respond_to?(:fileno) && Integer === inp.fileno
-        inp
-      elsif inp.size == 0 # inp could be a StringIO or StringIO-like object
-        ::File.open('/dev/null')
+      if inp.size == 0 # inp could be a StringIO or StringIO-like object
+        ::File.open('/dev/null', 'rb')
       else
-        tmp = Tempfile.new('')
+        tmp = Tempfile.new(nil)
         tmp.unlink
         tmp.binmode
+        tmp.sync = true
 
-        # Rack::Lint::InputWrapper doesn't allow sysread :(
-        buf = ''
+        buf = Z.dup
         while inp.read(CHUNK_SIZE, buf)
           tmp.syswrite(buf)
         end
@@ -146,7 +145,7 @@ module Unicorn::App
       err.seek(0)
       dst = env['rack.errors']
       pid = status.pid
-      dst.write("#{pid}: #{@args.inspect} status=#{status} stderr:\n")
+      dst.write("#{pid}: #{args.inspect} status=#{status} stderr:\n")
       err.each_line { |line| dst.write("#{pid}: #{line}") }
       dst.flush
     end
diff --git a/lib/unicorn/app/inetd.rb b/lib/unicorn/app/inetd.rb
new file mode 100644
index 0000000..580b456
--- /dev/null
+++ b/lib/unicorn/app/inetd.rb
@@ -0,0 +1,106 @@
+# Copyright (c) 2009 Eric Wong
+# You can redistribute it and/or modify it under the same terms as Ruby.
+
+# this class *must* be used with Rack::Chunked
+
+module Unicorn::App
+  class Inetd < Struct.new(:cmd)
+
+    class CatBody < Struct.new(:errors, :err_rd, :out_rd, :pid_map)
+      def initialize(env, cmd)
+        self.errors = env['rack.errors']
+        in_rd, in_wr = IO.pipe
+        self.err_rd, err_wr = IO.pipe
+        self.out_rd, out_wr = IO.pipe
+
+        cmd_pid = fork {
+          inp, out, err = (0..2).map { |i| IO.new(i) }
+          inp.reopen(in_rd)
+          out.reopen(out_wr)
+          err.reopen(err_wr)
+          [ in_rd, in_wr, err_rd, err_wr, out_rd, out_wr ].each { |i| i.close }
+          exec(*cmd)
+        }
+        [ in_rd, err_wr, out_wr ].each { |io| io.close }
+        [ in_wr, err_rd, out_rd ].each { |io| io.binmode }
+        in_wr.sync = true
+
+        # Unfortunately, input here must be processed inside a separate
+        # thread/process using blocking I/O since env['rack.input'] is not
+        # IO.select-able and attempting to make it so would trip Rack::Lint
+        inp_pid = fork {
+          input = env['rack.input']
+          [ err_rd, out_rd ].each { |io| io.close }
+          buf = Unicorn::Z.dup
+
+          # this is dependent on input.read having readpartial semantics:
+          while input.read(16384, buf)
+            in_wr.write(buf)
+          end
+          in_wr.close
+        }
+        in_wr.close
+        self.pid_map = {
+          inp_pid => 'input streamer',
+          cmd_pid => cmd.inspect,
+        }
+      end
+
+      def each(&block)
+        begin
+          rd, = IO.select([err_rd, out_rd])
+          rd && rd.first or next
+
+          if rd.include?(err_rd)
+            begin
+              errors.write(err_rd.read_nonblock(16384))
+            rescue Errno::EINTR
+            rescue Errno::EAGAIN
+              break
+            end while true
+          end
+
+          rd.include?(out_rd) or next
+
+          begin
+            yield out_rd.read_nonblock(16384)
+          rescue Errno::EINTR
+          rescue Errno::EAGAIN
+            break
+          end while true
+        rescue EOFError,Errno::EPIPE,Errno::EBADF,Errno::EINVAL
+          break
+        end while true
+
+        self
+      end
+
+      def close
+        pid_map.each { |pid, str|
+          begin
+            pid, status = Process.waitpid2(pid)
+            status.success? or
+              errors.write("#{str}: #{status.inspect} (PID:#{pid})\n")
+          rescue Errno::ECHILD
+            errors.write("Failed to reap #{str} (PID:#{pid})\n")
+          end
+        }
+      end
+
+    end
+
+    def initialize(*cmd)
+      self.cmd = cmd
+    end
+
+    def call(env)
+      /\A100-continue\z/i =~ env[Unicorn::Const::HTTP_EXPECT] and
+          return [ 100, {} , [] ]
+
+      [ 200, { 'Content-Type' => 'application/octet-stream' },
+       CatBody.new(env, cmd) ]
+    end
+
+  end
+
+end
diff --git a/lib/unicorn/app/old_rails/static.rb b/lib/unicorn/app/old_rails/static.rb
index 17c007c..51a0017 100644
--- a/lib/unicorn/app/old_rails/static.rb
+++ b/lib/unicorn/app/old_rails/static.rb
@@ -19,28 +19,28 @@ require 'rack/file'
 # This means that if you are using page caching it will actually work
 # with Unicorn and you should see a decent speed boost (but not as
 # fast as if you use a static server like nginx).
-class Unicorn::App::OldRails::Static
+class Unicorn::App::OldRails::Static < Struct.new(:app, :root, :file_server)
   FILE_METHODS = { 'GET' => true, 'HEAD' => true }.freeze
   REQUEST_METHOD = 'REQUEST_METHOD'.freeze
   REQUEST_URI = 'REQUEST_URI'.freeze
   PATH_INFO = 'PATH_INFO'.freeze
 
   def initialize(app)
-    @app = app
-    @root = "#{::RAILS_ROOT}/public"
-    @file_server = ::Rack::File.new(@root)
+    self.app = app
+    self.root = "#{::RAILS_ROOT}/public"
+    self.file_server = ::Rack::File.new(root)
   end
 
   def call(env)
     # short circuit this ASAP if serving non-file methods
-    FILE_METHODS.include?(env[REQUEST_METHOD]) or return @app.call(env)
+    FILE_METHODS.include?(env[REQUEST_METHOD]) or return app.call(env)
 
     # first try the path as-is
     path_info = env[PATH_INFO].chomp("/")
-    if File.file?("#@root/#{::Rack::Utils.unescape(path_info)}")
+    if File.file?("#{root}/#{::Rack::Utils.unescape(path_info)}")
       # File exists as-is so serve it up
       env[PATH_INFO] = path_info
-      return @file_server.call(env)
+      return file_server.call(env)
     end
 
     # then try the cached version:
@@ -50,11 +50,11 @@ class Unicorn::App::OldRails::Static
     env[REQUEST_URI] =~ /^#{Regexp.escape(path_info)}(;[^\?]+)/
     path_info << "#$1#{ActionController::Base.page_cache_extension}"
 
-    if File.file?("#@root/#{::Rack::Utils.unescape(path_info)}")
+    if File.file?("#{root}/#{::Rack::Utils.unescape(path_info)}")
       env[PATH_INFO] = path_info
-      return @file_server.call(env)
+      return file_server.call(env)
     end
 
-    @app.call(env) # call OldRails
+    app.call(env) # call OldRails
   end
 end if defined?(Unicorn::App::OldRails)
diff --git a/lib/unicorn/chunked_reader.rb b/lib/unicorn/chunked_reader.rb
new file mode 100644
index 0000000..606e4a6
--- /dev/null
+++ b/lib/unicorn/chunked_reader.rb
@@ -0,0 +1,94 @@
+# Copyright (c) 2009 Eric Wong
+# You can redistribute it and/or modify it under the same terms as Ruby.
+
+require 'unicorn'
+require 'unicorn/http11'
+
+module Unicorn
+  class ChunkedReader
+
+    def initialize(env, input, buf)
+      @env, @input, @buf = env, input, buf
+      @chunk_left = 0
+      parse_chunk_header
+    end
+
+    def readpartial(max, buf = Z.dup)
+      while @input && @chunk_left <= 0 && ! parse_chunk_header
+        @buf << @input.readpartial(Const::CHUNK_SIZE, buf)
+      end
+
+      if @input
+        begin
+          @buf << @input.read_nonblock(Const::CHUNK_SIZE, buf)
+        rescue Errno::EAGAIN, Errno::EINTR
+        end
+      end
+
+      max = @chunk_left if max > @chunk_left
+      buf.replace(last_block(max) || Z)
+      @chunk_left -= buf.size
+      (0 == buf.size && @input.nil?) and raise EOFError
+      buf
+    end
+
+    def gets
+      line = nil
+      begin
+        line = readpartial(Const::CHUNK_SIZE)
+        begin
+          if line.sub!(%r{\A(.*?#{$/})}, Z)
+            @chunk_left += line.size
+            @buf = @buf ? (line << @buf) : line
+            return $1.dup
+          end
+          line << readpartial(Const::CHUNK_SIZE)
+        end while true
+      rescue EOFError
+        return line
+      end
+    end
+
+  private
+
+    def last_block(max = nil)
+      rv = @buf
+      if max && rv && max < rv.size
+        @buf = rv[max - rv.size, rv.size - max]
+        return rv[0, max]
+      end
+      @buf = Z.dup
+      rv
+    end
+
+    def parse_chunk_header
+      buf = @buf
+      # ignoring chunk-extension info for now, I haven't seen any use for it
+      # (or any users, and TE:chunked sent by clients is rare already).
+      # If there is not enough data in the buffer to parse the chunk
+      # length, we just return nil.
+      if buf.sub!(/\A(?:\r\n)?([a-fA-F0-9]{1,8})[^\r]*?\r\n/, Z)
+        @chunk_left = $1.to_i(16)
+        if 0 == @chunk_left # EOF
+          buf.sub!(/\A\r\n(?:\r\n)?/, Z) # cleanup for future requests
+          if trailer = @env[Const::HTTP_TRAILER]
+            tp = TrailerParser.new(trailer)
+            while ! tp.execute!(@env, buf)
+              buf << @input.readpartial(Const::CHUNK_SIZE)
+            end
+          end
+          @input = nil
+        end
+        return @chunk_left
+      end
+
+      buf.size > 256 and
+          raise HttpParserError,
+                "malformed chunk, chunk-length not found in buffer: " \
+                "#{buf.inspect}"
+      nil
+    end
+
+  end
+
+end
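
ChunkedReader wraps the request env, the client IO, and any bytes already buffered by the parser, and exposes readpartial/gets until the terminating zero-size chunk, raising EOFError afterwards (with any declared trailers merged into env). A small hand-driven sketch, assuming a pipe stands in for the client socket; the write end is deliberately left open so read_nonblock sees EAGAIN rather than EOF:

    require 'unicorn/chunked_reader'

    rd, wr = IO.pipe   # wr left open on purpose in this sketch
    cr = Unicorn::ChunkedReader.new({}, rd, "5\r\nhello\r\n0\r\n\r\n".dup)
    cr.readpartial(8192)     # => "hello"
    begin
      cr.readpartial(8192)   # terminating chunk reached
    rescue EOFError
      # body fully consumed; trailers declared via HTTP_TRAILER would now be in env
    end
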
diff --git a/lib/unicorn/configurator.rb b/lib/unicorn/configurator.rb
index 860962a..0ecd0d5 100644
--- a/lib/unicorn/configurator.rb
+++ b/lib/unicorn/configurator.rb
@@ -14,14 +14,13 @@ module Unicorn
   #   after_fork do |server,worker|
   #     server.listen("127.0.0.1:#{9293 + worker.nr}") rescue nil
   #   end
-  class Configurator
+  class Configurator < Struct.new(:set, :config_file)
     # The default logger writes its output to $stderr
-    DEFAULT_LOGGER = Logger.new($stderr) unless defined?(DEFAULT_LOGGER)
+    DEFAULT_LOGGER = Logger.new($stderr)
 
     # Default settings for Unicorn
     DEFAULTS = {
       :timeout => 60,
-      :listeners => [],
       :logger => DEFAULT_LOGGER,
       :worker_processes => 1,
       :after_fork => lambda { |server, worker|
@@ -42,42 +41,32 @@ module Unicorn
         },
       :pid => nil,
       :preload_app => false,
-      :stderr_path => nil,
-      :stdout_path => nil,
     }
 
-    attr_reader :config_file #:nodoc:
-
     def initialize(defaults = {}) #:nodoc:
-      @set = Hash.new(:unset)
+      self.set = Hash.new(:unset)
       use_defaults = defaults.delete(:use_defaults)
-      @config_file = defaults.delete(:config_file)
-      @config_file.freeze
-      @set.merge!(DEFAULTS) if use_defaults
+      self.config_file = defaults.delete(:config_file)
+      set.merge!(DEFAULTS) if use_defaults
       defaults.each { |key, value| self.send(key, value) }
       reload
     end
 
     def reload #:nodoc:
-      instance_eval(File.read(@config_file)) if @config_file
+      instance_eval(File.read(config_file)) if config_file
     end
 
     def commit!(server, options = {}) #:nodoc:
       skip = options[:skip] || []
-      @set.each do |key, value|
-        (Symbol === value && value == :unset) and next
+      set.each do |key, value|
+        value == :unset and next
         skip.include?(key) and next
-        setter = "#{key}="
-        if server.respond_to?(setter)
-          server.send(setter, value)
-        else
-          server.instance_variable_set("@#{key}", value)
-        end
+        server.__send__("#{key}=", value)
       end
     end
 
     def [](key) # :nodoc:
-      @set[key]
+      set[key]
     end
 
     # sets object to the +new+ Logger-like object.  The new logger-like
@@ -89,7 +78,7 @@ module Unicorn
         raise ArgumentError, "logger=#{new} does not respond to method=#{m}"
       end
 
-      @set[:logger] = new
+      set[:logger] = new
     end
 
     # sets after_fork hook to a given block.  This block will be called by
@@ -151,7 +140,7 @@ module Unicorn
                                   "not numeric: timeout=#{seconds.inspect}"
       seconds >= 3 or raise ArgumentError,
                                   "too low: timeout=#{seconds.inspect}"
-      @set[:timeout] = seconds
+      set[:timeout] = seconds
     end
 
     # sets the current number of worker_processes to +nr+.  Each worker
@@ -161,7 +150,7 @@ module Unicorn
                              "not an integer: worker_processes=#{nr.inspect}"
       nr >= 0 or raise ArgumentError,
                              "not non-negative: worker_processes=#{nr.inspect}"
-      @set[:worker_processes] = nr
+      set[:worker_processes] = nr
     end
 
     # sets listeners to the given +addresses+, replacing or augmenting the
@@ -172,7 +161,7 @@ module Unicorn
     def listeners(addresses) # :nodoc:
       Array === addresses or addresses = Array(addresses)
       addresses.map! { |addr| expand_addr(addr) }
-      @set[:listeners] = addresses
+      set[:listeners] = addresses
     end
 
     # adds an +address+ to the existing listener set.
@@ -227,8 +216,8 @@ module Unicorn
     def listen(address, opt = {})
       address = expand_addr(address)
       if String === address
-        Hash === @set[:listener_opts] or
-          @set[:listener_opts] = Hash.new { |hash,key| hash[key] = {} }
+        Hash === set[:listener_opts] or
+          set[:listener_opts] = Hash.new { |hash,key| hash[key] = {} }
         [ :backlog, :sndbuf, :rcvbuf ].each do |key|
           value = opt[key] or next
           Integer === value or
@@ -239,11 +228,11 @@ module Unicorn
           TrueClass === value || FalseClass === value or
             raise ArgumentError, "not boolean: #{key}=#{value.inspect}"
         end
-        @set[:listener_opts][address].merge!(opt)
+        set[:listener_opts][address].merge!(opt)
       end
 
-      @set[:listeners] = [] unless Array === @set[:listeners]
-      @set[:listeners] << address
+      set[:listeners] = [] unless Array === set[:listeners]
+      set[:listeners] << address
     end
 
     # sets the +path+ for the PID file of the unicorn master process
@@ -265,7 +254,7 @@ module Unicorn
     def preload_app(bool)
       case bool
       when TrueClass, FalseClass
-        @set[:preload_app] = bool
+        set[:preload_app] = bool
       else
         raise ArgumentError, "preload_app=#{bool.inspect} not a boolean"
       end
@@ -298,7 +287,7 @@ module Unicorn
       else
         raise ArgumentError
       end
-      @set[var] = path
+      set[var] = path
     end
 
     def set_hook(var, my_proc, req_arity = 2) #:nodoc:
@@ -314,7 +303,7 @@ module Unicorn
       else
         raise ArgumentError, "invalid type: #{var}=#{my_proc.inspect}"
       end
-      @set[var] = my_proc
+      set[var] = my_proc
     end
 
     # expands "unix:path/to/foo" to a socket relative to the current path
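
The Configurator DSL above is instance_eval'd against the file named by config_file, and commit! copies each setting onto the server through its attribute writers. A representative config sketch (paths and values are illustrative):

    # unicorn.conf.rb (hypothetical)
    worker_processes 4
    listen "127.0.0.1:8080", :backlog => 64   # per-listener options
    timeout 30                                # must be >= 3 seconds
    preload_app true
    pid "/tmp/unicorn.pid"
    after_fork do |server, worker|
      server.listen("127.0.0.1:#{9293 + worker.nr}") rescue nil
    end
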
diff --git a/lib/unicorn/const.rb b/lib/unicorn/const.rb
index 72a4d61..ef58984 100644
--- a/lib/unicorn/const.rb
+++ b/lib/unicorn/const.rb
@@ -5,7 +5,7 @@ module Unicorn
   # gave about a 3% to 10% performance improvement over using the strings directly.
   # Symbols did not really improve things much compared to constants.
   module Const
-    UNICORN_VERSION="0.8.2".freeze
+    UNICORN_VERSION="0.9.1".freeze
 
     DEFAULT_HOST = "0.0.0.0".freeze # default TCP listen host address
     DEFAULT_PORT = "8080".freeze    # default TCP listen port
@@ -24,11 +24,15 @@ module Unicorn
     # common errors we'll send back
     ERROR_400_RESPONSE = "HTTP/1.1 400 Bad Request\r\n\r\n".freeze
     ERROR_500_RESPONSE = "HTTP/1.1 500 Internal Server Error\r\n\r\n".freeze
+    EXPECT_100_RESPONSE = "HTTP/1.1 100 Continue\r\n\r\n"
 
     # A frozen format for this is about 15% faster
+    HTTP_TRANSFER_ENCODING = 'HTTP_TRANSFER_ENCODING'.freeze
     CONTENT_LENGTH="CONTENT_LENGTH".freeze
     REMOTE_ADDR="REMOTE_ADDR".freeze
     HTTP_X_FORWARDED_FOR="HTTP_X_FORWARDED_FOR".freeze
+    HTTP_EXPECT="HTTP_EXPECT".freeze
+    HTTP_TRAILER="HTTP_TRAILER".freeze
     RACK_INPUT="rack.input".freeze
   end
 
diff --git a/lib/unicorn/http_request.rb b/lib/unicorn/http_request.rb
index d7078a3..b8df403 100644
--- a/lib/unicorn/http_request.rb
+++ b/lib/unicorn/http_request.rb
@@ -1,19 +1,11 @@
-require 'tempfile'
 require 'stringio'
 
 # compiled extension
 require 'unicorn/http11'
 
 module Unicorn
-  #
-  # The HttpRequest.initialize method will convert any request that is larger than
-  # Const::MAX_BODY into a Tempfile and use that as the body.  Otherwise it uses
-  # a StringIO object.  To be safe, you should assume it works like a file.
-  #
   class HttpRequest
 
-    attr_accessor :logger
-
     # default parameters we merge into the request env for Rack handlers
     DEFAULTS = {
       "rack.errors" => $stderr,
@@ -27,21 +19,19 @@ module Unicorn
       "SERVER_SOFTWARE" => "Unicorn #{Const::UNICORN_VERSION}".freeze
     }
 
-    # Optimize for the common case where there's no request body
-    # (GET/HEAD) requests.
-    NULL_IO = StringIO.new
+    NULL_IO = StringIO.new(Z)
     LOCALHOST = '127.0.0.1'.freeze
 
+    def initialize
+    end
+
     # Being explicitly single-threaded, we have certain advantages in
     # not having to worry about variables being clobbered :)
     BUFFER = ' ' * Const::CHUNK_SIZE # initial size, may grow
+    BUFFER.force_encoding(Encoding::BINARY) if Z.respond_to?(:force_encoding)
     PARSER = HttpParser.new
     PARAMS = Hash.new
 
-    def initialize(logger = Configurator::DEFAULT_LOGGER)
-      @logger = logger
-    end
-
     # Does the majority of the IO processing.  It has been written in
     # Ruby using about 8 different IO processing strategies.
     #
@@ -56,11 +46,6 @@ module Unicorn
     # This does minimal exception trapping and it is up to the caller
     # to handle any socket errors (e.g. user aborted upload).
     def read(socket)
-      # reset the parser
-      unless NULL_IO == (input = PARAMS[Const::RACK_INPUT]) # unlikely
-        input.close rescue nil
-        input.close! rescue nil
-      end
       PARAMS.clear
       PARSER.reset
 
@@ -86,69 +71,30 @@ module Unicorn
         data << socket.readpartial(Const::CHUNK_SIZE, BUFFER)
         PARSER.execute(PARAMS, data) and return handle_body(socket)
       end while true
-      rescue HttpParserError => e
-        @logger.error "HTTP parse error, malformed request " \
-                      "(#{PARAMS[Const::HTTP_X_FORWARDED_FOR] ||
-                          PARAMS[Const::REMOTE_ADDR]}): #{e.inspect}"
-        @logger.error "REQUEST DATA: #{data.inspect}\n---\n" \
-                      "PARAMS: #{PARAMS.inspect}\n---\n"
-        raise e
     end
 
     private
 
     # Handles dealing with the rest of the request
-    # returns a Rack environment if successful, raises an exception if not
+    # returns a Rack environment if successful
     def handle_body(socket)
-      http_body = PARAMS.delete(:http_body)
-      content_length = PARAMS[Const::CONTENT_LENGTH].to_i
-
-      if content_length == 0 # short circuit the common case
-        PARAMS[Const::RACK_INPUT] = NULL_IO.closed? ? NULL_IO.reopen : NULL_IO
-        return PARAMS.update(DEFAULTS)
+      PARAMS[Const::RACK_INPUT] = if (body = PARAMS.delete(:http_body))
+        length = PARAMS[Const::CONTENT_LENGTH].to_i
+
+        if te = PARAMS[Const::HTTP_TRANSFER_ENCODING]
+          if /\Achunked\z/i =~ te
+            socket = ChunkedReader.new(PARAMS, socket, body)
+            length = body = nil
+          end
+        end
+
+        TeeInput.new(socket, length, body)
+      else
+        NULL_IO.closed? ? NULL_IO.reopen(Z) : NULL_IO
       end
 
-      # must read more data to complete body
-      remain = content_length - http_body.length
-
-      body = PARAMS[Const::RACK_INPUT] = (remain < Const::MAX_BODY) ?
-          StringIO.new : Tempfile.new('unicorn')
-
-      body.binmode
-      body.write(http_body)
-
-      # Some clients (like FF1.0) report 0 for body and then send a body.
-      # This will probably truncate them but at least the request goes through
-      # usually.
-      read_body(socket, remain, body) if remain > 0
-      body.rewind
-
-      # in case read_body overread because the client tried to pipeline
-      # another request, we'll truncate it.  Again, we don't do pipelining
-      # or keepalive
-      body.truncate(content_length)
       PARAMS.update(DEFAULTS)
     end
 
-    # Does the heavy lifting of properly reading the larger body
-    # requests in small chunks.  It expects PARAMS['rack.input'] to be
-    # an IO object, socket to be valid, It also expects any initial part
-    # of the body that has been read to be in the PARAMS['rack.input']
-    # already.  It will return true if successful and false if not.
-    def read_body(socket, remain, body)
-      begin
-        # write always writes the requested amount on a POSIX filesystem
-        remain -= body.write(socket.readpartial(Const::CHUNK_SIZE, BUFFER))
-      end while remain > 0
-    rescue Object => e
-      @logger.error "Error reading HTTP body: #{e.inspect}"
-
-      # Any errors means we should delete the file, including if the file
-      # is dumped.  Truncate it ASAP to help avoid page flushes to disk.
-      body.truncate(0) rescue nil
-      reset
-      raise e
-    end
-
   end
 end
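
With the old Tempfile/StringIO buffering removed, handle_body now always hands Rack a lazy, rewindable body: NULL_IO when there is no body, otherwise a TeeInput (layered over a ChunkedReader when "Transfer-Encoding: chunked" is in use). A sketch of an application that consumes the body as it arrives rather than waiting for the whole upload, in the same spirit as the sha1_app used by the upload tests below:

    require 'digest/sha1'

    app = lambda do |env|
      input = env['rack.input']        # NULL_IO or a rewindable TeeInput
      sha1 = Digest::SHA1.new
      while buf = input.read(16384)    # pulls from the socket on demand
        sha1.update(buf)
      end
      input.rewind                     # replays from the backing store
      [ 200, { 'Content-Type' => 'text/plain' }, [ "#{sha1.hexdigest}\n" ] ]
    end
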
diff --git a/lib/unicorn/http_response.rb b/lib/unicorn/http_response.rb
index 15df3f6..bfaa33d 100644
--- a/lib/unicorn/http_response.rb
+++ b/lib/unicorn/http_response.rb
@@ -31,7 +31,6 @@ module Unicorn
     # Connection: and Date: headers no matter what (if anything) our
     # Rack application sent us.
     SKIP = { 'connection' => true, 'date' => true, 'status' => true }.freeze
-    EMPTY = ''.freeze # :nodoc
     OUT = [] # :nodoc
 
     # writes the rack_response to socket as an HTTP response
@@ -59,7 +58,7 @@ module Unicorn
                    "Date: #{Time.now.httpdate}\r\n" \
                    "Status: #{status}\r\n" \
                    "Connection: close\r\n" \
-                   "#{OUT.join(EMPTY)}\r\n")
+                   "#{OUT.join(Z)}\r\n")
       body.each { |chunk| socket.write(chunk) }
       socket.close # flushes and uncorks the socket immediately
       ensure
diff --git a/lib/unicorn/tee_input.rb b/lib/unicorn/tee_input.rb
new file mode 100644
index 0000000..06028a6
--- /dev/null
+++ b/lib/unicorn/tee_input.rb
@@ -0,0 +1,135 @@
+# Copyright (c) 2009 Eric Wong
+# You can redistribute it and/or modify it under the same terms as Ruby.
+
+require 'tempfile'
+
+# acts like tee(1) on the request body to provide an input-like stream
+# while providing rewindable semantics through a Tempfile/StringIO
+# backing store.  On the first pass, the input is only read on demand
+# so your Rack application can use input notification (upload progress
+# and the like).  This should fully conform to the Rack::InputWrapper
+# specification on the public API.  This class is intended to be a
+# strict interpretation of Rack::InputWrapper functionality and will
+# not support any deviations from it.
+
+module Unicorn
+  class TeeInput
+
+    def initialize(input, size, body)
+      @tmp = Tempfile.new(nil)
+      @tmp.unlink
+      @tmp.binmode
+      @tmp.sync = true
+
+      if body
+        @tmp.write(body)
+        @tmp.seek(0)
+      end
+      @input = input
+      @size = size # nil if chunked
+    end
+
+    # returns the size of the input.  This is what the Content-Length
+    # header value should be, and how large our input is expected to be.
+    # For TE:chunked, this requires consuming all of the input stream
+    # before returning, since there's no other way to know the size.
+    def size
+      @size and return @size
+
+      if @input
+        buf = Z.dup
+        while tee(Const::CHUNK_SIZE, buf)
+        end
+        @tmp.rewind
+      end
+
+      @size = @tmp.stat.size
+    end
+
+    def read(*args)
+      @input or return @tmp.read(*args)
+
+      length = args.shift
+      if nil == length
+        rv = @tmp.read || Z.dup
+        tmp = Z.dup
+        while tee(Const::CHUNK_SIZE, tmp)
+          rv << tmp
+        end
+        rv
+      else
+        buf = args.shift || Z.dup
+        diff = @tmp.stat.size - @tmp.pos
+        if 0 == diff
+          tee(length, buf)
+        else
+          @tmp.read(diff > length ? length : diff, buf)
+        end
+      end
+    end
+
+    # takes zero arguments for strict Rack::Lint compatibility, unlike IO#gets
+    def gets
+      @input or return @tmp.gets
+      nil == $/ and return read
+
+      line = nil
+      if @tmp.pos < @tmp.stat.size
+        line = @tmp.gets # cannot be nil here
+        $/ == line[-$/.size, $/.size] and return line
+
+        # half the line was already read, and the rest of it has not been read yet
+        if buf = @input.gets
+          @tmp.write(buf)
+          line << buf
+        else
+          @input = nil
+        end
+      elsif line = @input.gets
+        @tmp.write(line)
+      end
+
+      line
+    end
+
+    def each(&block)
+      while line = gets
+        yield line
+      end
+
+      self # Rack does not specify what the return value is here
+    end
+
+    def rewind
+      @tmp.rewind # Rack does not specify what the return value is here
+    end
+
+  private
+
+    # tees off a +length+ chunk of data from the input into the IO
+    # backing store as well as returning it.  +buf+ must be specified.
+    # returns nil if reading from the input returns nil
+    def tee(length, buf)
+      begin
+        if @size
+          left = @size - @tmp.stat.size
+          0 == left and return nil
+          if length >= left
+            @input.readpartial(left, buf) == left and @input = nil
+          elsif @input.nil?
+            return nil
+          else
+            @input.readpartial(length, buf)
+          end
+        else # ChunkedReader#readpartial just raises EOFError when done
+          @input.readpartial(length, buf)
+        end
+      rescue EOFError
+        return @input = nil
+      end
+      @tmp.write(buf)
+      buf
+    end
+
+  end
+end
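
TeeInput mirrors everything it reads from the socket into an unlinked Tempfile, so the first pass streams from the client while later read, gets, rewind and size calls are served from the backing store. A minimal sketch driving it directly, with a StringIO standing in for the client socket (it responds to readpartial):

    require 'stringio'
    require 'unicorn'

    body  = "hello world"
    input = Unicorn::TeeInput.new(StringIO.new(body), body.size, nil)
    input.read(5)   # => "hello" (teed into the Tempfile as it is read)
    input.rewind
    input.read      # => "hello world"
    input.size      # => 11
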
diff --git a/lib/unicorn/trailer_parser.rb b/lib/unicorn/trailer_parser.rb
new file mode 100644
index 0000000..9431331
--- /dev/null
+++ b/lib/unicorn/trailer_parser.rb
@@ -0,0 +1,52 @@
+# Copyright (c) 2009 Eric Wong
+# You can redistribute it and/or modify it under the same terms as Ruby.
+require 'unicorn'
+require 'unicorn/http11'
+
+# Eventually I should integrate this into HttpParser...
+module Unicorn
+  class TrailerParser
+
+    TR_FR = 'a-z-'.freeze
+    TR_TO = 'A-Z_'.freeze
+
+    # initializes HTTP trailer parser with acceptable +trailer+
+    def initialize(http_trailer)
+      @trailers = http_trailer.split(/\s*,\s*/).inject({}) { |hash, key|
+        hash[key.tr(TR_FR, TR_TO)] = true
+        hash
+      }
+    end
+
+    # Executes our TrailerParser on +data+ and modifies +env+.  This will
+    # shrink +data+ as it is being consumed.  Returns true if it has
+    # parsed all trailers, false if not.  It raises HttpParserError on
+    # parse failure or unknown headers.  It has slightly smaller limits
+    # than the C-based HTTP parser, but that should not be an issue in
+    # practice since Content-MD5 is probably the only legitimate use for it.
+    def execute!(env, data)
+      data.size > 0xffff and
+        raise HttpParserError, "trailer buffer too large: #{data.size} bytes"
+
+      begin
+        data.sub!(/\A([^\r]+)\r\n/, Z) or return false # need more data
+
+        key, val = $1.split(/:\s*/, 2)
+
+        key.size > 256 and
+          raise HttpParserError, "trailer key #{key.inspect} is too long"
+        val.size > 8192 and
+          raise HttpParserError, "trailer value #{val.inspect} is too long"
+
+        key.tr!(TR_FR, TR_TO)
+
+        @trailers.delete(key) or
+          raise HttpParserError, "unknown trailer: #{key.inspect}"
+        env["HTTP_#{key}"] = val
+
+        @trailers.empty? and return true
+      end while true
+    end
+
+  end
+end
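
TrailerParser is constructed from the Trailer request header and fed the buffered bytes that follow the final chunk; execute! consumes complete "Key: value\r\n" lines from +data+ and returns true once every declared trailer has been seen. The shape of the flow, mirroring test_trailer_parser.rb below:

    require 'unicorn/trailer_parser'

    tp  = Unicorn::TrailerParser.new('Content-MD5')
    env = {}
    tp.execute!(env, "Content-MD5: asdf".dup)       # => false, still needs the "\r\n"
    tp.execute!(env, "Content-MD5: asdf\r\n".dup)   # => true
    env['HTTP_CONTENT_MD5']                         # => "asdf"
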
diff --git a/local.mk.sample b/local.mk.sample
index 84bcf44..d218474 100644
--- a/local.mk.sample
+++ b/local.mk.sample
@@ -38,7 +38,7 @@ publish_doc:
 # Create gzip variants of the same timestamp as the original so nginx
 # "gzip_static on" can serve the gzipped versions directly.
 doc_gz: suf := html js css
-doc_gz: globs := $(addprefix doc/*.,$(suf)) $(addprefix doc/*/*.,$(suf))
-doc_gz: docs := $(wildcard $(globs))
+doc_gz: docs = $(shell find doc/ -regex '^.*\.\(html\|js\|css\)$$')
 doc_gz:
-        for i in $(docs); do gzip < $$i > $$i.gz; touch -r $$i $$i.gz; done
+        for i in $(docs); do \
+          gzip --rsyncable < $$i > $$i.gz; touch -r $$i $$i.gz; done
diff --git a/test/rails/test_rails.rb b/test/rails/test_rails.rb
index c7add20..e6f6a36 100644
--- a/test/rails/test_rails.rb
+++ b/test/rails/test_rails.rb
@@ -142,18 +142,24 @@ logger Logger.new('#{COMMON_TMP.path}')
         end
       end
     end
-    resp = `curl -isSfN -Ffile=@#{tmp.path} http://#@addr:#@port/foo/xpost`
-    assert $?.success?
-    resp = resp.split(/\r?\n/)
-    grepped = resp.grep(/^sha1: (.{40})/)
-    assert_equal 1, grepped.size
-    assert_equal(sha1.hexdigest, /^sha1: (.{40})/.match(grepped.first)[1])
-
-    grepped = resp.grep(/^Content-Type:\s+(.+)/i)
-    assert_equal 1, grepped.size
-    assert_match %r{^text/plain}, grepped.first.split(/\s*:\s*/)[1]
-
-    assert_equal 1, resp.grep(/^Status:/i).size
+
+    # fixed in Rack commit 44ed4640f077504a49b7f1cabf8d6ad7a13f6441,
+    # no released version of Rails or Rack has this fix
+    if RB_V[0] >= 1 && RB_V[1] >= 9
+      warn "multipart broken with Rack 1.0.0 and Rails 2.3.2.1 under 1.9"
+    else
+      resp = `curl -isSfN -Ffile=@#{tmp.path} http://#@addr:#@port/foo/xpost`
+      assert $?.success?
+      resp = resp.split(/\r?\n/)
+      grepped = resp.grep(/^sha1: (.{40})/)
+      assert_equal 1, grepped.size
+      assert_equal(sha1.hexdigest, /^sha1: (.{40})/.match(grepped.first)[1])
+
+      grepped = resp.grep(/^Content-Type:\s+(.+)/i)
+      assert_equal 1, grepped.size
+      assert_match %r{^text/plain}, grepped.first.split(/\s*:\s*/)[1]
+      assert_equal 1, resp.grep(/^Status:/i).size
+    end
 
     # make sure we can get 403 responses, too
     uri = URI.parse("http://#@addr:#@port/foo/xpost")
diff --git a/test/test_helper.rb b/test/test_helper.rb
index 787adbf..0f2f311 100644
--- a/test/test_helper.rb
+++ b/test/test_helper.rb
@@ -262,3 +262,29 @@ def wait_for_death(pid)
   end
   raise "PID:#{pid} never died!"
 end
+
+# executes +cmd+ and chunks its STDOUT
+def chunked_spawn(stdout, *cmd)
+  fork {
+    crd, cwr = IO.pipe
+    crd.binmode
+    cwr.binmode
+    crd.sync = cwr.sync = true
+
+    pid = fork {
+      STDOUT.reopen(cwr)
+      crd.close
+      cwr.close
+      exec(*cmd)
+    }
+    cwr.close
+    begin
+      buf = crd.readpartial(16384)
+      stdout.write("#{'%x' % buf.size}\r\n#{buf}")
+    rescue EOFError
+      stdout.write("0\r\n")
+      pid, status = Process.waitpid2(pid)
+      exit status.exitstatus
+    end while true
+  }
+end
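
chunked_spawn runs +cmd+ in a double fork and writes its stdout to +stdout+ with a hex chunk-size line before each block of data, much like the new test_dd case in test_chunked_reader.rb open-codes. A hedged usage sketch; client_socket here is an assumed name for any writable IO you already hold:

    # client_socket is assumed to be connected to something that understands
    # the chunked framing written by chunked_spawn
    pid = chunked_spawn(client_socket, 'dd', 'if=/dev/urandom', 'bs=16384', 'count=4')
    Process.waitpid(pid)   # reap the outer fork once the stream is done
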
diff --git a/test/unit/test_chunked_reader.rb b/test/unit/test_chunked_reader.rb
new file mode 100644
index 0000000..67fe43b
--- /dev/null
+++ b/test/unit/test_chunked_reader.rb
@@ -0,0 +1,180 @@
+require 'test/unit'
+require 'unicorn'
+require 'unicorn/http11'
+require 'tempfile'
+require 'io/nonblock'
+require 'digest/sha1'
+
+class TestChunkedReader < Test::Unit::TestCase
+
+  def setup
+    @env = {}
+    @rd, @wr = IO.pipe
+    @rd.binmode
+    @wr.binmode
+    @rd.sync = @wr.sync = true
+    @start_pid = $$
+  end
+
+  def teardown
+    return if $$ != @start_pid
+    @rd.close rescue nil
+    @wr.close rescue nil
+    begin
+      Process.wait
+    rescue Errno::ECHILD
+      break
+    end while true
+  end
+
+  def test_error
+    cr = bin_reader("8\r\nasdfasdf\r\n8\r\nasdfasdfa#{'a' * 1024}")
+    a = nil
+    assert_nothing_raised { a = cr.readpartial(8192) }
+    assert_equal 'asdfasdf', a
+    assert_nothing_raised { a = cr.readpartial(8192) }
+    assert_equal 'asdfasdf', a
+    assert_raises(Unicorn::HttpParserError) { cr.readpartial(8192) }
+  end
+
+  def test_eof1
+    cr = bin_reader("0\r\n")
+    assert_raises(EOFError) { cr.readpartial(8192) }
+  end
+
+  def test_eof2
+    cr = bin_reader("0\r\n\r\n")
+    assert_raises(EOFError) { cr.readpartial(8192) }
+  end
+
+  def test_readpartial1
+    cr = bin_reader("4\r\nasdf\r\n0\r\n")
+    assert_equal 'asdf', cr.readpartial(8192)
+    assert_raises(EOFError) { cr.readpartial(8192) }
+  end
+
+  def test_gets1
+    cr = bin_reader("4\r\nasdf\r\n0\r\n")
+    STDOUT.sync = true
+    assert_equal 'asdf', cr.gets
+    assert_raises(EOFError) { cr.readpartial(8192) }
+  end
+
+  def test_gets2
+    cr = bin_reader("4\r\nasd\n\r\n0\r\n\r\n")
+    assert_equal "asd\n", cr.gets
+    assert_nil cr.gets
+  end
+
+  def test_gets3
+    max = Unicorn::Const::CHUNK_SIZE * 2
+    str = ('a' * max).freeze
+    first = 5
+    last = str.size - first
+    cr = bin_reader(
+      "#{'%x' % first}\r\n#{str[0, first]}\r\n" \
+      "#{'%x' % last}\r\n#{str[-last, last]}\r\n" \
+      "0\r\n")
+    assert_equal str, cr.gets
+    assert_nil cr.gets
+  end
+
+  def test_readpartial_gets_mixed1
+    max = Unicorn::Const::CHUNK_SIZE * 2
+    str = ('a' * max).freeze
+    first = 5
+    last = str.size - first
+    cr = bin_reader(
+      "#{'%x' % first}\r\n#{str[0, first]}\r\n" \
+      "#{'%x' % last}\r\n#{str[-last, last]}\r\n" \
+      "0\r\n")
+    partial = cr.readpartial(16384)
+    assert String === partial
+
+    len = max - partial.size
+    assert_equal(str[-len, len], cr.gets)
+    assert_raises(EOFError) { cr.readpartial(1) }
+    assert_nil cr.gets
+  end
+
+  def test_gets_mixed_readpartial
+    max = 10
+    str = ("z\n" * max).freeze
+    first = 5
+    last = str.size - first
+    cr = bin_reader(
+      "#{'%x' % first}\r\n#{str[0, first]}\r\n" \
+      "#{'%x' % last}\r\n#{str[-last, last]}\r\n" \
+      "0\r\n")
+    assert_equal("z\n", cr.gets)
+    assert_equal("z\n", cr.gets)
+  end
+
+  def test_dd
+    cr = bin_reader("6\r\nhello\n\r\n")
+    tmp = Tempfile.new('test_dd')
+    tmp.sync = true
+
+    pid = fork {
+      crd, cwr = IO.pipe
+      crd.binmode
+      cwr.binmode
+      crd.sync = cwr.sync = true
+
+      pid = fork {
+        STDOUT.reopen(cwr)
+        crd.close
+        cwr.close
+        exec('dd', 'if=/dev/urandom', 'bs=93390', 'count=16')
+      }
+      cwr.close
+      begin
+        buf = crd.readpartial(16384)
+        tmp.write(buf)
+        @wr.write("#{'%x' % buf.size}\r\n#{buf}\r\n")
+      rescue EOFError
+        @wr.write("0\r\n\r\n")
+        Process.waitpid(pid)
+        exit 0
+      end while true
+    }
+    assert_equal "hello\n", cr.gets
+    sha1 = Digest::SHA1.new
+    buf = Unicorn::Z.dup
+    begin
+      cr.readpartial(16384, buf)
+      sha1.update(buf)
+    rescue EOFError
+      break
+    end while true
+
+    assert_nothing_raised { Process.waitpid(pid) }
+    sha1_file = Digest::SHA1.new
+    File.open(tmp.path, 'rb') { |fp|
+      while fp.read(16384, buf)
+        sha1_file.update(buf)
+      end
+    }
+    assert_equal sha1_file.hexdigest, sha1.hexdigest
+  end
+
+  def test_trailer
+    @env['HTTP_TRAILER'] = 'Content-MD5'
+    pid = fork { @wr.syswrite("Content-MD5: asdf\r\n") }
+    cr = bin_reader("8\r\nasdfasdf\r\n8\r\nasdfasdf\r\n0\r\n")
+    assert_equal 'asdfasdf', cr.readpartial(4096)
+    assert_equal 'asdfasdf', cr.readpartial(4096)
+    assert_raises(EOFError) { cr.readpartial(4096) }
+    pid, status = Process.waitpid2(pid)
+    assert status.success?
+    assert_equal 'asdf', @env['HTTP_CONTENT_MD5']
+  end
+
+private
+
+  def bin_reader(buf)
+    buf.force_encoding(Encoding::BINARY) if buf.respond_to?(:force_encoding)
+    Unicorn::ChunkedReader.new(@env, @rd, buf)
+  end
+
+end
diff --git a/test/unit/test_configurator.rb b/test/unit/test_configurator.rb
index 98f2db6..aa29f61 100644
--- a/test/unit/test_configurator.rb
+++ b/test/unit/test_configurator.rb
@@ -1,7 +1,9 @@
 require 'test/unit'
 require 'tempfile'
-require 'unicorn/configurator'
+require 'unicorn'
 
+TestStruct = Struct.new(
+  *(Unicorn::Configurator::DEFAULTS.keys + %w(listener_opts listeners)))
 class TestConfigurator < Test::Unit::TestCase
 
   def test_config_init
@@ -51,22 +53,23 @@ class TestConfigurator < Test::Unit::TestCase
 
   def test_config_defaults
     cfg = Unicorn::Configurator.new(:use_defaults => true)
-    assert_nothing_raised { cfg.commit!(self) }
+    test_struct = TestStruct.new
+    assert_nothing_raised { cfg.commit!(test_struct) }
     Unicorn::Configurator::DEFAULTS.each do |key,value|
-      assert_equal value, instance_variable_get("@#{key.to_s}")
+      assert_equal value, test_struct.__send__(key)
     end
   end
 
   def test_config_defaults_skip
     cfg = Unicorn::Configurator.new(:use_defaults => true)
     skip = [ :logger ]
-    assert_nothing_raised { cfg.commit!(self, :skip => skip) }
-    @logger = nil
+    test_struct = TestStruct.new
+    assert_nothing_raised { cfg.commit!(test_struct, :skip => skip) }
     Unicorn::Configurator::DEFAULTS.each do |key,value|
       next if skip.include?(key)
-      assert_equal value, instance_variable_get("@#{key.to_s}")
+      assert_equal value, test_struct.__send__(key)
     end
-    assert_nil @logger
+    assert_nil test_struct.logger
   end
 
   def test_listen_options
@@ -78,8 +81,9 @@ class TestConfigurator < Test::Unit::TestCase
     assert_nothing_raised do
       cfg = Unicorn::Configurator.new(:config_file => tmp.path)
     end
-    assert_nothing_raised { cfg.commit!(self) }
-    assert(listener_opts = instance_variable_get("@listener_opts"))
+    test_struct = TestStruct.new
+    assert_nothing_raised { cfg.commit!(test_struct) }
+    assert(listener_opts = test_struct.listener_opts)
     assert_equal expect, listener_opts[listener]
   end
 
@@ -94,9 +98,10 @@ class TestConfigurator < Test::Unit::TestCase
   end
 
   def test_after_fork_proc
+    test_struct = TestStruct.new
     [ proc { |a,b| }, Proc.new { |a,b| }, lambda { |a,b| } ].each do |my_proc|
-      Unicorn::Configurator.new(:after_fork => my_proc).commit!(self)
-      assert_equal my_proc, @after_fork
+      Unicorn::Configurator.new(:after_fork => my_proc).commit!(test_struct)
+      assert_equal my_proc, test_struct.after_fork
     end
   end
 
diff --git a/test/unit/test_http_parser.rb b/test/unit/test_http_parser.rb
index a158ebb..560f8d4 100644
--- a/test/unit/test_http_parser.rb
+++ b/test/unit/test_http_parser.rb
@@ -23,6 +23,7 @@ class HttpParserTest < Test::Unit::TestCase
     assert_equal 'GET', req['REQUEST_METHOD']
     assert_nil req['FRAGMENT']
     assert_equal '', req['QUERY_STRING']
+    assert_nil req[:http_body]
 
     parser.reset
     req.clear
@@ -41,6 +42,7 @@ class HttpParserTest < Test::Unit::TestCase
     assert_equal 'GET', req['REQUEST_METHOD']
     assert_nil req['FRAGMENT']
     assert_equal '', req['QUERY_STRING']
+    assert_nil req[:http_body]
   end
 
   def test_parse_server_host_default_port
@@ -49,6 +51,7 @@ class HttpParserTest < Test::Unit::TestCase
     assert parser.execute(req, "GET / HTTP/1.1\r\nHost: foo\r\n\r\n")
     assert_equal 'foo', req['SERVER_NAME']
     assert_equal '80', req['SERVER_PORT']
+    assert_nil req[:http_body]
   end
 
   def test_parse_server_host_alt_port
@@ -57,6 +60,7 @@ class HttpParserTest < Test::Unit::TestCase
     assert parser.execute(req, "GET / HTTP/1.1\r\nHost: foo:999\r\n\r\n")
     assert_equal 'foo', req['SERVER_NAME']
     assert_equal '999', req['SERVER_PORT']
+    assert_nil req[:http_body]
   end
 
   def test_parse_server_host_empty_port
@@ -65,6 +69,7 @@ class HttpParserTest < Test::Unit::TestCase
     assert parser.execute(req, "GET / HTTP/1.1\r\nHost: foo:\r\n\r\n")
     assert_equal 'foo', req['SERVER_NAME']
     assert_equal '80', req['SERVER_PORT']
+    assert_nil req[:http_body]
   end
 
   def test_parse_server_host_xfp_https
@@ -74,6 +79,7 @@ class HttpParserTest < Test::Unit::TestCase
                           "X-Forwarded-Proto: https\r\n\r\n")
     assert_equal 'foo', req['SERVER_NAME']
     assert_equal '443', req['SERVER_PORT']
+    assert_nil req[:http_body]
   end
 
   def test_parse_strange_headers
@@ -81,6 +87,7 @@ class HttpParserTest < Test::Unit::TestCase
     req = {}
     should_be_good = "GET / HTTP/1.1\r\naaaaaaaaaaaaa:++++++++++\r\n\r\n"
     assert parser.execute(req, should_be_good)
+    assert_nil req[:http_body]
 
     # ref: http://thread.gmane.org/gmane.comp.lang.ruby.mongrel.devel/37/focus=45
     # (note we got 'pen' mixed up with 'pound' in that thread,
@@ -104,6 +111,7 @@ class HttpParserTest < Test::Unit::TestCase
       req = {}
       sorta_safe = %(GET #{path} HTTP/1.1\r\n\r\n)
       assert parser.execute(req, sorta_safe)
+      assert_nil req[:http_body]
     end
   end
   
@@ -115,6 +123,7 @@ class HttpParserTest < Test::Unit::TestCase
     assert_raises(HttpParserError) { parser.execute(req, bad_http) }
     parser.reset
     assert(parser.execute({}, "GET / HTTP/1.0\r\n\r\n"))
+    assert_nil req[:http_body]
   end
 
   def test_piecemeal
@@ -134,6 +143,7 @@ class HttpParserTest < Test::Unit::TestCase
     assert_equal 'HTTP/1.1', req['SERVER_PROTOCOL']
     assert_nil req['FRAGMENT']
     assert_equal '', req['QUERY_STRING']
+    assert_nil req[:http_body]
   end
 
   # not common, but underscores do appear in practice
@@ -150,6 +160,7 @@ class HttpParserTest < Test::Unit::TestCase
     assert_equal 'under_score.example.com', req['HTTP_HOST']
     assert_equal 'under_score.example.com', req['SERVER_NAME']
     assert_equal '80', req['SERVER_PORT']
+    assert_nil req[:http_body]
   end
 
   def test_absolute_uri
@@ -243,6 +254,24 @@ class HttpParserTest < Test::Unit::TestCase
     assert_equal "", req[:http_body]
   end
 
+  def test_unknown_methods
+    %w(GETT HEADR XGET XHEAD).each { |m|
+      parser = HttpParser.new
+      req = {}
+      s = "#{m} /forums/1/topics/2375?page=1#posts-17408 HTTP/1.1\r\n\r\n"
+      ok = false
+      assert_nothing_raised do
+        ok = parser.execute(req, s)
+      end
+      assert ok
+      assert_equal '/forums/1/topics/2375?page=1', req['REQUEST_URI']
+      assert_equal 'posts-17408', req['FRAGMENT']
+      assert_equal 'page=1', req['QUERY_STRING']
+      assert_equal "", req[:http_body]
+      assert_equal m, req['REQUEST_METHOD']
+    }
+  end
+
   def test_fragment_in_uri
     parser = HttpParser.new
     req = {}
@@ -255,6 +284,7 @@ class HttpParserTest < Test::Unit::TestCase
     assert_equal '/forums/1/topics/2375?page=1', req['REQUEST_URI']
     assert_equal 'posts-17408', req['FRAGMENT']
     assert_equal 'page=1', req['QUERY_STRING']
+    assert_nil req[:http_body]
   end
 
   # lame random garbage maker
diff --git a/test/unit/test_request.rb b/test/unit/test_request.rb
index 0bfff7d..139fc82 100644
--- a/test/unit/test_request.rb
+++ b/test/unit/test_request.rb
@@ -16,10 +16,11 @@ class RequestTest < Test::Unit::TestCase
 
   class MockRequest < StringIO
     alias_method :readpartial, :sysread
+    alias_method :read_nonblock, :sysread
   end
 
   def setup
-    @request = HttpRequest.new(Logger.new($stderr))
+    @request = HttpRequest.new
     @app = lambda do |env|
       [ 200, { 'Content-Length' => '0', 'Content-Type' => 'text/plain' }, [] ]
     end
@@ -149,7 +150,11 @@ class RequestTest < Test::Unit::TestCase
     assert_nothing_raised { env = @request.read(client) }
     assert ! env.include?(:http_body)
     assert_equal length, env['rack.input'].size
-    count.times { assert_equal buf, env['rack.input'].read(bs) }
+    count.times {
+      tmp = env['rack.input'].read(bs)
+      tmp << env['rack.input'].read(bs - tmp.size) if tmp.size != bs
+      assert_equal buf, tmp
+    }
     assert_nil env['rack.input'].read(bs)
     assert_nothing_raised { env['rack.input'].rewind }
     assert_nothing_raised { res = @lint.call(env) }
diff --git a/test/unit/test_server.rb b/test/unit/test_server.rb
index 742b240..22b9934 100644
--- a/test/unit/test_server.rb
+++ b/test/unit/test_server.rb
@@ -12,6 +12,8 @@ class TestHandler
 
   def call(env)
   #   response.socket.write("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n\r\nhello!\n")
+    while env['rack.input'].read(4096)
+    end
     [200, { 'Content-Type' => 'text/plain' }, ['hello!\n']]
    end
 end
@@ -152,9 +154,18 @@ class WebServerTest < Test::Unit::TestCase
 
   def test_file_streamed_request
     body = "a" * (Unicorn::Const::MAX_BODY * 2)
-    long = "GET /test HTTP/1.1\r\nContent-length: #{body.length}\r\n\r\n" + body
+    long = "PUT /test HTTP/1.1\r\nContent-length: #{body.length}\r\n\r\n" + body
     do_test(long, Unicorn::Const::CHUNK_SIZE * 2 -400)
   end
 
+  def test_file_streamed_request_bad_method
+    body = "a" * (Unicorn::Const::MAX_BODY * 2)
+    long = "GET /test HTTP/1.1\r\nContent-length: #{body.length}\r\n\r\n" + body
+    assert_raises(EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,
+                  Errno::EBADF) {
+      do_test(long, Unicorn::Const::CHUNK_SIZE * 2 -400)
+    }
+  end
+
 end
 
diff --git a/test/unit/test_signals.rb b/test/unit/test_signals.rb
index ef66ed6..8ac4707 100644
--- a/test/unit/test_signals.rb
+++ b/test/unit/test_signals.rb
@@ -158,6 +158,8 @@ class SignalsTest < Test::Unit::TestCase
 
   def test_request_read
     app = lambda { |env|
+      while env['rack.input'].read(4096)
+      end
       [ 200, {'Content-Type'=>'text/plain', 'X-Pid'=>Process.pid.to_s}, [] ]
     }
     redirect_test_io { @server = HttpServer.new(app, @server_opts).start }
diff --git a/test/unit/test_trailer_parser.rb b/test/unit/test_trailer_parser.rb
new file mode 100644
index 0000000..840e9ad
--- /dev/null
+++ b/test/unit/test_trailer_parser.rb
@@ -0,0 +1,52 @@
+require 'test/unit'
+require 'unicorn'
+require 'unicorn/http11'
+require 'unicorn/trailer_parser'
+
+class TestTrailerParser < Test::Unit::TestCase
+
+  def test_basic
+    tp = Unicorn::TrailerParser.new('Content-MD5')
+    env = {}
+    assert ! tp.execute!(env, "Content-MD5: asdf")
+    assert env.empty?
+    assert tp.execute!(env, "Content-MD5: asdf\r\n")
+    assert_equal 'asdf', env['HTTP_CONTENT_MD5']
+    assert_equal 1, env.size
+  end
+
+  def test_invalid_trailer
+    tp = Unicorn::TrailerParser.new('Content-MD5')
+    env = {}
+    assert_raises(Unicorn::HttpParserError) {
+      tp.execute!(env, "Content-MD: asdf\r\n")
+    }
+    assert env.empty?
+  end
+
+  def test_multiple_trailer
+    tp = Unicorn::TrailerParser.new('Foo,Bar')
+    env = {}
+    buf = "Bar: a\r\nFoo: b\r\n"
+    assert tp.execute!(env, buf)
+    assert_equal 'a', env['HTTP_BAR']
+    assert_equal 'b', env['HTTP_FOO']
+  end
+
+  def test_too_big_key
+    tp = Unicorn::TrailerParser.new('Foo,Bar')
+    env = {}
+    buf = "Bar#{'a' * 1024}: a\r\nFoo: b\r\n"
+    assert_raises(Unicorn::HttpParserError) { tp.execute!(env, buf) }
+    assert env.empty?
+  end
+
+  def test_too_big_value
+    tp = Unicorn::TrailerParser.new('Foo,Bar')
+    env = {}
+    buf = "Bar: #{'a' * (1024 * 1024)}: a\r\nFoo: b\r\n"
+    assert_raises(Unicorn::HttpParserError) { tp.execute!(env, buf) }
+    assert env.empty?
+  end
+
+end
diff --git a/test/unit/test_upload.rb b/test/unit/test_upload.rb
index 9ef3ed7..dad5825 100644
--- a/test/unit/test_upload.rb
+++ b/test/unit/test_upload.rb
@@ -1,5 +1,6 @@
 # Copyright (c) 2009 Eric Wong
 require 'test/test_helper'
+require 'digest/md5'
 
 include Unicorn
 
@@ -18,29 +19,33 @@ class UploadTest < Test::Unit::TestCase
     @sha1 = Digest::SHA1.new
     @sha1_app = lambda do |env|
       input = env['rack.input']
-      resp = { :pos => input.pos, :size => input.size, :class => input.class }
+      resp = {}
 
-      # sysread
       @sha1.reset
-      begin
-        loop { @sha1.update(input.sysread(@bs)) }
-      rescue EOFError
+      while buf = input.read(@bs)
+        @sha1.update(buf)
       end
       resp[:sha1] = @sha1.hexdigest
 
-      # read
-      input.sysseek(0) if input.respond_to?(:sysseek)
+      # rewind and read again
       input.rewind
       @sha1.reset
-      loop {
-        buf = input.read(@bs) or break
+      while buf = input.read(@bs)
         @sha1.update(buf)
-      }
+      end
 
       if resp[:sha1] == @sha1.hexdigest
         resp[:sysread_read_byte_match] = true
       end
 
+      if expect_size = env['HTTP_X_EXPECT_SIZE']
+        if expect_size.to_i == input.size
+          resp[:expect_size_match] = true
+        end
+      end
+      resp[:size] = input.size
+      resp[:content_md5] = env['HTTP_CONTENT_MD5']
+
       [ 200, @hdr.merge({'X-Resp' => resp.inspect}), [] ]
     end
   end
@@ -54,7 +59,7 @@ class UploadTest < Test::Unit::TestCase
     start_server(@sha1_app)
     sock = TCPSocket.new(@addr, @port)
     sock.syswrite("PUT / HTTP/1.0\r\nContent-Length: #{length}\r\n\r\n")
-    @count.times do
+    @count.times do |i|
       buf = @random.sysread(@bs)
       @sha1.update(buf)
       sock.syswrite(buf)
@@ -63,10 +68,34 @@ class UploadTest < Test::Unit::TestCase
     assert_equal "HTTP/1.1 200 OK", read[0]
     resp = eval(read.grep(/^X-Resp: /).first.sub!(/X-Resp: /, ''))
     assert_equal length, resp[:size]
-    assert_equal 0, resp[:pos]
     assert_equal @sha1.hexdigest, resp[:sha1]
   end
 
+  def test_put_content_md5
+    md5 = Digest::MD5.new
+    start_server(@sha1_app)
+    sock = TCPSocket.new(@addr, @port)
+    sock.syswrite("PUT / HTTP/1.0\r\nTransfer-Encoding: chunked\r\n" \
+                  "Trailer: Content-MD5\r\n\r\n")
+    @count.times do |i|
+      buf = @random.sysread(@bs)
+      @sha1.update(buf)
+      md5.update(buf)
+      sock.syswrite("#{'%x' % buf.size}\r\n")
+      sock.syswrite(buf << "\r\n")
+    end
+    sock.syswrite("0\r\n")
+
+    content_md5 = [ md5.digest! ].pack('m').strip.freeze
+    sock.syswrite("Content-MD5: #{content_md5}\r\n")
+    read = sock.read.split(/\r\n/)
+    assert_equal "HTTP/1.1 200 OK", read[0]
+    resp = eval(read.grep(/^X-Resp: /).first.sub!(/X-Resp: /, ''))
+    assert_equal length, resp[:size]
+    assert_equal @sha1.hexdigest, resp[:sha1]
+    assert_equal content_md5, resp[:content_md5]
+  end
+
   def test_put_trickle_small
     @count, @bs = 2, 128
     start_server(@sha1_app)
@@ -85,42 +114,7 @@ class UploadTest < Test::Unit::TestCase
     assert_equal "HTTP/1.1 200 OK", read[0]
     resp = eval(read.grep(/^X-Resp: /).first.sub!(/X-Resp: /, ''))
     assert_equal length, resp[:size]
-    assert_equal 0, resp[:pos]
     assert_equal @sha1.hexdigest, resp[:sha1]
-    assert_equal StringIO, resp[:class]
-  end
-
-  def test_tempfile_unlinked
-    spew_path = lambda do |env|
-      if orig = env['HTTP_X_OLD_PATH']
-        assert orig != env['rack.input'].path
-      end
-      assert_equal length, env['rack.input'].size
-      [ 200, @hdr.merge('X-Tempfile-Path' => env['rack.input'].path), [] ]
-    end
-    start_server(spew_path)
-    sock = TCPSocket.new(@addr, @port)
-    sock.syswrite("PUT / HTTP/1.0\r\nContent-Length: #{length}\r\n\r\n")
-    @count.times { sock.syswrite(' ' * @bs) }
-    path = sock.read[/^X-Tempfile-Path: (\S+)/, 1]
-    sock.close
-
-    # send another request to ensure we hit the next request
-    sock = TCPSocket.new(@addr, @port)
-    sock.syswrite("PUT / HTTP/1.0\r\nX-Old-Path: #{path}\r\n" \
-                  "Content-Length: #{length}\r\n\r\n")
-    @count.times { sock.syswrite(' ' * @bs) }
-    path2 = sock.read[/^X-Tempfile-Path: (\S+)/, 1]
-    sock.close
-    assert path != path2
-
-    # make sure the next request comes in so the unlink got processed
-    sock = TCPSocket.new(@addr, @port)
-    sock.syswrite("GET ?lasdf\r\n\r\n\r\n\r\n")
-    sock.sysread(4096) rescue nil
-    sock.close
-
-    assert ! File.exist?(path)
   end
 
   def test_put_keepalive_truncates_small_overwrite
@@ -136,75 +130,31 @@ class UploadTest < Test::Unit::TestCase
     sock.syswrite('12345') # write 4 bytes more than we expected
     @sha1.update('1')
 
-    read = sock.read.split(/\r\n/)
+    buf = sock.readpartial(4096)
+    while buf !~ /\r\n\r\n/
+      buf << sock.readpartial(4096)
+    end
+    read = buf.split(/\r\n/)
     assert_equal "HTTP/1.1 200 OK", read[0]
     resp = eval(read.grep(/^X-Resp: /).first.sub!(/X-Resp: /, ''))
     assert_equal to_upload, resp[:size]
-    assert_equal 0, resp[:pos]
     assert_equal @sha1.hexdigest, resp[:sha1]
   end
 
   def test_put_excessive_overwrite_closed
-    start_server(lambda { |env| [ 200, @hdr, [] ] })
-    sock = TCPSocket.new(@addr, @port)
-    buf = ' ' * @bs
-    sock.syswrite("PUT / HTTP/1.0\r\nContent-Length: #{length}\r\n\r\n")
-    @count.times { sock.syswrite(buf) }
-    assert_raise(Errno::ECONNRESET, Errno::EPIPE) do
-      ::Unicorn::Const::CHUNK_SIZE.times { sock.syswrite(buf) }
-    end
-  end
-
-  def test_put_handler_closed_file
-    nr = '0'
     start_server(lambda { |env|
-      env['rack.input'].close
-      resp = { :nr => nr.succ! }
-      [ 200, @hdr.merge({ 'X-Resp' => resp.inspect}), [] ]
+      while env['rack.input'].read(65536); end
+      [ 200, @hdr, [] ]
     })
     sock = TCPSocket.new(@addr, @port)
     buf = ' ' * @bs
     sock.syswrite("PUT / HTTP/1.0\r\nContent-Length: #{length}\r\n\r\n")
-    @count.times { sock.syswrite(buf) }
-    read = sock.read.split(/\r\n/)
-    assert_equal "HTTP/1.1 200 OK", read[0]
-    resp = eval(read.grep(/^X-Resp: /).first.sub!(/X-Resp: /, ''))
-    assert_equal '1', resp[:nr]
-
-    # server still alive?
-    sock = TCPSocket.new(@addr, @port)
-    sock.syswrite("GET / HTTP/1.0\r\n\r\n")
-    read = sock.read.split(/\r\n/)
-    assert_equal "HTTP/1.1 200 OK", read[0]
-    resp = eval(read.grep(/^X-Resp: /).first.sub!(/X-Resp: /, ''))
-    assert_equal '2', resp[:nr]
-  end
 
-  def test_renamed_file_not_closed
-    start_server(lambda { |env|
-      new_tmp = Tempfile.new('unicorn_test')
-      input = env['rack.input']
-      File.rename(input.path, new_tmp.path)
-      resp = {
-        :inode => input.stat.ino,
-        :size => input.stat.size,
-        :new_tmp => new_tmp.path,
-        :old_tmp => input.path,
-      }
-      [ 200, @hdr.merge({ 'X-Resp' => resp.inspect}), [] ]
-    })
-    sock = TCPSocket.new(@addr, @port)
-    buf = ' ' * @bs
-    sock.syswrite("PUT / HTTP/1.0\r\nContent-Length: #{length}\r\n\r\n")
     @count.times { sock.syswrite(buf) }
-    read = sock.read.split(/\r\n/)
-    assert_equal "HTTP/1.1 200 OK", read[0]
-    resp = eval(read.grep(/^X-Resp: /).first.sub!(/X-Resp: /, ''))
-    new_tmp = File.open(resp[:new_tmp])
-    assert_equal resp[:inode], new_tmp.stat.ino
-    assert_equal length, resp[:size]
-    assert ! File.exist?(resp[:old_tmp])
-    assert_equal resp[:size], new_tmp.stat.size
+    assert_raise(Errno::ECONNRESET, Errno::EPIPE) do
+      ::Unicorn::Const::CHUNK_SIZE.times { sock.syswrite(buf) }
+    end
+    assert_equal "HTTP/1.1 200 OK\r\n", sock.gets
   end
 
   # Despite reading numerous articles and inspecting the 1.9.1-p0 C
@@ -233,7 +183,6 @@ class UploadTest < Test::Unit::TestCase
     resp = `curl -isSfN -T#{tmp.path} http://#@addr:#@port/`
     assert $?.success?, 'curl ran OK'
     assert_match(%r!\b#{sha1}\b!, resp)
-    assert_match(/Tempfile/, resp)
     assert_match(/sysread_read_byte_match/, resp)
 
     # small StringIO path
@@ -249,10 +198,87 @@ class UploadTest < Test::Unit::TestCase
     resp = `curl -isSfN -T#{tmp.path} http://#@addr:#@port/`
     assert $?.success?, 'curl ran OK'
     assert_match(%r!\b#{sha1}\b!, resp)
-    assert_match(/StringIO/, resp)
     assert_match(/sysread_read_byte_match/, resp)
   end
 
+  def test_chunked_upload_via_curl
+    # POSIX doesn't require all of these to be present on a system
+    which('curl') or return
+    which('sha1sum') or return
+    which('dd') or return
+
+    start_server(@sha1_app)
+
+    tmp = Tempfile.new('dd_dest')
+    assert(system("dd", "if=#{@random.path}", "of=#{tmp.path}",
+                        "bs=#{@bs}", "count=#{@count}"),
+           "dd #@random to #{tmp}")
+    sha1_re = %r!\b([a-f0-9]{40})\b!
+    sha1_out = `sha1sum #{tmp.path}`
+    assert $?.success?, 'sha1sum ran OK'
+
+    assert_match(sha1_re, sha1_out)
+    sha1 = sha1_re.match(sha1_out)[1]
+    cmd = "curl -H 'X-Expect-Size: #{tmp.size}' --tcp-nodelay \
+           -isSf --no-buffer -T- " \
+          "http://#@addr:#@port/"
+    resp = Tempfile.new('resp')
+    resp.sync = true
+
+    rd, wr = IO.pipe
+    wr.sync = rd.sync = true
+    pid = fork {
+      STDIN.reopen(rd)
+      rd.close
+      wr.close
+      STDOUT.reopen(resp)
+      exec cmd
+    }
+    rd.close
+
+    tmp.rewind
+    @count.times { |i|
+      wr.write(tmp.read(@bs))
+      sleep(rand / 10) if 0 == i % 8
+    }
+    wr.close
+    pid, status = Process.waitpid2(pid)
+
+    resp.rewind
+    resp = resp.read
+    assert status.success?, 'curl ran OK'
+    assert_match(%r!\b#{sha1}\b!, resp)
+    assert_match(/sysread_read_byte_match/, resp)
+    assert_match(/expect_size_match/, resp)
+  end
+
+  def test_curl_chunked_small
+    # POSIX doesn't require all of these to be present on a system
+    which('curl') or return
+    which('sha1sum') or return
+    which('dd') or return
+
+    start_server(@sha1_app)
+
+    tmp = Tempfile.new('dd_dest')
+    # small StringIO path
+    assert(system("dd", "if=#{@random.path}", "of=#{tmp.path}",
+                        "bs=1024", "count=1"),
+           "dd #@random to #{tmp}")
+    sha1_re = %r!\b([a-f0-9]{40})\b!
+    sha1_out = `sha1sum #{tmp.path}`
+    assert $?.success?, 'sha1sum ran OK'
+
+    assert_match(sha1_re, sha1_out)
+    sha1 = sha1_re.match(sha1_out)[1]
+    resp = `curl -H 'X-Expect-Size: #{tmp.size}' --tcp-nodelay \
+            -isSf --no-buffer -T- http://#@addr:#@port/ < #{tmp.path}`
+    assert $?.success?, 'curl ran OK'
+    assert_match(%r!\b#{sha1}\b!, resp)
+    assert_match(/sysread_read_byte_match/, resp)
+    assert_match(/expect_size_match/, resp)
+  end
+
   private
 
   def length