author    Eric Wong <normalperson@yhbt.net>  2009-11-24 01:50:26 -0800
committer Eric Wong <normalperson@yhbt.net>  2009-11-24 22:51:48 -0800
commit    24248e78de684fbac374be216892a0b4050a1693 (patch)
tree      0a5d9cb87fd23415a44b72c065770ff5e6a72c3a /lib/rainbows/ev_thread_core.rb
parent    9cc509bda610fa5ca8c642cdcf480835b8dfc468 (diff)
Exposing a synchronous interface is too complicated for too
little gain.  Given the following factors:

* basic ThreadSpawn performs admirably under REE 1.8
* both ThreadSpawn and Revactor work well under 1.9
* few applications/requests actually need a streaming "rack.input"

We've decided it's not worth the effort to attempt to support
a streaming rack.input at the moment.  Instead, the new
RevThreadSpawn model performs much better for most applications
under Ruby 1.9.
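
As a usage note (not part of this change): the concurrency model is
selected in the Rainbows!/Unicorn config file.  A minimal sketch with
illustrative values only:

    # rainbows.conf.rb -- example only, values are assumptions
    Rainbows! do
      use :RevThreadSpawn      # the model recommended above
      worker_connections 100   # example value, tune per application
    end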
Diffstat (limited to 'lib/rainbows/ev_thread_core.rb')
-rw-r--r--  lib/rainbows/ev_thread_core.rb  80
1 file changed, 0 insertions, 80 deletions
diff --git a/lib/rainbows/ev_thread_core.rb b/lib/rainbows/ev_thread_core.rb
deleted file mode 100644
index e132f18..0000000
--- a/lib/rainbows/ev_thread_core.rb
+++ /dev/null
@@ -1,80 +0,0 @@
-# -*- encoding: binary -*-
-require 'thread' # for Queue
-require 'rainbows/ev_core'
-
-module Rainbows
-
-  # base module for mixed Thread + evented models like RevThreadSpawn
-  module EvThreadCore
-    include EvCore
-
-    def post_init
-      super
-      @lock = Mutex.new
-      @thread = nil
-    end
-
-    # we pass ourselves off as a Socket to Unicorn::TeeInput and this
-    # is the only method Unicorn::TeeInput requires from the socket
-    def readpartial(length, buf = "")
-      # we must modify the original buffer if there was one
-      length == 0 and return buf.replace("")
-
-      # wait on the main loop to feed us
-      while @tbuf.size == 0
-        @tbuf.write(@state.pop)
-        resume
-      end
-      buf.replace(@tbuf.read(length))
-    end
-
-    def app_spawn(input)
-      begin
-        @thread.nil? or @thread.join # only one thread per connection
-        env = @env.dup
-        alive, headers = @hp.keepalive?, @hp.headers?
-        @thread = Thread.new(self) do |client|
-          begin
-            env[REMOTE_ADDR] = @remote_addr
-            env[RACK_INPUT] = input || TeeInput.new(client, env, @hp, @buf)
-            response = APP.call(env.update(RACK_DEFAULTS))
-            if 100 == response.first.to_i
-              write(EXPECT_100_RESPONSE)
-              env.delete(HTTP_EXPECT)
-              response = APP.call(env)
-            end
-
-            alive &&= G.alive
-            out = [ alive ? CONN_ALIVE : CONN_CLOSE ] if headers
-            response_write(response, out)
-          rescue => e
-            handle_error(e) rescue nil
-          end
-        end
-        if alive # in case we pipeline
-          @hp.reset
-          redo if @hp.headers(@env.clear, @buf)
-        end
-      end while false
-    end
-
-    def on_read(data)
-      case @state
-      when :headers
-        @hp.headers(@env, @buf << data) or return
-        if 0 == @hp.content_length
-          app_spawn(HttpRequest::NULL_IO) # common case
-        else # nil or len > 0
-          @state, @tbuf = Queue.new, ::IO::Buffer.new
-          app_spawn(nil)
-        end
-      when Queue
-        pause
-        @state << data
-      end
-      rescue => e
-        handle_error(e)
-    end
-
-  end
-end
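
For readers skimming the removed code: the core of EvThreadCore was a
producer/consumer handoff in which the event loop pushes raw socket
data onto a Queue (on_read) while the per-connection application
thread blocks in readpartial until data arrives.  A minimal,
self-contained sketch of that pattern follows; it is not Rainbows!
code, and all names in it are made up for illustration:

    # standalone sketch of the Queue-based handoff between an event
    # loop and an application thread -- hypothetical names throughout
    require 'thread'

    queue    = Queue.new   # plays the role of @state once headers are parsed
    buffered = String.new  # plays the role of @tbuf (the IO::Buffer)

    # readpartial-style read used by the application thread: block on
    # the queue until the event-loop side feeds us, then return up to
    # `length` bytes; nil signals that the connection was closed
    read_partial = lambda do |length|
      while buffered.empty?
        chunk = queue.pop
        return nil if chunk.nil?   # sentinel pushed by the event loop
        buffered << chunk
      end
      buffered.slice!(0, length)
    end

    app_thread = Thread.new do
      while (data = read_partial.call(4))
        puts "app thread consumed #{data.inspect}"
      end
    end

    # the "event loop" side: on_read(data) just enqueues raw chunks,
    # then pushes nil when the client disconnects
    ["GET ", "/ HTT", "P/1.1\r\n"].each { |chunk| queue << chunk }
    queue << nil
    app_thread.join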