From e3d3f3be3dc41426d4715574d367e63ba185f177 Mon Sep 17 00:00:00 2001
From: Eric Wong
Date: Thu, 26 Nov 2009 21:14:52 -0800
Subject: split Fiber::Base into its own file

While Revactor uses Fiber::Queue in AppPool, we don't want/need to
expose the rest of our Fiber stuff to it since it can lead to lost
Fibers if misused.  This includes the Rainbows::Fiber.sleep method,
which only works inside the Fiber{Spawn,Pool} models, and the
Rainbows::Fiber::IO wrapper class.
---
 lib/rainbows/fiber.rb | 103 ++------------------------------------------------
 1 file changed, 3 insertions(+), 100 deletions(-)

(limited to 'lib/rainbows/fiber.rb')

diff --git a/lib/rainbows/fiber.rb b/lib/rainbows/fiber.rb
index 1927a78..c30bd7a 100644
--- a/lib/rainbows/fiber.rb
+++ b/lib/rainbows/fiber.rb
@@ -1,108 +1,11 @@
 # -*- encoding: binary -*-
 require 'fiber'
-require 'rainbows/fiber/io'
 
 module Rainbows
+
+  # core module for all things that use Fibers in Rainbows!
   module Fiber
+    autoload :Base, 'rainbows/fiber/base'
     autoload :Queue, 'rainbows/fiber/queue'
-
-    RD = {}
-    WR = {}
-    ZZ = {}
-
-    # puts the current Fiber into uninterruptible sleep for at least
-    # +seconds+.  Unlike Kernel#sleep, it is not possible to sleep
-    # indefinitely to be woken up (nobody wants that in a web server,
-    # right?).
-    def self.sleep(seconds)
-      ZZ[::Fiber.current] = Time.now + seconds
-      ::Fiber.yield
-    end
-
-    module Base
-      include Rainbows::Base
-
-      # the scheduler method that powers both FiberSpawn and FiberPool
-      # concurrency models.  It times out idle clients and attempts to
-      # schedule ones that were blocked on I/O.  At most it'll sleep
-      # for one second (returned by the schedule_sleepers method) which
-      # will cause it.
-      def schedule(&block)
-        ret = begin
-          G.tick
-          RD.keys.each { |c| c.f.resume } # attempt to time out idle clients
-          t = schedule_sleepers
-          Kernel.select(RD.keys.concat(LISTENERS), WR.keys, nil, t) or return
-        rescue Errno::EINTR
-          retry
-        rescue Errno::EBADF, TypeError
-          LISTENERS.compact!
-          raise
-        end or return
-
-        # active writers first, then _all_ readers for keepalive timeout
-        ret[1].concat(RD.keys).each { |c| c.f.resume }
-
-        # accept is an expensive syscall, filter out listeners we don't want
-        (ret.first & LISTENERS).each(&block)
-      end
-
-      # wakes up any sleepers that need to be woken and
-      # returns an interval to IO.select on
-      def schedule_sleepers
-        max = nil
-        now = Time.now
-        ZZ.delete_if { |fib, time|
-          if now >= time
-            fib.resume
-            now = Time.now
-          else
-            max = time
-            false
-          end
-        }
-        max.nil? || max > (now + 1) ? 1 : max - now
-      end
-
-      def process_client(client)
-        G.cur += 1
-        io = client.to_io
-        buf = client.read_timeout or return
-        hp = HttpParser.new
-        env = {}
-        alive = true
-        remote_addr = TCPSocket === io ? io.peeraddr.last : LOCALHOST
-
-        begin # loop
-          while ! hp.headers(env, buf)
-            buf << (client.read_timeout or return)
-          end
-
-          env[RACK_INPUT] = 0 == hp.content_length ?
-                            HttpRequest::NULL_IO : TeeInput.new(client, env, hp, buf)
-          env[REMOTE_ADDR] = remote_addr
-          response = APP.call(env.update(RACK_DEFAULTS))
-
-          if 100 == response.first.to_i
-            client.write(EXPECT_100_RESPONSE)
-            env.delete(HTTP_EXPECT)
-            response = APP.call(env)
-          end
-
-          alive = hp.keepalive? && G.alive
-          out = [ alive ? CONN_ALIVE : CONN_CLOSE ] if hp.headers?
-          HttpResponse.write(client, response, out)
-        end while alive and hp.reset.nil? and env.clear
-      rescue => e
-        handle_error(io, e)
-      ensure
-        G.cur -= 1
-        RD.delete(client)
-        WR.delete(client)
-        ZZ.delete(client.f)
-        io.close unless io.closed?
-      end
-
-    end
   end
 end
--
cgit v1.2.3-24-ge0c7
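
The split relies on Module#autoload: Rainbows::Fiber::Base and Rainbows::Fiber::Queue
are only required when their constants are first referenced, so Revactor's AppPool,
which only touches Fiber::Queue, never pulls in the scheduler, Rainbows::Fiber.sleep,
or the Fiber::IO wrapper.  Below is a minimal, self-contained sketch of that
lazy-loading behavior; the Demo module and file names are hypothetical and not part
of the Rainbows! source.

    require 'tmpdir'

    Dir.mktmpdir do |dir|
      # hypothetical stand-ins for rainbows/fiber/base.rb and rainbows/fiber/queue.rb
      File.write(File.join(dir, 'demo_base.rb'),  "module Demo; Base  = :base;  end\n")
      File.write(File.join(dir, 'demo_queue.rb'), "module Demo; Queue = :queue; end\n")

      Demo = Module.new
      Demo.autoload(:Base,  File.join(dir, 'demo_base.rb'))
      Demo.autoload(:Queue, File.join(dir, 'demo_queue.rb'))

      Demo::Queue                       # first reference requires demo_queue.rb
      p $LOADED_FEATURES.grep(/demo_/)  # only demo_queue.rb appears; demo_base.rb was never loaded
    end

Since nothing is loaded until a constant is used, misbehaving callers can no longer
reach the Fiber{Spawn,Pool}-only pieces by accident just because they required
rainbows/fiber for the queue.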