# -*- encoding: binary -*-
require 'uri'

##
# Used for reading deprecated "bigfile" objects generated by the deprecated
# mogtool(1) utility. This is for reading legacy data and not recommended for
# new projects. MogileFS itself is capable of storing standalone objects
# of arbitrary length (as long as the underlying database and underlying
# filesystem on the DAV devices accept them).
module MogileFS::Bigfile
  # VALID_TYPES = %w(file tarball partition).map { |x| x.freeze }.freeze

  # returns a big_info hash if successful
  def bigfile_stat(key)
    bigfile_parse_info(get_file_data(key))
  end
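
  # A minimal usage sketch (hypothetical key and client; assumes this module
  # is mixed into a connected MogileFS::MogileFS client, since it relies on
  # get_file_data):
  #
  #   info = client.bigfile_stat("_big_info:nightly_backup")
  #   info[:type]       # => "file"
  #   info[:compressed] # => false
  #   info[:parts][1]   # => { :bytes => ..., :md5 => "...", :paths => [...] }
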
  # returns total bytes written and the big_info hash if successful, raises
  # an exception if not.  wr_io is expected to be an IO-like object capable
  # of receiving the write method.
  def bigfile_write(key, wr_io, opts = { :verify => false })
    info = bigfile_stat(key)
    total = 0
    t = @get_file_data_timeout

    # we only decode raw zlib-deflated streams that mogtool (unfortunately)
    # generates.  tarballs and gzip(1) are up to the application to
    # decompress.
    if info[:compressed] || opts[:verify]
      wr_io = MogileFS::Bigfile::Filter.new(wr_io, info, opts)
    end

    info[:parts].each_with_index do |part, part_nr|
      next if part_nr == 0 # info[:parts][0] is always empty

      begin
        sock = MogileFS::HTTPReader.first(part[:paths], t)
      rescue
        # part[:paths] may no longer be valid due to rebalancing, however
        # we can get_paths on "<key>,<part_nr>" and retry if all paths fail
        part_key = "#{key.sub(/^_big_info:/, '')},#{part_nr}"
        paths = get_paths(part_key)
        paths.empty? and
          raise MogileFS::Backend::NoDevices,
                "no device for key=#{part_key.inspect}", []
        sock = MogileFS::HTTPReader.first(paths, t)
      end

      begin
        w = MogileFS::X.copy_stream(sock, wr_io)
      ensure
        sock.close
      end
      wr_io.respond_to?(:md5_check!) and wr_io.md5_check!(part[:md5])
      total += w
    end
    wr_io.flush
    total += wr_io.flushed_bytes if wr_io.respond_to?(:flushed_bytes)

    [ total, info ]
  end
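
  # A minimal usage sketch (hypothetical key and destination path; any object
  # responding to +write+ works as wr_io):
  #
  #   File.open("/tmp/restored.img", "wb") do |fp|
  #     total, info = client.bigfile_write("_big_info:nightly_backup", fp,
  #                                        :verify => true)
  #     # total is the number of bytes written, info the parsed big_info hash
  #   end
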
  ##
  # parses the contents of a _big_info: string or IO object
  def bigfile_parse_info(info) # :nodoc:
    rv = { :parts => [] }
    info.each_line do |line|
      line.chomp!
      case line
      when /^(des|type|filename)\s+(.+)$/
        rv[$1.to_sym] = $2
      when /^compressed\s+([01])$/
        rv[:compressed] = ($1 == '1')
      when /^(chunks|size)\s+(\d+)$/
        rv[$1.to_sym] = $2.to_i
      when /^part\s+(\d+)\s+bytes=(\d+)\s+md5=(.+)\s+paths:\s+(.+)$/
        rv[:parts][$1.to_i] = {
          :bytes => $2.to_i,
          :md5 => $3.downcase,
          :paths => $4.split(/\s*,\s*/),
        }
      end
    end

    rv
  end
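
  # For illustration, a (hypothetical) "part" line such as:
  #
  #   part 1 bytes=1024 md5=0F343B0931126A20F133D67C2B018A3B paths: http://dev1/0/000/000/0000000001.fid
  #
  # parses to (note the md5 is downcased):
  #
  #   rv[:parts][1] == {
  #     :bytes => 1024,
  #     :md5   => "0f343b0931126a20f133d67c2b018a3b",
  #     :paths => %w(http://dev1/0/000/000/0000000001.fid)
  #   }
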
end
require "mogilefs/bigfile/filter"
__END__
# Copied from mogtool:
# http://code.sixapart.com/svn/mogilefs/utils/mogtool, r1221
# this is a temporary file that we delete when we're done recording all chunks
_big_pre:<key>
    starttime=UNIXTIMESTAMP

# when done, we write the _info file and delete the _pre.
_big_info:<key>
    des Cow's ljdb backup as of 2004-11-17
    type { partition, file, tarball }
    compressed {0, 1}
    filename ljbinlog.305.gz
    partblocks 234324324324

    part 1 <bytes> <md5hex>
    part 2 <bytes> <md5hex>
    part 3 <bytes> <md5hex>
    part 4 <bytes> <md5hex>
    part 5 <bytes> <md5hex>

_big:<key>,<n>
_big:<key>,<n>
_big:<key>,<n>
Receipt format:

BEGIN MOGTOOL RECEIPT
type partition
des Foo
compressed foo
part 1 bytes=23423432 md5=2349823948239423984 paths: http://dev5/2/23/23/.fid, http://dev6/23/423/4/324.fid
part 2 bytes=23423432 md5=2349823948239423984 paths: http://dev5/2/23/23/.fid, http://dev6/23/423/4/324.fid
part 3 bytes=23423432 md5=2349823948239423984 paths: http://dev5/2/23/23/.fid, http://dev6/23/423/4/324.fid
part 4 bytes=23423432 md5=2349823948239423984 paths: http://dev5/2/23/23/.fid, http://dev6/23/423/4/324.fid
END RECEIPT