# File lib/s3/s3_interface.rb, line 49
# Returns the benchmarking toolset shared by all S3 interfaces.
def self.bench
  @@bench
end
# File lib/s3/s3_interface.rb, line 57
# Returns the benchmark dataset covering S3 service calls.
def self.bench_s3
  @@bench.service
end
# File lib/s3/s3_interface.rb, line 53
# Returns the benchmark dataset covering XML parsing.
def self.bench_xml
  @@bench.xml
end
# File lib/s3/s3_interface.rb, line 43
# Symbol under which this interface registers its HTTP connection.
def self.connection_name
  :s3_connection
end
Creates new RightS3 instance.
s3 = Aws::S3Interface.new('1E3GDYEOGFJPIT7XXXXXX','hgTHt68JY07JKUY08ftHYtERkjgtfERn57XXXXXX', {:multi_thread => true, :logger => Logger.new('/tmp/x.log')}) #=> #<Aws::S3Interface:0xb7b3c27c>
Params is a hash:
{:server => 's3.amazonaws.com' # Amazon service host: 's3.amazonaws.com'(default) :port => 443 # Amazon service port: 80 or 443(default) :protocol => 'https' # Amazon service protocol: 'http' or 'https'(default) :connection_mode => :default # options are :default (will use best known safe (as in won't need explicit close) option, may change in the future) :per_request (opens and closes a connection on every request) :single (one thread across entire app) :per_thread (one connection per thread) :logger => Logger Object} # Logger instance: logs to STDOUT if omitted }
# File lib/s3/s3_interface.rb, line 78
# Creates a new S3 interface.
#
# aws_access_key_id/aws_secret_access_key default to the AWS_ACCESS_KEY_ID /
# AWS_SECRET_ACCESS_KEY environment variables. When the S3_URL environment
# variable is set, host/port/path/scheme are taken from it; otherwise the
# DEFAULT_* constants are used. The URL is parsed once instead of four times.
def initialize(aws_access_key_id=nil, aws_secret_access_key=nil, params={})
  s3_url = ENV['S3_URL'] && URI.parse(ENV['S3_URL'])
  init({:name             => 'S3',
        :default_host     => s3_url ? s3_url.host   : DEFAULT_HOST,
        :default_port     => s3_url ? s3_url.port   : DEFAULT_PORT,
        :default_service  => s3_url ? s3_url.path   : DEFAULT_SERVICE,
        :default_protocol => s3_url ? s3_url.scheme : DEFAULT_PROTOCOL},
       aws_access_key_id     || ENV['AWS_ACCESS_KEY_ID'],
       aws_secret_access_key || ENV['AWS_SECRET_ACCESS_KEY'],
       params)
end
Retrieve bucket location
s3.create_bucket('my-awesome-bucket-us') #=> true puts s3.bucket_location('my-awesome-bucket-us') #=> '' (Amazon's default value assumed) s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true puts s3.bucket_location('my-awesome-bucket-eu') #=> 'EU'
# File lib/s3/s3_interface.rb, line 239
# Retrieves the location constraint of +bucket+ ('' for US classic, 'EU', ...).
def bucket_location(bucket, headers={})
  request = generate_rest_request('GET', headers.merge(:url => "#{bucket}?location"))
  request_info(request, S3BucketLocationParser.new)
rescue
  on_exception
end
Removes all keys from bucket. Returns true
or an exception.
s3.clear_bucket('my_awesome_bucket') #=> true
# File lib/s3/s3_interface.rb, line 924
# Removes all keys from +bucket+, page by page. Returns true or an exception.
# Fixed: removed stray debug output (`p results` / `p key`) that printed every
# listing page and key to STDOUT.
def clear_bucket(bucket)
  incrementally_list_bucket(bucket) do |results|
    results[:contents].each { |key| delete(bucket, key[:key]) }
  end
  true
rescue
  on_exception
end
Completes a multipart upload, returning true or an exception docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadComplete.html
Clients must specify the uploadId (obtained from the #initiate_multipart call) and the reassembly manifest hash which specifies the each partNumber corresponding etag (obtained from the #upload_part call):
s3.complete_multipart('my_awesome_bucket', 'hugeObject', "WL7dk8sqbtk3Rg641HHWaNeG6RxI",
{"1"=>"a54357aff0632cce46d942af68356b38", "2"=>"0c78aef83f66abc1fa1e8477f296d394"}) => true
See docs.amazonwebservices.com/AmazonS3/latest/dev/mpuoverview.html
# File lib/s3/s3_interface.rb, line 588
# Completes a multipart upload. Returns true or an exception.
#
# manifest_hash maps each part number to the ETag returned by upload_part,
# e.g. {"1" => "a54357...", "2" => "0c78ae..."}. The ETag values must be
# wrapped in literal double quotes in the XML body, so they are escaped here
# (the original source had unescaped quotes, which broke the string literal).
def complete_multipart(bucket, key, uploadId, manifest_hash, headers={})
  parts_string = manifest_hash.inject("") do |res, (part, etag)|
    res << "<Part><PartNumber>#{part}</PartNumber><ETag>\"#{etag}\"</ETag></Part>"
  end
  data = "<CompleteMultipartUpload>#{parts_string}</CompleteMultipartUpload>"
  req_hash = generate_rest_request('POST', headers.merge(
    :url  => "#{bucket}/#{CGI::escape key}?uploadId=#{uploadId}",
    :data => data))
  request_info(req_hash, RightHttp2xxParser.new)
rescue
  on_exception
end
Copy an object.
directive: :copy - copy meta-headers from source (default value) :replace - replace meta-headers by passed ones # copy a key with meta-headers s3.copy('b1', 'key1', 'b1', 'key1_copy') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:25:22.000Z"} # copy a key, overwrite meta-headers s3.copy('b1', 'key2', 'b1', 'key2_copy', :replace, 'x-amz-meta-family'=>'Woho555!') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:26:22.000Z"}
see: docs.amazonwebservices.com/AmazonS3/2006-03-01/UsingCopyingObjects.html
http://docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTObjectCOPY.html
# File lib/s3/s3_interface.rb, line 778
# Copies an object. directive is :copy (keep source meta-headers, default)
# or :replace (use the passed meta-headers instead).
# Fixed: no longer mutates the caller's +headers+ hash when adding the
# x-amz-metadata-directive / x-amz-copy-source headers.
def copy(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
  dest_key ||= src_key
  headers = headers.merge(
    'x-amz-metadata-directive' => directive.to_s.upcase,
    'x-amz-copy-source'        => "#{src_bucket}/#{CGI::escape src_key}")
  req_hash = generate_rest_request('PUT', headers.merge(:url => "#{dest_bucket}/#{CGI::escape dest_key}"))
  request_info(req_hash, S3CopyParser.new)
rescue
  on_exception
end
Creates new bucket. Returns true
or an exception.
# create a bucket at American server s3.create_bucket('my-awesome-bucket-us') #=> true # create a bucket at European server s3.create_bucket('my-awesome-bucket-eu', :location => :eu) #=> true
# File lib/s3/s3_interface.rb, line 216
# Creates a new bucket. Returns true or an exception.
# Pass :location in +headers+ (e.g. :eu) to set a location constraint.
def create_bucket(bucket, headers={})
  data = nil
  unless Aws::Utils.blank?(headers[:location])
    location = headers[:location].to_s
    location.upcase! if location == 'eu'
    data = "<CreateBucketConfiguration><LocationConstraint>#{location}</LocationConstraint></CreateBucketConfiguration>"
  end
  req_hash = generate_rest_request('PUT', headers.merge(:url => bucket, :data => data))
  request_info(req_hash, RightHttp2xxParser.new)
rescue Exception => e
  # If the bucket already exists AWS returns an error for the location
  # constraint interface. Treat BucketAlreadyOwnedByYou as success.
  e.is_a?(Aws::AwsError) && e.message.include?('BucketAlreadyOwnedByYou') ? true : on_exception
end
Generates link for 'CreateBucket'.
s3.create_bucket_link('my_awesome_bucket') #=> url string
# File lib/s3/s3_interface.rb, line 1012
# Generates a signed link for 'CreateBucket'.
def create_bucket_link(bucket, expires=nil, headers={})
  generate_link('PUT', headers.merge(:url => bucket), expires)
rescue
  on_exception
end
Deletes key. Returns true
or an exception.
s3.delete('my_awesome_bucket', 'log/curent/1.log') #=> true
# File lib/s3/s3_interface.rb, line 758
# Deletes a key from +bucket+. Returns true or an exception.
def delete(bucket, key='', headers={})
  request = generate_rest_request('DELETE', headers.merge(:url => "#{bucket}/#{CGI::escape key}"))
  request_info(request, RightHttp2xxParser.new)
rescue
  on_exception
end
Deletes a bucket. The bucket must be empty! Returns true
or an
exception.
s3.delete_bucket('my_awesome_bucket') #=> true
See also: #force_delete_bucket method
# File lib/s3/s3_interface.rb, line 283
# Deletes a bucket (which must be empty). Returns true or an exception.
# See also: force_delete_bucket.
def delete_bucket(bucket, headers={})
  request = generate_rest_request('DELETE', headers.merge(:url => bucket))
  request_info(request, RightHttp2xxParser.new)
rescue
  on_exception
end
Generates link for 'DeleteBucket'.
s3.delete_bucket_link('my_awesome_bucket') #=> url string
# File lib/s3/s3_interface.rb, line 1022
# Generates a signed link for 'DeleteBucket'.
def delete_bucket_link(bucket, expires=nil, headers={})
  generate_link('DELETE', headers.merge(:url => bucket), expires)
rescue
  on_exception
end
Deletes all keys where the 'folder_key' may be assumed as 'folder' name. Returns an array of string keys that have been deleted.
s3.list_bucket('my_awesome_bucket').map{|key_data| key_data[:key]} #=> ['test','test/2/34','test/3','test1','test1/logs'] s3.delete_folder('my_awesome_bucket','test') #=> ['test','test/2/34','test/3']
# File lib/s3/s3_interface.rb, line 950
# Deletes all keys under the 'folder' named +folder_key+.
# Returns the deleted keys grouped per listing page.
# Fixed: the key-matching regex literal was garbled in extraction
# (restored to /^#{folder_key}($|#{separator}.*)/), and the caller's
# +folder_key+ string is no longer mutated by chomp!.
# NOTE(review): folder_key is interpolated unescaped into the regex —
# keys containing regex metacharacters may over/under-match; confirm intent.
def delete_folder(bucket, folder_key, separator='/')
  folder_key = folder_key.chomp(separator)
  allkeys = []
  incrementally_list_bucket(bucket, 'prefix' => folder_key) do |results|
    keys = results[:contents].map { |s3_key|
      s3_key[:key][/^#{folder_key}($|#{separator}.*)/] ? s3_key[:key] : nil
    }.compact
    keys.each { |key| delete(bucket, key) }
    allkeys << keys
  end
  allkeys
rescue
  on_exception
end
Generates link for 'DeleteObject'.
s3.delete_link('my_awesome_bucket',key) #=> url string
# File lib/s3/s3_interface.rb, line 1084
# Generates a signed link for 'DeleteObject'.
def delete_link(bucket, key, expires=nil, headers={})
  generate_link('DELETE', headers.merge(:url => "#{bucket}/#{Utils::URLencode key}"), expires)
rescue
  on_exception
end
Deletes all keys in bucket then deletes bucket. Returns true
or an exception.
s3.force_delete_bucket('my_awesome_bucket')
# File lib/s3/s3_interface.rb, line 938
# Deletes every key in +bucket+, then deletes the bucket itself.
# Returns true or an exception.
def force_delete_bucket(bucket)
  clear_bucket(bucket)
  delete_bucket(bucket)
rescue
  on_exception
end
Retrieves object data from Amazon. Returns a hash
or an
exception.
s3.get('my_awesome_bucket', 'log/curent/1.log') #=> {:object => "Ola-la!", :headers => {"last-modified" => "Wed, 23 May 2007 09:08:04 GMT", "content-type" => "", "etag" => "\"000000000096f4ee74bc4596443ef2a4\"", "date" => "Wed, 23 May 2007 09:08:03 GMT", "x-amz-id-2" => "ZZZZZZZZZZZZZZZZZZZZ1HJXZoehfrS4QxcxTdNGldR7w/FVqblP50fU8cuIMLiu", "x-amz-meta-family" => "Woho556!", "x-amz-request-id" => "0000000C246D770C", "server" => "AmazonS3", "content-length" => "7"}}
If a block is provided, yields incrementally to the block as the response is read. For large responses, this function is ideal as the response can be 'streamed'. The hash containing header fields is still returned. Example: foo = File.new('./chunder.txt', File::CREAT|File::RDWR) rhdr = s3.get('aws-test', 'Cent5V1_7_1.img.part.00') do |chunk|
foo.write(chunk)
end foo.close
# File lib/s3/s3_interface.rb, line 661
# Retrieves object data from S3 as {:object => ..., :headers => {...}}.
# When a block is given, the response body is yielded to it in chunks
# ('streamed') instead of being accumulated in memory.
def get(bucket, key, headers={}, &block)
  request = generate_rest_request('GET', headers.merge(:url => "#{bucket}/#{CGI::escape key}"))
  request_info(request, S3HttpResponseBodyParser.new, &block)
rescue
  on_exception
end
Retrieves the ACL (access control policy) for a bucket or object. Returns a hash of headers and xml doc with ACL data. See: docs.amazonwebservices.com/AmazonS3/2006-03-01/RESTAccessPolicy.html.
s3.get_acl('my_awesome_bucket', 'log/curent/1.log') #=> {:headers => {"x-amz-id-2"=>"B3BdDMDUz+phFF2mGBH04E46ZD4Qb9HF5PoPHqDRWBv+NVGeA3TOQ3BkVvPBjgxX", "content-type"=>"application/xml;charset=ISO-8859-1", "date"=>"Wed, 23 May 2007 09:40:16 GMT", "x-amz-request-id"=>"B183FA7AB5FBB4DD", "server"=>"AmazonS3", "transfer-encoding"=>"chunked"}, :object => "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\"><Owner> <ID>16144ab2929314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a</ID><DisplayName>root</DisplayName></Owner> <AccessControlList><Grant><Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID> 16144ab2929314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a</ID><DisplayName>root</DisplayName></Grantee> <Permission>FULL_CONTROL</Permission></Grant></AccessControlList></AccessControlPolicy>" }
# File lib/s3/s3_interface.rb, line 829
# Retrieves the ACL (access control policy) for a bucket or object.
# Returns a hash of headers plus the raw ACL XML document.
def get_acl(bucket, key='', headers={})
  key = Aws::Utils.blank?(key) ? '' : "/#{CGI::escape key}"
  request = generate_rest_request('GET', headers.merge(:url => "#{bucket}#{key}?acl"))
  request_info(request, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
Generates link for 'GetACL'.
s3.get_acl_link('my_awesome_bucket',key) #=> url string
# File lib/s3/s3_interface.rb, line 1095
# Generates a signed link for 'GetACL'.
def get_acl_link(bucket, key='', headers={})
  generate_link('GET', headers.merge(:url => "#{bucket}/#{Utils::URLencode key}?acl"))
rescue
  on_exception
end
Retrieves the ACL (access control policy) for a bucket or object. Returns a hash of {:owner, :grantees}
s3.get_acl_parse('my_awesome_bucket', 'log/curent/1.log') #=> { :grantees=> { "16...2a"=> { :display_name=>"root", :permissions=>["FULL_CONTROL"], :attributes=> { "xsi:type"=>"CanonicalUser", "xmlns:xsi"=>"http://www.w3.org/2001/XMLSchema-instance"}}, "http://acs.amazonaws.com/groups/global/AllUsers"=> { :display_name=>"AllUsers", :permissions=>["READ"], :attributes=> { "xsi:type"=>"Group", "xmlns:xsi"=>"http://www.w3.org/2001/XMLSchema-instance"}}}, :owner=> { :id=>"16..2a", :display_name=>"root"}}
# File lib/s3/s3_interface.rb, line 859
# Retrieves and parses the ACL for a bucket or object.
# Returns {:owner => {...}, :grantees => {grantee_id_or_uri => {...}}}.
# Fixed: the display-name fallback regex literal was garbled in extraction
# (restored to /[^\/]*$/ — the last path component of the grantee URI), and
# the local used for the grantee hash key no longer shadows the +key+
# parameter already repurposed for the request URL.
def get_acl_parse(bucket, key='', headers={})
  key = Aws::Utils.blank?(key) ? '' : "/#{CGI::escape key}"
  req_hash = generate_rest_request('GET', headers.merge(:url => "#{bucket}#{key}?acl"))
  acl = request_info(req_hash, S3AclParser.new(:logger => @logger))
  result = {}
  result[:owner]    = acl[:owner]
  result[:grantees] = {}
  acl[:grantees].each do |grantee|
    grantee_key = grantee[:id] || grantee[:uri]
    if result[:grantees].key?(grantee_key)
      result[:grantees][grantee_key][:permissions] << grantee[:permissions]
    else
      result[:grantees][grantee_key] = {
        :display_name => grantee[:display_name] || grantee[:uri].to_s[/[^\/]*$/],
        :permissions  => grantee[:permissions].lines.to_a,
        :attributes   => grantee[:attributes]}
    end
  end
  result
rescue
  on_exception
end
Retrieves the ACL (access control policy) for a bucket. Returns a hash of headers and xml doc with ACL data.
# File lib/s3/s3_interface.rb, line 892
# Retrieves the ACL for a bucket. Returns headers plus the ACL XML document.
def get_bucket_acl(bucket, headers={})
  get_acl(bucket, '', headers)
rescue
  on_exception
end
Generates link for 'GetBucketACL'.
s3.get_bucket_acl_link('my_awesome_bucket') #=> url string
# File lib/s3/s3_interface.rb, line 1115
# Generates a signed link for 'GetBucketACL'.
def get_bucket_acl_link(bucket, headers={})
  get_acl_link(bucket, '', headers)
rescue
  on_exception
end
# File lib/s3/s3_interface.rb, line 905
# Retrieves the policy document attached to +bucket+.
def get_bucket_policy(bucket)
  request = generate_rest_request('GET', :url => "#{bucket}?policy")
  request_info(request, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
Generates link for 'GetObject'.
if a bucket name complies with virtual hosting naming then returns a link with the bucket as a part of host name:
s3.get_link('my-awesome-bucket',key) #=> https://my-awesome-bucket.s3.amazonaws.com:443/asia%2Fcustomers?Signature=nh7...
otherwise returns an old style link (the bucket is a part of path):
s3.get_link('my_awesome_bucket',key) #=> https://s3.amazonaws.com:443/my_awesome_bucket/asia%2Fcustomers?Signature=QAO...
see docs.amazonwebservices.com/AmazonS3/2006-03-01/VirtualHosting.html
# File lib/s3/s3_interface.rb, line 1064
# Generates a signed link for 'GetObject'. For DNS-compatible bucket names
# the link generator embeds the bucket in the host name; otherwise the
# bucket is part of the path.
def get_link(bucket, key, expires=nil, headers={})
  generate_link('GET', headers.merge(:url => "#{bucket}/#{Utils::URLencode key.to_s}"), expires)
rescue
  on_exception
end
Retrieves the logging configuration for a bucket. Returns a hash of {:enabled, :targetbucket, :targetprefix}
s3.interface.get_logging_parse(:bucket => "asset_bucket")
=> {:enabled=>true, :targetbucket=>"mylogbucket", :targetprefix=>"loggylogs/"}
# File lib/s3/s3_interface.rb, line 253
# Retrieves the logging configuration for a bucket.
# Returns {:enabled, :targetbucket, :targetprefix}. Mandatory param: :bucket.
def get_logging_parse(params)
  Utils.mandatory_arguments([:bucket], params)
  Utils.allow_only([:bucket, :headers], params)
  params[:headers] ||= {}
  request = generate_rest_request('GET', params[:headers].merge(:url => "#{params[:bucket]}?logging"))
  request_info(request, S3LoggingParser.new)
rescue
  on_exception
end
Retrieves object data only (headers are omitted). Returns
string
or an exception.
s3.get('my_awesome_bucket', 'log/curent/1.log') #=> 'Ola-la!'
# File lib/s3/s3_interface.rb, line 967
# Retrieves only the object body (headers are discarded). Returns a string.
def get_object(bucket, key, headers={})
  get(bucket, key, headers)[:object]
rescue
  on_exception
end
Retrieves object metadata. Returns a hash
of
http_response_headers.
s3.head('my_awesome_bucket', 'log/curent/1.log') #=> {"last-modified" => "Wed, 23 May 2007 09:08:04 GMT", "content-type" => "", "etag" => "\"000000000096f4ee74bc4596443ef2a4\"", "date" => "Wed, 23 May 2007 09:08:03 GMT", "x-amz-id-2" => "ZZZZZZZZZZZZZZZZZZZZ1HJXZoehfrS4QxcxTdNGldR7w/FVqblP50fU8cuIMLiu", "x-amz-meta-family" => "Woho556!", "x-amz-request-id" => "0000000C246D770C", "server" => "AmazonS3", "content-length" => "7"}
# File lib/s3/s3_interface.rb, line 747
# Retrieves object metadata only. Returns the HTTP response headers as a hash.
def head(bucket, key, headers={})
  request = generate_rest_request('HEAD', headers.merge(:url => "#{bucket}/#{CGI::escape key}"))
  request_info(request, S3HttpResponseHeadParser.new)
rescue
  on_exception
end
Generates link for 'HeadObject'.
s3.head_link('my_awesome_bucket',key) #=> url string
# File lib/s3/s3_interface.rb, line 1074
# Generates a signed link for 'HeadObject'.
def head_link(bucket, key, expires=nil, headers={})
  generate_link('HEAD', headers.merge(:url => "#{bucket}/#{Utils::URLencode key}"), expires)
rescue
  on_exception
end
Incrementally list the contents of a bucket. Yields the following hash to a block:
s3.incrementally_list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, delimiter=>'' }) yields { :name => 'bucketname', :prefix => 'subfolder/', :marker => 'fileN.jpg', :max_keys => 234, :delimiter => '/', :is_truncated => true, :next_marker => 'fileX.jpg', :contents => [ { :key => "file1", :last_modified => "2007-05-18T07:00:59.000Z", :e_tag => "000000000059075b964b07152d234b70", :size => 3, :storage_class => "STANDARD", :owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a", :owner_display_name => "root" }, { :key, ...}, ... {:key, ...} ] :common_prefixes => [ "prefix1", "prefix2", ..., "prefixN" ] }
# File lib/s3/s3_interface.rb, line 344
# Incrementally lists the contents of a bucket, yielding each page of
# results (a hash with :contents, :common_prefixes, :is_truncated, ...)
# to the block until the listing is exhausted or 'max-keys' is reached.
# Returns true or an exception.
def incrementally_list_bucket(bucket, options={}, headers={}, &block)
  # Symbolize option keys without touching the caller's hash.
  internal_options = options.inject({}) { |acc, (k, v)| acc[k.to_sym] = v; acc }
  begin
    internal_bucket = bucket.dup
    unless internal_options.nil? || internal_options.empty?
      query = internal_options.map { |k, v| "#{k}=#{CGI::escape v.to_s}" }.join('&')
      internal_bucket << '?' << query
    end
    req_hash = generate_rest_request('GET', headers.merge(:url => internal_bucket))
    response = request_info(req_hash, S3ImprovedListBucketParser.new(:logger => @logger))
    there_are_more_keys = response[:is_truncated]
    if there_are_more_keys
      # Resume the next page after the last key seen and shrink the
      # remaining 'max-keys' budget by what this page returned.
      internal_options[:marker] = decide_marker(response)
      page_total = response[:contents].length + response[:common_prefixes].length
      internal_options[:'max-keys'] -= page_total if internal_options[:'max-keys']
    end
    yield response
  end while there_are_more_keys && under_max_keys(internal_options)
  true
rescue
  on_exception
end
Initiates a multipart upload and returns an upload ID or an exception docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadInitiate.html
s3.initiate_multipart('my_awesome_bucket', 'hugeObject') => WL7dk8sqbtk3Rg641HHWaNeG6RxI4fzS8V0YvuQAfs5Hbk6WNZOU1z_AhGv
The returned uploadId must be retained for use in uploading parts; see docs.amazonwebservices.com/AmazonS3/latest/dev/mpuoverview.html
# File lib/s3/s3_interface.rb, line 537
# Initiates a multipart upload. Returns the upload ID (needed by
# upload_part / complete_multipart) or an exception.
def initiate_multipart(bucket, key, headers={})
  request = generate_rest_request('POST', headers.merge(:url => "#{bucket}/#{CGI::escape key}?uploads"))
  request_info(request, S3InitiateMultipartUploadParser.new)
rescue
  on_exception
end
docs.amazonwebservices.com/AmazonS3/2006-03-01/index.html?BucketRestrictions.html
# File lib/s3/s3_interface.rb, line 128
# Returns true when +bucket_name+ is compatible with S3 virtual hosting
# (3..63 chars; dot-separated components of lowercase letters, digits and
# hyphens, not starting or ending with a hyphen); nil otherwise.
# Fixed: the component regex literal was garbled in extraction
# (restored to /^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/).
def is_dns_bucket?(bucket_name)
  bucket_name = bucket_name.to_s
  return nil unless (3..63) === bucket_name.size
  bucket_name.split('.').each do |component|
    return nil unless component[/^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/]
  end
  true
end
Returns an array of customer's buckets. Each item is a hash
.
s3.list_all_my_buckets #=> [{:owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a", :owner_display_name => "root", :name => "bucket_name", :creation_date => "2007-04-19T18:47:43.000Z"}, ..., {...}]
# File lib/s3/s3_interface.rb, line 202
# Returns an array of the account's buckets; each item is a hash with
# :owner_id, :owner_display_name, :name and :creation_date.
def list_all_my_buckets(headers={})
  request = generate_rest_request('GET', headers.merge(:url => ''))
  request_info(request, S3ListAllMyBucketsParser.new(:logger => @logger))
rescue
  on_exception
end
Generates link for 'ListAllMyBuckets'.
s3.list_all_my_buckets_link #=> url string
# File lib/s3/s3_interface.rb, line 1002
# Generates a signed link for 'ListAllMyBuckets'.
def list_all_my_buckets_link(expires=nil, headers={})
  generate_link('GET', headers.merge(:url => ''), expires)
rescue
  on_exception
end
Returns an array of bucket's keys. Each array item (key data) is a
hash
.
s3.list_bucket('my_awesome_bucket', { 'prefix'=>'t', 'marker'=>'', 'max-keys'=>5, delimiter=>'' }) #=> [{:key => "test1", :last_modified => "2007-05-18T07:00:59.000Z", :owner_id => "00000000009314cc309ffe736daa2b264357476c7fea6efb2c3347ac3ab2792a", :owner_display_name => "root", :e_tag => "000000000059075b964b07152d234b70", :storage_class => "STANDARD", :size => 3, :service=> {'is_truncated' => false, 'prefix' => "t", 'marker' => "", 'name' => "my_awesome_bucket", 'max-keys' => "5"}, ..., {...}]
# File lib/s3/s3_interface.rb, line 306
# Returns an array of the bucket's keys (hashes with :key, :last_modified,
# :e_tag, :size, ...). Options such as 'prefix', 'marker', 'max-keys' and
# 'delimiter' are appended as query parameters.
# Fixed: the caller's +bucket+ string is no longer mutated by appending the
# query string in place (consistent with incrementally_list_bucket, which
# dups first).
def list_bucket(bucket, options={}, headers={})
  url = bucket.dup
  unless options.nil? || options.empty?
    url << '?'
    url << options.map { |k, v| "#{k}=#{CGI::escape v.to_s}" }.join('&')
  end
  req_hash = generate_rest_request('GET', headers.merge(:url => url))
  request_info(req_hash, S3ListBucketParser.new(:logger => @logger))
rescue
  on_exception
end
Generates link for 'ListBucket'.
s3.list_bucket_link('my_awesome_bucket') #=> url string
# File lib/s3/s3_interface.rb, line 1032
# Generates a signed link for 'ListBucket'.
# Fixed: the caller's +bucket+ string is no longer mutated by appending the
# query string in place.
def list_bucket_link(bucket, options=nil, expires=nil, headers={})
  url = bucket.dup
  unless options.nil? || options.empty?
    url << '?'
    url << options.map { |k, v| "#{k}=#{CGI::escape v.to_s}" }.join('&')
  end
  generate_link('GET', headers.merge(:url => url), expires)
rescue
  on_exception
end
List parts of a multipart upload, returning a hash or an exception docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadListParts.html
response looks like:
{ :bucket=>"mariosFoo_awesome_test_bucket_000A1", :key=>"mariosFoosegmented", :upload_id=>"jQKX7JdJBTrbvLn9apUPIXkt14FHdp6nMZVg--" :parts=> [ {:part_number=>"1", :last_modified=>"2012-10-30T15:06:28.000Z", :etag=>"\"78f871f6f01673a4aca05b1f8e26df08\"", :size=>"6276589"}, {:part_number=>"2", :last_modified=>"2012-10-30T15:08:22.000Z", :etag=>"\"e7b94a1e959ca066026da3ec63aad321\"", :size=>"7454095"}] }
Clients must specify the uploadId (obtained from the #initiate_multipart call)
s3.list_parts('my_awesome_bucket', 'hugeObject', "WL7dk8sqbtk3Rg641HHWaNeG6RxI",
See docs.amazonwebservices.com/AmazonS3/latest/dev/mpuoverview.html
# File lib/s3/s3_interface.rb, line 629
# Lists the uploaded parts of a multipart upload. Returns a hash with
# :bucket, :key, :upload_id and :parts, or an exception.
# Fixed: added the `rescue on_exception` handler used by every other
# request method in this interface for consistent error reporting.
def list_parts(bucket, key, uploadId, headers={})
  req_hash = generate_rest_request('GET', headers.merge(:url => "#{bucket}/#{CGI::escape key}?uploadId=#{uploadId}"))
  request_info(req_hash, S3ListMultipartPartsParser.new)
rescue
  on_exception
end
Move an object.
directive: :copy - copy meta-headers from source (default value) :replace - replace meta-headers by passed ones # move bucket1/key1 to bucket1/key2 s3.move('bucket1', 'key1', 'bucket1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:27:22.000Z"} # move bucket1/key1 to bucket2/key2 with new meta-headers assignment s3.copy('bucket1', 'key1', 'bucket2', 'key2', :replace, 'x-amz-meta-family'=>'Woho555!') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:28:22.000Z"}
# File lib/s3/s3_interface.rb, line 798
# Moves an object: copies it, then deletes the original (unless source and
# destination are identical). Returns the copy result.
def move(src_bucket, src_key, dest_bucket, dest_key=nil, directive=:copy, headers={})
  copy_result = copy(src_bucket, src_key, dest_bucket, dest_key, directive, headers)
  # Only delete the original when it differs from the destination.
  same_object = (src_bucket == dest_bucket) && (src_key == dest_key)
  delete(src_bucket, src_key) unless same_object
  copy_result
end
Saves object to Amazon. Returns true
or an exception. Any
header starting with AMAZON_METADATA_PREFIX
is considered user metadata. It will be stored with the object and returned
when you retrieve the object. The total size of the HTTP request, not
including the body, must be less than 4 KB.
s3.put('my_awesome_bucket', 'log/current/1.log', 'Ola-la!', 'x-amz-meta-family'=>'Woho556!') #=> true
This method is capable of 'streaming' uploads; that is, it can upload data from a file or other IO object without first reading all the data into memory. This is most useful for large PUTs - it is difficult to read a 2 GB file entirely into memory before sending it to S3. To stream an upload, pass an object that responds to 'read' (like the read method of IO) and to either 'lstat' or 'size'. For files, this means streaming is enabled by simply making the call:
s3.put(bucket_name, 'S3keyname.forthisfile', File.open('localfilename.dat'))
If the IO object you wish to stream from responds to the read method but doesn't implement lstat or size, you can extend the object dynamically to implement these methods, or define your own class which defines these methods. Be sure that your class returns 'nil' from read() after having read 'size' bytes. Otherwise S3 will drop the socket after 'Content-Length' bytes have been uploaded, and HttpConnection will interpret this as an error.
This method now supports very large PUTs, where very large is > 2 GB.
For Win32 users: Files and IO objects should be opened in binary mode. If a text mode IO object is passed to PUT, it will be converted to binary mode.
# File lib/s3/s3_interface.rb, line 422
# Saves an object to S3. Returns true or an exception.
# +data+ may be a String or any IO-like object responding to :read and to
# either :lstat or :size — large uploads are streamed without being read
# fully into memory. Headers starting with the Amazon metadata prefix are
# stored as user metadata.
def put(bucket, key, data=nil, headers={})
  # On Windows a file opened in text mode must be reset to binary mode
  # for streaming to work properly.
  data.binmode if data.respond_to?(:binmode)
  data = StringIO.new(data) if data.is_a?(String)
  data_size =
    if data.respond_to?(:lstat)
      data.lstat.size
    elsif data.respond_to?(:size)
      data.size
    else
      0
    end
  headers['expect'] = '100-continue' if data_size >= USE_100_CONTINUE_PUT_SIZE
  req_hash = generate_rest_request('PUT', headers.merge(
    :url  => "#{bucket}/#{CGI::escape key}",
    :data => data,
    'Content-Length' => data_size.to_s))
  request_info(req_hash, RightHttp2xxParser.new)
rescue
  on_exception
end
Sets the ACL on a bucket or object.
# File lib/s3/s3_interface.rb, line 883
# Sets the ACL on a bucket (key == '') or on an object.
def put_acl(bucket, key, acl_xml_doc, headers={})
  key = Aws::Utils.blank?(key) ? '' : "/#{CGI::escape key}"
  request = generate_rest_request('PUT', headers.merge(:url => "#{bucket}#{key}?acl", :data => acl_xml_doc))
  request_info(request, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
Generates link for 'PutACL'.
s3.put_acl_link('my_awesome_bucket',key) #=> url string
# File lib/s3/s3_interface.rb, line 1105
# Generates a signed link for 'PutACL'.
def put_acl_link(bucket, key='', headers={})
  generate_link('PUT', headers.merge(:url => "#{bucket}/#{Utils::URLencode key}?acl"))
rescue
  on_exception
end
Sets the ACL on a bucket only.
# File lib/s3/s3_interface.rb, line 899
# Sets the ACL on a bucket only.
def put_bucket_acl(bucket, acl_xml_doc, headers={})
  put_acl(bucket, '', acl_xml_doc, headers)
rescue
  on_exception
end
Generates link for 'PutBucketACL'.
s3.put_bucket_acl_link('my_awesome_bucket', acl_xml_doc) #=> url string
# File lib/s3/s3_interface.rb, line 1125
# Generates a signed link for 'PutBucketACL'.
# Fixed: the original forwarded +acl_xml_doc+ as a fourth argument to
# put_acl_link, which accepts at most three — every call raised
# ArgumentError and was silently swallowed by the rescue. The ACL document
# is not part of the link, so it is accepted for interface compatibility
# but not forwarded.
def put_bucket_acl_link(bucket, acl_xml_doc, headers={})
  put_acl_link(bucket, '', headers)
rescue
  on_exception
end
# File lib/s3/s3_interface.rb, line 912
# Attaches a policy document to +bucket+.
# Fixed: removed a dead line (`key = Aws::Utils.blank?(key) ? ...`) copied
# from the ACL methods — +key+ is not a parameter here, the expression
# always produced '' and the result was never used.
def put_bucket_policy(bucket, policy)
  req_hash = generate_rest_request('PUT', {:url => "#{bucket}?policy", :data => policy})
  request_info(req_hash, S3HttpResponseBodyParser.new)
rescue
  on_exception
end
Generates link for 'PutObject'.
s3.put_link('my_awesome_bucket',key, object) #=> url string
# File lib/s3/s3_interface.rb, line 1046
# Generates a signed link for 'PutObject'.
def put_link(bucket, key, data=nil, expires=nil, headers={})
  generate_link('PUT', headers.merge(:url => "#{bucket}/#{Utils::URLencode key}", :data => data), expires)
rescue
  on_exception
end
Sets logging configuration for a bucket from the XML configuration document.
params: :bucket :xmldoc
# File lib/s3/s3_interface.rb, line 267
# Sets a bucket's logging configuration from an XML document.
# Mandatory params: :bucket, :xmldoc.
def put_logging(params)
  Utils.mandatory_arguments([:bucket, :xmldoc], params)
  Utils.allow_only([:bucket, :xmldoc, :headers], params)
  params[:headers] ||= {}
  request = generate_rest_request('PUT', params[:headers].merge(
    :url  => "#{params[:bucket]}?logging",
    :data => params[:xmldoc]))
  request_info(request, S3TrueParser.new)
rescue
  on_exception
end
Rename an object.
# rename bucket1/key1 to bucket1/key2 s3.rename('bucket1', 'key1', 'key2') #=> {:e_tag=>"\"e8b...8d\"", :last_modified=>"2008-05-11T10:29:22.000Z"}
# File lib/s3/s3_interface.rb, line 810
# Renames an object within its bucket (move with source meta-headers kept).
def rename(src_bucket, src_key, dest_key, headers={})
  move(src_bucket, src_key, src_bucket, dest_key, :copy, headers)
end
New experimental API for retrieving objects, introduced in Aws 1.8.1. #retrieve_object is similar in function to the older function get. It allows for optional verification of object md5 checksums on retrieval. Parameters are passed as hash entries and are checked for completeness as well as for spurious arguments.
If the optional :md5 argument is provided, #retrieve_object verifies that the given md5 matches the md5 returned by S3. The :verified_md5 field in the response hash is set true or false depending on the outcome of this check. If no :md5 argument is given, :verified_md5 will be false in the response.
The optional argument of :headers allows the caller to specify arbitrary request header values. Mandatory arguments:
:bucket - the bucket in which the object is stored :key - the object address (or path) within the bucket
Optional arguments:
:headers - hash of additional HTTP headers to include with the request :md5 - MD5 checksum against which to verify the retrieved object s3.retrieve_object(:bucket => "foobucket", :key => "foo") => {:verified_md5=>false, :headers=>{"last-modified"=>"Mon, 29 Sep 2008 18:58:56 GMT", "x-amz-id-2"=>"2Aj3TDz6HP5109qly//18uHZ2a1TNHGLns9hyAtq2ved7wmzEXDOPGRHOYEa3Qnp", "content-type"=>"", "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"", "date"=>"Tue, 30 Sep 2008 00:52:44 GMT", "x-amz-request-id"=>"EE4855DE27A2688C", "server"=>"AmazonS3", "content-length"=>"10"}, :object=>"polemonium"} s3.retrieve_object(:bucket => "foobucket", :key => "foo", :md5=>'a507841b1bc8115094b00bbe8c1b2954') => {:verified_md5=>true, :headers=>{"last-modified"=>"Mon, 29 Sep 2008 18:58:56 GMT", "x-amz-id-2"=>"mLWQcI+VuKVIdpTaPXEo84g0cz+vzmRLbj79TS8eFPfw19cGFOPxuLy4uGYVCvdH", "content-type"=>"", "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"", "date"=>"Tue, 30 Sep 2008 00:53:08 GMT", "x-amz-request-id"=>"6E7F317356580599", "server"=>"AmazonS3", "content-length"=>"10"}, :object=>"polemonium"}
If a block is provided, yields incrementally to the block as the response is read. For large responses, this function is ideal as the response can be 'streamed'. The hash containing header fields is still returned.
# File lib/s3/s3_interface.rb, line 709
# Retrieves an object with argument validation and optional MD5 verification.
# Mandatory params: :bucket, :key. Optional: :headers, :md5.
# Sets :verified_md5 in the response to true only when :md5 was given and
# matches the returned ETag (quotes stripped). Streams to the block if given.
# Fixed: the quote-stripping regex literal was garbled in extraction
# (restored to /\"/).
def retrieve_object(params, &block)
  Utils.mandatory_arguments([:bucket, :key], params)
  Utils.allow_only([:bucket, :key, :headers, :md5], params)
  params[:headers] = {} unless params[:headers]
  req_hash = generate_rest_request('GET', params[:headers].merge(:url => "#{params[:bucket]}/#{CGI::escape params[:key]}"))
  resp = request_info(req_hash, S3HttpResponseBodyParser.new, &block)
  resp[:verified_md5] = false
  if params[:md5] && (resp[:headers]['etag'].gsub(/\"/, '') == params[:md5])
    resp[:verified_md5] = true
  end
  resp
rescue
  on_exception
end
Identical in function to #retrieve_object, but requires verification that the returned ETag is identical to the checksum passed in by the user as the 'md5' argument. If the check passes, returns the response metadata with the "verified_md5" field set true. Raises an exception if the checksums conflict. This call is implemented as a wrapper around #retrieve_object and the user may gain different semantics by creating a custom wrapper.
# File lib/s3/s3_interface.rb, line 727
# Like retrieve_object, but the :md5 argument is mandatory and an exception
# is raised when the returned ETag does not match it.
def retrieve_object_and_verify(params, &block)
  Utils.mandatory_arguments([:md5], params)
  resp = retrieve_object(params, &block)
  return resp if resp[:verified_md5]
  raise AwsError.new("Retrieved object failed MD5 checksum verification: #{resp.inspect}")
end
New experimental API for uploading objects, introduced in Aws 1.8.1. #store_object is similar in function to the older function put, but returns the full response metadata. It also allows for optional verification of object md5 checksums on upload. Parameters are passed as hash entries and are checked for completeness as well as for spurious arguments. The hash of the response headers contains useful information like the Amazon request ID and the object ETag (MD5 checksum).
If the optional :md5 argument is provided, #store_object verifies that the given md5 matches the md5 returned by S3. The :verified_md5 field in the response hash is set true or false depending on the outcome of this check. If no :md5 argument is given, :verified_md5 will be false in the response.
The optional argument of :headers allows the caller to specify arbitrary request header values.
s3.store_object(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
=> {"x-amz-id-2"=>"SVsnS2nfDaR+ixyJUlRKM8GndRyEMS16+oZRieamuL61pPxPaTuWrWtlYaEhYrI/", "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"", "date"=>"Mon, 29 Sep 2008 18:57:46 GMT", :verified_md5=>true, "x-amz-request-id"=>"63916465939995BA", "server"=>"AmazonS3", "content-length"=>"0"}
s3.store_object(:bucket => "foobucket", :key => "foo", :data => "polemonium" )
=> {"x-amz-id-2"=>"MAt9PLjgLX9UYJ5tV2fI/5dBZdpFjlzRVpWgBDpvZpl+V+gJFcBMW2L+LBstYpbR", "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"", "date"=>"Mon, 29 Sep 2008 18:58:56 GMT", :verified_md5=>false, "x-amz-request-id"=>"3B25A996BC2CDD3B", "server"=>"AmazonS3", "content-length"=>"0"}
# File lib/s3/s3_interface.rb, line 479
#
# Uploads an object via PUT and returns the full response-header metadata.
# When :md5 is supplied, the returned ETag (quotes stripped) is compared to
# it and the result stored under :verified_md5; without :md5 the field is
# always false.
#
# params:: hash with mandatory :bucket, :key, :data; optional :headers, :md5
def store_object(params)
  Utils.allow_only([:bucket, :key, :data, :headers, :md5], params)
  Utils.mandatory_arguments([:bucket, :key, :data], params)
  params[:headers] = {} unless params[:headers]
  # On Windows, if someone opens a file in text mode, we must reset it to
  # binary mode for streaming to work properly.
  params[:data].binmode if params[:data].respond_to?(:binmode)
  # Large bodies get an 'expect: 100-continue' header so the server can
  # reject the request before the payload is transmitted.
  if (params[:data].respond_to?(:lstat) && params[:data].lstat.size >= USE_100_CONTINUE_PUT_SIZE) ||
     (params[:data].respond_to?(:size) && params[:data].size >= USE_100_CONTINUE_PUT_SIZE)
    params[:headers]['expect'] = '100-continue'
  end
  req_hash = generate_rest_request('PUT', params[:headers].merge(:url => "#{params[:bucket]}/#{CGI::escape params[:key]}", :data => params[:data]))
  resp = request_info(req_hash, S3HttpResponseHeadParser.new)
  if params[:md5]
    # FIX: `%r\"/` is not valid Ruby syntax; strip the quotes from the
    # ETag before comparing. The comparison already yields a boolean, so
    # the redundant `? true : false` is dropped.
    resp[:verified_md5] = (resp['etag'].gsub(/\"/, '') == params[:md5])
  else
    resp[:verified_md5] = false
  end
  resp
rescue
  on_exception
end
Identical in function to #store_object, but requires verification that the returned ETag is identical to the checksum passed in by the user as the 'md5' argument. If the check passes, returns the response metadata with the "verified_md5" field set true. Raises an exception if the checksums conflict. This call is implemented as a wrapper around #store_object and the user may gain different semantics by creating a custom wrapper.
s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2954", :data => "polemonium" )
=> {"x-amz-id-2"=>"IZN3XsH4FlBU0+XYkFTfHwaiF1tNzrm6dIW2EM/cthKvl71nldfVC0oVQyydzWpb", "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"", "date"=>"Mon, 29 Sep 2008 18:38:32 GMT", :verified_md5=>true, "x-amz-request-id"=>"E8D7EA4FE00F5DF7", "server"=>"AmazonS3", "content-length"=>"0"}
s3.store_object_and_verify(:bucket => "foobucket", :key => "foo", :md5 => "a507841b1bc8115094b00bbe8c1b2953", :data => "polemonium" )
Aws::AwsError: Uploaded object failed MD5 checksum verification: {"x-amz-id-2"=>"HTxVtd2bf7UHHDn+WzEH43MkEjFZ26xuYvUzbstkV6nrWvECRWQWFSx91z/bl03n", "etag"=>"\"a507841b1bc8115094b00bbe8c1b2954\"", "date"=>"Mon, 29 Sep 2008 18:38:41 GMT", :verified_md5=>false, "x-amz-request-id"=>"0D7ADE09F42606F2", "server"=>"AmazonS3", "content-length"=>"0"}
# File lib/s3/s3_interface.rb, line 523
#
# Strict variant of #store_object: the caller MUST supply :md5, and an
# upload whose returned ETag does not match it raises AwsError instead of
# merely flagging :verified_md5 false.
#
# Returns the response hash (with :verified_md5 == true) on success.
def store_object_and_verify(params)
  Utils.mandatory_arguments([:md5], params)
  resp = store_object(params)
  unless resp[:verified_md5]
    raise AwsError.new("Uploaded object failed MD5 checksum verification: #{resp.inspect}")
  end
  resp
end
Uploads a part in a multipart upload - returns an ETag for the part, or raises an exception on failure. See docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPart.html
Among the parameters required, clients must supply the uploadId (obtained from the #initiate_multipart method call) as well as the partNumber for this part (user-specified, determining the sequence for reassembly).
s3.upload_part('my_awesome_bucket', 'hugeObject', "WL7dk8sqbtk3Rg641HHWaNeG6RxI", "2", File.open('localfilename.dat'))
=> "b54357faf0632cce46e942fa68356b38"
The returned ETag must be retained for use in the completion of the multipart upload; see docs.amazonwebservices.com/AmazonS3/latest/dev/mpuoverview.html
# File lib/s3/s3_interface.rb, line 556
#
# PUTs one part of a multipart upload and returns the parsed response
# (the part's ETag via S3UploadPartParser).
#
# bucket::     target bucket name
# key::        object key
# uploadId::   upload id obtained from the initiate-multipart call
# partNumber:: caller-chosen sequence number for reassembly
# data::       String or IO-like body for this part
# headers::    optional extra request headers
def upload_part(bucket, key, uploadId, partNumber, data, headers = {})
  # Reset text-mode streams to binary so streaming works on Windows.
  data.binmode if data.respond_to?(:binmode)
  data = StringIO.new(data) if data.is_a?(String)
  data_size =
    if data.respond_to?(:lstat)
      data.lstat.size
    elsif data.respond_to?(:size)
      data.size
    else
      0
    end
  # Large parts get 'expect: 100-continue' so the server can refuse the
  # request before the payload is sent.
  headers['expect'] = '100-continue' if data_size >= USE_100_CONTINUE_PUT_SIZE
  part_url = "#{bucket}/#{CGI::escape key}?partNumber=#{partNumber}&uploadId=#{uploadId}"
  req_hash = generate_rest_request('PUT', headers.merge(:url => part_url,
                                                        :data => data,
                                                        'Content-Length' => data_size.to_s))
  request_info(req_hash, S3UploadPartParser.new)
rescue
  on_exception
end