Chunk size to use for multipart uploads. Use small chunk sizes to minimize memory usage; e.g. 5242880 = 5 MB.
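For example (a minimal sketch; the key and local path are hypothetical, and directory is assumed to be an existing bucket model):

  file = directory.files.new(:key => 'backups/dump.tar', :body => File.open('/tmp/dump.tar'))
  file.multipart_chunk_size = 5242880   # upload in 5 MB parts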
# File lib/fog/aws/models/storage/file.rb, line 33
def acl=(new_acl)
  valid_acls = ['private', 'public-read', 'public-read-write', 'authenticated-read']
  unless valid_acls.include?(new_acl)
    raise ArgumentError.new("acl must be one of [#{valid_acls.join(', ')}]")
  end
  @acl = new_acl
end
# File lib/fog/aws/models/storage/file.rb, line 41
def body
  attributes[:body] ||=
    if last_modified && (file = collection.get(identity))
      file.body
    else
      ''
    end
end
# File lib/fog/aws/models/storage/file.rb, line 57
def copy(target_directory_key, target_file_key, options = {})
  requires :directory, :key
  connection.copy_object(directory.key, key, target_directory_key, target_file_key, options)
  target_directory = connection.directories.new(:key => target_directory_key)
  target_directory.files.head(target_file_key)
end
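A rough usage sketch (bucket and key names are made up); copy returns the file model fetched from the target directory:

  file   = directory.files.get('logs/2013-01-01.log')
  copied = file.copy('archive-bucket', 'logs/2013-01-01.log')
  copied.key   # => "logs/2013-01-01.log"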
# File lib/fog/aws/models/storage/file.rb, line 64
def destroy(options = {})
  requires :directory, :key
  attributes[:body] = nil if options['versionId'] == version
  connection.delete_object(directory.key, key, options)
  true
end
# File lib/fog/aws/models/storage/file.rb, line 53
def directory
  @directory
end
# File lib/fog/aws/models/storage/file.rb, line 72
def metadata
  attributes.reject {|key, value| !(key.to_s =~ /^x-amz-/)}
end
# File lib/fog/aws/models/storage/file.rb, line 77
def metadata=(new_metadata)
  merge_attributes(new_metadata)
end
# File lib/fog/aws/models/storage/file.rb, line 82
def owner=(new_owner)
  if new_owner
    attributes[:owner] = {
      :display_name => new_owner['DisplayName'],
      :id           => new_owner['ID']
    }
  end
end
# File lib/fog/aws/models/storage/file.rb, line 91
def public=(new_public)
  if new_public
    @acl = 'public-read'
  else
    @acl = 'private'
  end
  new_public
end
# File lib/fog/aws/models/storage/file.rb, line 100
def public_url
  requires :directory, :key
  if connection.get_object_acl(directory.key, key).body['AccessControlList'].detect {|grant| grant['Grantee']['URI'] == 'http://acs.amazonaws.com/groups/global/AllUsers' && grant['Permission'] == 'READ'}
    if directory.key.to_s =~ /^(?:[a-z]|\d(?!\d{0,2}(?:\.\d{1,3}){3}$))(?:[a-z0-9]|\-(?![\.])){1,61}[a-z0-9]$/
      "https://#{directory.key}.s3.amazonaws.com/#{Fog::AWS.escape(key)}".gsub('%2F', '/')
    else
      "https://s3.amazonaws.com/#{directory.key}/#{Fog::AWS.escape(key)}".gsub('%2F', '/')
    end
  else
    nil
  end
end
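public_url returns nil unless the object is readable by all users. One way to get there (a sketch; the key and bucket name are hypothetical) is to mark the file public when creating it:

  file = directory.files.create(:key => 'images/logo.png', :body => File.open('logo.png'), :public => true)
  file.public_url   # => "https://my-bucket.s3.amazonaws.com/images/logo.png"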
# File lib/fog/aws/models/storage/file.rb, line 113
def save(options = {})
  requires :body, :directory, :key
  if options != {}
    Fog::Logger.deprecation("options param is deprecated, use acl= instead [light_black](#{caller.first})[/]")
  end
  options['x-amz-acl'] ||= @acl if @acl
  options['Cache-Control'] = cache_control if cache_control
  options['Content-Disposition'] = content_disposition if content_disposition
  options['Content-Encoding'] = content_encoding if content_encoding
  options['Content-MD5'] = content_md5 if content_md5
  options['Content-Type'] = content_type if content_type
  options['Expires'] = expires if expires
  options.merge!(metadata)
  options['x-amz-storage-class'] = storage_class if storage_class
  options['x-amz-server-side-encryption'] = encryption if encryption

  if multipart_chunk_size && body.respond_to?(:read)
    data = multipart_save(options)
    merge_attributes(data.body)
  else
    data = connection.put_object(directory.key, key, body, options)
    merge_attributes(data.headers.reject {|key, value| ['Content-Length', 'Content-Type'].include?(key)})
  end
  self.etag.gsub!('"', '')
  self.content_length = Fog::Storage.get_body_size(body)
  self.content_type ||= Fog::Storage.get_content_type(body)
  true
end
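Since the options hash is deprecated, request headers are normally driven by the model's attribute writers before calling save. A rough example (key, path, and values are illustrative only):

  file = directory.files.new(:key => 'docs/report.pdf', :body => File.open('report.pdf'))
  file.acl           = 'private'
  file.content_type  = 'application/pdf'
  file.cache_control = 'max-age=3600'
  file.metadata      = { 'x-amz-meta-author' => 'ops' }
  file.save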
# File lib/fog/aws/models/storage/file.rb, line 142
def url(expires, options = {})
  requires :key
  collection.get_url(key, expires, options)
end
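For private objects, url produces an expiring signed URL via the files collection; for example (sketch):

  file.url(Time.now + 3600)   # signed URL valid for roughly one hour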
# File lib/fog/aws/models/storage/file.rb, line 147
def versions
  @versions ||= begin
    Fog::Storage::AWS::Versions.new(
      :file       => self,
      :connection => connection
    )
  end
end
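If the bucket has versioning enabled, the collection can be enumerated; a hedged sketch (the version attribute name is an assumption about the Versions model):

  file.versions.all.each do |v|
    puts v.version   # assumes versioning is enabled on the bucket
  end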
# File lib/fog/aws/models/storage/file.rb, line 158
def directory=(new_directory)
  @directory = new_directory
end
# File lib/fog/aws/models/storage/file.rb, line 162
def multipart_save(options)
  # Initiate the upload
  res = connection.initiate_multipart_upload(directory.key, key, options)
  upload_id = res.body["UploadId"]

  # Store ETags of upload parts
  part_tags = []

  # Upload each part
  # TODO: optionally upload chunks in parallel using threads
  # (may cause network performance problems with many small chunks)
  # TODO: Support large chunk sizes without reading the chunk into memory
  body.rewind if body.respond_to?(:rewind)
  while (chunk = body.read(multipart_chunk_size)) do
    md5 = Base64.encode64(Digest::MD5.digest(chunk)).strip
    part_upload = connection.upload_part(directory.key, key, upload_id, part_tags.size + 1, chunk, 'Content-MD5' => md5)
    part_tags << part_upload.headers["ETag"]
  end

rescue
  # Abort the upload & reraise
  connection.abort_multipart_upload(directory.key, key, upload_id) if upload_id
  raise
else
  # Complete the upload
  connection.complete_multipart_upload(directory.key, key, upload_id, part_tags)
end
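multipart_save is not called directly; save dispatches to it when multipart_chunk_size is set and the body responds to read, uploading one chunk at a time and aborting the upload if any part fails. A sketch of the flow (paths and sizes are hypothetical):

  file = directory.files.new(:key => 'video/raw.mov', :body => File.open('/data/raw.mov'))
  file.multipart_chunk_size = 10 * 1024 * 1024   # 10 MB parts, one chunk in memory at a time
  file.save                                      # initiate, upload parts, then complete (or abort on error)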