2014-09-26 01:01:01 +00:00
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module documentation. The original string was corrupted: the YAML
# document marker was mangled to "- - -", VCS timestamp lines were embedded
# inside the string, and spaces were injected into quoted literals — all of
# which break Ansible's documentation parser. Restored to valid YAML.
DOCUMENTATION = '''
---
module: s3
short_description: manage objects in S3.
description:
    - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and deleting both objects and buckets, retrieving objects as files or strings and generating download links. This module has a dependency on python-boto.
version_added: "1.1"
options:
  aws_access_key:
    description:
      - AWS access key id. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
    required: false
    default: null
    aliases: ['ec2_access_key', 'access_key']
  aws_secret_key:
    description:
      - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
    required: false
    default: null
    aliases: ['ec2_secret_key', 'secret_key']
  bucket:
    description:
      - Bucket name.
    required: true
    default: null
    aliases: []
  dest:
    description:
      - The destination file path when downloading an object/key with a GET operation.
    required: false
    aliases: []
    version_added: "1.3"
  encrypt:
    description:
      - When set for PUT mode, asks for server-side encryption
    required: false
    default: no
    version_added: "2.0"
  expiration:
    description:
      - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation.
    required: false
    default: 600
    aliases: []
  headers:
    description:
      - Custom headers for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
    required: false
    default: null
    version_added: "2.0"
  marker:
    description:
      - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with key after the marker in order.
    required: false
    default: null
    version_added: "2.0"
  max_keys:
    description:
      - Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys.
    required: false
    default: 1000
    version_added: "2.0"
  metadata:
    description:
      - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
    required: false
    default: null
    version_added: "1.6"
  mode:
    description:
      - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), list (list keys (2.0+)), create (bucket), delete (bucket), and delobj (delete object).
    required: true
    default: null
    aliases: []
  object:
    description:
      - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
    required: false
    default: null
  prefix:
    description:
      - Limits the response to keys that begin with the specified prefix for list mode
    required: false
    default: null
    version_added: "2.0"
  version:
    description:
      - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.
    required: false
    default: null
    aliases: []
    version_added: "2.0"
  overwrite:
    description:
      - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
    required: false
    default: true
    version_added: "1.2"
  region:
    description:
      - "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard. Prior to ansible 1.8 this parameter could be specified but had no effect."
    required: false
    default: null
    version_added: "1.8"
  retries:
    description:
      - On recoverable failure, how many times to retry before actually failing.
    required: false
    default: 0
    version_added: "2.0"
  s3_url:
    description:
      - S3 URL endpoint for usage with Eucalypus, fakes3, etc. Otherwise assumes AWS
    default: null
    aliases: [ S3_URL ]
  src:
    description:
      - The source file path when performing a PUT operation.
    required: false
    default: null
    aliases: []
    version_added: "1.3"
requirements: [ "boto" ]
author:
    - "Lester Wade (@lwade)"
    - "Ralph Tice (@ralph-tice)"
extends_documentation_fragment: aws
'''
# Usage examples. The original string was corrupted by embedded VCS timestamp
# lines and spaces injected into every key=value pair, which made the
# examples syntactically invalid; restored to working form.
EXAMPLES = '''
# Simple PUT operation
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put

# Simple GET operation
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get

# Get a specific version of an object.
- s3: bucket=mybucket object=/my/desired/key.txt version=48c9ee5131af7a716edc22df9772aa6f dest=/usr/local/myfile.txt mode=get

# PUT/upload with metadata
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache'

# PUT/upload with custom headers
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put headers=x-amz-grant-full-control=emailAddress=owner@example.com

# List keys simple
- s3: bucket=mybucket mode=list

# List keys all options
- s3: bucket=mybucket mode=list prefix=/my/desired/ marker=/my/desired/0023.txt max_keys=472

# Create an empty bucket
- s3: bucket=mybucket mode=create

# Create a bucket with key as directory, in the EU region
- s3: bucket=mybucket object=/my/directory/path mode=create region=eu-west-1

# Delete a bucket and all contents
- s3: bucket=mybucket mode=delete

# GET an object but dont download if the file checksums match
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get overwrite=different

# Delete an object from a bucket
- s3: bucket=mybucket object=/my/desired/key.txt mode=delobj
'''
import os
import urlparse
2014-11-14 01:58:00 +00:00
from ssl import SSLError
2014-09-26 01:01:01 +00:00
try :
import boto
2014-10-31 14:46:32 +00:00
import boto . ec2
2014-09-26 01:01:01 +00:00
from boto . s3 . connection import Location
2015-04-01 23:16:54 +00:00
from boto . s3 . connection import OrdinaryCallingFormat
2014-12-25 00:04:25 +00:00
from boto . s3 . connection import S3Connection
2015-04-01 23:16:54 +00:00
HAS_BOTO = True
2014-09-26 01:01:01 +00:00
except ImportError :
2015-04-01 23:16:54 +00:00
HAS_BOTO = False
2015-05-14 12:47:07 +00:00
def key_check(module, s3, bucket, obj, version=None):
    """Return True when *obj* (optionally at *version*) exists in *bucket*.

    Fails the module on any storage error other than the 400 that S3
    answers for a nonexistent version id.
    """
    try:
        found = s3.lookup(bucket).get_key(obj, version_id=version)
    except s3.provider.storage_response_error as e:
        # If a specified version doesn't exist a 400 is returned.
        if version is not None and e.status == 400:
            found = None
        else:
            module.fail_json(msg=str(e))
    return True if found else False
2015-05-14 12:47:07 +00:00
def keysum(module, s3, bucket, obj, version=None):
    """Return the remote MD5 (etag) of bucket/obj, or None if the key is absent.

    Fails the module for multipart uploads, whose etag is not an MD5 sum.
    """
    remote_key = s3.lookup(bucket).get_key(obj, version_id=version)
    if not remote_key:
        return None
    md5_remote = remote_key.etag[1:-1]
    # A '-' inside the etag marks a multipart upload; etag is not md5 then.
    if '-' in md5_remote:
        module.fail_json(msg="Files uploaded with multipart of s3 are not supported with checksum, unable to compute checksum.")
    return md5_remote
def bucket_check(module, s3, bucket):
    """Return True when *bucket* exists, False otherwise; fail on API errors."""
    try:
        found = s3.lookup(bucket)
    except s3.provider.storage_response_error as e:
        module.fail_json(msg=str(e))
    return bool(found)
2015-04-24 15:03:36 +00:00
def create_bucket(module, s3, bucket, location=None):
    """Create *bucket* in *location* (S3 US Standard when not given).

    Returns True on success; fails the module on a storage error.
    """
    if location is None:
        location = Location.DEFAULT
    try:
        new_bucket = s3.create_bucket(bucket, location=location)
    except s3.provider.storage_response_error as e:
        module.fail_json(msg=str(e))
    if new_bucket:
        return True
2014-11-15 00:07:29 +00:00
def get_bucket(module, s3, bucket):
    """Return the Bucket object for *bucket*, or None when it does not exist."""
    try:
        result = s3.lookup(bucket)
    except s3.provider.storage_response_error as e:
        module.fail_json(msg=str(e))
    else:
        return result
def list_keys(module, bucket_object, prefix, marker, max_keys):
    """Exit the module with the key names in *bucket_object* matching the filters."""
    matching = bucket_object.get_all_keys(prefix=prefix, marker=marker, max_keys=max_keys)
    module.exit_json(msg="LIST operation complete", s3_keys=[entry.key for entry in matching])
2014-09-26 01:01:01 +00:00
def delete_bucket(module, s3, bucket):
    """Delete every key in *bucket* and then the bucket itself; True on success."""
    try:
        target = s3.lookup(bucket)
        # The bucket must be emptied before it can be deleted.
        target.delete_keys([key.name for key in target.list()])
        target.delete()
        return True
    except s3.provider.storage_response_error as e:
        module.fail_json(msg=str(e))
def delete_key(module, s3, bucket, obj):
    """Delete *obj* from *bucket* and exit the module reporting the change."""
    try:
        container = s3.lookup(bucket)
        container.delete_key(obj)
        # NOTE: the message formats the Bucket object (as the original did),
        # not the plain bucket-name string.
        module.exit_json(msg="Object deleted from bucket %s" % container, changed=True)
    except s3.provider.storage_response_error as e:
        module.fail_json(msg=str(e))
2015-03-28 14:17:12 +00:00
2014-09-26 01:01:01 +00:00
def create_dirkey(module, s3, bucket, obj):
    """Create an empty key *obj* in *bucket* to act as a "virtual directory"."""
    try:
        container = s3.lookup(bucket)
        container.new_key(obj).set_contents_from_string('')
        module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, container.name), changed=True)
    except s3.provider.storage_response_error as e:
        module.fail_json(msg=str(e))
def upload_file_check(src):
    """Return True when *src* is an existing file; fail for directories.

    NOTE(review): relies on the file-level ``module`` object for error
    reporting — it is not passed in as a parameter.
    """
    # Bug fix: the original wrote 'file_exists is True' / 'is False' — no-op
    # identity comparisons instead of assignments — so file_exists was never
    # bound and the final return raised NameError.
    file_exists = os.path.exists(src)
    if os.path.isdir(src):
        module.fail_json(msg="Specifying a directory is not a valid source for upload.", failed=True)
    return file_exists
def path_check(path):
    """Return True when *path* exists on the local filesystem."""
    return os.path.exists(path)
2015-02-24 18:24:23 +00:00
2015-06-30 13:04:15 +00:00
def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers):
    """Upload *src* to bucket/obj and exit with a URL signed for *expiry* seconds.

    Optional *metadata* entries are attached to the key before upload;
    *encrypt* requests server-side encryption and *headers* are passed
    through to the PUT request.
    """
    try:
        key = s3.lookup(bucket).new_key(obj)
        if metadata:
            for meta_name, meta_value in metadata.items():
                key.set_metadata(meta_name, meta_value)
        key.set_contents_from_filename(src, encrypt_key=encrypt, headers=headers)
        module.exit_json(msg="PUT operation complete", url=key.generate_url(expiry), changed=True)
    except s3.provider.storage_copy_error as e:
        module.fail_json(msg=str(e))
2015-05-14 12:47:07 +00:00
def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
    """Download bucket/obj into *dest*, retrying transient SSL errors.

    *retries* is the number of extra attempts after the first; the loop
    therefore runs retries + 1 times.
    """
    key = s3.lookup(bucket).get_key(obj, version_id=version)
    for attempt in range(retries + 1):
        try:
            key.get_contents_to_filename(dest)
            module.exit_json(msg="GET operation complete", changed=True)
        except s3.provider.storage_copy_error as e:
            module.fail_json(msg=str(e))
        except SSLError as e:
            # Only give up on the final attempt; earlier SSL errors may be
            # transient timeouts, so loop and try again.
            if attempt >= retries:
                module.fail_json(msg="s3 download failed; %s" % e)
2014-09-26 01:01:01 +00:00
2015-05-14 12:47:07 +00:00
def download_s3str(module, s3, bucket, obj, version=None):
    """Exit the module with the contents of bucket/obj as a string."""
    try:
        key = s3.lookup(bucket).get_key(obj, version_id=version)
        module.exit_json(msg="GET operation complete", contents=key.get_contents_as_string(), changed=True)
    except s3.provider.storage_copy_error as e:
        module.fail_json(msg=str(e))
def get_download_url(module, s3, bucket, obj, expiry, changed=True):
    """Exit the module with a time-limited download URL for bucket/obj."""
    try:
        signed_url = s3.lookup(bucket).lookup(obj).generate_url(expiry)
        module.exit_json(msg="Download url:", url=signed_url, expiry=expiry, changed=changed)
    except s3.provider.storage_response_error as e:
        module.fail_json(msg=str(e))
def is_fakes3(s3_url):
    """Return True if s3_url has scheme fakes3:// or fakes3s://."""
    if s3_url is None:
        return False
    return urlparse.urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
def is_walrus(s3_url):
    """Return True if it's a Walrus endpoint, not S3.

    Anything whose hostname is not under *.amazonaws.com is assumed to be
    Walrus.
    """
    if s3_url is None:
        return False
    return not urlparse.urlparse(s3_url).hostname.endswith('amazonaws.com')
2015-02-24 18:24:23 +00:00
2014-09-26 01:01:01 +00:00
def main():
    """Module entry point: dispatch the operation selected by the mode param.

    Fixes over the original:
      * the first download_s3file() call dropped the required ``retries``
        argument (a TypeError at runtime) and the ``version`` parameter;
      * the overwrite-normalization block was duplicated verbatim — it now
        runs once;
      * removed a dead ``overwrite is False`` re-check (overwrite is always
        one of the strings 'always'/'never'/'different' after normalization);
      * removed a stray semicolon in the delobj branch.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        bucket=dict(required=True),
        dest=dict(default=None),
        encrypt=dict(default=True, type='bool'),
        expiry=dict(default=600, aliases=['expiration']),
        headers=dict(type='dict'),
        marker=dict(default=None),
        max_keys=dict(default=1000),
        metadata=dict(type='dict'),
        mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
        object=dict(),
        version=dict(default=None),
        overwrite=dict(aliases=['force'], default='always'),
        prefix=dict(default=None),
        retries=dict(aliases=['retry'], type='int', default=0),
        s3_url=dict(aliases=['S3_URL']),
        src=dict(),
    ),
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    bucket = module.params.get('bucket')
    encrypt = module.params.get('encrypt')
    expiry = int(module.params['expiry'])
    if module.params.get('dest'):
        dest = os.path.expanduser(module.params.get('dest'))
    headers = module.params.get('headers')
    marker = module.params.get('marker')
    max_keys = module.params.get('max_keys')
    metadata = module.params.get('metadata')
    mode = module.params.get('mode')
    obj = module.params.get('object')
    version = module.params.get('version')
    overwrite = module.params.get('overwrite')
    prefix = module.params.get('prefix')
    retries = module.params.get('retries')
    s3_url = module.params.get('s3_url')
    src = module.params.get('src')

    # Map legacy boolean overwrite values (yes/no/true/false) onto the
    # 'always'/'never'/'different' vocabulary.  (This block appeared twice
    # in the original; it is idempotent, so one pass suffices.)
    if overwrite not in ['always', 'never', 'different']:
        if module.boolean(overwrite):
            overwrite = 'always'
        else:
            overwrite = 'never'

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    if region in ('us-east-1', '', None):
        # S3ism for the US Standard region
        location = Location.DEFAULT
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region
    if module.params.get('object'):
        obj = os.path.expanduser(module.params['object'])

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    # Look at s3_url and tweak connection settings
    # if connecting to Walrus or fakes3
    try:
        if is_fakes3(s3_url):
            fakes3 = urlparse.urlparse(s3_url)
            s3 = S3Connection(
                is_secure=fakes3.scheme == 'fakes3s',
                host=fakes3.hostname,
                port=fakes3.port,
                calling_format=OrdinaryCallingFormat(),
                **aws_connect_kwargs
            )
        elif is_walrus(s3_url):
            walrus = urlparse.urlparse(s3_url).hostname
            s3 = boto.connect_walrus(walrus, **aws_connect_kwargs)
        else:
            s3 = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_kwargs)
            # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
            if s3 is None:
                s3 = boto.connect_s3(**aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg='No Authentication Handler found: %s' % str(e))
    except Exception as e:
        module.fail_json(msg='Failed to connect to S3: %s' % str(e))

    if s3 is None:  # this should never happen
        module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')

    # If our mode is a GET operation (download), go through the procedure as appropriate ...
    if mode == 'get':
        # First, we check to see if the bucket exists, we get "bucket" returned.
        bucketrtn = bucket_check(module, s3, bucket)
        if bucketrtn is False:
            module.fail_json(msg="Target bucket cannot be found", failed=True)

        # Next, check whether the key in the bucket exists.
        keyrtn = key_check(module, s3, bucket, obj, version=version)
        if keyrtn is False:
            if version is not None:
                module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version), failed=True)
            else:
                module.fail_json(msg="Key %s does not exist." % obj, failed=True)

        # If the destination path doesn't exist or overwrite is 'always', no
        # need to do the md5sum etag check, so just download.
        pathrtn = path_check(dest)
        if pathrtn is False or overwrite == 'always':
            # Bug fix: the original call omitted retries (and version),
            # which raised a TypeError before any download could happen.
            download_s3file(module, s3, bucket, obj, dest, retries, version=version)

        # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists.
        if pathrtn is True:
            md5_remote = keysum(module, s3, bucket, obj, version=version)
            md5_local = module.md5(dest)
            if md5_local == md5_remote:
                if overwrite == 'always':
                    download_s3file(module, s3, bucket, obj, dest, retries, version=version)
                else:
                    module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False)
            else:
                if overwrite in ('always', 'different'):
                    download_s3file(module, s3, bucket, obj, dest, retries, version=version)
                else:
                    module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.")

    # if our mode is a PUT operation (upload), go through the procedure as appropriate ...
    if mode == 'put':
        # Lets check the src path.
        pathrtn = path_check(src)
        if pathrtn is False:
            module.fail_json(msg="Local object for PUT does not exist", failed=True)

        # Lets check to see if bucket exists to get ground truth.
        bucketrtn = bucket_check(module, s3, bucket)
        if bucketrtn is True:
            keyrtn = key_check(module, s3, bucket, obj)

        # Lets check key state. Does it exist and if it does, compute the etag md5sum.
        if bucketrtn is True and keyrtn is True:
            md5_remote = keysum(module, s3, bucket, obj)
            md5_local = module.md5(src)
            if md5_local == md5_remote:
                if overwrite == 'always':
                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
                else:
                    get_download_url(module, s3, bucket, obj, expiry, changed=False)
            else:
                if overwrite in ('always', 'different'):
                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
                else:
                    module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")

        # If neither exist (based on bucket existence), we can create both.
        if bucketrtn is False and pathrtn is True:
            create_bucket(module, s3, bucket, location)
            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)

        # If bucket exists but key doesn't, just upload.
        if bucketrtn is True and pathrtn is True and keyrtn is False:
            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)

    # Delete an object from a bucket, not the entire bucket
    if mode == 'delobj':
        if obj is None:
            module.fail_json(msg="object parameter is required", failed=True)
        if bucket:
            bucketrtn = bucket_check(module, s3, bucket)
            if bucketrtn is True:
                deletertn = delete_key(module, s3, bucket, obj)
                if deletertn is True:
                    module.exit_json(msg="Object %s deleted from bucket %s." % (obj, bucket), changed=True)
            else:
                module.fail_json(msg="Bucket does not exist.", changed=False)
        else:
            module.fail_json(msg="Bucket parameter is required.", failed=True)

    # Delete an entire bucket, including all objects in the bucket
    if mode == 'delete':
        if bucket:
            bucketrtn = bucket_check(module, s3, bucket)
            if bucketrtn is True:
                deletertn = delete_bucket(module, s3, bucket)
                if deletertn is True:
                    module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True)
            else:
                module.fail_json(msg="Bucket does not exist.", changed=False)
        else:
            module.fail_json(msg="Bucket parameter is required.", failed=True)

    # Support for listing a set of keys
    if mode == 'list':
        bucket_object = get_bucket(module, s3, bucket)
        # If the bucket does not exist then bail out
        if bucket_object is None:
            module.fail_json(msg="Target bucket (%s) cannot be found" % bucket, failed=True)
        list_keys(module, bucket_object, prefix, marker, max_keys)

    # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
    # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
    if mode == 'create':
        if bucket and not obj:
            bucketrtn = bucket_check(module, s3, bucket)
            if bucketrtn is True:
                module.exit_json(msg="Bucket already exists.", changed=False)
            else:
                module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
        if bucket and obj:
            bucketrtn = bucket_check(module, s3, bucket)
            if obj.endswith('/'):
                dirobj = obj
            else:
                dirobj = obj + "/"
            if bucketrtn is True:
                keyrtn = key_check(module, s3, bucket, dirobj)
                if keyrtn is True:
                    module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
                else:
                    create_dirkey(module, s3, bucket, dirobj)
            if bucketrtn is False:
                created = create_bucket(module, s3, bucket, location)
                create_dirkey(module, s3, bucket, dirobj)

    # Support for grabbing the time-expired URL for an object in S3/Walrus.
    if mode == 'geturl':
        if bucket and obj:
            bucketrtn = bucket_check(module, s3, bucket)
            if bucketrtn is False:
                module.fail_json(msg="Bucket %s does not exist." % bucket, failed=True)
            else:
                keyrtn = key_check(module, s3, bucket, obj)
                if keyrtn is True:
                    get_download_url(module, s3, bucket, obj, expiry)
                else:
                    module.fail_json(msg="Key %s does not exist." % obj, failed=True)
        else:
            module.fail_json(msg="Bucket and Object parameters must be set", failed=True)

    if mode == 'getstr':
        if bucket and obj:
            bucketrtn = bucket_check(module, s3, bucket)
            if bucketrtn is False:
                module.fail_json(msg="Bucket %s does not exist." % bucket, failed=True)
            else:
                keyrtn = key_check(module, s3, bucket, obj, version=version)
                if keyrtn is True:
                    download_s3str(module, s3, bucket, obj, version=version)
                else:
                    if version is not None:
                        module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version), failed=True)
                    else:
                        module.fail_json(msg="Key %s does not exist." % obj, failed=True)

    module.exit_json(failed=False)
# import module snippets — placed at the bottom (the historical Ansible
# module convention) so the shared boilerplate symbols are in scope before
# main() runs.
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()