Merge remote-tracking branch 'upstream/devel' into devel
commit a4ff61748d
@@ -7,8 +7,8 @@ multinode orchestration framework.

Read the documentation and more at http://ansibleworks.com/

Many users run straight from the development branch (it's generally fine to do so), but you might also wish to consume a release. You can find
instructions on http://ansibleworks.com/docs/intro_getting_started.html for a variety of platforms. If you want a tarball of the last release, go to
http://ansibleworks.com/releases/ and you can also install with pip (though that will bring in some optional binary dependencies you normally do not need).
instructions [here](http://ansibleworks.com/docs/intro_getting_started.html) for a variety of platforms. If you want a tarball of the last release, go to
http://ansibleworks.com/releases/ and you can also install with pip.

Design Principles
=================
@@ -1,4 +1,3 @@
Ansible Documentation
=====================
@@ -70,7 +69,7 @@ Playbooks

Playbooks are Ansible's configuration, deployment, and orchestration language. They can describe a policy you want your remote systems to enforce, or a set of steps in a general IT process.

If Ansible modules are your the tools in your workshop, playbooks are your design plans.
If Ansible modules are the tools in your workshop, playbooks are your design plans.

At a basic level, playbooks can be used to manage configurations of and deployments to remote machines. At a more advanced level, they can sequence multi-tier rollouts involving rolling updates, and can delegate actions to other hosts, interacting with monitoring servers and load balancers along the way.
@@ -11,7 +11,7 @@ Introduction

Ansible ships with a number of modules (called the 'module library')
that can be executed directly on remote hosts or through :doc:`playbooks`.
that can be executed directly on remote hosts or through :doc:`Playbooks <playbooks>`.
Users can also write their own modules. These modules can control system
resources, like services, packages, or files (anything really), or
handle executing system commands.
@@ -53,6 +53,32 @@ This length can be changed by passing an extra parameter::

.. note:: If the file already exists, no data will be written to it. If the file has contents, those contents will be read in as the password. Empty files cause the password to return as an empty string

Starting in version 1.4, password accepts a "chars" parameter to allow defining a custom character set in the generated passwords. It accepts a comma-separated list of names that are either string module attributes (ascii_letters, digits, etc) or are used literally::

    ---
    - hosts: all

      tasks:

        # create a mysql user with a random password using only ascii letters:
        - mysql_user: name={{ client }}
                      password="{{ lookup('password', '/tmp/passwordfile chars=ascii') }}"
                      priv={{ client }}_{{ tier }}_{{ role }}.*:ALL

        # create a mysql user with a random password using only digits:
        - mysql_user: name={{ client }}
                      password="{{ lookup('password', '/tmp/passwordfile chars=digits') }}"
                      priv={{ client }}_{{ tier }}_{{ role }}.*:ALL

        # create a mysql user with a random password using many different char sets:
        - mysql_user: name={{ client }}
                      password="{{ lookup('password', '/tmp/passwordfile chars=ascii,numbers,digits,hexdigits,punctuation') }}"
                      priv={{ client }}_{{ tier }}_{{ role }}.*:ALL

    (...)

To enter a comma, use two commas ',,' somewhere - preferably at the end. Quotes and double quotes are not supported.

.. _more_lookups:

More Lookups
@@ -72,7 +72,10 @@ class InventoryScript(object):
                    self.host_vars_from_top = data['hostvars']
                    continue

            group = groups[group_name] = Group(group_name)
            if group_name != all.name:
                group = groups[group_name] = Group(group_name)
            else:
                group = all
            host = None

            if not isinstance(data, dict):
@@ -21,6 +21,8 @@ from ansible import utils, errors

import os
import errno
from string import ascii_letters, digits
import string
import random


class LookupModule(object):
@@ -48,6 +50,7 @@ class LookupModule(object):
            paramvals = {
                'length': LookupModule.LENGTH,
                'encrypt': None,
                'chars': ['ascii_letters','digits',".,:-_"],
            }

            # get non-default parameters if specified
@@ -57,6 +60,11 @@ class LookupModule(object):
                    assert(name in paramvals)
                    if name == 'length':
                        paramvals[name] = int(value)
                    elif name == 'chars':
                        use_chars=[]
                        if ",," in value: use_chars.append(',')
                        use_chars.extend(value.replace(',,',',').split(','))
                        paramvals['chars'] = use_chars
                    else:
                        paramvals[name] = value
            except (ValueError, AssertionError) as e:
@@ -64,6 +72,7 @@ class LookupModule(object):

            length = paramvals['length']
            encrypt = paramvals['encrypt']
            use_chars = paramvals['chars']

            # get password or create it if file doesn't exist
            path = utils.path_dwim(self.basedir, relpath)
@@ -71,8 +80,10 @@ class LookupModule(object):
                pathdir = os.path.dirname(path)
                if not os.path.isdir(pathdir):
                    os.makedirs(pathdir)
                chars = ascii_letters + digits + ".,:-_"
                password = utils.random_password(length)

                chars = "".join([getattr(string,c,c) for c in use_chars]).replace('"','').replace("'",'')
                password = ''.join(random.choice(chars) for _ in range(length))

                if encrypt is not None:
                    salt = self.random_salt()
                    content = '%s salt=%s' % (password, salt)
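The docs hunk earlier describes the chars syntax (string-module attribute names, literal characters, ',,' for a comma); the plugin hunks above implement it. A minimal standalone sketch of that parsing and generation logic, using illustrative helper names (parse_chars, make_password) rather than the plugin's own API:

    import random
    import string

    def parse_chars(value):
        # Same rule as the plugin code above: ',,' stands for a literal comma,
        # everything else is split on ','.
        use_chars = []
        if ",," in value:
            use_chars.append(',')
        use_chars.extend(value.replace(',,', ',').split(','))
        return use_chars

    def make_password(use_chars, length=20):
        # Names that are attributes of the string module (ascii_letters, digits,
        # hexdigits, punctuation, ...) expand to those sets; anything else is used
        # literally, and quote characters are stripped, as in the plugin.
        chars = "".join(getattr(string, c, c) for c in use_chars).replace('"', '').replace("'", '')
        return ''.join(random.choice(chars) for _ in range(length))

    print(make_password(parse_chars('ascii_letters,digits')))
    print(make_password(parse_chars('digits,,')))  # digits plus a literal comma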
@@ -392,9 +392,7 @@ def create_instances(module, ec2):
    if group_name:
        grp_details = ec2.get_all_security_groups()
        if type(group_name) == list:
            # FIXME: this should be a nice list comprehension
            # also not py 2.4 compliant
            group_id = list(filter(lambda grp: str(grp.id) if str(tmp) in str(grp) else None, grp_details) for tmp in group_name)
            group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
        elif type(group_name) == str:
            for grp in grp_details:
                if str(group_name) in str(grp):
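A rough illustration of the replacement list comprehension, using a made-up stand-in for boto's security group objects (the Group namedtuple and sample ids below are hypothetical):

    from collections import namedtuple

    # Hypothetical stand-in for boto security group objects; only .id and .name matter here.
    Group = namedtuple('Group', ['id', 'name'])

    grp_details = [Group('sg-1111', 'web'), Group('sg-2222', 'db'), Group('sg-3333', 'cache')]
    group_name = ['web', 'db']  # the module parameter when a list of group names is passed

    # Same comprehension as in the hunk above: keep the ids of every group whose name was requested.
    group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
    print(group_id)  # ['sg-1111', 'sg-2222']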
@@ -136,6 +136,29 @@ PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS', 'IMAPv4',
             'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']


def to_dict(obj):
    instance = {}
    for key in dir(obj):
        value = getattr(obj, key)
        if key == 'virtual_ips':
            instance[key] = []
            for vip in value:
                vip_dict = {}
                for vip_key, vip_value in vars(vip).iteritems():
                    if isinstance(vip_value, NON_CALLABLES):
                        vip_dict[vip_key] = vip_value
                instance[key].append(vip_dict)
        elif key == 'nodes':
            instance[key] = []
            for node in value:
                instance[key].append(node.to_dict())
        elif (isinstance(value, NON_CALLABLES) and
                not key.startswith('_')):
            instance[key] = value

    return instance


def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
                        vip_type, timeout, wait, wait_timeout):
    for arg in (state, name, port, protocol, vip_type):
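A simplified, self-contained rendition of what the new to_dict helper produces (the nodes special case is omitted; DummyVIP, DummyBalancer and this NON_CALLABLES tuple are invented for the demo, not pyrax types or the module's actual constant):

    NON_CALLABLES = (str, bool, dict, int, list, type(None))  # assumption for the demo

    class DummyVIP(object):
        def __init__(self, address, ip_version):
            self.address = address
            self.ip_version = ip_version

    class DummyBalancer(object):
        def __init__(self):
            self.name = 'web-lb'
            self.port = 80
            self.virtual_ips = [DummyVIP('10.0.0.5', 4)]
            self._manager = object()          # private attribute: skipped

        def delete(self):                     # callable: skipped
            pass

    def flatten(obj):
        # Same idea as to_dict above: copy public, non-callable attributes,
        # expanding virtual_ips into plain dicts.
        instance = {}
        for key in dir(obj):
            value = getattr(obj, key)
            if key == 'virtual_ips':
                instance[key] = [dict(vars(vip)) for vip in value]
            elif isinstance(value, NON_CALLABLES) and not key.startswith('_'):
                instance[key] = value
        return instance

    print(flatten(DummyBalancer()))
    # -> name, port and the expanded virtual_ips; private and callable attributes are dropped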
@@ -210,20 +233,7 @@ def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
            pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)

        balancer.get()
        instance = {}
        for key, value in vars(balancer).iteritems():
            if key == 'virtual_ips':
                virtual_ips = []
                instance[key] = []
                for vip in value:
                    vip_dict = {}
                    for vip_key, vip_value in vars(vip).iteritems():
                        if isinstance(vip_value, NON_CALLABLES):
                            vip_dict[vip_key] = vip_value
                    instance[key].append(vip_dict)
            elif (isinstance(value, NON_CALLABLES) and
                    not key.startswith('_')):
                instance[key] = value
        instance = to_dict(balancer)

        result = dict(changed=changed, balancer=instance)
@@ -246,20 +256,7 @@ def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
        except Exception, e:
            module.fail_json(msg='%s' % e.message)

        instance = {}
        for key, value in vars(balancer).iteritems():
            if key == 'virtual_ips':
                virtual_ips = []
                instance[key] = []
                for vip in value:
                    vip_dict = {}
                    for vip_key, vip_value in vars(vip).iteritems():
                        if isinstance(vip_value, NON_CALLABLES):
                            vip_dict[vip_key] = vip_value
                    instance[key].append(vip_dict)
            elif (isinstance(value, NON_CALLABLES) and
                    not key.startswith('_')):
                instance[key] = value
        instance = to_dict(balancer)

        if wait:
            attempts = wait_timeout / 5
@@ -24,8 +24,7 @@ DOCUMENTATION = '''
module: async_status
short_description: Obtain status of asynchronous task
description:
    - "This module gets the status of an asynchronous task. See:
      U(http://www.ansibleworks.com/docs/playbooks2.html#asynchronous-actions-and-polling)"
    - "This module gets the status of an asynchronous task."
version_added: "0.5"
options:
  jid:
@@ -42,7 +41,7 @@ options:
    choices: [ "status", "cleanup" ]
    default: "status"
notes:
    - See U(http://www.ansibleworks.com/docs/playbooks2.html#asynchronous-actions-and-polling)
    - See also U(http://www.ansibleworks.com/docs/playbooks_async.html#asynchronous-actions-and-polling)
requirements: []
author: Michael DeHaan
'''
@@ -36,7 +36,7 @@ description:
      the target host, requests will be sent through that proxy. This
      behaviour can be overridden by setting a variable for this task
      (see `setting the environment
      <http://www.ansibleworks.com/docs/playbooks2.html#setting-the-environment-and-working-with-proxies>`_),
      <http://www.ansibleworks.com/docs/playbooks_environment.html#setting-the-environment-and-working-with-proxies>`_),
      or by using the use_proxy option.
version_added: "0.6"
options:
@@ -140,8 +140,8 @@ import fnmatch
# APT related constants
APT_ENVVARS = "DEBIAN_FRONTEND=noninteractive DEBIAN_PRIORITY=critical"
DPKG_OPTIONS = 'force-confdef,force-confold'
APT_GET_ZERO = "0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded."
APTITUDE_ZERO = "0 packages upgraded, 0 newly installed, 0 to remove and 0 not upgraded."
APT_GET_ZERO = "0 upgraded, 0 newly installed"
APTITUDE_ZERO = "0 packages upgraded, 0 newly installed"
APT_LISTS_PATH = "/var/lib/apt/lists"
APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp"
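The shortened constants presumably keep the "nothing changed" detection working when the trailing counts vary (for example, held-back packages); the upgrade() hunk below tests them with a substring check. A small illustration against made-up apt-get output:

    APT_GET_ZERO = "0 upgraded, 0 newly installed"

    # Sample apt-get summary line (invented): nothing installed or upgraded, two packages held back.
    out = "0 upgraded, 0 newly installed, 0 to remove and 2 not upgraded."

    print(APT_GET_ZERO in out)   # True  -> reported as changed=False
    print("0 upgraded, 0 newly installed, 0 to remove and 0 not upgraded." in out)
    # False -> the old full-string match would have reported a change here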
@@ -256,9 +256,9 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None,

        rc, out, err = m.run_command(cmd)
        if rc:
            m.fail_json(msg="'apt-get install %s' failed: %s" % (packages, err))
            m.fail_json(msg="'apt-get install %s' failed: %s" % (packages, err), stdout=out, stderr=err)
        else:
            m.exit_json(changed=True)
            m.exit_json(changed=True, stdout=out, stderr=err)
    else:
        m.exit_json(changed=False)
@@ -285,8 +285,8 @@ def remove(m, pkgspec, cache, purge=False,

    rc, out, err = m.run_command(cmd)
    if rc:
        m.fail_json(msg="'apt-get remove %s' failed: %s" % (packages, err))
    m.exit_json(changed=True)
        m.fail_json(msg="'apt-get remove %s' failed: %s" % (packages, err), stdout=out, stderr=err)
    m.exit_json(changed=True, stdout=out, stderr=err)

def upgrade(m, mode="yes", force=False,
            dpkg_options=expand_dpkg_options(DPKG_OPTIONS)):
@@ -319,10 +319,10 @@ def upgrade(m, mode="yes", force=False,
                                      force_yes, check_arg, upgrade_command)
    rc, out, err = m.run_command(cmd)
    if rc:
        m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err))
        m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out)
    if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out):
        m.exit_json(changed=False, msg=out)
    m.exit_json(changed=True, msg=out)
        m.exit_json(changed=False, msg=out, stdout=out, stderr=err)
    m.exit_json(changed=True, msg=out, stdout=out, stderr=err)

def main():
    module = AnsibleModule(
@@ -344,7 +344,13 @@ def main():
    )

    if not HAS_PYTHON_APT:
        module.fail_json(msg="Could not import python modules: apt, apt_pkg. Please install python-apt package.")
        try:
            module.run_command('apt-get install python-apt -y -q')
            global apt, apt_pkg
            import apt
            import apt_pkg
        except:
            module.fail_json(msg="Could not import python modules: apt, apt_pkg. Please install python-apt package.")

    global APTITUDE_CMD
    APTITUDE_CMD = module.get_bin_path("aptitude", False)
@@ -89,9 +89,10 @@ def remove_packages(module, pkgin_path, packages):
        if not query_package(module, pkgin_path, package):
            continue

        rc, out, err = module.run_command("%s delete -y %s" % (pkgin_path, package))
        if not module.check_mode:
            rc, out, err = module.run_command("%s delete -y %s" % (pkgin_path, package))

        if query_package(module, pkgin_path, package):
        if not module.check_mode and query_package(module, pkgin_path, package):
            module.fail_json(msg="failed to remove %s: %s" % (package, out))

        remove_c += 1
@@ -110,7 +111,7 @@ def install_packages(module, pkgin_path, packages, cached, pkgsite):
    if pkgsite != "":
        pkgsite="PACKAGESITE=%s" % (pkgsite)

    if cached == "no":
    if not module.check_mode and cached == "no":
        rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgin_path))
        if rc != 0:
            module.fail_json(msg="Could not update catalogue")
@@ -119,9 +120,10 @@ def install_packages(module, pkgin_path, packages, cached, pkgsite):
        if query_package(module, pkgin_path, package):
            continue

        rc, out, err = module.run_command("%s %s install -U -y %s" % (pkgsite, pkgin_path, package))
        if not module.check_mode:
            rc, out, err = module.run_command("%s %s install -U -y %s" % (pkgsite, pkgin_path, package))

        if not query_package(module, pkgin_path, package):
        if not module.check_mode and query_package(module, pkgin_path, package):
            module.fail_json(msg="failed to install %s: %s" % (package, out))

        install_c += 1
@@ -134,11 +136,12 @@ def install_packages(module, pkgin_path, packages, cached, pkgsite):

def main():
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default="present", choices=["present","absent"]),
            name = dict(aliases=["pkg"], required=True),
            cached = dict(default="no", required=False, choices=["yes","no"]),
            pkgsite = dict(default="", required=False)))
        argument_spec = dict(
            state = dict(default="present", choices=["present","absent"]),
            name = dict(aliases=["pkg"], required=True),
            cached = dict(default=False, type='bool'),
            pkgsite = dict(default="", required=False)),
        supports_check_mode = True)

    pkgin_path = module.get_bin_path('pkg', True)
@@ -428,6 +428,8 @@ def main():
    changed = False
    res_args = dict()

    # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
    os.umask(022)
    crontab = CronTab(module, user, cron_file)

    if crontab.syslogging:
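For reference, a tiny sketch (with an arbitrary path) of what that umask means for files created afterwards:

    import os

    os.umask(0o022)                                  # same value the hunk sets (written 022 in the module)
    fd = os.open('/tmp/umask_demo', os.O_CREAT | os.O_WRONLY, 0o666)
    os.close(fd)
    print(oct(os.stat('/tmp/umask_demo').st_mode & 0o777))  # 644: group/other cannot write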
@@ -790,9 +790,11 @@ class FreeBsdService(Service):
            self.rcconf_file = rcfile

        rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments))
        cmd = "%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments)
        rcvars = shlex.split(stdout, comments=True)

        if not rcvars:
            self.module.fail_json(msg="unable to determine rcvar")
            self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr)

        # In rare cases, i.e. sendmail, rcvar can return several key=value pairs
        # Usually there is just one, however. In other rare cases, i.e. uwsgi,
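The comments above concern parsing rcvar output; shlex.split with comments=True drops '#' lines and splits the rest, e.g. (sample output invented for the illustration):

    import shlex

    # Roughly what `service sendmail rcvar` can print: key=value pairs plus comment lines.
    stdout = '# sendmail\nsendmail_enable="NO"\nsendmail_submit_enable="YES"\n'

    print(shlex.split(stdout, comments=True))
    # ['sendmail_enable=NO', 'sendmail_submit_enable=YES']  (POSIX-style splitting also strips the quotes)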
@@ -805,7 +807,7 @@ class FreeBsdService(Service):
                break

        if self.rcconf_key is None:
            self.module.fail_json(msg="unable to determine rcvar")
            self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr)

        return self.service_enable_rcconf()
@@ -373,6 +373,9 @@ class Ec2Inventory(object):
            for name in route53_names:
                self.push(self.inventory, name, dest)

        # Global Tag: tag all EC2 instances
        self.push(self.inventory, 'ec2', dest)


    def add_rds_instance(self, instance, region):
        ''' Adds an RDS instance to the inventory and index, as long as it is
@@ -424,6 +427,9 @@ class Ec2Inventory(object):
        # Inventory: Group by parameter group
        self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)

        # Global Tag: all RDS instances
        self.push(self.inventory, 'rds', dest)


    def get_route53_records(self):
        ''' Get and store the map of resource records to domain names that