2014-10-02 17:07:05 +00:00
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
2014-10-15 22:53:43 +00:00
2014-10-15 23:18:12 +00:00
# Make coding more python3-ish
from __future__ import ( absolute_import , division , print_function )
__metaclass__ = type
2015-10-02 04:35:22 +00:00
import base64
2015-04-27 19:43:25 +00:00
import json
import subprocess
import sys
import time
2016-02-10 14:22:57 +00:00
import traceback
2015-04-27 19:43:25 +00:00
2016-04-25 16:35:25 +00:00
from ansible . compat . six import iteritems , string_types , binary_type
2015-09-03 06:23:27 +00:00
2014-11-14 22:14:08 +00:00
from ansible import constants as C
2015-09-09 19:26:40 +00:00
from ansible . errors import AnsibleError , AnsibleParserError , AnsibleUndefinedVariable , AnsibleConnectionFailure
2016-02-23 20:07:06 +00:00
from ansible . executor . task_result import TaskResult
2015-01-15 07:13:45 +00:00
from ansible . playbook . conditional import Conditional
2015-01-02 13:51:15 +00:00
from ansible . playbook . task import Task
2015-05-02 04:48:11 +00:00
from ansible . template import Templar
2015-10-02 04:35:22 +00:00
from ansible . utils . encrypt import key_for_hostname
2015-01-15 07:13:45 +00:00
from ansible . utils . listify import listify_lookup_plugin_terms
2016-02-18 10:21:58 +00:00
from ansible . utils . unicode import to_unicode , to_bytes
2015-12-19 17:49:06 +00:00
from ansible . vars . unsafe_proxy import UnsafeProxy , wrap_var
2014-11-14 22:14:08 +00:00
2015-11-11 16:18:26 +00:00
try :
from __main__ import display
except ImportError :
from ansible . utils . display import Display
display = Display ( )
2014-11-14 22:14:08 +00:00
__all__ = [ ' TaskExecutor ' ]
2015-11-11 16:18:26 +00:00
2014-10-15 23:37:29 +00:00
class TaskExecutor:

    '''
    This is the main worker class for the executor pipeline, which
    handles loading an action plugin to actually dispatch the task to
    a given host. This class roughly corresponds to the old Runner()
    class.
    '''

    # Modules that we optimize by squashing loop items into a single call to
    # the module
    SQUASH_ACTIONS = frozenset(C.DEFAULT_SQUASH_ACTIONS)

    def __init__(self, host, task, job_vars, play_context, new_stdin, loader, shared_loader_obj, rslt_q):
        # the target host and the task to be executed against it
        self._host = host
        self._task = task
        # variables in scope for this particular task execution
        self._job_vars = job_vars
        self._play_context = play_context
        self._new_stdin = new_stdin
        # loaders shared across the executor (action/connection/lookup plugins, etc.)
        self._loader = loader
        self._shared_loader_obj = shared_loader_obj
        # the connection plugin instance is established lazily in _execute()
        self._connection = None
        # queue used to report intermediate results (per-item and retry notices)
        self._rslt_q = rslt_q
def run ( self ) :
'''
The main executor entrypoint , where we determine if the specified
2016-03-23 07:20:27 +00:00
task requires looping and either runs the task with self . _run_loop ( )
or self . _execute ( ) . After that , the returned results are parsed and
returned as a dict .
2014-11-14 22:14:08 +00:00
'''
2015-11-11 16:18:26 +00:00
display . debug ( " in run() " )
2015-01-08 16:51:54 +00:00
try :
2016-07-13 14:06:34 +00:00
# get search path for this task to pass to lookup plugins
self . _job_vars [ ' ansible_search_path ' ] = self . _task . get_search_path ( )
2015-01-15 07:13:45 +00:00
2015-01-08 16:51:54 +00:00
items = self . _get_loop_items ( )
2015-01-09 15:37:31 +00:00
if items is not None :
2015-01-08 16:51:54 +00:00
if len ( items ) > 0 :
item_results = self . _run_loop ( items )
2015-04-02 16:54:45 +00:00
# loop through the item results, and remember the changed/failed
# result flags based on any item there.
changed = False
failed = False
for item in item_results :
2015-06-02 14:41:46 +00:00
if ' changed ' in item and item [ ' changed ' ] :
2015-11-11 16:18:26 +00:00
changed = True
2015-06-02 14:41:46 +00:00
if ' failed ' in item and item [ ' failed ' ] :
2015-11-11 16:18:26 +00:00
failed = True
2015-04-02 16:54:45 +00:00
# create the overall result item, and set the changed/failed
# flags there to reflect the overall result of the loop
2015-01-08 16:51:54 +00:00
res = dict ( results = item_results )
2015-04-02 16:54:45 +00:00
if changed :
res [ ' changed ' ] = True
if failed :
res [ ' failed ' ] = True
res [ ' msg ' ] = ' One or more items failed '
else :
res [ ' msg ' ] = ' All items completed '
2015-01-08 16:51:54 +00:00
else :
res = dict ( changed = False , skipped = True , skipped_reason = ' No items in the list ' , results = [ ] )
2014-11-14 22:14:08 +00:00
else :
2015-11-11 16:18:26 +00:00
display . debug ( " calling self._execute() " )
2015-01-08 16:51:54 +00:00
res = self . _execute ( )
2015-11-11 16:18:26 +00:00
display . debug ( " _execute() done " )
2014-11-14 22:14:08 +00:00
2015-01-15 22:56:54 +00:00
# make sure changed is set in the result, if it's not present
if ' changed ' not in res :
res [ ' changed ' ] = False
2015-09-17 20:04:47 +00:00
def _clean_res ( res ) :
if isinstance ( res , dict ) :
for k in res . keys ( ) :
res [ k ] = _clean_res ( res [ k ] )
elif isinstance ( res , list ) :
for idx , item in enumerate ( res ) :
res [ idx ] = _clean_res ( item )
elif isinstance ( res , UnsafeProxy ) :
return res . _obj
2016-04-25 16:35:25 +00:00
elif isinstance ( res , binary_type ) :
return to_unicode ( res , errors = ' strict ' )
2015-09-17 20:04:47 +00:00
return res
2015-11-11 16:18:26 +00:00
display . debug ( " dumping result to json " )
2015-09-17 20:04:47 +00:00
res = _clean_res ( res )
2015-11-11 16:18:26 +00:00
display . debug ( " done dumping result, returning " )
2015-09-17 20:04:47 +00:00
return res
2015-08-27 06:16:11 +00:00
except AnsibleError as e :
2015-03-25 20:56:46 +00:00
return dict ( failed = True , msg = to_unicode ( e , nonstring = ' simplerepr ' ) )
2016-02-10 14:22:57 +00:00
except Exception as e :
return dict ( failed = True , msg = ' Unexpected failure during module execution. ' , exception = to_unicode ( traceback . format_exc ( ) ) , stdout = ' ' )
2015-09-09 18:21:56 +00:00
finally :
try :
self . _connection . close ( )
except AttributeError :
pass
except Exception as e :
2015-12-04 19:50:39 +00:00
display . debug ( u " error closing connection: %s " % to_unicode ( e ) )
2014-11-14 22:14:08 +00:00
def _get_loop_items ( self ) :
'''
Loads a lookup plugin to handle the with_ * portion of a task ( if specified ) ,
and returns the items result .
'''
2015-11-19 00:12:38 +00:00
# save the play context variables to a temporary dictionary,
# so that we can modify the job vars without doing a full copy
# and later restore them to avoid modifying things too early
play_context_vars = dict ( )
self . _play_context . update_vars ( play_context_vars )
old_vars = dict ( )
for k in play_context_vars . keys ( ) :
if k in self . _job_vars :
old_vars [ k ] = self . _job_vars [ k ]
self . _job_vars [ k ] = play_context_vars [ k ]
templar = Templar ( loader = self . _loader , shared_loader_obj = self . _shared_loader_obj , variables = self . _job_vars )
2014-11-14 22:14:08 +00:00
items = None
2015-08-04 16:10:23 +00:00
if self . _task . loop :
if self . _task . loop in self . _shared_loader_obj . lookup_loader :
2015-11-11 16:18:26 +00:00
#TODO: remove convert_bare true and deprecate this in with_
2015-11-09 18:51:54 +00:00
if self . _task . loop == ' first_found ' :
# first_found loops are special. If the item is undefined
# then we want to fall through to the next value rather
# than failing.
2016-02-11 19:14:37 +00:00
loop_terms = listify_lookup_plugin_terms ( terms = self . _task . loop_args , templar = templar , loader = self . _loader , fail_on_undefined = False , convert_bare = True )
2015-11-09 18:51:54 +00:00
loop_terms = [ t for t in loop_terms if not templar . _contains_vars ( t ) ]
else :
try :
2016-02-11 19:14:37 +00:00
loop_terms = listify_lookup_plugin_terms ( terms = self . _task . loop_args , templar = templar , loader = self . _loader , fail_on_undefined = True , convert_bare = True )
2015-11-09 18:51:54 +00:00
except AnsibleUndefinedVariable as e :
2016-02-18 10:21:58 +00:00
display . deprecated ( " Skipping task due to undefined Error, in the future this will be a fatal error.: %s " % to_bytes ( e ) )
2016-04-03 15:31:18 +00:00
return None
2016-07-13 14:06:34 +00:00
# get lookup
mylookup = self . _shared_loader_obj . lookup_loader . get ( self . _task . loop , loader = self . _loader , templar = templar )
# give lookup task 'context' for subdir (mostly needed for first_found)
2016-07-14 13:05:01 +00:00
for subdir in [ ' template ' , ' var ' , ' file ' ] : #TODO: move this to constants?
2016-07-13 14:06:34 +00:00
if subdir in self . _task . name :
break
setattr ( mylookup , ' _subdir ' , subdir + ' s ' )
# run lookup
items = mylookup . run ( terms = loop_terms , variables = self . _job_vars , wantlist = True )
2015-08-04 16:10:23 +00:00
else :
raise AnsibleError ( " Unexpected failure in finding the lookup named ' %s ' in the available lookup plugins " % self . _task . loop )
2014-11-14 22:14:08 +00:00
2015-11-19 00:12:38 +00:00
# now we restore any old job variables that may have been modified,
# and delete them if they were in the play context vars but not in
# the old variables dictionary
for k in play_context_vars . keys ( ) :
if k in old_vars :
self . _job_vars [ k ] = old_vars [ k ]
else :
del self . _job_vars [ k ]
2015-09-08 16:18:10 +00:00
if items :
from ansible . vars . unsafe_proxy import UnsafeProxy
for idx , item in enumerate ( items ) :
if item is not None and not isinstance ( item , UnsafeProxy ) :
items [ idx ] = UnsafeProxy ( item )
2014-11-14 22:14:08 +00:00
return items
def _run_loop ( self , items ) :
'''
Runs the task with the loop items specified and collates the result
into an array named ' results ' which is inserted into the final result
along with the item for which the loop ran .
'''
results = [ ]
2015-01-20 07:16:19 +00:00
# make copies of the job vars and task so we can add the item to
# the variables and re-validate the task with the item variable
2015-11-04 16:26:06 +00:00
#task_vars = self._job_vars.copy()
task_vars = self . _job_vars
2014-11-14 22:14:08 +00:00
2015-10-23 07:27:09 +00:00
loop_var = ' item '
if self . _task . loop_control :
# the value may be 'None', so we still need to default it back to 'item'
loop_var = self . _task . loop_control . loop_var or ' item '
if loop_var in task_vars :
2016-05-12 01:49:46 +00:00
display . warning ( " The loop variable ' %s ' is already in use. You should set the `loop_var` value in the `loop_control` option for the task to something else to avoid variable collisions and unexpected behavior. " % loop_var )
2015-10-23 07:27:09 +00:00
items = self . _squash_items ( items , loop_var , task_vars )
2014-11-14 22:14:08 +00:00
for item in items :
2015-10-23 07:27:09 +00:00
task_vars [ loop_var ] = item
2015-01-08 16:51:54 +00:00
try :
tmp_task = self . _task . copy ( )
2015-09-03 12:11:30 +00:00
tmp_play_context = self . _play_context . copy ( )
2015-08-27 06:16:11 +00:00
except AnsibleParserError as e :
2015-12-04 19:50:39 +00:00
results . append ( dict ( failed = True , msg = to_unicode ( e ) ) )
2015-01-08 16:51:54 +00:00
continue
2015-09-03 12:11:30 +00:00
# now we swap the internal task and play context with their copies,
# execute, and swap them back so we can do the next iteration cleanly
2015-01-08 16:51:54 +00:00
( self . _task , tmp_task ) = ( tmp_task , self . _task )
2015-09-03 12:11:30 +00:00
( self . _play_context , tmp_play_context ) = ( tmp_play_context , self . _play_context )
2015-01-12 22:04:56 +00:00
res = self . _execute ( variables = task_vars )
2015-01-08 16:51:54 +00:00
( self . _task , tmp_task ) = ( tmp_task , self . _task )
2015-09-03 12:11:30 +00:00
( self . _play_context , tmp_play_context ) = ( tmp_play_context , self . _play_context )
2015-01-08 16:51:54 +00:00
# now update the result with the item info, and append the result
# to the list of results
2015-10-23 07:27:09 +00:00
res [ loop_var ] = item
2016-02-23 20:07:06 +00:00
res [ ' _ansible_item_result ' ] = True
self . _rslt_q . put ( TaskResult ( self . _host , self . _task , res ) , block = False )
2014-11-14 22:14:08 +00:00
results . append ( res )
2015-10-23 07:27:09 +00:00
del task_vars [ loop_var ]
2014-11-14 22:14:08 +00:00
return results
2015-10-23 07:27:09 +00:00
def _squash_items ( self , items , loop_var , variables ) :
2015-01-20 07:16:19 +00:00
'''
Squash items down to a comma - separated list for certain modules which support it
( typically package management modules ) .
'''
2016-05-21 14:06:01 +00:00
name = None
2016-05-12 01:26:39 +00:00
try :
# _task.action could contain templatable strings (via action: and
# local_action:) Template it before comparing. If we don't end up
# optimizing it here, the templatable string might use template vars
# that aren't available until later (it could even use vars from the
# with_items loop) so don't make the templated string permanent yet.
templar = Templar ( loader = self . _loader , shared_loader_obj = self . _shared_loader_obj , variables = variables )
task_action = self . _task . action
if templar . _contains_vars ( task_action ) :
task_action = templar . template ( task_action , fail_on_undefined = False )
if len ( items ) > 0 and task_action in self . SQUASH_ACTIONS :
if all ( isinstance ( o , string_types ) for o in items ) :
final_items = [ ]
for allowed in [ ' name ' , ' pkg ' , ' package ' ] :
name = self . _task . args . pop ( allowed , None )
if name is not None :
break
# This gets the information to check whether the name field
# contains a template that we can squash for
template_no_item = template_with_item = None
if name :
if templar . _contains_vars ( name ) :
variables [ loop_var ] = ' \0 $ '
template_no_item = templar . template ( name , variables , cache = False )
variables [ loop_var ] = ' \0 @ '
template_with_item = templar . template ( name , variables , cache = False )
del variables [ loop_var ]
# Check if the user is doing some operation that doesn't take
# name/pkg or the name/pkg field doesn't have any variables
# and thus the items can't be squashed
if template_no_item != template_with_item :
for item in items :
variables [ loop_var ] = item
if self . _task . evaluate_conditional ( templar , variables ) :
new_item = templar . template ( name , cache = False )
final_items . append ( new_item )
self . _task . args [ ' name ' ] = final_items
# Wrap this in a list so that the calling function loop
# executes exactly once
return [ final_items ]
else :
# Restore the name parameter
self . _task . args [ ' name ' ] = name
#elif:
# Right now we only optimize single entries. In the future we
# could optimize more types:
# * lists can be squashed together
# * dicts could squash entries that match in all cases except the
# name or pkg field.
except :
# Squashing is an optimization. If it fails for any reason,
# simply use the unoptimized list of items.
2016-05-21 14:06:01 +00:00
# Restore the name parameter
if name is not None :
self . _task . args [ ' name ' ] = name
2015-11-05 02:46:47 +00:00
return items
2015-01-20 07:16:19 +00:00
2015-01-12 22:04:56 +00:00
def _execute ( self , variables = None ) :
2014-11-14 22:14:08 +00:00
'''
The primary workhorse of the executor system , this runs the task
on the specified host ( which may be the delegated_to host ) and handles
the retry / until and block rescue / always execution
'''
2015-01-12 22:04:56 +00:00
if variables is None :
variables = self . _job_vars
2015-05-02 04:48:11 +00:00
templar = Templar ( loader = self . _loader , shared_loader_obj = self . _shared_loader_obj , variables = variables )
2015-10-29 23:23:42 +00:00
context_validation_error = None
try :
# apply the given task's information to the connection info,
# which may override some fields already set by the play or
# the options specified on the command line
self . _play_context = self . _play_context . set_task_and_variable_override ( task = self . _task , variables = variables , templar = templar )
# fields set from the play/task may be based on variables, so we have to
# do the same kind of post validation step on it here before we use it.
self . _play_context . post_validate ( templar = templar )
2016-01-18 18:36:40 +00:00
# now that the play context is finalized, if the remote_addr is not set
# default to using the host's address field as the remote address
if not self . _play_context . remote_addr :
self . _play_context . remote_addr = self . _host . address
2015-10-29 23:23:42 +00:00
# We also add "magic" variables back into the variables dict to make sure
# a certain subset of variables exist.
self . _play_context . update_vars ( variables )
except AnsibleError as e :
# save the error, which we'll raise later if we don't end up
# skipping this task during the conditional evaluation step
context_validation_error = e
2015-04-15 02:10:17 +00:00
2015-01-20 20:03:26 +00:00
# Evaluate the conditional (if any) for this task, which we do before running
# the final task post-validation. We do this before the post validation due to
# the fact that the conditional may specify that the task be skipped due to a
# variable not being present which would otherwise cause validation to fail
2015-10-22 03:14:27 +00:00
try :
if not self . _task . evaluate_conditional ( templar , variables ) :
2015-11-11 16:18:26 +00:00
display . debug ( " when evaluation failed, skipping this task " )
2015-10-22 03:14:27 +00:00
return dict ( changed = False , skipped = True , skip_reason = ' Conditional check failed ' , _ansible_no_log = self . _play_context . no_log )
except AnsibleError :
# skip conditional exception in the case of includes as the vars needed might not be avaiable except in the included tasks or due to tags
if self . _task . action != ' include ' :
raise
2014-11-14 22:14:08 +00:00
2015-10-29 23:23:42 +00:00
# if we ran into an error while setting up the PlayContext, raise it now
if context_validation_error is not None :
raise context_validation_error
2015-10-19 17:40:47 +00:00
# if this task is a TaskInclude, we just return now with a success code so the
# main thread can expand the task list for the given host
if self . _task . action == ' include ' :
2015-10-20 18:19:38 +00:00
include_variables = self . _task . args . copy ( )
include_file = include_variables . pop ( ' _raw_params ' , None )
2015-10-19 17:40:47 +00:00
if not include_file :
return dict ( failed = True , msg = " No include file was specified to the include " )
2015-10-20 18:19:38 +00:00
include_file = templar . template ( include_file )
return dict ( include = include_file , include_variables = include_variables )
2015-10-19 17:40:47 +00:00
2015-07-05 05:06:54 +00:00
# Now we do final validation on the task, which sets all fields to their final values.
2015-05-02 04:48:11 +00:00
self . _task . post_validate ( templar = templar )
2015-07-24 14:31:14 +00:00
if ' _variable_params ' in self . _task . args :
variable_params = self . _task . args . pop ( ' _variable_params ' )
if isinstance ( variable_params , dict ) :
2015-11-11 16:18:26 +00:00
display . deprecated ( " Using variables for task params is unsafe, especially if the variables come from an external source like facts " )
2015-07-24 14:31:14 +00:00
variable_params . update ( self . _task . args )
self . _task . args = variable_params
2014-11-14 22:14:08 +00:00
2015-07-09 12:23:43 +00:00
# get the connection and the handler for this execution
2016-01-18 22:32:25 +00:00
if not self . _connection or not getattr ( self . _connection , ' connected ' , False ) or self . _play_context . remote_addr != self . _connection . _play_context . remote_addr :
2015-11-23 16:53:05 +00:00
self . _connection = self . _get_connection ( variables = variables , templar = templar )
2016-06-10 17:13:53 +00:00
self . _connection . set_host_overrides ( host = self . _host , hostvars = variables . get ( ' hostvars ' , { } ) . get ( self . _host . name , { } ) )
2016-01-18 18:36:40 +00:00
else :
# if connection is reused, its _play_context is no longer valid and needs
# to be replaced with the one templated above, in case other data changed
self . _connection . _play_context = self . _play_context
2015-07-09 12:23:43 +00:00
self . _handler = self . _get_action_handler ( connection = self . _connection , templar = templar )
2015-01-23 03:45:25 +00:00
# And filter out any fields which were set to default(omit), and got the omit token value
omit_token = variables . get ( ' omit ' )
if omit_token is not None :
2015-09-03 06:47:11 +00:00
self . _task . args = dict ( ( i [ 0 ] , i [ 1 ] ) for i in iteritems ( self . _task . args ) if i [ 1 ] != omit_token )
2015-01-23 03:45:25 +00:00
2015-01-20 20:03:26 +00:00
# Read some values from the task, so that we can modify them if need be
2016-05-12 18:42:24 +00:00
if self . _task . until :
2016-06-23 23:07:11 +00:00
retries = self . _task . retries + 1
2016-05-12 18:42:24 +00:00
if retries is None :
retries = 3
2016-06-23 23:07:11 +00:00
elif retries < = 0 :
retries = 1
2015-10-27 19:35:56 +00:00
else :
2014-11-14 22:14:08 +00:00
retries = 1
delay = self . _task . delay
if delay < 0 :
2015-01-07 17:44:52 +00:00
delay = 1
2014-11-14 22:14:08 +00:00
2015-01-15 22:56:54 +00:00
# make a copy of the job vars here, in case we need to update them
# with the registered variable value later on when testing conditions
2015-11-11 19:23:08 +00:00
vars_copy = variables . copy ( )
2015-01-15 22:56:54 +00:00
2015-11-11 16:18:26 +00:00
display . debug ( " starting attempt loop " )
2014-11-14 22:14:08 +00:00
result = None
2016-05-12 18:42:24 +00:00
for attempt in range ( 1 , retries + 1 ) :
2015-11-11 16:18:26 +00:00
display . debug ( " running the handler " )
2015-09-09 19:26:40 +00:00
try :
result = self . _handler . run ( task_vars = variables )
except AnsibleConnectionFailure as e :
2015-12-04 19:50:39 +00:00
return dict ( unreachable = True , msg = to_unicode ( e ) )
2015-11-11 16:18:26 +00:00
display . debug ( " handler run complete " )
2015-01-02 13:51:15 +00:00
2016-02-23 20:07:06 +00:00
# preserve no log
result [ " _ansible_no_log " ] = self . _play_context . no_log
2015-12-18 15:58:55 +00:00
# update the local copy of vars with the registered value, if specified,
# or any facts which may have been generated by the module execution
if self . _task . register :
2015-12-19 17:49:06 +00:00
vars_copy [ self . _task . register ] = wrap_var ( result . copy ( ) )
2015-12-18 15:58:55 +00:00
2015-01-07 17:44:52 +00:00
if self . _task . async > 0 :
if self . _task . poll > 0 :
2015-05-04 06:33:10 +00:00
result = self . _poll_async_result ( result = result , templar = templar )
2015-01-02 13:51:15 +00:00
2016-02-23 20:37:21 +00:00
# ensure no log is preserved
result [ " _ansible_no_log " ] = self . _play_context . no_log
2015-11-11 19:23:08 +00:00
# helper methods for use below in evaluating changed/failed_when
2015-09-15 17:37:07 +00:00
def _evaluate_changed_when_result ( result ) :
2016-03-09 20:18:37 +00:00
if self . _task . changed_when is not None and self . _task . changed_when :
2015-11-11 19:23:08 +00:00
cond = Conditional ( loader = self . _loader )
2016-03-06 15:47:15 +00:00
cond . when = self . _task . changed_when
2015-05-04 06:33:10 +00:00
result [ ' changed ' ] = cond . evaluate_conditional ( templar , vars_copy )
2015-09-15 17:37:07 +00:00
def _evaluate_failed_when_result ( result ) :
2016-03-06 15:47:15 +00:00
if self . _task . failed_when :
2015-11-11 19:23:08 +00:00
cond = Conditional ( loader = self . _loader )
2016-03-06 15:47:15 +00:00
cond . when = self . _task . failed_when
2015-05-04 06:33:10 +00:00
failed_when_result = cond . evaluate_conditional ( templar , vars_copy )
2015-01-15 22:56:54 +00:00
result [ ' failed_when_result ' ] = result [ ' failed ' ] = failed_when_result
2016-03-10 13:01:54 +00:00
else :
failed_when_result = False
return failed_when_result
2015-09-15 17:37:07 +00:00
2015-11-11 19:23:08 +00:00
if ' ansible_facts ' in result :
vars_copy . update ( result [ ' ansible_facts ' ] )
# set the failed property if the result has a non-zero rc. This will be
# overridden below if the failed_when property is set
if result . get ( ' rc ' , 0 ) != 0 :
result [ ' failed ' ] = True
# if we didn't skip this task, use the helpers to evaluate the changed/
# failed_when properties
if ' skipped ' not in result :
_evaluate_changed_when_result ( result )
_evaluate_failed_when_result ( result )
2016-05-12 18:42:24 +00:00
if retries > 1 :
2015-11-11 19:23:08 +00:00
cond = Conditional ( loader = self . _loader )
2016-03-06 15:47:15 +00:00
cond . when = self . _task . until
2015-09-15 17:37:07 +00:00
if cond . evaluate_conditional ( templar , vars_copy ) :
2015-07-28 14:14:48 +00:00
break
2016-02-23 20:07:06 +00:00
else :
# no conditional check, or it failed, so sleep for the specified time
2016-05-12 18:42:24 +00:00
if attempt < retries :
result [ ' attempts ' ] = attempt
result [ ' _ansible_retry ' ] = True
result [ ' retries ' ] = retries
display . debug ( ' Retrying task, attempt %d of %d ' % ( attempt , retries ) )
self . _rslt_q . put ( TaskResult ( self . _host , self . _task , result ) , block = False )
time . sleep ( delay )
2016-02-23 18:12:38 +00:00
else :
if retries > 1 :
# we ran out of attempts, so mark the result as failed
result [ ' failed ' ] = True
2014-11-14 22:14:08 +00:00
2015-03-25 18:51:40 +00:00
# do the final update of the local variables here, for both registered
# values and any facts which may have been created
if self . _task . register :
2015-12-19 17:49:06 +00:00
variables [ self . _task . register ] = wrap_var ( result )
2015-03-25 18:51:40 +00:00
if ' ansible_facts ' in result :
variables . update ( result [ ' ansible_facts ' ] )
2015-07-17 16:02:26 +00:00
# save the notification target in the result, if it was specified, as
# this task may be running in a loop in which case the notification
# may be item-specific, ie. "notify: service {{item}}"
2015-07-17 18:44:05 +00:00
if self . _task . notify is not None :
2015-07-27 18:01:02 +00:00
result [ ' _ansible_notify ' ] = self . _task . notify
2015-07-17 16:02:26 +00:00
2015-11-10 15:14:30 +00:00
# add the delegated vars to the result, so we can reference them
# on the results side without having to do any further templating
# FIXME: we only want a limited set of variables here, so this is currently
# hardcoded but should be possibly fixed if we want more or if
# there is another source of truth we can use
delegated_vars = variables . get ( ' ansible_delegated_vars ' , dict ( ) ) . get ( self . _task . delegate_to , dict ( ) ) . copy ( )
if len ( delegated_vars ) > 0 :
result [ " _ansible_delegated_vars " ] = dict ( )
for k in ( ' ansible_host ' , ) :
result [ " _ansible_delegated_vars " ] [ k ] = delegated_vars . get ( k )
2015-03-25 18:51:40 +00:00
# and return
2015-11-11 16:18:26 +00:00
display . debug ( " attempt loop complete, returning result " )
2014-11-14 22:14:08 +00:00
return result
2015-05-04 06:33:10 +00:00
def _poll_async_result ( self , result , templar ) :
2015-01-02 13:51:15 +00:00
'''
Polls for the specified JID to be complete
'''
2015-01-07 17:44:52 +00:00
async_jid = result . get ( ' ansible_job_id ' )
2015-01-02 13:51:15 +00:00
if async_jid is None :
return dict ( failed = True , msg = " No job id was returned by the async task " )
# Create a new psuedo-task to run the async_status module, and run
# that (with a sleep for "poll" seconds between each retry) until the
# async time limit is exceeded.
async_task = Task ( ) . load ( dict ( action = ' async_status jid= %s ' % async_jid ) )
# Because this is an async task, the action handler is async. However,
# we need the 'normal' action handler for the status check, so get it
# now via the action_loader
2015-08-28 20:32:09 +00:00
normal_handler = self . _shared_loader_obj . action_loader . get (
2015-01-02 13:51:15 +00:00
' normal ' ,
task = async_task ,
connection = self . _connection ,
2015-07-21 16:12:22 +00:00
play_context = self . _play_context ,
2015-01-15 07:13:45 +00:00
loader = self . _loader ,
2015-05-04 06:33:10 +00:00
templar = templar ,
2015-05-02 04:48:11 +00:00
shared_loader_obj = self . _shared_loader_obj ,
2015-01-02 13:51:15 +00:00
)
time_left = self . _task . async
while time_left > 0 :
time . sleep ( self . _task . poll )
async_result = normal_handler . run ( )
2016-07-06 21:26:50 +00:00
# We do not bail out of the loop in cases where the failure
# is associated with a parsing error. The async_runner can
# have issues which result in a half-written/unparseable result
# file on disk, which manifests to the user as a timeout happening
# before it's time to timeout.
if int ( async_result . get ( ' finished ' , 0 ) ) == 1 or ( ' failed ' in async_result and async_result . get ( ' parsed ' , True ) ) or ' skipped ' in async_result :
2015-01-02 13:51:15 +00:00
break
time_left - = self . _task . poll
if int ( async_result . get ( ' finished ' , 0 ) ) != 1 :
2016-07-06 21:26:50 +00:00
if async_result . get ( ' parsed ' ) :
return dict ( failed = True , msg = " async task did not complete within the requested time " )
else :
return dict ( failed = True , msg = " async task produced unparseable results " , async_result = async_result )
2015-01-02 13:51:15 +00:00
else :
return async_result
2015-10-02 04:35:22 +00:00
def _get_connection ( self , variables , templar ) :
2014-11-14 22:14:08 +00:00
'''
Reads the connection property for the host , and returns the
correct connection object from the list of connection plugins
'''
2015-02-09 22:54:44 +00:00
if self . _task . delegate_to is not None :
2015-09-18 18:48:26 +00:00
# since we're delegating, we don't want to use interpreter values
# which would have been set for the original target host
for i in variables . keys ( ) :
2016-05-05 15:14:11 +00:00
if isinstance ( i , string_types ) and i . startswith ( ' ansible_ ' ) and i . endswith ( ' _interpreter ' ) :
2015-09-18 18:48:26 +00:00
del variables [ i ]
# now replace the interpreter values with those that may have come
# from the delegated-to host
2015-10-21 15:05:45 +00:00
delegated_vars = variables . get ( ' ansible_delegated_vars ' , dict ( ) ) . get ( self . _task . delegate_to , dict ( ) )
2015-09-18 18:48:26 +00:00
if isinstance ( delegated_vars , dict ) :
for i in delegated_vars :
2016-05-05 15:14:11 +00:00
if isinstance ( i , string_types ) and i . startswith ( " ansible_ " ) and i . endswith ( " _interpreter " ) :
2015-09-18 18:48:26 +00:00
variables [ i ] = delegated_vars [ i ]
2015-02-09 22:54:44 +00:00
2015-07-21 16:12:22 +00:00
conn_type = self . _play_context . connection
2014-11-14 22:14:08 +00:00
if conn_type == ' smart ' :
2015-12-17 17:43:36 +00:00
conn_type = ' ssh '
if sys . platform . startswith ( ' darwin ' ) and self . _play_context . password :
# due to a current bug in sshpass on OSX, which can trigger
# a kernel panic even for non-privileged users, we revert to
# paramiko on that OS when a SSH password is specified
conn_type = " paramiko "
else :
# see if SSH can support ControlPersist if not use paramiko
try :
cmd = subprocess . Popen ( [ ' ssh ' , ' -o ' , ' ControlPersist ' ] , stdout = subprocess . PIPE , stderr = subprocess . PIPE )
( out , err ) = cmd . communicate ( )
2016-03-16 21:12:48 +00:00
err = to_unicode ( err )
if u " Bad configuration option " in err or u " Usage: " in err :
2015-12-17 17:43:36 +00:00
conn_type = " paramiko "
except OSError :
conn_type = " paramiko "
2014-11-14 22:14:08 +00:00
2015-08-28 20:32:09 +00:00
connection = self . _shared_loader_obj . connection_loader . get ( conn_type , self . _play_context , self . _new_stdin )
2014-11-14 22:14:08 +00:00
if not connection :
raise AnsibleError ( " the connection plugin ' %s ' was not found " % conn_type )
2015-10-02 04:35:22 +00:00
if self . _play_context . accelerate :
2016-05-13 20:59:36 +00:00
# accelerate is deprecated as of 2.1...
display . deprecated ( ' Accelerated mode is deprecated. Consider using SSH with ControlPersist and pipelining enabled instead ' )
2015-10-02 04:35:22 +00:00
# launch the accelerated daemon here
ssh_connection = connection
handler = self . _shared_loader_obj . action_loader . get (
' normal ' ,
task = self . _task ,
connection = ssh_connection ,
play_context = self . _play_context ,
loader = self . _loader ,
templar = templar ,
shared_loader_obj = self . _shared_loader_obj ,
)
key = key_for_hostname ( self . _play_context . remote_addr )
accelerate_args = dict (
password = base64 . b64encode ( key . __str__ ( ) ) ,
port = self . _play_context . accelerate_port ,
minutes = C . ACCELERATE_DAEMON_TIMEOUT ,
ipv6 = self . _play_context . accelerate_ipv6 ,
debug = self . _play_context . verbosity ,
)
connection = self . _shared_loader_obj . connection_loader . get ( ' accelerate ' , self . _play_context , self . _new_stdin )
if not connection :
raise AnsibleError ( " the connection plugin ' %s ' was not found " % conn_type )
try :
connection . _connect ( )
except AnsibleConnectionFailure :
2016-03-16 21:12:48 +00:00
display . debug ( ' connection failed, fallback to accelerate ' )
2015-10-02 04:35:22 +00:00
res = handler . _execute_module ( module_name = ' accelerate ' , module_args = accelerate_args , task_vars = variables , delete_remote_tmp = False )
2016-03-16 21:12:48 +00:00
display . debug ( res )
2015-10-02 04:35:22 +00:00
connection . _connect ( )
2014-11-14 22:14:08 +00:00
return connection
2015-05-04 06:33:10 +00:00
def _get_action_handler ( self , connection , templar ) :
2014-11-14 22:14:08 +00:00
'''
Returns the correct action plugin to handle the requestion task action
'''
2014-10-15 22:53:43 +00:00
2015-08-28 20:32:09 +00:00
if self . _task . action in self . _shared_loader_obj . action_loader :
2014-11-14 22:14:08 +00:00
if self . _task . async != 0 :
2015-09-23 14:11:52 +00:00
raise AnsibleError ( " async mode is not supported with the %s module " % self . _task . action )
2014-11-14 22:14:08 +00:00
handler_name = self . _task . action
elif self . _task . async == 0 :
handler_name = ' normal '
else :
handler_name = ' async '
2014-10-15 22:53:43 +00:00
2015-08-28 20:32:09 +00:00
handler = self . _shared_loader_obj . action_loader . get (
2014-11-14 22:14:08 +00:00
handler_name ,
task = self . _task ,
connection = connection ,
2015-07-21 16:12:22 +00:00
play_context = self . _play_context ,
2015-01-15 07:13:45 +00:00
loader = self . _loader ,
2015-05-04 06:33:10 +00:00
templar = templar ,
2015-05-02 04:48:11 +00:00
shared_loader_obj = self . _shared_loader_obj ,
2014-11-14 22:14:08 +00:00
)
2015-03-02 18:38:45 +00:00
2014-11-14 22:14:08 +00:00
if not handler :
raise AnsibleError ( " the handler ' %s ' was not found " % handler_name )
2014-10-15 22:53:43 +00:00
2014-11-14 22:14:08 +00:00
return handler