2014-11-14 22:14:08 +00:00
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import ( absolute_import , division , print_function )
__metaclass__ = type
2015-11-04 16:26:06 +00:00
import json
2014-11-14 22:14:08 +00:00
import time
2015-11-05 21:21:34 +00:00
import zlib
2016-04-06 06:48:37 +00:00
from collections import defaultdict
2014-11-14 22:14:08 +00:00
2015-09-22 16:41:06 +00:00
from jinja2 . exceptions import UndefinedError
2016-04-06 06:48:37 +00:00
from ansible . compat . six . moves import queue as Queue
from ansible . compat . six import iteritems , text_type , string_types
2015-08-28 16:14:23 +00:00
from ansible import constants as C
2015-12-17 17:43:36 +00:00
from ansible . errors import AnsibleError , AnsibleParserError , AnsibleUndefinedVariable
2015-12-11 19:55:44 +00:00
from ansible . executor . play_iterator import PlayIterator
2015-05-29 04:58:38 +00:00
from ansible . executor . task_result import TaskResult
2015-01-02 13:51:15 +00:00
from ansible . inventory . host import Host
from ansible . inventory . group import Group
2015-03-25 18:51:40 +00:00
from ansible . playbook . helpers import load_list_of_blocks
2015-07-24 20:21:16 +00:00
from ansible . playbook . included_file import IncludedFile
2016-08-15 18:45:02 +00:00
from ansible . playbook . task_include import TaskInclude
2016-08-26 17:42:13 +00:00
from ansible . playbook . role_include import IncludeRole
2015-10-14 21:50:23 +00:00
from ansible . plugins import action_loader , connection_loader , filter_loader , lookup_loader , module_loader , test_loader
2015-07-07 19:47:51 +00:00
from ansible . template import Templar
2016-01-18 22:42:50 +00:00
from ansible . utils . unicode import to_unicode
2015-11-11 18:19:58 +00:00
from ansible . vars . unsafe_proxy import wrap_var
2016-07-31 08:23:28 +00:00
from ansible . vars import combine_vars , strip_internal_keys
2014-11-14 22:14:08 +00:00
2015-07-23 14:24:50 +00:00
try :
from __main__ import display
except ImportError :
from ansible . utils . display import Display
display = Display ( )
2014-11-14 22:14:08 +00:00
__all__ = [ ' StrategyBase ' ]
2015-11-11 18:19:58 +00:00
2014-11-14 22:14:08 +00:00
class StrategyBase:

    '''
    This is the base class for strategy plugins, which contains some common
    code useful to all strategies like running handlers, cleanup actions, etc.
    '''

    def __init__(self, tqm):
        # Keep the task queue manager and pull out the collaborators this
        # strategy needs from it.
        self._tqm = tqm
        self._inventory = tqm.get_inventory()

        # Shared (TQM-owned) handler bookkeeping: hosts notified per handler,
        # and handlers subscribed to a listen topic.
        self._notified_handlers = tqm._notified_handlers
        self._listening_handlers = tqm._listening_handlers

        self._variable_manager = tqm.get_variable_manager()
        self._loader = tqm.get_loader()
        self._final_q = tqm._final_q

        # CLI options; default to False when the option is absent.
        self._step = getattr(tqm._options, 'step', False)
        self._diff = getattr(tqm._options, 'diff', False)

        # Backwards compat: self._display isn't really needed, just import the global display and use that.
        self._display = display

        # internal counters
        self._pending_results = 0

        # this dictionary is used to keep track of hosts that have
        # outstanding tasks still in queue
        self._blocked_hosts = dict()
2016-08-05 18:44:57 +00:00
def run(self, iterator, play_context, result=0):
    '''
    Final stage of a strategy run: flush notified handlers and compute the
    overall return code from the failed/unreachable host sets.

    :arg iterator: PlayIterator for the current play
    :arg play_context: PlayContext for the current play
    :arg result: code accumulated so far by the subclass (bitmask of the
        TQM RUN_* constants, or a bool from older call sites)
    :returns: one of the TQM RUN_* result codes (or ``result`` unchanged)
    '''

    # save the failed/unreachable hosts, as the run_handlers()
    # method will clear that information during its execution
    failed_hosts = iterator.get_failed_hosts()
    unreachable_hosts = self._tqm._unreachable_hosts.keys()

    display.debug("running handlers")
    handler_result = self.run_handlers(iterator, play_context)
    if isinstance(handler_result, bool) and not handler_result:
        result |= self._tqm.RUN_ERROR
    elif not handler_result:
        # NOTE(review): this branch only fires for a falsy non-bool value
        # (i.e. 0 / RUN_OK), so the OR below is a no-op; presumably the
        # intent was to merge a non-OK integer code — confirm upstream.
        result |= handler_result

    # now update with the hosts (if any) that failed or were
    # unreachable during the handler execution phase
    failed_hosts = set(failed_hosts).union(iterator.get_failed_hosts())
    unreachable_hosts = set(unreachable_hosts).union(self._tqm._unreachable_hosts.keys())

    # return the appropriate code, depending on the status hosts after the run
    if not isinstance(result, bool) and result != self._tqm.RUN_OK:
        return result
    elif len(unreachable_hosts) > 0:
        return self._tqm.RUN_UNREACHABLE_HOSTS
    elif len(failed_hosts) > 0:
        return self._tqm.RUN_FAILED_HOSTS
    else:
        return self._tqm.RUN_OK
2014-11-14 22:14:08 +00:00
def get_hosts_remaining ( self , play ) :
2015-11-11 18:19:58 +00:00
return [ host for host in self . _inventory . get_hosts ( play . hosts )
if host . name not in self . _tqm . _failed_hosts and host . name not in self . _tqm . _unreachable_hosts ]
2014-11-14 22:14:08 +00:00
2015-02-09 22:54:44 +00:00
def get_failed_hosts ( self , play ) :
return [ host for host in self . _inventory . get_hosts ( play . hosts ) if host . name in self . _tqm . _failed_hosts ]
2014-11-14 22:14:08 +00:00
2015-06-23 01:03:55 +00:00
def add_tqm_variables ( self , vars , play ) :
'''
Base class method to add extra variables / information to the list of task
vars sent through the executor engine regarding the task queue manager state .
'''
2015-11-04 16:26:06 +00:00
vars [ ' ansible_current_hosts ' ] = [ h . name for h in self . get_hosts_remaining ( play ) ]
vars [ ' ansible_failed_hosts ' ] = [ h . name for h in self . get_failed_hosts ( play ) ]
2015-06-23 01:03:55 +00:00
2015-07-21 16:12:22 +00:00
def _queue_task(self, host, task, task_vars, play_context):
    ''' handles queueing the task up to be sent to a worker '''
    # The actual dispatch is delegated to the task queue manager; this
    # class only tracks how many results are still outstanding.
    self._tqm.queue_task(host, task, task_vars, play_context)
    self._pending_results += 1
2014-11-14 22:14:08 +00:00
2016-08-26 19:55:56 +00:00
def _process_pending_results(self, iterator, one_pass=False, timeout=0.001):
    '''
    Reads results off the final queue and takes appropriate action
    based on the result (executing callbacks, updating state, etc.).

    :arg iterator: PlayIterator for the current play
    :kwarg one_pass: when True, return after a single drain attempt
    :kwarg timeout: per-get() timeout on the final queue
    :returns: list of TaskResult objects processed during this call
    '''

    ret_results = []

    def get_original_host(host_name):
        # map a (possibly serialized) worker-side host name back to the
        # real Host object held by this process
        host_name = to_unicode(host_name)
        if host_name in self._inventory._hosts_cache:
            return self._inventory._hosts_cache[host_name]
        else:
            return self._inventory.get_host(host_name)

    def search_handler_blocks(handler_name, handler_blocks):
        # find a handler task in the given blocks whose (templated) name
        # matches handler_name, or None
        for handler_block in handler_blocks:
            for handler_task in handler_block.block:
                handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=handler_task)
                templar = Templar(loader=self._loader, variables=handler_vars)
                try:
                    # first we check with the full result of get_name(), which may
                    # include the role name (if the handler is from a role). If that
                    # is not found, we resort to the simple name field, which doesn't
                    # have anything extra added to it.
                    target_handler_name = templar.template(handler_task.name)
                    if target_handler_name == handler_name:
                        return handler_task
                    else:
                        target_handler_name = templar.template(handler_task.get_name())
                        if target_handler_name == handler_name:
                            return handler_task
                except (UndefinedError, AnsibleUndefinedVariable) as e:
                    # We skip this handler due to the fact that it may be using
                    # a variable in the name that was conditionally included via
                    # set_fact or some other method, and we don't want to error
                    # out unnecessarily
                    continue
        return None

    def parent_handler_match(target_handler, handler_name):
        # walk up the include chain of target_handler looking for an
        # include whose (templated) name matches handler_name
        if target_handler:
            if isinstance(target_handler, (TaskInclude, IncludeRole)):
                try:
                    handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=target_handler)
                    templar = Templar(loader=self._loader, variables=handler_vars)
                    target_handler_name = templar.template(target_handler.name)
                    if target_handler_name == handler_name:
                        return True
                    else:
                        target_handler_name = templar.template(target_handler.get_name())
                        if target_handler_name == handler_name:
                            return True
                except (UndefinedError, AnsibleUndefinedVariable) as e:
                    pass
            return parent_handler_match(target_handler._parent, handler_name)
        else:
            return False

    # allow at most two empty-queue timeouts before giving up this drain
    passes = 1
    while not self._tqm._terminated and passes < 3:
        try:
            task_result = self._final_q.get(timeout=timeout)

            # swap the serialized worker-side host/task for the originals
            original_host = get_original_host(task_result._host)
            original_task = iterator.get_original_task(original_host, task_result._task)
            task_result._host = original_host
            task_result._task = original_task

            # send callbacks for 'non final' results
            if '_ansible_retry' in task_result._result:
                self._tqm.send_callback('v2_runner_retry', task_result)
                continue
            elif '_ansible_item_result' in task_result._result:
                if task_result.is_failed() or task_result.is_unreachable():
                    self._tqm.send_callback('v2_runner_item_on_failed', task_result)
                elif task_result.is_skipped():
                    self._tqm.send_callback('v2_runner_item_on_skipped', task_result)
                else:
                    self._tqm.send_callback('v2_runner_item_on_ok', task_result)
                continue

            if original_task.register:
                # store the registered result (minus internal keys) as a
                # non-persistent fact; with run_once it goes to every
                # reachable host in the play
                if original_task.run_once:
                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                else:
                    host_list = [original_host]

                clean_copy = strip_internal_keys(task_result._result)
                if 'invocation' in clean_copy:
                    del clean_copy['invocation']

                for target_host in host_list:
                    self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})

            # all host status messages contain 2 entries: (msg, task_result)
            role_ran = False
            if task_result.is_failed():
                role_ran = True
                if not original_task.ignore_errors:
                    display.debug("marking %s as failed" % original_host.name)
                    if original_task.run_once:
                        # if we're using run_once, we have to fail every host here
                        for h in self._inventory.get_hosts(iterator._play.hosts):
                            if h.name not in self._tqm._unreachable_hosts:
                                # peek before/after so the iterator advances its
                                # internal state around the failure mark
                                state, _ = iterator.get_next_task_for_host(h, peek=True)
                                iterator.mark_host_failed(h)
                                state, new_task = iterator.get_next_task_for_host(h, peek=True)
                    else:
                        iterator.mark_host_failed(original_host)

                    # only add the host to the failed list officially if it has
                    # been failed by the iterator
                    if iterator.is_failed(original_host):
                        self._tqm._failed_hosts[original_host.name] = True
                        self._tqm._stats.increment('failures', original_host.name)
                    else:
                        # otherwise, we grab the current state and if we're iterating on
                        # the rescue portion of a block then we save the failed task in a
                        # special var for use within the rescue/always
                        state, _ = iterator.get_next_task_for_host(original_host, peek=True)
                        if state.run_state == iterator.ITERATING_RESCUE:
                            self._variable_manager.set_nonpersistent_facts(
                                original_host,
                                dict(
                                    ansible_failed_task=original_task.serialize(),
                                    ansible_failed_result=task_result._result,
                                ),
                            )
                else:
                    # failure ignored: count it as ok
                    self._tqm._stats.increment('ok', original_host.name)
                self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=original_task.ignore_errors)
            elif task_result.is_unreachable():
                self._tqm._unreachable_hosts[original_host.name] = True
                self._tqm._stats.increment('dark', original_host.name)
                self._tqm.send_callback('v2_runner_on_unreachable', task_result)
            elif task_result.is_skipped():
                self._tqm._stats.increment('skipped', original_host.name)
                self._tqm.send_callback('v2_runner_on_skipped', task_result)
            else:
                role_ran = True

                if original_task.loop:
                    # this task had a loop, and has more than one result, so
                    # loop over all of them instead of a single result
                    result_items = task_result._result.get('results', [])
                else:
                    result_items = [task_result._result]

                for result_item in result_items:
                    if '_ansible_notify' in result_item:
                        if task_result.is_changed():
                            # The shared dictionary for notified handlers is a proxy, which
                            # does not detect when sub-objects within the proxy are modified.
                            # So, per the docs, we reassign the list so the proxy picks up and
                            # notifies all other threads
                            for handler_name in result_item['_ansible_notify']:
                                # Find the handler using the above helper. First we look up the
                                # dependency chain of the current task (if it's from a role), otherwise
                                # we just look through the list of handlers in the current play/all
                                # roles and use the first one that matches the notify name
                                if handler_name in self._listening_handlers:
                                    for listening_handler_name in self._listening_handlers[handler_name]:
                                        listening_handler = search_handler_blocks(listening_handler_name, iterator._play.handlers)
                                        if listening_handler is None:
                                            raise AnsibleError("The requested handler listener '%s' was not found in any of the known handlers" % listening_handler_name)
                                        if original_host not in self._notified_handlers[listening_handler]:
                                            self._notified_handlers[listening_handler].append(original_host)
                                            display.vv("NOTIFIED HANDLER %s" % (listening_handler_name,))
                                else:
                                    target_handler = search_handler_blocks(handler_name, iterator._play.handlers)
                                    if target_handler is not None:
                                        if original_host not in self._notified_handlers[target_handler]:
                                            self._notified_handlers[target_handler].append(original_host)
                                            # FIXME: should this be a callback?
                                            display.vv("NOTIFIED HANDLER %s" % (handler_name,))
                                    else:
                                        # As there may be more than one handler with the notified name as the
                                        # parent, so we just keep track of whether or not we found one at all
                                        found = False
                                        for target_handler in self._notified_handlers:
                                            if parent_handler_match(target_handler, handler_name):
                                                self._notified_handlers[target_handler].append(original_host)
                                                display.vv("NOTIFIED HANDLER %s" % (target_handler.get_name(),))
                                                found = True

                                        # and if none were found, then we raise an error
                                        if not found:
                                            raise AnsibleError("The requested handler '%s' was found in neither the main handlers list nor the listening handlers list" % handler_name)

                    if 'add_host' in result_item:
                        # this task added a new host (add_host module)
                        new_host_info = result_item.get('add_host', dict())
                        self._add_host(new_host_info, iterator)
                    elif 'add_group' in result_item:
                        # this task added a new group (group_by module)
                        self._add_group(original_host, result_item)
                    elif 'ansible_facts' in result_item:
                        loop_var = 'item'
                        if original_task.loop_control:
                            loop_var = original_task.loop_control.loop_var or 'item'

                        item = result_item.get(loop_var, None)

                        if original_task.action == 'include_vars':
                            for (var_name, var_value) in iteritems(result_item['ansible_facts']):
                                # find the host we're actually refering too here, which may
                                # be a host that is not really in inventory at all
                                if original_task.delegate_to is not None and original_task.delegate_facts:
                                    # BUGFIX: this previously referenced the undefined
                                    # names `host` and `task` (NameError on this path);
                                    # use the resolved originals instead.
                                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=original_host, task=original_task)
                                    self.add_tqm_variables(task_vars, play=iterator._play)
                                    if item is not None:
                                        task_vars[loop_var] = item
                                    templar = Templar(loader=self._loader, variables=task_vars)
                                    host_name = templar.template(original_task.delegate_to)
                                    actual_host = self._inventory.get_host(host_name)
                                    if actual_host is None:
                                        actual_host = Host(name=host_name)
                                else:
                                    actual_host = original_host

                                if original_task.run_once:
                                    host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                                else:
                                    host_list = [actual_host]

                                for target_host in host_list:
                                    self._variable_manager.set_host_variable(target_host, var_name, var_value)
                        else:
                            if original_task.run_once:
                                host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
                            else:
                                host_list = [original_host]

                            for target_host in host_list:
                                if original_task.action == 'set_fact':
                                    self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
                                else:
                                    self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())

                if 'diff' in task_result._result:
                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', task_result)

                # BUGFIX: include stats are deliberately deferred to
                # _load_included_file (see its "we do so now" comment), so the
                # ok/changed counters apply to every action EXCEPT includes;
                # the condition previously read `in`, which inverted this.
                if original_task.action not in ['include', 'include_role']:
                    self._tqm._stats.increment('ok', original_host.name)
                    if 'changed' in task_result._result and task_result._result['changed']:
                        self._tqm._stats.increment('changed', original_host.name)

                # finally, send the ok for this task
                self._tqm.send_callback('v2_runner_on_ok', task_result)

            self._pending_results -= 1
            if original_host.name in self._blocked_hosts:
                del self._blocked_hosts[original_host.name]

            # If this is a role task, mark the parent role as being run (if
            # the task was ok or failed, but not skipped or unreachable)
            if original_task._role is not None and role_ran:  # TODO: and original_task.action != 'include_role':?
                # lookup the role in the ROLE_CACHE to make sure we're dealing
                # with the correct object and mark it as executed
                for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[original_task._role._role_name]):
                    if role_obj._uuid == original_task._role._uuid:
                        role_obj._had_task_run[original_host.name] = True

            ret_results.append(task_result)

        except Queue.Empty:
            passes += 1

        if one_pass:
            break

    return ret_results
2015-02-12 18:11:08 +00:00
def _wait_on_pending_results(self, iterator):
    '''
    Wait for the shared counter to drop to zero, using a short sleep
    between checks to ensure we don't spin lock
    '''
    collected = []
    display.debug("waiting for pending results...")

    # every DEAD_CHECK_INTERVAL-th drain attempt, verify no worker died
    DEAD_CHECK_INTERVAL = 10
    attempt = 0
    while self._pending_results > 0 and not self._tqm._terminated:
        collected.extend(self._process_pending_results(iterator))

        attempt += 1
        if attempt % DEAD_CHECK_INTERVAL == 0:
            if self._pending_results > 0 and self._tqm.has_dead_workers():
                raise AnsibleError("A worker was found in a dead state")

    display.debug("no more pending results, returning what we have")
    return collected
2015-10-27 18:12:17 +00:00
def _add_host(self, host_info, iterator):
    '''
    Helper function to add a new host to inventory based on a task result.
    '''

    host_name = host_info.get('host_name')

    # Check if host in inventory, add if not
    new_host = self._inventory.get_host(host_name)
    if not new_host:
        new_host = Host(name=host_name)
        self._inventory._hosts_cache[host_name] = new_host
        self._inventory.get_host_vars(new_host)

        # every host belongs to the implicit 'all' group
        allgroup = self._inventory.get_group('all')
        allgroup.add_host(new_host)

    # Set/update the vars for this host
    new_host.vars = combine_vars(new_host.vars, self._inventory.get_host_vars(new_host))
    new_host.vars = combine_vars(new_host.vars, host_info.get('host_vars', dict()))

    new_groups = host_info.get('groups', [])
    for group_name in new_groups:
        if not self._inventory.get_group(group_name):
            # group does not exist yet: create and register it
            new_group = Group(group_name)
            self._inventory.add_group(new_group)
            self._inventory.get_group_vars(new_group)
            new_group.vars = self._inventory.get_group_variables(group_name)
        else:
            new_group = self._inventory.get_group(group_name)

        new_group.add_host(new_host)

        # add this host to the group cache
        if self._inventory.groups is not None:
            if group_name in self._inventory.groups:
                if new_host not in self._inventory.get_group(group_name).hosts:
                    # NOTE(review): membership is tested against the Host
                    # object but the *name* string is appended — confirm
                    # whether this list holds names or Host objects.
                    self._inventory.get_group(group_name).hosts.append(new_host.name)

    # clear pattern caching completely since it's unpredictable what
    # patterns may have referenced the group
    self._inventory.clear_pattern_cache()

    # clear cache of group dict, which is used in magic host variables
    self._inventory.clear_group_dict_cache()

    # also clear the hostvar cache entry for the given play, so that
    # the new hosts are available if hostvars are referenced
    self._variable_manager.invalidate_hostvars_cache(play=iterator._play)
2015-10-19 18:53:52 +00:00
def _add_group ( self , host , result_item ) :
2015-01-02 13:51:15 +00:00
'''
Helper function to add a group ( if it does not exist ) , and to assign the
specified host to that group .
'''
2015-10-19 18:53:52 +00:00
changed = False
2015-01-02 13:51:15 +00:00
# the host here is from the executor side, which means it was a
# serialized/cloned copy and we'll need to look up the proper
# host object from the master inventory
2015-10-19 18:53:52 +00:00
real_host = self . _inventory . get_host ( host . name )
2015-07-10 05:53:59 +00:00
2015-10-19 18:53:52 +00:00
group_name = result_item . get ( ' add_group ' )
new_group = self . _inventory . get_group ( group_name )
if not new_group :
2016-08-30 12:27:06 +00:00
# clear cache of group dict, which is used in magic host variables
self . _inventory . clear_group_dict_cache ( )
2015-10-19 18:53:52 +00:00
# create the new group and add it to inventory
new_group = Group ( name = group_name )
self . _inventory . add_group ( new_group )
new_group . vars = self . _inventory . get_group_vars ( new_group )
# and add the group to the proper hierarchy
allgroup = self . _inventory . get_group ( ' all ' )
allgroup . add_child_group ( new_group )
changed = True
if group_name not in host . get_groups ( ) :
new_group . add_host ( real_host )
changed = True
2015-01-02 13:51:15 +00:00
2015-07-10 05:53:59 +00:00
return changed
2015-01-02 13:51:15 +00:00
2015-07-24 20:21:16 +00:00
def _load_included_file(self, included_file, iterator, is_handler=False):
    '''
    Loads an included YAML file of tasks, applying the optional set of variables.

    :arg included_file: IncludedFile describing the file, its args and the
        hosts that triggered the include
    :arg iterator: PlayIterator for the current play
    :kwarg is_handler: load the blocks as handler blocks when True
    :returns: list of loaded Block objects, or [] on load failure (in which
        case every including host is marked failed)
    '''

    display.debug("loading included file: %s" % included_file._filename)
    try:
        data = self._loader.load_from_file(included_file._filename)
        if data is None:
            return []
        elif not isinstance(data, list):
            raise AnsibleError("included task files must contain a list of tasks")

        # work on a copy of the include task so the merged vars don't leak
        # back onto the original task object
        ti_copy = included_file._task.copy()
        temp_vars = ti_copy.vars.copy()
        temp_vars.update(included_file._args)
        # pop tags out of the include args, if they were specified there, and assign
        # them to the include. If the include already had tags specified, we raise an
        # error so that users know not to specify them both ways
        # NOTE(review): tags are popped from the ORIGINAL task's vars, but
        # temp_vars was copied from ti_copy above — so a 'tags' key may still
        # remain in ti_copy.vars after this; confirm whether that is intended.
        tags = included_file._task.vars.pop('tags', [])
        if isinstance(tags, string_types):
            tags = tags.split(',')
        if len(tags) > 0:
            if len(included_file._task.tags) > 0:
                raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement",
                        obj=included_file._task._ds)
            display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option")
            included_file._task.tags = tags

        ti_copy.vars = temp_vars

        block_list = load_list_of_blocks(
            data,
            play=iterator._play,
            parent_block=None,
            task_include=ti_copy,
            role=included_file._task._role,
            use_handlers=is_handler,
            loader=self._loader,
            variable_manager=self._variable_manager,
        )

        # since we skip incrementing the stats when the task result is
        # first processed, we do so now for each host in the list
        for host in included_file._hosts:
            self._tqm._stats.increment('ok', host.name)

    except AnsibleError as e:
        # mark all of the hosts including this file as failed, send callbacks,
        # and increment the stats for this host
        for host in included_file._hosts:
            tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=to_unicode(e)))
            iterator.mark_host_failed(host)
            self._tqm._failed_hosts[host.name] = True
            self._tqm._stats.increment('failures', host.name)
            self._tqm.send_callback('v2_runner_on_failed', tr)
        return []

    # finally, send the callback and return the list of blocks loaded
    self._tqm.send_callback('v2_playbook_on_include', included_file)
    display.debug("done processing included file")
    return block_list
2015-07-21 16:12:22 +00:00
def run_handlers ( self , iterator , play_context ) :
2014-11-14 22:14:08 +00:00
'''
Runs handlers on those hosts which have been notified .
'''
2016-08-05 18:44:57 +00:00
result = self . _tqm . RUN_OK
2014-11-14 22:14:08 +00:00
2015-03-25 18:51:40 +00:00
for handler_block in iterator . _play . handlers :
# FIXME: handlers need to support the rescue/always portions of blocks too,
# but this may take some work in the iterator and gets tricky when
# we consider the ability of meta tasks to flush handlers
for handler in handler_block . block :
2016-06-12 23:03:18 +00:00
if handler in self . _notified_handlers and len ( self . _notified_handlers [ handler ] ) :
result = self . _do_handler_run ( handler , handler . get_name ( ) , iterator = iterator , play_context = play_context )
2015-09-12 12:45:24 +00:00
if not result :
break
return result
2015-09-18 22:54:48 +00:00
def _do_handler_run(self, handler, handler_name, iterator, play_context, notified_hosts=None):
    '''
    Run a single handler on its notified hosts, process any includes the
    handler produced (recursing into the included handler tasks), and clear
    the handler's notification list.

    :arg handler: the handler Task to run
    :arg handler_name: display name used for the start-of-handler callback
    :kwarg notified_hosts: explicit host list; defaults to the shared
        notified-handlers entry for this handler
    :returns: True on success, False when an include failed to process
    '''

    # FIXME: need to use iterator.get_failed_hosts() instead?
    #if not len(self.get_hosts_remaining(iterator._play)):
    #    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
    #    result = False
    #    break

    # temporarily swap in the display name for the callback, then restore
    saved_name = handler.name
    handler.name = handler_name
    self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
    handler.name = saved_name

    if notified_hosts is None:
        notified_hosts = self._notified_handlers[handler]

    # determine whether this handler bypasses the host loop (run_once or a
    # BYPASS_HOST_LOOP action plugin)
    run_once = False
    try:
        action = action_loader.get(handler.action, class_only=True)
        if handler.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
            run_once = True
    except KeyError:
        # we don't care here, because the action may simply not have a
        # corresponding action plugin
        pass

    host_results = []
    for host in notified_hosts:
        # skip hosts the handler already ran on, and failed hosts unless
        # force_handlers is in effect
        if not handler.has_triggered(host) and (not iterator.is_failed(host) or play_context.force_handlers):
            task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler)
            self.add_tqm_variables(task_vars, play=iterator._play)
            self._queue_task(host, handler, task_vars, play_context)
            if run_once:
                break

    # collect the results from the handler run
    host_results = self._wait_on_pending_results(iterator)

    try:
        included_files = IncludedFile.process_include_results(
            host_results,
            self._tqm,
            iterator=iterator,
            inventory=self._inventory,
            loader=self._loader,
            variable_manager=self._variable_manager
        )
    except AnsibleError as e:
        return False

    result = True
    if len(included_files) > 0:
        for included_file in included_files:
            try:
                new_blocks = self._load_included_file(included_file, iterator=iterator, is_handler=True)
                # for every task in each block brought in by the include, add the list
                # of hosts which included the file to the notified_handlers dict
                for block in new_blocks:
                    iterator._play.handlers.append(block)
                    for task in block.block:
                        # recurse: each included task is itself run as a handler
                        result = self._do_handler_run(
                            handler=task,
                            handler_name=None,
                            iterator=iterator,
                            play_context=play_context,
                            notified_hosts=included_file._hosts[:],
                        )
                        if not result:
                            break
            except AnsibleError as e:
                # the include failed: fail the hosts that triggered it and
                # keep going with the remaining included files
                for host in included_file._hosts:
                    iterator.mark_host_failed(host)
                    self._tqm._failed_hosts[host.name] = True
                display.warning(str(e))
                continue

    # wipe the notification list
    self._notified_handlers[handler] = []
    display.debug("done running handlers, result is: %s" % result)
    return result
2015-07-24 00:47:24 +00:00
def _take_step(self, task, host=None):
    '''
    Interactive "step" mode: prompt the user whether to run the given task.

    :param task: the task about to be executed (used only for the prompt text)
    :param host: optional host the task targets; included in the prompt when set
    :returns: True if the task should be run, False if it should be skipped.
              Answering 'c'/'continue' also disables step mode for the rest
              of the run by clearing self._step.
    '''

    ret = False
    msg = u'Perform task: %s ' % task
    if host:
        msg += u'on %s ' % host
    msg += u'(N)o/(y)es/(c)ontinue: '
    resp = display.prompt(msg)

    if resp.lower() in ['y', 'yes']:
        display.debug("User ran task")
        ret = True
    elif resp.lower() in ['c', 'continue']:
        display.debug("User ran task and canceled step mode")
        # once the user continues, stop prompting for subsequent tasks
        self._step = False
        ret = True
    else:
        # default (anything else, including bare Enter) is "no" / skip
        display.debug("User skipped task")

    display.banner(msg)

    return ret
2015-07-18 19:24:44 +00:00
2016-07-08 21:08:38 +00:00
def _execute_meta(self, task, play_context, iterator, target_host=None):
    '''
    Execute a 'meta' task. Meta tasks store their action in the
    _raw_params arg (they take no k=v pairs), and act on the strategy/
    inventory state rather than running on a remote host.

    :param task: the meta Task object
    :param play_context: the current PlayContext
    :param iterator: the PlayIterator for the current play
    :param target_host: when set, restrict the meta action to this single
                        host instead of all (reachable) play hosts
    :returns: a list of TaskResult objects (one per host for actions that
              produce a result; 'flush_handlers' produces none)
    :raises AnsibleError: on an unknown meta action
    '''

    # meta tasks store their args in the _raw_params field of args,
    # since they do not use k=v pairs, so get that
    meta_action = task.args.get('_raw_params')

    # FIXME(s):
    # * raise an error or show a warning when a conditional is used
    #   on a meta task that doesn't support them
    def _evaluate_conditional(h):
        # evaluate the task's when-conditional against the given host's vars
        # (previously this ignored 'h' and closed over the loop variable)
        all_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=h, task=task)
        templar = Templar(loader=self._loader, variables=all_vars)
        return task.evaluate_conditional(templar, all_vars)

    if target_host:
        host_list = [target_host]
    else:
        # all hosts matched by the play, minus those already unreachable
        host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts)
                     if host.name not in self._tqm._unreachable_hosts]

    results = []
    for host in host_list:
        result = None
        if meta_action == 'noop':
            # FIXME: issue a callback for the noop here?
            result = TaskResult(host, task, dict(changed=False, msg="noop"))
        elif meta_action == 'flush_handlers':
            # run any pending handlers now; produces no per-host result
            self.run_handlers(iterator, play_context)
        elif meta_action == 'refresh_inventory':
            self._inventory.refresh_inventory()
            result = TaskResult(host, task, dict(changed=False, msg="inventory successfully refreshed"))
        elif meta_action == 'clear_facts':
            if _evaluate_conditional(host):
                # NOTE(review): clears facts for target_host even while looping
                # over host_list — looks like it should be 'host'; confirm intent
                self._variable_manager.clear_facts(target_host)
                result = TaskResult(host, task, dict(changed=True, msg="facts cleared"))
            else:
                result = TaskResult(host, task, dict(changed=False, skipped=True))
        elif meta_action == 'clear_host_errors':
            if _evaluate_conditional(host):
                # forget prior failures/unreachability so the host rejoins the play
                self._tqm._failed_hosts.pop(host.name, False)
                self._tqm._unreachable_hosts.pop(host.name, False)
                iterator._host_states[host.name].fail_state = iterator.FAILED_NONE
                result = TaskResult(host, task, dict(changed=True, msg="successfully cleared host errors"))
            else:
                result = TaskResult(host, task, dict(changed=False, skipped=True))
        elif meta_action == 'end_play':
            if _evaluate_conditional(host):
                # mark the host's iteration complete so the play ends for it
                iterator._host_states[host.name].run_state = iterator.ITERATING_COMPLETE
                result = TaskResult(host, task, dict(changed=True, msg="ending play"))
            else:
                result = TaskResult(host, task, dict(changed=False, skipped=True))
        #elif meta_action == 'reset_connection':
        #    connection_info.connection.close()
        else:
            raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)

        if result is not None:
            results.append(result)

    return results