2014-11-14 22:14:08 +00:00
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import ( absolute_import , division , print_function )
__metaclass__ = type
2015-10-16 00:55:23 +00:00
from ansible . compat . six . moves import queue as Queue
2015-10-28 14:13:51 +00:00
from ansible . compat . six import iteritems , text_type , string_types
2015-09-03 06:23:27 +00:00
2015-11-04 16:26:06 +00:00
import json
2014-11-14 22:14:08 +00:00
import time
2015-11-05 21:21:34 +00:00
import zlib
2014-11-14 22:14:08 +00:00
2015-09-22 16:41:06 +00:00
from jinja2 . exceptions import UndefinedError
2015-08-28 16:14:23 +00:00
from ansible import constants as C
2015-12-17 17:43:36 +00:00
from ansible . errors import AnsibleError , AnsibleParserError , AnsibleUndefinedVariable
2015-12-11 19:55:44 +00:00
from ansible . executor . play_iterator import PlayIterator
2015-12-10 23:03:25 +00:00
from ansible . executor . process . worker import WorkerProcess
2015-05-29 04:58:38 +00:00
from ansible . executor . task_result import TaskResult
2015-01-02 13:51:15 +00:00
from ansible . inventory . host import Host
from ansible . inventory . group import Group
2015-03-25 18:51:40 +00:00
from ansible . playbook . helpers import load_list_of_blocks
2015-07-24 20:21:16 +00:00
from ansible . playbook . included_file import IncludedFile
2015-10-14 21:50:23 +00:00
from ansible . plugins import action_loader , connection_loader , filter_loader , lookup_loader , module_loader , test_loader
2015-07-07 19:47:51 +00:00
from ansible . template import Templar
2016-01-18 22:42:50 +00:00
from ansible . utils . unicode import to_unicode
2015-11-11 18:19:58 +00:00
from ansible . vars . unsafe_proxy import wrap_var
2016-01-14 22:55:44 +00:00
from ansible . vars import combine_vars
2014-11-14 22:14:08 +00:00
2015-07-23 14:24:50 +00:00
# Prefer the global display object created by the CLI entry point; when this
# module is used outside the CLI (e.g. tests), build a local instance instead.
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()

__all__ = ['StrategyBase']
# TODO: this should probably be in the plugins/__init__.py, with
#       a smarter mechanism to set all of the attributes based on
#       the loaders created there


class SharedPluginLoaderObj:
    '''
    A simple object to make pass the various plugin loaders to
    the forked processes over the queue easier
    '''
    def __init__(self):
        # Stash each module-level loader singleton on the instance so the
        # whole bundle can travel over the worker queue as one object.
        self.action_loader = action_loader
        self.connection_loader = connection_loader
        self.filter_loader = filter_loader
        self.lookup_loader = lookup_loader
        self.module_loader = module_loader
        self.test_loader = test_loader
class StrategyBase:

    '''
    This is the base class for strategy plugins, which contains some common
    code useful to all strategies like running handlers, cleanup actions, etc.
    '''

    def __init__(self, tqm):
        # Pull everything this strategy needs off of the task queue manager.
        self._tqm = tqm
        self._inventory = tqm.get_inventory()
        self._workers = tqm.get_workers()
        self._notified_handlers = tqm.get_notified_handlers()
        self._variable_manager = tqm.get_variable_manager()
        self._loader = tqm.get_loader()
        self._final_q = tqm._final_q
        self._step = getattr(tqm._options, 'step', False)
        self._diff = getattr(tqm._options, 'diff', False)

        # Backwards compat: self._display isn't really needed, just import
        # the global display and use that.
        self._display = display

        # internal counters
        self._pending_results = 0
        self._cur_worker = 0

        # this dictionary is used to keep track of hosts that have
        # outstanding tasks still in queue
        self._blocked_hosts = dict()
def run ( self , iterator , play_context , result = True ) :
2015-06-01 21:41:52 +00:00
# save the failed/unreachable hosts, as the run_handlers()
# method will clear that information during its execution
failed_hosts = self . _tqm . _failed_hosts . keys ( )
unreachable_hosts = self . _tqm . _unreachable_hosts . keys ( )
2015-01-12 22:04:56 +00:00
2015-11-11 18:19:58 +00:00
display . debug ( " running handlers " )
2015-07-21 16:12:22 +00:00
result & = self . run_handlers ( iterator , play_context )
2015-01-12 22:04:56 +00:00
2015-06-01 21:41:52 +00:00
# now update with the hosts (if any) that failed or were
# unreachable during the handler execution phase
failed_hosts = set ( failed_hosts ) . union ( self . _tqm . _failed_hosts . keys ( ) )
unreachable_hosts = set ( unreachable_hosts ) . union ( self . _tqm . _unreachable_hosts . keys ( ) )
2015-11-28 18:37:02 +00:00
# return the appropriate code, depending on the status hosts after the run
2015-06-01 21:41:52 +00:00
if len ( unreachable_hosts ) > 0 :
return 3
elif len ( failed_hosts ) > 0 :
return 2
elif not result :
return 1
2015-01-12 22:04:56 +00:00
else :
return 0
2014-11-14 22:14:08 +00:00
def get_hosts_remaining ( self , play ) :
2015-11-11 18:19:58 +00:00
return [ host for host in self . _inventory . get_hosts ( play . hosts )
if host . name not in self . _tqm . _failed_hosts and host . name not in self . _tqm . _unreachable_hosts ]
2014-11-14 22:14:08 +00:00
2015-02-09 22:54:44 +00:00
def get_failed_hosts ( self , play ) :
return [ host for host in self . _inventory . get_hosts ( play . hosts ) if host . name in self . _tqm . _failed_hosts ]
2014-11-14 22:14:08 +00:00
2015-06-23 01:03:55 +00:00
def add_tqm_variables ( self , vars , play ) :
'''
Base class method to add extra variables / information to the list of task
vars sent through the executor engine regarding the task queue manager state .
'''
2015-11-04 16:26:06 +00:00
vars [ ' ansible_current_hosts ' ] = [ h . name for h in self . get_hosts_remaining ( play ) ]
vars [ ' ansible_failed_hosts ' ] = [ h . name for h in self . get_failed_hosts ( play ) ]
2015-06-23 01:03:55 +00:00
2015-07-21 16:12:22 +00:00
def _queue_task ( self , host , task , task_vars , play_context ) :
2014-11-14 22:14:08 +00:00
''' handles queueing the task up to be sent to a worker '''
2015-11-11 18:19:58 +00:00
display . debug ( " entering _queue_task() for %s / %s " % ( host , task ) )
2014-11-14 22:14:08 +00:00
2015-12-10 23:03:25 +00:00
task_vars [ ' hostvars ' ] = self . _tqm . hostvars
2014-11-14 22:14:08 +00:00
# and then queue the new task
2015-11-11 18:19:58 +00:00
display . debug ( " %s - putting task ( %s ) in queue " % ( host , task ) )
2014-11-14 22:14:08 +00:00
try :
2015-11-11 18:19:58 +00:00
display . debug ( " worker is %d (out of %d available) " % ( self . _cur_worker + 1 , len ( self . _workers ) ) )
2014-11-14 22:14:08 +00:00
2015-05-02 04:48:11 +00:00
# create a dummy object with plugin loaders set as an easier
# way to share them with the forked processes
shared_loader_obj = SharedPluginLoaderObj ( )
2015-12-14 08:06:52 +00:00
queued = False
2015-12-10 23:03:25 +00:00
while True :
( worker_prc , main_q , rslt_q ) = self . _workers [ self . _cur_worker ]
if worker_prc is None or not worker_prc . is_alive ( ) :
worker_prc = WorkerProcess ( rslt_q , task_vars , host , task , play_context , self . _loader , self . _variable_manager , shared_loader_obj )
self . _workers [ self . _cur_worker ] [ 0 ] = worker_prc
worker_prc . start ( )
2015-12-14 08:06:52 +00:00
queued = True
2015-12-10 23:03:25 +00:00
self . _cur_worker + = 1
if self . _cur_worker > = len ( self . _workers ) :
self . _cur_worker = 0
time . sleep ( 0.0001 )
2015-12-14 08:06:52 +00:00
if queued :
break
2015-12-10 23:03:25 +00:00
del task_vars
2015-05-19 00:26:59 +00:00
self . _pending_results + = 1
2015-04-13 16:35:20 +00:00
except ( EOFError , IOError , AssertionError ) as e :
2014-11-14 22:14:08 +00:00
# most likely an abort
2015-11-11 18:19:58 +00:00
display . debug ( " got an error while queuing: %s " % e )
2014-11-14 22:14:08 +00:00
return
2015-11-11 18:19:58 +00:00
display . debug ( " exiting _queue_task() for %s / %s " % ( host , task ) )
2014-11-14 22:14:08 +00:00
2015-12-10 23:03:25 +00:00
def _process_pending_results ( self , iterator , one_pass = False ) :
2014-11-14 22:14:08 +00:00
'''
Reads results off the final queue and takes appropriate action
based on the result ( executing callbacks , updating state , etc . ) .
'''
2015-03-03 20:59:23 +00:00
ret_results = [ ]
2014-11-14 22:14:08 +00:00
while not self . _final_q . empty ( ) and not self . _tqm . _terminated :
try :
2015-11-16 21:12:57 +00:00
result = self . _final_q . get ( )
2015-11-11 18:19:58 +00:00
display . debug ( " got result from result worker: %s " % ( [ text_type ( x ) for x in result ] , ) )
2014-11-14 22:14:08 +00:00
2015-12-16 06:48:22 +00:00
# helper method, used to find the original host from the one
# returned in the result/message, which has been serialized and
# thus had some information stripped from it to speed up the
# serialization process
def get_original_host ( host ) :
if host . name in self . _inventory . _hosts_cache :
return self . _inventory . _hosts_cache [ host . name ]
else :
return self . _inventory . get_host ( host . name )
2014-11-14 22:14:08 +00:00
# all host status messages contain 2 entries: (msg, task_result)
if result [ 0 ] in ( ' host_task_ok ' , ' host_task_failed ' , ' host_task_skipped ' , ' host_unreachable ' ) :
task_result = result [ 1 ]
2015-12-16 06:48:22 +00:00
host = get_original_host ( task_result . _host )
2014-11-14 22:14:08 +00:00
task = task_result . _task
2015-06-05 11:15:35 +00:00
if result [ 0 ] == ' host_task_failed ' or task_result . is_failed ( ) :
2015-01-12 22:04:56 +00:00
if not task . ignore_errors :
2015-11-11 18:19:58 +00:00
display . debug ( " marking %s as failed " % host . name )
2015-09-29 18:53:50 +00:00
if task . run_once :
# if we're using run_once, we have to fail every host here
[ iterator . mark_host_failed ( h ) for h in self . _inventory . get_hosts ( iterator . _play . hosts ) if h . name not in self . _tqm . _unreachable_hosts ]
else :
iterator . mark_host_failed ( host )
2016-02-11 02:27:14 +00:00
# only add the host to the failed list officially if it has
# been failed by the iterator
if iterator . is_failed ( host ) :
2015-12-11 19:55:44 +00:00
self . _tqm . _failed_hosts [ host . name ] = True
self . _tqm . _stats . increment ( ' failures ' , host . name )
2015-06-17 19:38:52 +00:00
else :
self . _tqm . _stats . increment ( ' ok ' , host . name )
2015-07-11 18:53:23 +00:00
self . _tqm . send_callback ( ' v2_runner_on_failed ' , task_result , ignore_errors = task . ignore_errors )
2014-11-14 22:14:08 +00:00
elif result [ 0 ] == ' host_unreachable ' :
2015-03-25 18:51:40 +00:00
self . _tqm . _unreachable_hosts [ host . name ] = True
self . _tqm . _stats . increment ( ' dark ' , host . name )
self . _tqm . send_callback ( ' v2_runner_on_unreachable ' , task_result )
2014-11-14 22:14:08 +00:00
elif result [ 0 ] == ' host_task_skipped ' :
2015-03-25 18:51:40 +00:00
self . _tqm . _stats . increment ( ' skipped ' , host . name )
self . _tqm . send_callback ( ' v2_runner_on_skipped ' , task_result )
2014-11-14 22:14:08 +00:00
elif result [ 0 ] == ' host_task_ok ' :
2015-10-28 18:00:03 +00:00
if task . action != ' include ' :
self . _tqm . _stats . increment ( ' ok ' , host . name )
if ' changed ' in task_result . _result and task_result . _result [ ' changed ' ] :
self . _tqm . _stats . increment ( ' changed ' , host . name )
self . _tqm . send_callback ( ' v2_runner_on_ok ' , task_result )
2014-11-14 22:14:08 +00:00
2015-12-09 19:51:43 +00:00
if self . _diff :
2015-07-26 16:21:38 +00:00
self . _tqm . send_callback ( ' v2_on_file_diff ' , task_result )
2014-11-14 22:14:08 +00:00
self . _pending_results - = 1
if host . name in self . _blocked_hosts :
del self . _blocked_hosts [ host . name ]
# If this is a role task, mark the parent role as being run (if
# the task was ok or failed, but not skipped or unreachable)
if task_result . _task . _role is not None and result [ 0 ] in ( ' host_task_ok ' , ' host_task_failed ' ) :
# lookup the role in the ROLE_CACHE to make sure we're dealing
# with the correct object and mark it as executed
2015-09-03 06:23:27 +00:00
for ( entry , role_obj ) in iteritems ( iterator . _play . ROLE_CACHE [ task_result . _task . _role . _role_name ] ) :
2015-08-11 20:34:58 +00:00
if role_obj . _uuid == task_result . _task . _role . _uuid :
role_obj . _had_task_run [ host . name ] = True
2014-11-14 22:14:08 +00:00
2015-03-03 20:59:23 +00:00
ret_results . append ( task_result )
2015-01-02 13:51:15 +00:00
elif result [ 0 ] == ' add_host ' :
2015-10-19 18:53:52 +00:00
result_item = result [ 1 ]
new_host_info = result_item . get ( ' add_host ' , dict ( ) )
2015-07-26 16:21:38 +00:00
2015-10-27 18:12:17 +00:00
self . _add_host ( new_host_info , iterator )
2015-01-02 13:51:15 +00:00
elif result [ 0 ] == ' add_group ' :
2015-12-16 06:48:22 +00:00
host = get_original_host ( result [ 1 ] )
2015-10-19 18:53:52 +00:00
result_item = result [ 2 ]
self . _add_group ( host , result_item )
2015-01-02 13:51:15 +00:00
2014-11-14 22:14:08 +00:00
elif result [ 0 ] == ' notify_handler ' :
2015-07-20 18:36:31 +00:00
task_result = result [ 1 ]
2015-01-15 07:13:45 +00:00
handler_name = result [ 2 ]
2015-07-20 18:36:31 +00:00
2015-12-16 06:48:22 +00:00
original_host = get_original_host ( task_result . _host )
original_task = iterator . get_original_task ( original_host , task_result . _task )
2015-01-26 17:29:59 +00:00
if handler_name not in self . _notified_handlers :
self . _notified_handlers [ handler_name ] = [ ]
2015-12-16 06:48:22 +00:00
if original_host not in self . _notified_handlers [ handler_name ] :
self . _notified_handlers [ handler_name ] . append ( original_host )
2015-11-11 18:19:58 +00:00
display . vv ( " NOTIFIED HANDLER %s " % ( handler_name , ) )
2014-11-14 22:14:08 +00:00
2015-07-08 16:38:24 +00:00
elif result [ 0 ] == ' register_host_var ' :
# essentially the same as 'set_host_var' below, however we
2015-09-09 16:21:07 +00:00
# never follow the delegate_to value for registered vars and
# the variable goes in the fact_cache
2015-12-16 06:48:22 +00:00
host = get_original_host ( result [ 1 ] )
2015-11-30 16:27:05 +00:00
task = result [ 2 ]
2015-09-25 18:54:20 +00:00
var_value = wrap_var ( result [ 3 ] )
2015-11-30 16:27:05 +00:00
var_name = task . register
2015-09-10 17:59:39 +00:00
2015-11-30 16:27:05 +00:00
if task . run_once :
host_list = [ host for host in self . _inventory . get_hosts ( iterator . _play . hosts ) if host . name not in self . _tqm . _unreachable_hosts ]
else :
host_list = [ host ]
for target_host in host_list :
self . _variable_manager . set_nonpersistent_facts ( target_host , { var_name : var_value } )
2015-07-08 16:38:24 +00:00
2015-07-07 19:47:51 +00:00
elif result [ 0 ] in ( ' set_host_var ' , ' set_host_facts ' ) :
2015-12-16 06:48:22 +00:00
host = get_original_host ( result [ 1 ] )
2015-07-07 19:47:51 +00:00
task = result [ 2 ]
item = result [ 3 ]
2015-11-30 16:27:05 +00:00
# find the host we're actually refering too here, which may
# be a host that is not really in inventory at all
2015-12-08 19:00:17 +00:00
if task . delegate_to is not None and task . delegate_facts :
2015-07-07 19:47:51 +00:00
task_vars = self . _variable_manager . get_vars ( loader = self . _loader , play = iterator . _play , host = host , task = task )
2015-11-05 21:30:03 +00:00
self . add_tqm_variables ( task_vars , play = iterator . _play )
2015-07-07 19:47:51 +00:00
if item is not None :
task_vars [ ' item ' ] = item
templar = Templar ( loader = self . _loader , variables = task_vars )
host_name = templar . template ( task . delegate_to )
2015-11-30 16:27:05 +00:00
actual_host = self . _inventory . get_host ( host_name )
if actual_host is None :
actual_host = Host ( name = host_name )
2015-07-07 19:47:51 +00:00
else :
2015-11-30 16:27:05 +00:00
actual_host = host
2015-07-07 19:47:51 +00:00
2016-01-18 19:50:20 +00:00
if task . run_once :
host_list = [ host for host in self . _inventory . get_hosts ( iterator . _play . hosts ) if host . name not in self . _tqm . _unreachable_hosts ]
else :
host_list = [ actual_host ]
2015-07-07 19:47:51 +00:00
if result [ 0 ] == ' set_host_var ' :
var_name = result [ 4 ]
2015-09-26 19:16:14 +00:00
var_value = result [ 5 ]
2015-11-30 16:27:05 +00:00
for target_host in host_list :
self . _variable_manager . set_host_variable ( target_host , var_name , var_value )
2015-07-07 19:47:51 +00:00
elif result [ 0 ] == ' set_host_facts ' :
facts = result [ 4 ]
2016-01-18 19:50:20 +00:00
for target_host in host_list :
if task . action == ' set_fact ' :
self . _variable_manager . set_nonpersistent_facts ( target_host , facts )
else :
self . _variable_manager . set_host_facts ( target_host , facts )
2014-11-14 22:14:08 +00:00
else :
raise AnsibleError ( " unknown result message received: %s " % result [ 0 ] )
2015-12-10 23:03:25 +00:00
2014-11-14 22:14:08 +00:00
except Queue . Empty :
2015-11-16 21:12:57 +00:00
time . sleep ( 0.0001 )
2014-11-14 22:14:08 +00:00
2015-12-10 23:03:25 +00:00
if one_pass :
break
2015-03-03 20:59:23 +00:00
return ret_results
2015-02-12 18:11:08 +00:00
def _wait_on_pending_results ( self , iterator ) :
2014-11-14 22:14:08 +00:00
'''
Wait for the shared counter to drop to zero , using a short sleep
between checks to ensure we don ' t spin lock
'''
2015-03-03 20:59:23 +00:00
ret_results = [ ]
2015-11-11 18:19:58 +00:00
display . debug ( " waiting for pending results... " )
2014-11-14 22:14:08 +00:00
while self . _pending_results > 0 and not self . _tqm . _terminated :
2015-03-03 20:59:23 +00:00
results = self . _process_pending_results ( iterator )
ret_results . extend ( results )
2015-11-16 21:12:57 +00:00
time . sleep ( 0.0001 )
2015-11-11 18:19:58 +00:00
display . debug ( " no more pending results, returning what we have " )
2014-11-14 22:14:08 +00:00
2015-03-03 20:59:23 +00:00
return ret_results
2015-10-27 18:12:17 +00:00
def _add_host ( self , host_info , iterator ) :
2015-01-02 13:51:15 +00:00
'''
Helper function to add a new host to inventory based on a task result .
'''
host_name = host_info . get ( ' host_name ' )
# Check if host in cache, add if not
if host_name in self . _inventory . _hosts_cache :
new_host = self . _inventory . _hosts_cache [ host_name ]
else :
2015-07-07 19:47:51 +00:00
new_host = Host ( name = host_name )
2015-01-02 13:51:15 +00:00
self . _inventory . _hosts_cache [ host_name ] = new_host
allgroup = self . _inventory . get_group ( ' all ' )
allgroup . add_host ( new_host )
# Set/update the vars for this host
2016-01-14 22:55:44 +00:00
new_host . vars = combine_vars ( new_host . vars , self . _inventory . get_host_vars ( new_host ) )
new_host . vars = combine_vars ( new_host . vars , host_info . get ( ' host_vars ' , dict ( ) ) )
2015-01-02 13:51:15 +00:00
new_groups = host_info . get ( ' groups ' , [ ] )
for group_name in new_groups :
if not self . _inventory . get_group ( group_name ) :
new_group = Group ( group_name )
self . _inventory . add_group ( new_group )
new_group . vars = self . _inventory . get_group_variables ( group_name )
else :
new_group = self . _inventory . get_group ( group_name )
new_group . add_host ( new_host )
# add this host to the group cache
2015-09-17 22:58:10 +00:00
if self . _inventory . groups is not None :
if group_name in self . _inventory . groups :
if new_host not in self . _inventory . get_group ( group_name ) . hosts :
self . _inventory . get_group ( group_name ) . hosts . append ( new_host . name )
2015-01-02 13:51:15 +00:00
# clear pattern caching completely since it's unpredictable what
# patterns may have referenced the group
self . _inventory . clear_pattern_cache ( )
2015-10-27 18:12:17 +00:00
# also clear the hostvar cache entry for the given play, so that
# the new hosts are available if hostvars are referenced
self . _variable_manager . invalidate_hostvars_cache ( play = iterator . _play )
2015-10-19 18:53:52 +00:00
def _add_group ( self , host , result_item ) :
2015-01-02 13:51:15 +00:00
'''
Helper function to add a group ( if it does not exist ) , and to assign the
specified host to that group .
'''
2015-10-19 18:53:52 +00:00
changed = False
2015-01-02 13:51:15 +00:00
# the host here is from the executor side, which means it was a
# serialized/cloned copy and we'll need to look up the proper
# host object from the master inventory
2015-10-19 18:53:52 +00:00
real_host = self . _inventory . get_host ( host . name )
2015-07-10 05:53:59 +00:00
2015-10-19 18:53:52 +00:00
group_name = result_item . get ( ' add_group ' )
new_group = self . _inventory . get_group ( group_name )
if not new_group :
# create the new group and add it to inventory
new_group = Group ( name = group_name )
self . _inventory . add_group ( new_group )
new_group . vars = self . _inventory . get_group_vars ( new_group )
# and add the group to the proper hierarchy
allgroup = self . _inventory . get_group ( ' all ' )
allgroup . add_child_group ( new_group )
changed = True
if group_name not in host . get_groups ( ) :
new_group . add_host ( real_host )
changed = True
2015-01-02 13:51:15 +00:00
2015-07-10 05:53:59 +00:00
return changed
2015-01-02 13:51:15 +00:00
2015-07-24 20:21:16 +00:00
def _load_included_file ( self , included_file , iterator , is_handler = False ) :
2015-02-12 18:11:08 +00:00
'''
Loads an included YAML file of tasks , applying the optional set of variables .
'''
2015-11-16 22:13:55 +00:00
display . debug ( " loading included file: %s " % included_file . _filename )
2015-05-29 04:58:38 +00:00
try :
data = self . _loader . load_from_file ( included_file . _filename )
2015-07-14 12:25:48 +00:00
if data is None :
return [ ]
2015-10-28 18:00:03 +00:00
elif not isinstance ( data , list ) :
raise AnsibleError ( " included task files must contain a list of tasks " )
block_list = load_list_of_blocks (
data ,
play = included_file . _task . _block . _play ,
2016-02-13 06:02:47 +00:00
parent_block = None ,
2015-10-28 18:00:03 +00:00
task_include = included_file . _task ,
role = included_file . _task . _role ,
use_handlers = is_handler ,
loader = self . _loader
)
# since we skip incrementing the stats when the task result is
# first processed, we do so now for each host in the list
for host in included_file . _hosts :
self . _tqm . _stats . increment ( ' ok ' , host . name )
2015-08-27 06:16:11 +00:00
except AnsibleError as e :
2015-10-28 18:00:03 +00:00
# mark all of the hosts including this file as failed, send callbacks,
# and increment the stats for this host
2015-05-29 04:58:38 +00:00
for host in included_file . _hosts :
2016-01-18 22:42:50 +00:00
tr = TaskResult ( host = host , task = included_file . _task , return_data = dict ( failed = True , reason = to_unicode ( e ) ) )
2015-05-29 04:58:38 +00:00
iterator . mark_host_failed ( host )
self . _tqm . _failed_hosts [ host . name ] = True
self . _tqm . _stats . increment ( ' failures ' , host . name )
self . _tqm . send_callback ( ' v2_runner_on_failed ' , tr )
return [ ]
2015-02-26 15:51:12 +00:00
# set the vars for this task from those specified as params to the include
2015-03-25 18:51:40 +00:00
for b in block_list :
2015-09-01 16:31:35 +00:00
# first make a copy of the including task, so that each has a unique copy to modify
2016-02-13 06:02:47 +00:00
b . _task_include = b . _task_include . copy ( )
2015-09-01 16:31:35 +00:00
# then we create a temporary set of vars to ensure the variable reference is unique
2015-08-12 14:12:05 +00:00
temp_vars = b . _task_include . vars . copy ( )
2015-08-11 05:28:42 +00:00
temp_vars . update ( included_file . _args . copy ( ) )
2015-10-28 14:13:51 +00:00
# pop tags out of the include args, if they were specified there, and assign
# them to the include. If the include already had tags specified, we raise an
# error so that users know not to specify them both ways
tags = temp_vars . pop ( ' tags ' , [ ] )
if isinstance ( tags , string_types ) :
2016-02-06 20:19:03 +00:00
tags = tags . split ( ' , ' )
2015-10-28 14:13:51 +00:00
if len ( tags ) > 0 :
if len ( b . _task_include . tags ) > 0 :
2016-01-20 20:11:44 +00:00
raise AnsibleParserError ( " Include tasks should not specify tags in more than one way (both via args and directly on the task). Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement " ,
2015-11-11 18:19:58 +00:00
obj = included_file . _task . _ds )
display . deprecated ( " You should not specify tags in the include parameters. All tags should be specified using the task-level option " )
2015-10-28 14:13:51 +00:00
b . _task_include . tags = tags
2015-08-12 14:12:05 +00:00
b . _task_include . vars = temp_vars
2015-02-12 18:11:08 +00:00
2015-10-28 18:00:03 +00:00
# finally, send the callback and return the list of blocks loaded
self . _tqm . send_callback ( ' v2_playbook_on_include ' , included_file )
2015-11-16 22:13:55 +00:00
display . debug ( " done processing included file " )
2015-03-25 18:51:40 +00:00
return block_list
2015-02-12 18:11:08 +00:00
2015-07-21 16:12:22 +00:00
def run_handlers ( self , iterator , play_context ) :
2014-11-14 22:14:08 +00:00
'''
Runs handlers on those hosts which have been notified .
'''
result = True
2015-03-25 18:51:40 +00:00
for handler_block in iterator . _play . handlers :
# FIXME: handlers need to support the rescue/always portions of blocks too,
# but this may take some work in the iterator and gets tricky when
# we consider the ability of meta tasks to flush handlers
for handler in handler_block . block :
2015-09-18 22:54:48 +00:00
handler_vars = self . _variable_manager . get_vars ( loader = self . _loader , play = iterator . _play , task = handler )
templar = Templar ( loader = self . _loader , variables = handler_vars )
2015-09-22 16:41:06 +00:00
try :
2015-09-29 16:29:02 +00:00
# first we check with the full result of get_name(), which may
# include the role name (if the handler is from a role). If that
# is not found, we resort to the simple name field, which doesn't
# have anything extra added to it.
handler_name = templar . template ( handler . name )
if handler_name not in self . _notified_handlers :
handler_name = templar . template ( handler . get_name ( ) )
2015-09-22 16:41:06 +00:00
except ( UndefinedError , AnsibleUndefinedVariable ) :
# We skip this handler due to the fact that it may be using
# a variable in the name that was conditionally included via
# set_fact or some other method, and we don't want to error
# out unnecessarily
continue
2015-09-29 16:29:02 +00:00
if handler_name in self . _notified_handlers and len ( self . _notified_handlers [ handler_name ] ) :
2015-09-18 22:54:48 +00:00
result = self . _do_handler_run ( handler , handler_name , iterator = iterator , play_context = play_context )
2015-09-12 12:45:24 +00:00
if not result :
break
return result
2015-09-18 22:54:48 +00:00
def _do_handler_run ( self , handler , handler_name , iterator , play_context , notified_hosts = None ) :
2015-09-12 12:45:24 +00:00
# FIXME: need to use iterator.get_failed_hosts() instead?
#if not len(self.get_hosts_remaining(iterator._play)):
# self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
# result = False
# break
2016-01-23 14:25:50 +00:00
saved_name = handler . name
handler . name = handler_name
2015-09-12 12:45:24 +00:00
self . _tqm . send_callback ( ' v2_playbook_on_handler_task_start ' , handler )
2016-01-23 14:25:50 +00:00
handler . name = saved_name
2015-09-12 12:45:24 +00:00
if notified_hosts is None :
2015-09-18 22:54:48 +00:00
notified_hosts = self . _notified_handlers [ handler_name ]
2015-09-12 12:45:24 +00:00
2015-10-30 13:44:35 +00:00
run_once = False
try :
action = action_loader . get ( handler . action , class_only = True )
if handler . run_once or getattr ( action , ' BYPASS_HOST_LOOP ' , False ) :
run_once = True
except KeyError :
# we don't care here, because the action may simply not have a
# corresponding action plugin
pass
2015-09-12 12:45:24 +00:00
host_results = [ ]
for host in notified_hosts :
if not handler . has_triggered ( host ) and ( host . name not in self . _tqm . _failed_hosts or play_context . force_handlers ) :
task_vars = self . _variable_manager . get_vars ( loader = self . _loader , play = iterator . _play , host = host , task = handler )
2015-11-04 16:26:06 +00:00
self . add_tqm_variables ( task_vars , play = iterator . _play )
2015-09-12 12:45:24 +00:00
self . _queue_task ( host , handler , task_vars , play_context )
2015-10-30 13:44:35 +00:00
if run_once :
break
2015-09-12 12:45:24 +00:00
# collect the results from the handler run
host_results = self . _wait_on_pending_results ( iterator )
try :
included_files = IncludedFile . process_include_results (
host_results ,
self . _tqm ,
iterator = iterator ,
2015-12-17 00:12:05 +00:00
inventory = self . _inventory ,
2015-09-12 12:45:24 +00:00
loader = self . _loader ,
variable_manager = self . _variable_manager
)
except AnsibleError as e :
return False
result = True
if len ( included_files ) > 0 :
for included_file in included_files :
try :
new_blocks = self . _load_included_file ( included_file , iterator = iterator , is_handler = True )
# for every task in each block brought in by the include, add the list
# of hosts which included the file to the notified_handlers dict
for block in new_blocks :
iterator . _play . handlers . append ( block )
for task in block . block :
result = self . _do_handler_run (
handler = task ,
2015-12-17 00:12:05 +00:00
handler_name = None ,
2015-09-12 12:45:24 +00:00
iterator = iterator ,
play_context = play_context ,
notified_hosts = included_file . _hosts [ : ] ,
)
if not result :
break
except AnsibleError as e :
for host in included_file . _hosts :
iterator . mark_host_failed ( host )
self . _tqm . _failed_hosts [ host . name ] = True
2015-11-11 18:19:58 +00:00
display . warning ( str ( e ) )
2015-09-12 12:45:24 +00:00
continue
# wipe the notification list
2015-09-18 22:54:48 +00:00
self . _notified_handlers [ handler_name ] = [ ]
2015-11-11 18:19:58 +00:00
display . debug ( " done running handlers, result is: %s " % result )
2014-11-14 22:14:08 +00:00
return result
2015-07-24 00:47:24 +00:00
def _take_step ( self , task , host = None ) :
ret = False
if host :
msg = u ' Perform task: %s on %s (y/n/c): ' % ( task , host )
else :
msg = u ' Perform task: %s (y/n/c): ' % task
2015-11-11 18:19:58 +00:00
resp = display . prompt ( msg )
2015-07-24 00:47:24 +00:00
if resp . lower ( ) in [ ' y ' , ' yes ' ] :
2015-11-11 18:19:58 +00:00
display . debug ( " User ran task " )
2015-07-24 00:47:24 +00:00
ret = True
elif resp . lower ( ) in [ ' c ' , ' continue ' ] :
2015-11-11 18:19:58 +00:00
display . debug ( " User ran task and cancled step mode " )
2015-07-24 00:47:24 +00:00
self . _step = False
ret = True
else :
2015-11-11 18:19:58 +00:00
display . debug ( " User skipped task " )
2015-07-24 00:47:24 +00:00
2015-11-11 18:19:58 +00:00
display . banner ( msg )
2015-07-24 00:47:24 +00:00
return ret
2015-07-18 19:24:44 +00:00
def _execute_meta ( self , task , play_context , iterator ) :
# meta tasks store their args in the _raw_params field of args,
# since they do not use k=v pairs, so get that
meta_action = task . args . get ( ' _raw_params ' )
if meta_action == ' noop ' :
# FIXME: issue a callback for the noop here?
pass
elif meta_action == ' flush_handlers ' :
self . run_handlers ( iterator , play_context )
elif meta_action == ' refresh_inventory ' :
self . _inventory . refresh_inventory ( )
#elif meta_action == 'reset_connection':
# connection_info.connection.close()
2016-02-03 19:52:29 +00:00
elif meta_action == ' clear_host_errors ' :
self . _tqm . _failed_hosts = dict ( )
self . _tqm . _unreachable_hosts = dict ( )
for host in iterator . _host_states :
iterator . _host_states [ host ] . fail_state = iterator . FAILED_NONE
2015-07-18 19:24:44 +00:00
else :
raise AnsibleError ( " invalid meta action requested: %s " % meta_action , obj = task . _ds )