2014-11-14 22:14:08 +00:00
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import ( absolute_import , division , print_function )
__metaclass__ = type
2015-10-16 00:55:23 +00:00
from ansible . compat . six . moves import queue as Queue
2015-10-28 14:13:51 +00:00
from ansible . compat . six import iteritems , text_type , string_types
2015-09-03 06:23:27 +00:00
2015-11-04 16:26:06 +00:00
import json
import pickle
import sys
2014-11-14 22:14:08 +00:00
import time
2015-11-05 21:21:34 +00:00
import zlib
2014-11-14 22:14:08 +00:00
2015-09-22 16:41:06 +00:00
from jinja2 . exceptions import UndefinedError
2015-08-28 16:14:23 +00:00
from ansible import constants as C
2015-09-22 16:41:06 +00:00
from ansible . errors import AnsibleError , AnsibleParserError , AnsibleUndefinedVariable
2015-05-29 04:58:38 +00:00
from ansible . executor . task_result import TaskResult
2015-01-02 13:51:15 +00:00
from ansible . inventory . host import Host
from ansible . inventory . group import Group
2015-02-12 18:11:08 +00:00
from ansible . playbook . handler import Handler
2015-03-25 18:51:40 +00:00
from ansible . playbook . helpers import load_list_of_blocks
2015-07-24 20:21:16 +00:00
from ansible . playbook . included_file import IncludedFile
2015-07-10 06:43:53 +00:00
from ansible . playbook . role import hash_params
2015-10-14 21:50:23 +00:00
from ansible . plugins import action_loader , connection_loader , filter_loader , lookup_loader , module_loader , test_loader
2015-07-07 19:47:51 +00:00
from ansible . template import Templar
2015-11-04 16:26:06 +00:00
from ansible . vars . unsafe_proxy import wrap_var , AnsibleJSONUnsafeEncoder
2014-11-14 22:14:08 +00:00
2015-07-23 14:24:50 +00:00
try :
from __main__ import display
except ImportError :
from ansible . utils . display import Display
display = Display ( )
2014-11-14 22:14:08 +00:00
__all__ = [ ' StrategyBase ' ]
2015-10-22 20:03:37 +00:00
# TODO: this should probably be in the plugins/__init__.py, with
# a smarter mechanism to set all of the attributes based on
# the loaders created there
2015-05-02 04:48:11 +00:00
class SharedPluginLoaderObj:
    '''
    Bundles the module-level plugin loaders into a single object so they
    can be handed to the forked worker processes over the queue in one go.
    '''
    def __init__(self):
        # expose each loader under the attribute name workers expect
        for attr_name, loader in (
            ('action_loader', action_loader),
            ('connection_loader', connection_loader),
            ('filter_loader', filter_loader),
            ('test_loader', test_loader),
            ('lookup_loader', lookup_loader),
            ('module_loader', module_loader),
        ):
            setattr(self, attr_name, loader)
2014-11-14 22:14:08 +00:00
class StrategyBase:
    '''
    This is the base class for strategy plugins, which contains some common
    code useful to all strategies like running handlers, cleanup actions, etc.
    '''

    def __init__(self, tqm):
        '''
        Capture handles to the shared TaskQueueManager state that all
        strategies need.

        :param tqm: the TaskQueueManager driving this play
        '''
        self._tqm              = tqm
        self._inventory        = tqm.get_inventory()
        self._workers          = tqm.get_workers()
        self._notified_handlers = tqm.get_notified_handlers()
        self._variable_manager = tqm.get_variable_manager()
        self._loader           = tqm.get_loader()
        self._final_q          = tqm._final_q
        self._step             = getattr(tqm._options, 'step', False)
        self._diff             = getattr(tqm._options, 'diff', False)
        self._display          = display

        # internal counters
        self._pending_results = 0
        self._cur_worker      = 0

        # hosts that still have outstanding tasks sitting in the queue
        self._blocked_hosts = {}
2015-07-21 16:12:22 +00:00
def run ( self , iterator , play_context , result = True ) :
2015-06-01 21:41:52 +00:00
# save the failed/unreachable hosts, as the run_handlers()
# method will clear that information during its execution
failed_hosts = self . _tqm . _failed_hosts . keys ( )
unreachable_hosts = self . _tqm . _unreachable_hosts . keys ( )
2015-01-12 22:04:56 +00:00
2015-07-23 14:24:50 +00:00
self . _display . debug ( " running handlers " )
2015-07-21 16:12:22 +00:00
result & = self . run_handlers ( iterator , play_context )
2015-01-12 22:04:56 +00:00
2015-06-01 21:41:52 +00:00
# now update with the hosts (if any) that failed or were
# unreachable during the handler execution phase
failed_hosts = set ( failed_hosts ) . union ( self . _tqm . _failed_hosts . keys ( ) )
unreachable_hosts = set ( unreachable_hosts ) . union ( self . _tqm . _unreachable_hosts . keys ( ) )
2015-03-25 18:51:40 +00:00
# send the stats callback
self . _tqm . send_callback ( ' v2_playbook_on_stats ' , self . _tqm . _stats )
2015-06-01 21:41:52 +00:00
if len ( unreachable_hosts ) > 0 :
return 3
elif len ( failed_hosts ) > 0 :
return 2
elif not result :
return 1
2015-01-12 22:04:56 +00:00
else :
return 0
2014-11-14 22:14:08 +00:00
def get_hosts_remaining ( self , play ) :
2015-03-25 18:51:40 +00:00
return [ host for host in self . _inventory . get_hosts ( play . hosts ) if host . name not in self . _tqm . _failed_hosts and host . name not in self . _tqm . _unreachable_hosts ]
2014-11-14 22:14:08 +00:00
2015-02-09 22:54:44 +00:00
def get_failed_hosts ( self , play ) :
return [ host for host in self . _inventory . get_hosts ( play . hosts ) if host . name in self . _tqm . _failed_hosts ]
2014-11-14 22:14:08 +00:00
2015-06-23 01:03:55 +00:00
def add_tqm_variables ( self , vars , play ) :
'''
Base class method to add extra variables / information to the list of task
vars sent through the executor engine regarding the task queue manager state .
'''
2015-11-04 16:26:06 +00:00
vars [ ' ansible_current_hosts ' ] = [ h . name for h in self . get_hosts_remaining ( play ) ]
vars [ ' ansible_failed_hosts ' ] = [ h . name for h in self . get_failed_hosts ( play ) ]
2015-06-23 01:03:55 +00:00
2015-07-21 16:12:22 +00:00
def _queue_task ( self , host , task , task_vars , play_context ) :
2014-11-14 22:14:08 +00:00
''' handles queueing the task up to be sent to a worker '''
2015-07-23 14:24:50 +00:00
self . _display . debug ( " entering _queue_task() for %s / %s " % ( host , task ) )
2014-11-14 22:14:08 +00:00
# and then queue the new task
2015-07-23 14:24:50 +00:00
self . _display . debug ( " %s - putting task ( %s ) in queue " % ( host , task ) )
2014-11-14 22:14:08 +00:00
try :
2015-07-23 14:24:50 +00:00
self . _display . debug ( " worker is %d (out of %d available) " % ( self . _cur_worker + 1 , len ( self . _workers ) ) )
2014-11-14 22:14:08 +00:00
( worker_prc , main_q , rslt_q ) = self . _workers [ self . _cur_worker ]
self . _cur_worker + = 1
if self . _cur_worker > = len ( self . _workers ) :
self . _cur_worker = 0
2015-05-02 04:48:11 +00:00
# create a dummy object with plugin loaders set as an easier
# way to share them with the forked processes
shared_loader_obj = SharedPluginLoaderObj ( )
2015-11-05 21:21:34 +00:00
# compress (and convert) the data if so configured, which can
# help a lot when the variable dictionary is huge. We pop the
# hostvars out of the task variables right now, due to the fact
# that they're not JSON serializable
compressed_vars = False
hostvars = task_vars . pop ( ' hostvars ' , None )
if C . DEFAULT_VAR_COMPRESSION_LEVEL > 0 :
zip_vars = zlib . compress ( json . dumps ( task_vars ) , C . DEFAULT_VAR_COMPRESSION_LEVEL )
compressed_vars = True
# we're done with the original dict now, so delete it to
# try and reclaim some memory space, which is helpful if the
# data contained in the dict is very large
del task_vars
else :
zip_vars = task_vars
# and queue the task
main_q . put ( ( host , task , self . _loader . get_basedir ( ) , zip_vars , hostvars , compressed_vars , play_context , shared_loader_obj ) , block = False )
# nuke the hostvars object too, as its no longer needed
del hostvars
2015-05-19 00:26:59 +00:00
self . _pending_results + = 1
2015-04-13 16:35:20 +00:00
except ( EOFError , IOError , AssertionError ) as e :
2014-11-14 22:14:08 +00:00
# most likely an abort
2015-07-23 14:24:50 +00:00
self . _display . debug ( " got an error while queuing: %s " % e )
2014-11-14 22:14:08 +00:00
return
2015-07-23 14:24:50 +00:00
self . _display . debug ( " exiting _queue_task() for %s / %s " % ( host , task ) )
2014-11-14 22:14:08 +00:00
2015-02-12 18:11:08 +00:00
def _process_pending_results ( self , iterator ) :
2014-11-14 22:14:08 +00:00
'''
Reads results off the final queue and takes appropriate action
based on the result ( executing callbacks , updating state , etc . ) .
'''
2015-03-03 20:59:23 +00:00
ret_results = [ ]
2014-11-14 22:14:08 +00:00
while not self . _final_q . empty ( ) and not self . _tqm . _terminated :
try :
result = self . _final_q . get ( block = False )
2015-09-04 05:39:08 +00:00
self . _display . debug ( " got result from result worker: %s " % ( [ text_type ( x ) for x in result ] , ) )
2014-11-14 22:14:08 +00:00
# all host status messages contain 2 entries: (msg, task_result)
if result [ 0 ] in ( ' host_task_ok ' , ' host_task_failed ' , ' host_task_skipped ' , ' host_unreachable ' ) :
task_result = result [ 1 ]
host = task_result . _host
task = task_result . _task
2015-06-05 11:15:35 +00:00
if result [ 0 ] == ' host_task_failed ' or task_result . is_failed ( ) :
2015-01-12 22:04:56 +00:00
if not task . ignore_errors :
2015-07-23 14:24:50 +00:00
self . _display . debug ( " marking %s as failed " % host . name )
2015-09-29 18:53:50 +00:00
if task . run_once :
# if we're using run_once, we have to fail every host here
[ iterator . mark_host_failed ( h ) for h in self . _inventory . get_hosts ( iterator . _play . hosts ) if h . name not in self . _tqm . _unreachable_hosts ]
else :
iterator . mark_host_failed ( host )
2015-03-25 18:51:40 +00:00
self . _tqm . _failed_hosts [ host . name ] = True
2015-06-17 19:38:52 +00:00
self . _tqm . _stats . increment ( ' failures ' , host . name )
else :
self . _tqm . _stats . increment ( ' ok ' , host . name )
2015-07-11 18:53:23 +00:00
self . _tqm . send_callback ( ' v2_runner_on_failed ' , task_result , ignore_errors = task . ignore_errors )
2014-11-14 22:14:08 +00:00
elif result [ 0 ] == ' host_unreachable ' :
2015-03-25 18:51:40 +00:00
self . _tqm . _unreachable_hosts [ host . name ] = True
self . _tqm . _stats . increment ( ' dark ' , host . name )
self . _tqm . send_callback ( ' v2_runner_on_unreachable ' , task_result )
2014-11-14 22:14:08 +00:00
elif result [ 0 ] == ' host_task_skipped ' :
2015-03-25 18:51:40 +00:00
self . _tqm . _stats . increment ( ' skipped ' , host . name )
self . _tqm . send_callback ( ' v2_runner_on_skipped ' , task_result )
2014-11-14 22:14:08 +00:00
elif result [ 0 ] == ' host_task_ok ' :
2015-10-28 18:00:03 +00:00
if task . action != ' include ' :
self . _tqm . _stats . increment ( ' ok ' , host . name )
if ' changed ' in task_result . _result and task_result . _result [ ' changed ' ] :
self . _tqm . _stats . increment ( ' changed ' , host . name )
self . _tqm . send_callback ( ' v2_runner_on_ok ' , task_result )
2014-11-14 22:14:08 +00:00
2015-07-26 16:21:38 +00:00
if self . _diff and ' diff ' in task_result . _result :
self . _tqm . send_callback ( ' v2_on_file_diff ' , task_result )
2014-11-14 22:14:08 +00:00
self . _pending_results - = 1
if host . name in self . _blocked_hosts :
del self . _blocked_hosts [ host . name ]
# If this is a role task, mark the parent role as being run (if
# the task was ok or failed, but not skipped or unreachable)
if task_result . _task . _role is not None and result [ 0 ] in ( ' host_task_ok ' , ' host_task_failed ' ) :
# lookup the role in the ROLE_CACHE to make sure we're dealing
# with the correct object and mark it as executed
2015-09-03 06:23:27 +00:00
for ( entry , role_obj ) in iteritems ( iterator . _play . ROLE_CACHE [ task_result . _task . _role . _role_name ] ) :
2015-08-11 20:34:58 +00:00
if role_obj . _uuid == task_result . _task . _role . _uuid :
role_obj . _had_task_run [ host . name ] = True
2014-11-14 22:14:08 +00:00
2015-03-03 20:59:23 +00:00
ret_results . append ( task_result )
2015-01-02 13:51:15 +00:00
elif result [ 0 ] == ' add_host ' :
2015-10-19 18:53:52 +00:00
result_item = result [ 1 ]
new_host_info = result_item . get ( ' add_host ' , dict ( ) )
2015-07-26 16:21:38 +00:00
2015-10-27 18:12:17 +00:00
self . _add_host ( new_host_info , iterator )
2015-01-02 13:51:15 +00:00
elif result [ 0 ] == ' add_group ' :
2015-10-19 18:53:52 +00:00
host = result [ 1 ]
result_item = result [ 2 ]
self . _add_group ( host , result_item )
2015-01-02 13:51:15 +00:00
2014-11-14 22:14:08 +00:00
elif result [ 0 ] == ' notify_handler ' :
2015-07-20 18:36:31 +00:00
task_result = result [ 1 ]
2015-01-15 07:13:45 +00:00
handler_name = result [ 2 ]
2015-07-20 18:36:31 +00:00
original_task = iterator . get_original_task ( task_result . _host , task_result . _task )
2015-01-26 17:29:59 +00:00
if handler_name not in self . _notified_handlers :
self . _notified_handlers [ handler_name ] = [ ]
2015-07-20 18:36:31 +00:00
if task_result . _host not in self . _notified_handlers [ handler_name ] :
self . _notified_handlers [ handler_name ] . append ( task_result . _host )
2015-09-29 16:29:02 +00:00
self . _display . vv ( " NOTIFIED HANDLER %s " % ( handler_name , ) )
2014-11-14 22:14:08 +00:00
2015-07-08 16:38:24 +00:00
elif result [ 0 ] == ' register_host_var ' :
# essentially the same as 'set_host_var' below, however we
2015-09-09 16:21:07 +00:00
# never follow the delegate_to value for registered vars and
# the variable goes in the fact_cache
2015-07-08 16:38:24 +00:00
host = result [ 1 ]
var_name = result [ 2 ]
2015-09-25 18:54:20 +00:00
var_value = wrap_var ( result [ 3 ] )
2015-09-10 17:59:39 +00:00
2015-09-10 21:36:06 +00:00
self . _variable_manager . set_nonpersistent_facts ( host , { var_name : var_value } )
2015-07-08 16:38:24 +00:00
2015-07-07 19:47:51 +00:00
elif result [ 0 ] in ( ' set_host_var ' , ' set_host_facts ' ) :
host = result [ 1 ]
task = result [ 2 ]
item = result [ 3 ]
if task . delegate_to is not None :
task_vars = self . _variable_manager . get_vars ( loader = self . _loader , play = iterator . _play , host = host , task = task )
2015-11-05 21:30:03 +00:00
self . add_tqm_variables ( task_vars , play = iterator . _play )
2015-07-07 19:47:51 +00:00
if item is not None :
task_vars [ ' item ' ] = item
templar = Templar ( loader = self . _loader , variables = task_vars )
host_name = templar . template ( task . delegate_to )
target_host = self . _inventory . get_host ( host_name )
if target_host is None :
target_host = Host ( name = host_name )
else :
target_host = host
if result [ 0 ] == ' set_host_var ' :
var_name = result [ 4 ]
2015-09-26 19:16:14 +00:00
var_value = result [ 5 ]
2015-09-25 18:54:20 +00:00
2015-07-07 19:47:51 +00:00
self . _variable_manager . set_host_variable ( target_host , var_name , var_value )
elif result [ 0 ] == ' set_host_facts ' :
facts = result [ 4 ]
2015-09-10 21:36:06 +00:00
if task . action == ' set_fact ' :
self . _variable_manager . set_nonpersistent_facts ( target_host , facts )
else :
self . _variable_manager . set_host_facts ( target_host , facts )
2014-11-14 22:14:08 +00:00
else :
raise AnsibleError ( " unknown result message received: %s " % result [ 0 ] )
except Queue . Empty :
pass
2015-03-03 20:59:23 +00:00
return ret_results
2015-02-12 18:11:08 +00:00
def _wait_on_pending_results ( self , iterator ) :
2014-11-14 22:14:08 +00:00
'''
Wait for the shared counter to drop to zero , using a short sleep
between checks to ensure we don ' t spin lock
'''
2015-03-03 20:59:23 +00:00
ret_results = [ ]
2015-07-23 14:24:50 +00:00
self . _display . debug ( " waiting for pending results... " )
2014-11-14 22:14:08 +00:00
while self . _pending_results > 0 and not self . _tqm . _terminated :
2015-03-03 20:59:23 +00:00
results = self . _process_pending_results ( iterator )
ret_results . extend ( results )
2014-11-14 22:14:08 +00:00
time . sleep ( 0.01 )
2015-07-23 14:24:50 +00:00
self . _display . debug ( " no more pending results, returning what we have " )
2014-11-14 22:14:08 +00:00
2015-03-03 20:59:23 +00:00
return ret_results
2015-10-27 18:12:17 +00:00
def _add_host ( self , host_info , iterator ) :
2015-01-02 13:51:15 +00:00
'''
Helper function to add a new host to inventory based on a task result .
'''
host_name = host_info . get ( ' host_name ' )
# Check if host in cache, add if not
if host_name in self . _inventory . _hosts_cache :
new_host = self . _inventory . _hosts_cache [ host_name ]
else :
2015-07-07 19:47:51 +00:00
new_host = Host ( name = host_name )
2015-01-02 13:51:15 +00:00
self . _inventory . _hosts_cache [ host_name ] = new_host
allgroup = self . _inventory . get_group ( ' all ' )
allgroup . add_host ( new_host )
# Set/update the vars for this host
new_vars = host_info . get ( ' host_vars ' , dict ( ) )
2015-10-02 06:47:09 +00:00
new_host . vars = self . _inventory . get_host_vars ( new_host )
2015-01-02 13:51:15 +00:00
new_host . vars . update ( new_vars )
new_groups = host_info . get ( ' groups ' , [ ] )
for group_name in new_groups :
if not self . _inventory . get_group ( group_name ) :
new_group = Group ( group_name )
self . _inventory . add_group ( new_group )
new_group . vars = self . _inventory . get_group_variables ( group_name )
else :
new_group = self . _inventory . get_group ( group_name )
new_group . add_host ( new_host )
# add this host to the group cache
2015-09-17 22:58:10 +00:00
if self . _inventory . groups is not None :
if group_name in self . _inventory . groups :
if new_host not in self . _inventory . get_group ( group_name ) . hosts :
self . _inventory . get_group ( group_name ) . hosts . append ( new_host . name )
2015-01-02 13:51:15 +00:00
# clear pattern caching completely since it's unpredictable what
# patterns may have referenced the group
self . _inventory . clear_pattern_cache ( )
2015-10-27 18:12:17 +00:00
# also clear the hostvar cache entry for the given play, so that
# the new hosts are available if hostvars are referenced
self . _variable_manager . invalidate_hostvars_cache ( play = iterator . _play )
2015-10-19 18:53:52 +00:00
def _add_group ( self , host , result_item ) :
2015-01-02 13:51:15 +00:00
'''
Helper function to add a group ( if it does not exist ) , and to assign the
specified host to that group .
'''
2015-10-19 18:53:52 +00:00
changed = False
2015-01-02 13:51:15 +00:00
# the host here is from the executor side, which means it was a
# serialized/cloned copy and we'll need to look up the proper
# host object from the master inventory
2015-10-19 18:53:52 +00:00
real_host = self . _inventory . get_host ( host . name )
2015-07-10 05:53:59 +00:00
2015-10-19 18:53:52 +00:00
group_name = result_item . get ( ' add_group ' )
new_group = self . _inventory . get_group ( group_name )
if not new_group :
# create the new group and add it to inventory
new_group = Group ( name = group_name )
self . _inventory . add_group ( new_group )
new_group . vars = self . _inventory . get_group_vars ( new_group )
# and add the group to the proper hierarchy
allgroup = self . _inventory . get_group ( ' all ' )
allgroup . add_child_group ( new_group )
changed = True
if group_name not in host . get_groups ( ) :
new_group . add_host ( real_host )
changed = True
2015-01-02 13:51:15 +00:00
2015-07-10 05:53:59 +00:00
return changed
2015-01-02 13:51:15 +00:00
2015-07-24 20:21:16 +00:00
def _load_included_file ( self , included_file , iterator , is_handler = False ) :
2015-02-12 18:11:08 +00:00
'''
Loads an included YAML file of tasks , applying the optional set of variables .
'''
2015-05-29 04:58:38 +00:00
try :
data = self . _loader . load_from_file ( included_file . _filename )
2015-07-14 12:25:48 +00:00
if data is None :
return [ ]
2015-10-28 18:00:03 +00:00
elif not isinstance ( data , list ) :
raise AnsibleError ( " included task files must contain a list of tasks " )
block_list = load_list_of_blocks (
data ,
play = included_file . _task . _block . _play ,
parent_block = included_file . _task . _block ,
task_include = included_file . _task ,
role = included_file . _task . _role ,
use_handlers = is_handler ,
loader = self . _loader
)
# since we skip incrementing the stats when the task result is
# first processed, we do so now for each host in the list
for host in included_file . _hosts :
self . _tqm . _stats . increment ( ' ok ' , host . name )
2015-08-27 06:16:11 +00:00
except AnsibleError as e :
2015-10-28 18:00:03 +00:00
# mark all of the hosts including this file as failed, send callbacks,
# and increment the stats for this host
2015-05-29 04:58:38 +00:00
for host in included_file . _hosts :
tr = TaskResult ( host = host , task = included_file . _task , return_data = dict ( failed = True , reason = str ( e ) ) )
iterator . mark_host_failed ( host )
self . _tqm . _failed_hosts [ host . name ] = True
self . _tqm . _stats . increment ( ' failures ' , host . name )
self . _tqm . send_callback ( ' v2_runner_on_failed ' , tr )
return [ ]
2015-02-26 15:51:12 +00:00
# set the vars for this task from those specified as params to the include
2015-03-25 18:51:40 +00:00
for b in block_list :
2015-09-01 16:31:35 +00:00
# first make a copy of the including task, so that each has a unique copy to modify
# FIXME: not sure if this is the best way to fix this, as we might be losing
# information in the copy. Previously we assigned the include params to
# the block variables directly, which caused other problems, so we may
# need to figure out a third option if this also presents problems.
b . _task_include = b . _task_include . copy ( exclude_block = True )
# then we create a temporary set of vars to ensure the variable reference is unique
2015-08-12 14:12:05 +00:00
temp_vars = b . _task_include . vars . copy ( )
2015-08-11 05:28:42 +00:00
temp_vars . update ( included_file . _args . copy ( ) )
2015-10-28 14:13:51 +00:00
# pop tags out of the include args, if they were specified there, and assign
# them to the include. If the include already had tags specified, we raise an
# error so that users know not to specify them both ways
tags = temp_vars . pop ( ' tags ' , [ ] )
if isinstance ( tags , string_types ) :
tags = [ tags ]
if len ( tags ) > 0 :
if len ( b . _task_include . tags ) > 0 :
raise AnsibleParserError ( " Include tasks should not specify tags in more than one way (both via args and directly on the task) " , obj = included_file . _task . _ds )
self . _display . deprecated ( " You should not specify tags in the include parameters. All tags should be specified using the task-level option " )
b . _task_include . tags = tags
2015-08-12 14:12:05 +00:00
b . _task_include . vars = temp_vars
2015-02-12 18:11:08 +00:00
2015-10-28 18:00:03 +00:00
# finally, send the callback and return the list of blocks loaded
self . _tqm . send_callback ( ' v2_playbook_on_include ' , included_file )
2015-03-25 18:51:40 +00:00
return block_list
2015-02-12 18:11:08 +00:00
2015-07-21 16:12:22 +00:00
def run_handlers ( self , iterator , play_context ) :
2014-11-14 22:14:08 +00:00
'''
Runs handlers on those hosts which have been notified .
'''
result = True
2015-03-25 18:51:40 +00:00
for handler_block in iterator . _play . handlers :
# FIXME: handlers need to support the rescue/always portions of blocks too,
# but this may take some work in the iterator and gets tricky when
# we consider the ability of meta tasks to flush handlers
for handler in handler_block . block :
2015-09-18 22:54:48 +00:00
handler_vars = self . _variable_manager . get_vars ( loader = self . _loader , play = iterator . _play , task = handler )
templar = Templar ( loader = self . _loader , variables = handler_vars )
2015-09-22 16:41:06 +00:00
try :
2015-09-29 16:29:02 +00:00
# first we check with the full result of get_name(), which may
# include the role name (if the handler is from a role). If that
# is not found, we resort to the simple name field, which doesn't
# have anything extra added to it.
handler_name = templar . template ( handler . name )
if handler_name not in self . _notified_handlers :
handler_name = templar . template ( handler . get_name ( ) )
2015-09-22 16:41:06 +00:00
except ( UndefinedError , AnsibleUndefinedVariable ) :
# We skip this handler due to the fact that it may be using
# a variable in the name that was conditionally included via
# set_fact or some other method, and we don't want to error
# out unnecessarily
continue
2015-09-29 16:29:02 +00:00
if handler_name in self . _notified_handlers and len ( self . _notified_handlers [ handler_name ] ) :
2015-09-18 22:54:48 +00:00
result = self . _do_handler_run ( handler , handler_name , iterator = iterator , play_context = play_context )
2015-09-12 12:45:24 +00:00
if not result :
break
return result
2015-09-18 22:54:48 +00:00
def _do_handler_run ( self , handler , handler_name , iterator , play_context , notified_hosts = None ) :
2015-09-12 12:45:24 +00:00
# FIXME: need to use iterator.get_failed_hosts() instead?
#if not len(self.get_hosts_remaining(iterator._play)):
# self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
# result = False
# break
self . _tqm . send_callback ( ' v2_playbook_on_handler_task_start ' , handler )
if notified_hosts is None :
2015-09-18 22:54:48 +00:00
notified_hosts = self . _notified_handlers [ handler_name ]
2015-09-12 12:45:24 +00:00
2015-10-30 13:44:35 +00:00
run_once = False
try :
action = action_loader . get ( handler . action , class_only = True )
if handler . run_once or getattr ( action , ' BYPASS_HOST_LOOP ' , False ) :
run_once = True
except KeyError :
# we don't care here, because the action may simply not have a
# corresponding action plugin
pass
2015-09-12 12:45:24 +00:00
host_results = [ ]
for host in notified_hosts :
if not handler . has_triggered ( host ) and ( host . name not in self . _tqm . _failed_hosts or play_context . force_handlers ) :
task_vars = self . _variable_manager . get_vars ( loader = self . _loader , play = iterator . _play , host = host , task = handler )
2015-11-04 16:26:06 +00:00
self . add_tqm_variables ( task_vars , play = iterator . _play )
2015-09-12 12:45:24 +00:00
self . _queue_task ( host , handler , task_vars , play_context )
2015-10-30 13:44:35 +00:00
if run_once :
break
2015-09-12 12:45:24 +00:00
# collect the results from the handler run
host_results = self . _wait_on_pending_results ( iterator )
try :
included_files = IncludedFile . process_include_results (
host_results ,
self . _tqm ,
iterator = iterator ,
loader = self . _loader ,
variable_manager = self . _variable_manager
)
except AnsibleError as e :
return False
result = True
if len ( included_files ) > 0 :
for included_file in included_files :
try :
new_blocks = self . _load_included_file ( included_file , iterator = iterator , is_handler = True )
# for every task in each block brought in by the include, add the list
# of hosts which included the file to the notified_handlers dict
for block in new_blocks :
iterator . _play . handlers . append ( block )
for task in block . block :
result = self . _do_handler_run (
handler = task ,
iterator = iterator ,
play_context = play_context ,
notified_hosts = included_file . _hosts [ : ] ,
)
if not result :
break
except AnsibleError as e :
for host in included_file . _hosts :
iterator . mark_host_failed ( host )
self . _tqm . _failed_hosts [ host . name ] = True
self . _display . warning ( str ( e ) )
continue
# wipe the notification list
2015-09-18 22:54:48 +00:00
self . _notified_handlers [ handler_name ] = [ ]
2015-09-12 12:45:24 +00:00
self . _display . debug ( " done running handlers, result is: %s " % result )
2014-11-14 22:14:08 +00:00
return result
2015-07-24 00:47:24 +00:00
def _take_step ( self , task , host = None ) :
ret = False
if host :
msg = u ' Perform task: %s on %s (y/n/c): ' % ( task , host )
else :
msg = u ' Perform task: %s (y/n/c): ' % task
resp = self . _display . prompt ( msg )
if resp . lower ( ) in [ ' y ' , ' yes ' ] :
self . _display . debug ( " User ran task " )
ret = True
elif resp . lower ( ) in [ ' c ' , ' continue ' ] :
self . _display . debug ( " User ran task and cancled step mode " )
self . _step = False
ret = True
else :
self . _display . debug ( " User skipped task " )
self . _display . banner ( msg )
return ret
2015-07-18 19:24:44 +00:00
def _execute_meta ( self , task , play_context , iterator ) :
# meta tasks store their args in the _raw_params field of args,
# since they do not use k=v pairs, so get that
meta_action = task . args . get ( ' _raw_params ' )
if meta_action == ' noop ' :
# FIXME: issue a callback for the noop here?
pass
elif meta_action == ' flush_handlers ' :
self . run_handlers ( iterator , play_context )
elif meta_action == ' refresh_inventory ' :
self . _inventory . refresh_inventory ( )
#elif meta_action == 'reset_connection':
# connection_info.connection.close()
else :
raise AnsibleError ( " invalid meta action requested: %s " % meta_action , obj = task . _ds )