V2 fixing bugs

James Cammarata
2015-03-25 13:51:40 -05:00
parent 79cf7e7292
commit 785c0c0c8c
34 changed files with 505 additions and 426 deletions

View File

@@ -240,7 +240,10 @@ class PluginLoader:
                 continue
             if path not in self._module_cache:
                 self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path)
-            yield getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
+            if kwargs.get('class_only', False):
+                yield getattr(self._module_cache[path], self.class_name)
+            else:
+                yield getattr(self._module_cache[path], self.class_name)(*args, **kwargs)

 action_loader = PluginLoader(
     'ActionModule',
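Note: the new class_only flag lets a caller ask the loader for the plugin class itself rather than an instance, deferring construction and its arguments to the caller; the callback changes further down rely on this so plugins can be built with a shared display. A standalone sketch of the pattern, with hypothetical names (TinyLoader and EchoCallback are not Ansible API):

# Hypothetical, self-contained illustration of the class_only idea.
class TinyLoader:
    def __init__(self, classes):
        self._classes = classes  # plugin name -> class

    def all(self, *args, **kwargs):
        class_only = kwargs.pop('class_only', False)
        for cls in self._classes.values():
            if class_only:
                yield cls                   # defer construction to the caller
            else:
                yield cls(*args, **kwargs)  # construct eagerly, as before

class EchoCallback:
    def __init__(self, display):
        self._display = display

loader = TinyLoader({'echo': EchoCallback})
for cb_class in loader.all(class_only=True):
    cb = cb_class(display=object())  # caller supplies its own arguments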

View File

@@ -231,7 +231,7 @@ class ActionModule(ActionBase):
                 self._remove_tempfile_if_content_defined(content, content_tempfile)

             # fix file permissions when the copy is done as a different user
-            if (self._connection_info.become and self._connection_info.become_user != 'root':
+            if self._connection_info.become and self._connection_info.become_user != 'root':
                 self._remote_chmod('a+r', tmp_src, tmp)

             if raw:
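Note: besides removing a stray opening parenthesis (a syntax error), this hunk covers a privilege quirk: files are staged over the connection as the SSH user, so when the task then escalates to a non-root become user, that user needs read access to the temp files. A minimal restatement of the rule, with hypothetical stand-ins for the real objects:

# Hypothetical sketch; conn_info and remote_chmod stand in for the real API.
def fix_staged_file_perms(conn_info, remote_chmod, *paths):
    # root can already read the staged files; any other become user
    # cannot be assumed to, so open them up for reading.
    if conn_info.become and conn_info.become_user != 'root':
        remote_chmod('a+r', *paths)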

View File

@@ -19,7 +19,7 @@
 from __future__ import (absolute_import, division, print_function)
 __metaclass__ = type

-from ansible.utils.display import Display
+#from ansible.utils.display import Display

 __all__ = ["CallbackBase"]
@@ -34,8 +34,8 @@ class CallbackBase:
     # FIXME: the list of functions here needs to be updated once we have
     #        finalized the list of callback methods used in the default callback

-    def __init__(self):
-        self._display = Display()
+    def __init__(self, display):
+        self._display = display

     def set_connection_info(self, conn_info):
         # FIXME: this is a temporary hack, as the connection info object
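Note: callbacks no longer construct their own Display (hence the commented-out import); the caller injects one, so every plugin writes through a single output channel with consistent verbosity and color settings. A standalone sketch of the injection, with hypothetical minimal classes:

# Hypothetical illustration: one shared display injected into all callbacks.
class Display:
    def __init__(self, verbosity=0):
        self._verbosity = verbosity

    def display(self, msg, color=None):
        print(msg)

class CallbackBase:
    def __init__(self, display):
        self._display = display  # shared instance, injected by the caller

shared = Display(verbosity=1)
plugins = [CallbackBase(shared), CallbackBase(shared)]
assert plugins[0]._display is plugins[1]._display  # one channel for all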

View File

@@ -30,25 +30,15 @@ class CallbackModule(CallbackBase):
     to stdout when new callback events are received.
     '''

-    def _print_banner(self, msg, color=None):
-        '''
-        Prints a header-looking line with stars taking up to 80 columns
-        of width (3 columns, minimum)
-        '''
-        msg = msg.strip()
-        star_len = (80 - len(msg))
-        if star_len < 0:
-            star_len = 3
-        stars = "*" * star_len
-        self._display.display("\n%s %s" % (msg, stars), color=color)
+    CALLBACK_VERSION = 2.0

-    def on_any(self, *args, **kwargs):
+    def v2_on_any(self, *args, **kwargs):
         pass

-    def runner_on_failed(self, task, result, ignore_errors=False):
+    def v2_runner_on_failed(self, result, ignore_errors=False):
         self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), json.dumps(result._result, ensure_ascii=False)), color='red')

-    def runner_on_ok(self, task, result):
+    def v2_runner_on_ok(self, result):
         if result._task.action == 'include':
             msg = 'included: %s for %s' % (result._task.args.get('_raw_params'), result._host.name)
@@ -68,7 +58,7 @@ class CallbackModule(CallbackBase):
             msg += " => %s" % json.dumps(result._result, indent=indent, ensure_ascii=False)
         self._display.display(msg, color=color)

-    def runner_on_skipped(self, task, result):
+    def v2_runner_on_skipped(self, result):
         msg = "skipping: [%s]" % result._host.get_name()
         if self._display._verbosity > 0 or 'verbose_always' in result._result:
             indent = None
@@ -78,57 +68,66 @@ class CallbackModule(CallbackBase):
             msg += " => %s" % json.dumps(result._result, indent=indent, ensure_ascii=False)
         self._display.display(msg, color='cyan')

-    def runner_on_unreachable(self, task, result):
+    def v2_runner_on_unreachable(self, result):
         self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), result._result), color='red')

-    def runner_on_no_hosts(self, task):
+    def v2_runner_on_no_hosts(self, task):
         pass

-    def runner_on_async_poll(self, host, res, jid, clock):
+    def v2_runner_on_async_poll(self, result):
         pass

-    def runner_on_async_ok(self, host, res, jid):
+    def v2_runner_on_async_ok(self, result):
         pass

-    def runner_on_async_failed(self, host, res, jid):
+    def v2_runner_on_async_failed(self, result):
         pass

-    def playbook_on_start(self):
+    def v2_runner_on_file_diff(self, result, diff):
         pass

-    def playbook_on_notify(self, host, handler):
+    def v2_playbook_on_start(self):
         pass

-    def playbook_on_no_hosts_matched(self):
+    def v2_playbook_on_notify(self, result, handler):
+        pass
+
+    def v2_playbook_on_no_hosts_matched(self):
         self._display.display("skipping: no hosts matched", color='cyan')

-    def playbook_on_no_hosts_remaining(self):
-        self._print_banner("NO MORE HOSTS LEFT")
+    def v2_playbook_on_no_hosts_remaining(self):
+        self._display.banner("NO MORE HOSTS LEFT")

-    def playbook_on_task_start(self, name, is_conditional):
-        self._print_banner("TASK [%s]" % name.strip())
+    def v2_playbook_on_task_start(self, task, is_conditional):
+        self._display.banner("TASK [%s]" % task.get_name().strip())

-    def playbook_on_cleanup_task_start(self, name):
-        self._print_banner("CLEANUP TASK [%s]" % name.strip())
+    def v2_playbook_on_cleanup_task_start(self, task):
+        self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip())

-    def playbook_on_handler_task_start(self, name):
-        self._print_banner("RUNNING HANDLER [%s]" % name.strip())
+    def v2_playbook_on_handler_task_start(self, task):
+        self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip())

-    def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
+    def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
         pass

-    def playbook_on_setup(self):
+    def v2_playbook_on_setup(self):
         pass

-    def playbook_on_import_for_host(self, host, imported_file):
+    def v2_playbook_on_import_for_host(self, result, imported_file):
         pass

-    def playbook_on_not_import_for_host(self, host, missing_file):
+    def v2_playbook_on_not_import_for_host(self, result, missing_file):
         pass

-    def playbook_on_play_start(self, name):
-        self._print_banner("PLAY [%s]" % name.strip())
+    def v2_playbook_on_play_start(self, play):
+        name = play.get_name().strip()
+        if not name:
+            msg = "PLAY"
+        else:
+            msg = "PLAY [%s]" % name

-    def playbook_on_stats(self, stats):
+        self._display.banner(name)
+
+    def v2_playbook_on_stats(self, stats):
         pass
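Note: the v2 runner callbacks take a single result object that carries the host, task, and return data (result._host, result._task, result._result), replacing the separate (task, result) arguments, and banner printing moves from the local _print_banner helper onto the shared display. One quirk visible in the hunk: v2_playbook_on_play_start builds msg but then passes name to banner(). A standalone sketch of the new calling convention, with hypothetical classes:

# Hypothetical miniature of the v2 convention: one result object per event.
import json

class TaskResult:
    # minimal stand-in for the object handed to v2_* callbacks
    def __init__(self, host, task, result):
        self._host, self._task, self._result = host, task, result

class SketchCallback:
    def __init__(self, display):
        self._display = display

    def v2_runner_on_failed(self, result, ignore_errors=False):
        # everything the old (task, result) pair provided hangs off result
        self._display.display("fatal: [%s]: FAILED! => %s" % (
            result._host, json.dumps(result._result, ensure_ascii=False)))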

View File

@@ -31,6 +31,8 @@ class CallbackModule(CallbackBase):
     to stdout when new callback events are received.
     '''

+    CALLBACK_VERSION = 2.0
+
     def _print_banner(self, msg):
         '''
         Prints a header-looking line with stars taking up to 80 columns
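Note: CALLBACK_VERSION = 2.0 marks this plugin as v2-aware. The code that reads the attribute is not part of this diff; presumably the loader or task queue manager gates on it along these lines (an assumption, not shown in this commit):

# Assumed version gate, for illustration only.
def is_v2_callback(plugin_class):
    return getattr(plugin_class, 'CALLBACK_VERSION', 1.0) >= 2.0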

View File

@@ -28,7 +28,7 @@ from ansible.inventory.host import Host
 from ansible.inventory.group import Group
 from ansible.playbook.handler import Handler
-from ansible.playbook.helpers import load_list_of_blocks, compile_block_list
+from ansible.playbook.helpers import load_list_of_blocks
 from ansible.playbook.role import ROLE_CACHE, hash_params
 from ansible.plugins import module_loader
 from ansible.utils.debug import debug
@@ -49,7 +49,7 @@ class StrategyBase:
         self._inventory = tqm.get_inventory()
         self._workers = tqm.get_workers()
         self._notified_handlers = tqm.get_notified_handlers()
-        self._callback = tqm.get_callback()
+        #self._callback = tqm.get_callback()
         self._variable_manager = tqm.get_variable_manager()
         self._loader = tqm.get_loader()
         self._final_q = tqm._final_q
@@ -73,6 +73,9 @@ class StrategyBase:
             debug("running handlers")
             result &= self.run_handlers(iterator, connection_info)

+        # send the stats callback
+        self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
+
         if not result:
             if num_unreachable > 0:
                 return 3
@@ -84,7 +87,7 @@ class StrategyBase:
         return 0

     def get_hosts_remaining(self, play):
-        return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.get_name() not in self._tqm._unreachable_hosts]
+        return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts]

     def get_failed_hosts(self, play):
         return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts]
@@ -132,17 +135,23 @@ class StrategyBase:
                 task = task_result._task
                 if result[0] == 'host_task_failed':
                     if not task.ignore_errors:
-                        debug("marking %s as failed" % host.get_name())
+                        debug("marking %s as failed" % host.name)
                         iterator.mark_host_failed(host)
-                        self._tqm._failed_hosts[host.get_name()] = True
-                        self._callback.runner_on_failed(task, task_result)
+                        self._tqm._failed_hosts[host.name] = True
+                        self._tqm._stats.increment('failures', host.name)
+                        self._tqm.send_callback('v2_runner_on_failed', task_result)
                 elif result[0] == 'host_unreachable':
-                    self._tqm._unreachable_hosts[host.get_name()] = True
-                    self._callback.runner_on_unreachable(task, task_result)
+                    self._tqm._unreachable_hosts[host.name] = True
+                    self._tqm._stats.increment('dark', host.name)
+                    self._tqm.send_callback('v2_runner_on_unreachable', task_result)
                 elif result[0] == 'host_task_skipped':
-                    self._callback.runner_on_skipped(task, task_result)
+                    self._tqm._stats.increment('skipped', host.name)
+                    self._tqm.send_callback('v2_runner_on_skipped', task_result)
                 elif result[0] == 'host_task_ok':
-                    self._callback.runner_on_ok(task, task_result)
+                    self._tqm._stats.increment('ok', host.name)
+                    if 'changed' in task_result._result and task_result._result['changed']:
+                        self._tqm._stats.increment('changed', host.name)
+                    self._tqm.send_callback('v2_runner_on_ok', task_result)

                 self._pending_results -= 1
                 if host.name in self._blocked_hosts:
@@ -160,22 +169,6 @@ class StrategyBase:
                     ret_results.append(task_result)

-                #elif result[0] == 'include':
-                #    host         = result[1]
-                #    task         = result[2]
-                #    include_file = result[3]
-                #    include_vars = result[4]
-                #
-                #    if isinstance(task, Handler):
-                #        # FIXME: figure out how to make includes work for handlers
-                #        pass
-                #    else:
-                #        original_task = iterator.get_original_task(host, task)
-                #        if original_task and original_task._role:
-                #            include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_file)
-                #        new_tasks = self._load_included_file(original_task, include_file, include_vars)
-                #        iterator.add_tasks(host, new_tasks)
-
                 elif result[0] == 'add_host':
                     task_result = result[1]
                     new_host_info = task_result.get('add_host', dict())
@@ -322,14 +315,11 @@ class StrategyBase:
             loader=self._loader
         )

-        task_list = compile_block_list(block_list)
-
         # set the vars for this task from those specified as params to the include
-        for t in task_list:
-            t.vars = included_file._args.copy()
+        for b in block_list:
+            b._vars = included_file._args.copy()

-        return task_list
+        return block_list

     def cleanup(self, iterator, connection_info):
         '''
@@ -361,7 +351,7 @@ class StrategyBase:
         while work_to_do:
             work_to_do = False
             for host in failed_hosts:
-                host_name = host.get_name()
+                host_name = host.name

                 if host_name in self._tqm._failed_hosts:
                     iterator.mark_host_failed(host)
@@ -377,7 +367,7 @@ class StrategyBase:
                     self._blocked_hosts[host_name] = True
                     task = iterator.get_next_task_for_host(host)
                     task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
-                    self._callback.playbook_on_cleanup_task_start(task.get_name())
+                    self._tqm.send_callback('v2_playbook_on_cleanup_task_start', task)
                     self._queue_task(host, task, task_vars, connection_info)

             self._process_pending_results(iterator)
@@ -398,31 +388,28 @@ class StrategyBase:
         # FIXME: getting the handlers from the iterators play should be
         #        a method on the iterator, which may also filter the list
        #        of handlers based on the notified list

-        handlers = compile_block_list(iterator._play.handlers)
-
-        debug("handlers are: %s" % handlers)
-
-        for handler in handlers:
-            handler_name = handler.get_name()
-
-            if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]):
-                if not len(self.get_hosts_remaining(iterator._play)):
-                    self._callback.playbook_on_no_hosts_remaining()
-                    result = False
-                    break
-
-                self._callback.playbook_on_handler_task_start(handler_name)
-                for host in self._notified_handlers[handler_name]:
-                    if not handler.has_triggered(host):
-                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler)
-                        self._queue_task(host, handler, task_vars, connection_info)
-                        handler.flag_for_host(host)
-
-                self._process_pending_results(iterator)
-                self._wait_on_pending_results(iterator)
-
-                # wipe the notification list
-                self._notified_handlers[handler_name] = []
-
-        debug("done running handlers, result is: %s" % result)
+        for handler_block in iterator._play.handlers:
+            debug("handlers are: %s" % handlers)
+            # FIXME: handlers need to support the rescue/always portions of blocks too,
+            #        but this may take some work in the iterator and gets tricky when
+            #        we consider the ability of meta tasks to flush handlers
+            for handler in handler_block.block:
+                handler_name = handler.get_name()
+                if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]):
+                    if not len(self.get_hosts_remaining(iterator._play)):
+                        self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
+                        result = False
+                        break
+                    self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
+                    for host in self._notified_handlers[handler_name]:
+                        if not handler.has_triggered(host):
+                            task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler)
+                            self._queue_task(host, handler, task_vars, connection_info)
+                            handler.flag_for_host(host)
+                    self._process_pending_results(iterator)
+                    self._wait_on_pending_results(iterator)
+                    # wipe the notification list
+                    self._notified_handlers[handler_name] = []
+        debug("done running handlers, result is: %s" % result)
         return result
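Note: the strategy base drops its direct self._callback handle in favor of self._tqm.send_callback(name, ...), letting the task queue manager fan events out to every loaded callback plugin; per-host stats (ok/changed/failures/dark/skipped) are now incremented as results are processed, with v2_playbook_on_stats emitted at the end of the run. Also note the new handler loop keeps a debug line that still references the removed handlers variable. A standalone sketch of the fan-out, with a hypothetical manager class:

# Hypothetical miniature of the send_callback broadcast (the real TQM side
# is not part of this diff).
class TinyTQM:
    def __init__(self, callback_plugins):
        self._callback_plugins = callback_plugins

    def send_callback(self, method_name, *args, **kwargs):
        # broadcast one event to every plugin that implements the hook
        for plugin in self._callback_plugins:
            method = getattr(plugin, method_name, None)
            if method is not None:
                method(*args, **kwargs)

class Printer:
    def v2_runner_on_ok(self, task_result):
        print("ok => %r" % (task_result,))

TinyTQM([Printer()]).send_callback('v2_runner_on_ok', {'changed': False})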

View File

@@ -21,6 +21,7 @@ __metaclass__ = type
 from ansible.errors import AnsibleError
 from ansible.executor.play_iterator import PlayIterator
 from ansible.playbook.block import Block
+from ansible.playbook.task import Task
 from ansible.plugins import action_loader
 from ansible.plugins.strategies import StrategyBase
@@ -52,6 +53,9 @@ class StrategyModule(StrategyBase):
             lowest_cur_block = len(iterator._blocks)
             for (k, v) in host_tasks.iteritems():
+                if v is None:
+                    continue
+
                 (s, t) = v
                 if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
                     lowest_cur_block = s.cur_block
@@ -131,7 +135,7 @@ class StrategyModule(StrategyBase):
                 debug("done getting the remaining hosts for this loop")
                 if len(hosts_left) == 0:
                     debug("out of hosts to run on")
-                    self._callback.playbook_on_no_hosts_remaining()
+                    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                     result = False
                     break
@@ -184,7 +188,6 @@ class StrategyModule(StrategyBase):
                         meta_action = task.args.get('_raw_params')
                         if meta_action == 'noop':
                             # FIXME: issue a callback for the noop here?
-                            print("%s => NOOP" % host)
                             continue
                         elif meta_action == 'flush_handlers':
                             self.run_handlers(iterator, connection_info)
@@ -192,7 +195,7 @@ class StrategyModule(StrategyBase):
                             raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
                     else:
                         if not callback_sent:
-                            self._callback.playbook_on_task_start(task.get_name(), False)
+                            self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                             callback_sent = True

                         self._blocked_hosts[host.get_name()] = True
@@ -234,6 +237,10 @@ class StrategyModule(StrategyBase):
                         include_results = [ res._result ]

                     for include_result in include_results:
+                        # if the task result was skipped or failed, continue
+                        if 'skipped' in include_result and include_result['skipped'] or 'failed' in include_result:
+                            continue
+
                         original_task = iterator.get_original_task(res._host, res._task)
                         if original_task and original_task._role:
                             include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_result['include'])
@@ -263,27 +270,31 @@ class StrategyModule(StrategyBase):
                     noop_task.args['_raw_params'] = 'noop'
                     noop_task.set_loader(iterator._play._loader)

-                    all_tasks = dict((host, []) for host in hosts_left)
+                    all_blocks = dict((host, []) for host in hosts_left)
                     for included_file in included_files:
                         # included hosts get the task list while those excluded get an equal-length
                         # list of noop tasks, to make sure that they continue running in lock-step
                         try:
-                            new_tasks = self._load_included_file(included_file)
+                            new_blocks = self._load_included_file(included_file)
                         except AnsibleError, e:
                             for host in included_file._hosts:
                                 iterator.mark_host_failed(host)
                             # FIXME: callback here?
                             print(e)

-                        noop_tasks = [noop_task for t in new_tasks]
-                        for host in hosts_left:
-                            if host in included_file._hosts:
-                                all_tasks[host].extend(new_tasks)
-                            else:
-                                all_tasks[host].extend(noop_tasks)
+                        for new_block in new_blocks:
+                            noop_block = Block(parent_block=task._block)
+                            noop_block.block = [noop_task for t in new_block.block]
+                            noop_block.always = [noop_task for t in new_block.always]
+                            noop_block.rescue = [noop_task for t in new_block.rescue]
+
+                            for host in hosts_left:
+                                if host in included_file._hosts:
+                                    all_blocks[host].append(new_block)
+                                else:
+                                    all_blocks[host].append(noop_block)

                     for host in hosts_left:
-                        iterator.add_tasks(host, all_tasks[host])
+                        iterator.add_tasks(host, all_blocks[host])

                 debug("results queue empty")
             except (IOError, EOFError), e:
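Note: with includes now returning whole blocks instead of flat task lists, hosts that did not run the include receive a noop block of the same shape (block/rescue/always each padded to the same length), so the linear strategy keeps every host advancing through the same number of steps. A standalone sketch of the lock-step padding idea:

# Hypothetical miniature of the padding logic above; strings stand in for
# Task/Block objects.
real_block = ['task_a', 'task_b', 'task_c']
noop_block = ['noop' for _ in real_block]  # same length, no effect

included_hosts = {'web1'}
hosts_left = ['web1', 'db1']
all_blocks = dict((host, []) for host in hosts_left)
for host in hosts_left:
    if host in included_hosts:
        all_blocks[host].append(real_block)
    else:
        all_blocks[host].append(noop_block)  # keeps db1 in lock-step

assert len(all_blocks['web1'][0]) == len(all_blocks['db1'][0])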