Making the switch to v2
21 lib/ansible/executor/__init__.py Normal file
@@ -0,0 +1,21 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
270 lib/ansible/executor/connection_info.py Normal file
@@ -0,0 +1,270 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import pipes
import random

from ansible import constants as C
from ansible.template import Templar
from ansible.utils.boolean import boolean
from ansible.errors import AnsibleError

__all__ = ['ConnectionInformation']


class ConnectionInformation:

    '''
    This class is used to consolidate the connection information for
    hosts in a play and child tasks, where the task may override some
    connection/authentication information.
    '''

    def __init__(self, play=None, options=None, passwords=None):

        if passwords is None:
            passwords = {}

        # connection
        self.connection = None
        self.remote_addr = None
        self.remote_user = None
        self.password = passwords.get('conn_pass','')
        self.port = None
        self.private_key_file = C.DEFAULT_PRIVATE_KEY_FILE
        self.timeout = C.DEFAULT_TIMEOUT

        # privilege escalation
        self.become = None
        self.become_method = None
        self.become_user = None
        self.become_pass = passwords.get('become_pass','')

        # general flags (should we move out?)
        self.verbosity = 0
        self.only_tags = set()
        self.skip_tags = set()
        self.no_log = False
        self.check_mode = False

        #TODO: just pull options setup to above?
        # set options before play to allow play to override them
        if options:
            self.set_options(options)

        if play:
            self.set_play(play)

    def __repr__(self):
        value = "CONNECTION INFO:\n"
        fields = self._get_fields()
        fields.sort()
        for field in fields:
            value += "%20s : %s\n" % (field, getattr(self, field))
        return value

    def set_play(self, play):
        '''
        Configures this connection information instance with data from
        the play class.
        '''

        if play.connection:
            self.connection = play.connection

        if play.remote_user:
            self.remote_user = play.remote_user

        if play.port:
            self.port = int(play.port)

        if play.become is not None:
            self.become = play.become
        if play.become_method:
            self.become_method = play.become_method
        if play.become_user:
            self.become_user = play.become_user
        self.become_pass = play.become_pass

        # non connection related
        self.no_log = play.no_log
        self.environment = play.environment

    def set_options(self, options):
        '''
        Configures this connection information instance with data from
        options specified by the user on the command line. These have a
        higher precedence than those set on the play or host.
        '''

        if options.connection:
            self.connection = options.connection

        self.remote_user = options.remote_user
        self.private_key_file = options.private_key_file

        # privilege escalation
        self.become = options.become
        self.become_method = options.become_method
        self.become_user = options.become_user
        self.become_pass = ''

        # general flags (should we move out?)
        if options.verbosity:
            self.verbosity = options.verbosity
        #if options.no_log:
        #    self.no_log = boolean(options.no_log)
        if options.check:
            self.check_mode = boolean(options.check)

        # get the tag info from options, converting a comma-separated list
        # of values into a proper list if need be. We check to see if the
        # options have the attribute, as it is not always added via the CLI
        if hasattr(options, 'tags'):
            if isinstance(options.tags, list):
                self.only_tags.update(options.tags)
            elif isinstance(options.tags, basestring):
                self.only_tags.update(options.tags.split(','))

        if len(self.only_tags) == 0:
            self.only_tags = set(['all'])

        if hasattr(options, 'skip_tags'):
            if isinstance(options.skip_tags, list):
                self.skip_tags.update(options.skip_tags)
            elif isinstance(options.skip_tags, basestring):
                self.skip_tags.update(options.skip_tags.split(','))

    def copy(self, ci):
        '''
        Copies the connection info from another connection info object, used
        when merging in data from task overrides.
        '''

        for field in self._get_fields():
            value = getattr(ci, field, None)
            if isinstance(value, dict):
                setattr(self, field, value.copy())
            elif isinstance(value, set):
                setattr(self, field, value.copy())
            elif isinstance(value, list):
                setattr(self, field, value[:])
            else:
                setattr(self, field, value)

    def set_task_override(self, task):
        '''
        Sets attributes from the task if they are set, which will override
        those from the play.
        '''

        new_info = ConnectionInformation()
        new_info.copy(self)

        for attr in ('connection', 'remote_user', 'become', 'become_user', 'become_pass', 'become_method', 'environment', 'no_log'):
            if hasattr(task, attr):
                attr_val = getattr(task, attr)
                if attr_val:
                    setattr(new_info, attr, attr_val)

        return new_info

    def make_become_cmd(self, cmd, executable, become_settings=None):
        """
        helper function to create privilege escalation commands
        """

        # FIXME: become settings should probably be stored in the connection info itself
        if become_settings is None:
            become_settings = {}

        randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
        success_key = 'BECOME-SUCCESS-%s' % randbits
        prompt = None
        becomecmd = None

        executable = executable or '$SHELL'

        success_cmd = pipes.quote('echo %s; %s' % (success_key, cmd))
        if self.become:
            if self.become_method == 'sudo':
                # Rather than detect if sudo wants a password this time, -k makes sudo always ask for
                # a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
                # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
                # string to the user's shell. We loop reading output until we see the randomly-generated
                # sudo prompt set with the -p option.
                prompt = '[sudo via ansible, key=%s] password: ' % randbits
                exe = become_settings.get('sudo_exe', C.DEFAULT_SUDO_EXE)
                flags = become_settings.get('sudo_flags', C.DEFAULT_SUDO_FLAGS)
                becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \
                    (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, executable, success_cmd)

            elif self.become_method == 'su':
                exe = become_settings.get('su_exe', C.DEFAULT_SU_EXE)
                flags = become_settings.get('su_flags', C.DEFAULT_SU_FLAGS)
                becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, executable, success_cmd)

            elif self.become_method == 'pbrun':
                exe = become_settings.get('pbrun_exe', 'pbrun')
                flags = become_settings.get('pbrun_flags', '')
                becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, self.become_user, success_cmd)

            elif self.become_method == 'pfexec':
                exe = become_settings.get('pfexec_exe', 'pfexec')
                flags = become_settings.get('pfexec_flags', '')
                # No user as it uses its own exec_attr to figure it out
                becomecmd = '%s %s "%s"' % (exe, flags, success_cmd)

            else:
                raise AnsibleError("Privilege escalation method not found: %s" % self.become_method)

            return (('%s -c ' % executable) + pipes.quote(becomecmd), prompt, success_key)

        return (cmd, "", "")

    def check_become_success(self, output, become_settings):
        #TODO: implement
        pass

    def _get_fields(self):
        return [i for i in self.__dict__.keys() if i[:1] != '_']

    def post_validate(self, templar):
        '''
        Finalizes templated values which may be set on this object's fields.
        '''

        for field in self._get_fields():
            value = templar.template(getattr(self, field))
            setattr(self, field, value)

    def update_vars(self, variables):
        '''
        Adds 'magic' variables relating to connections to the variable dictionary provided.
        '''

        variables['ansible_connection'] = self.connection
        variables['ansible_ssh_host'] = self.remote_addr
        variables['ansible_ssh_pass'] = self.password
        variables['ansible_ssh_port'] = self.port
        variables['ansible_ssh_user'] = self.remote_user
        variables['ansible_ssh_private_key_file'] = self.private_key_file
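The layering above — defaults in __init__(), then values merged in from the command-line options, the play, and finally per-task overrides — is easy to see in a standalone sketch. The class and field names below are illustrative only, not Ansible's API; the one behaviour borrowed from the code above is that a layer overrides a field only when it supplies a truthy value.

# Standalone sketch of the ConnectionInformation layering idea; names
# here (ConnInfoSketch, apply) are made up for illustration.

class ConnInfoSketch:
    def __init__(self):
        self.connection = None   # defaults
        self.remote_user = None
        self.become = None

    def apply(self, overrides):
        # mirror the "if value: setattr(...)" pattern of set_task_override()
        for key, value in overrides.items():
            if value:
                setattr(self, key, value)

info = ConnInfoSketch()
info.apply({'connection': 'paramiko'})                      # command-line options
info.apply({'connection': 'ssh', 'remote_user': 'deploy'})  # play-level settings
info.apply({'become': True})                                # task-level override
print(info.connection, info.remote_user, info.become)       # ssh deploy True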
199 lib/ansible/executor/module_common.py Normal file
@@ -0,0 +1,199 @@
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

# from python and deps
from six.moves import StringIO
import json
import os
import shlex

# from Ansible
from ansible import __version__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.parsing.utils.jsonify import jsonify

REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\""
REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = "# POWERSHELL_COMMON"
REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\""

# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = '# -*- coding: utf-8 -*-'

# we've moved the module_common relative to the snippets, so fix the path
_SNIPPET_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')

# ******************************************************************************

def _slurp(path):
    if not os.path.exists(path):
        raise AnsibleError("imported module support code does not exist at %s" % path)
    fd = open(path)
    data = fd.read()
    fd.close()
    return data

def _find_snippet_imports(module_data, module_path, strip_comments):
    """
    Given the source of the module, convert it to a Jinja2 template to insert
    module code and return whether it's a new or old style module.
    """

    module_style = 'old'
    if REPLACER in module_data:
        module_style = 'new'
    elif 'from ansible.module_utils.' in module_data:
        module_style = 'new'
    elif 'WANT_JSON' in module_data:
        module_style = 'non_native_want_json'

    output = StringIO()
    lines = module_data.split('\n')
    snippet_names = []

    for line in lines:

        if REPLACER in line:
            output.write(_slurp(os.path.join(_SNIPPET_PATH, "basic.py")))
            snippet_names.append('basic')
        if REPLACER_WINDOWS in line:
            ps_data = _slurp(os.path.join(_SNIPPET_PATH, "powershell.ps1"))
            output.write(ps_data)
            snippet_names.append('powershell')
        elif line.startswith('from ansible.module_utils.'):
            tokens = line.split(".")
            import_error = False
            if len(tokens) != 3:
                import_error = True
            if " import *" not in line:
                import_error = True
            if import_error:
                raise AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'" % module_path)
            snippet_name = tokens[2].split()[0]
            snippet_names.append(snippet_name)
            output.write(_slurp(os.path.join(_SNIPPET_PATH, snippet_name + ".py")))
        else:
            if strip_comments and (line.startswith("#") or line == ''):
                # skip comments and blank lines when comment stripping is enabled
                continue
            output.write(line)
            output.write("\n")

    if not module_path.endswith(".ps1"):
        # Unixy modules
        if len(snippet_names) > 0 and 'basic' not in snippet_names:
            raise AnsibleError("missing required import in %s: from ansible.module_utils.basic import *" % module_path)
    else:
        # Windows modules
        if len(snippet_names) > 0 and 'powershell' not in snippet_names:
            raise AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path)

    return (output.getvalue(), module_style)

# ******************************************************************************

def modify_module(module_path, module_args, strip_comments=False):
    """
    Used to insert chunks of code into modules before transfer rather than
    doing regular python imports. This allows for more efficient transfer in
    a non-bootstrapping scenario by not moving extra files over the wire and
    also takes care of embedding arguments in the transferred modules.

    This version is done in such a way that local imports can still be
    used in the module code, so IDEs don't have to be aware of what is going on.

    Example:

    from ansible.module_utils.basic import *

    ... will result in the insertion of basic.py into the module
    from the module_utils/ directory in the source tree.

    All modules are required to import at least basic, though there will also
    be other snippets.

    For powershell, there are equivalent conventions like this:

    # POWERSHELL_COMMON

    which results in the inclusion of the common code from powershell.ps1

    """
    ### TODO: Optimization ideas if this code is actually a source of slowness:
    # * Fix comment stripping: Currently doesn't preserve shebangs and encoding info (but we unconditionally add encoding info)
    # * Use pyminifier if installed
    # * comment stripping/pyminifier needs to have config setting to turn it
    #   off for debugging purposes (goes along with keep remote but should be
    #   separate, otherwise users wouldn't be able to get info on what the
    #   minifier output)
    # * Only split into lines and recombine into strings once
    # * Cache the modified module? If only the args are different and we do
    #   that as the last step we could cache all the work up to that point.

    with open(module_path) as f:

        # read in the module source
        module_data = f.read()

        (module_data, module_style) = _find_snippet_imports(module_data, module_path, strip_comments)

        #module_args_json = jsonify(module_args)
        module_args_json = json.dumps(module_args)
        encoded_args = repr(module_args_json.encode('utf-8'))

        # these strings should be part of the 'basic' snippet which is required to be included
        module_data = module_data.replace(REPLACER_VERSION, repr(__version__))
        module_data = module_data.replace(REPLACER_COMPLEX, encoded_args)

        # FIXME: we're not passing around an inject dictionary anymore, so
        #        this needs to be fixed with whatever method we use for vars
        #        like this moving forward
        #if module_style == 'new':
        #    facility = C.DEFAULT_SYSLOG_FACILITY
        #    if 'ansible_syslog_facility' in inject:
        #        facility = inject['ansible_syslog_facility']
        #    module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)

        lines = module_data.split(b"\n", 1)
        shebang = None
        if lines[0].startswith(b"#!"):
            shebang = lines[0].strip()
            args = shlex.split(str(shebang[2:]))
            interpreter = args[0]
            interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)

            # FIXME: more inject stuff here...
            #from ansible.utils.unicode import to_bytes
            #if interpreter_config in inject:
            #    interpreter = to_bytes(inject[interpreter_config], errors='strict')
            #    lines[0] = shebang = b"#!{0} {1}".format(interpreter, b" ".join(args[1:]))

            lines.insert(1, ENCODING_STRING)
        else:
            lines.insert(0, ENCODING_STRING)

        module_data = b"\n".join(lines)

        return (module_data, module_style, shebang)

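As a rough illustration of what _find_snippet_imports() and modify_module() do, the toy sketch below swaps a REPLACER marker line for shared support code before "transfer". The SNIPPET text is made up (a stand-in for module_utils/basic.py); only the replacement mechanism mirrors the code above.

# Toy version of the snippet expansion: a marker in the module source is
# replaced with the contents of a shared support file before transfer.
REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
SNIPPET = "def exit_json(**kwargs):\n    print(kwargs)\n"

module_source = "\n".join([
    "#!/usr/bin/python",
    REPLACER,
    "exit_json(changed=False)",
])

expanded = module_source.replace(REPLACER, SNIPPET)
exec(expanded)  # prints {'changed': False}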
302 lib/ansible/executor/play_iterator.py Normal file
@@ -0,0 +1,302 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.errors import *
from ansible.playbook.block import Block
from ansible.playbook.task import Task

from ansible.utils.boolean import boolean

__all__ = ['PlayIterator']

class HostState:
    def __init__(self, blocks):
        self._blocks = blocks[:]

        self.cur_block = 0
        self.cur_regular_task = 0
        self.cur_rescue_task = 0
        self.cur_always_task = 0
        self.cur_role = None
        self.run_state = PlayIterator.ITERATING_SETUP
        self.fail_state = PlayIterator.FAILED_NONE
        self.pending_setup = False
        self.child_state = None

    def __repr__(self):
        return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%d, fail_state=%d, pending_setup=%s, child state? %s" % (
            self.cur_block,
            self.cur_regular_task,
            self.cur_rescue_task,
            self.cur_always_task,
            self.cur_role,
            self.run_state,
            self.fail_state,
            self.pending_setup,
            self.child_state,
        )

    def get_current_block(self):
        return self._blocks[self.cur_block]

    def copy(self):
        new_state = HostState(self._blocks)
        new_state.cur_block = self.cur_block
        new_state.cur_regular_task = self.cur_regular_task
        new_state.cur_rescue_task = self.cur_rescue_task
        new_state.cur_always_task = self.cur_always_task
        new_state.cur_role = self.cur_role
        new_state.run_state = self.run_state
        new_state.fail_state = self.fail_state
        new_state.pending_setup = self.pending_setup
        new_state.child_state = self.child_state
        return new_state

class PlayIterator:

    # the primary running states for the play iteration
    ITERATING_SETUP    = 0
    ITERATING_TASKS    = 1
    ITERATING_RESCUE   = 2
    ITERATING_ALWAYS   = 3
    ITERATING_COMPLETE = 4

    # the failure states for the play iteration, which are powers
    # of 2 as they may be or'ed together in certain circumstances
    FAILED_NONE   = 0
    FAILED_SETUP  = 1
    FAILED_TASKS  = 2
    FAILED_RESCUE = 4
    FAILED_ALWAYS = 8

    def __init__(self, inventory, play, connection_info, all_vars):
        self._play = play

        self._blocks = []
        for block in self._play.compile():
            new_block = block.filter_tagged_tasks(connection_info, all_vars)
            if new_block.has_tasks():
                self._blocks.append(new_block)

        self._host_states = {}
        for host in inventory.get_hosts(self._play.hosts):
            self._host_states[host.name] = HostState(blocks=self._blocks)

    def get_host_state(self, host):
        try:
            return self._host_states[host.name].copy()
        except KeyError:
            raise AnsibleError("invalid host (%s) specified for playbook iteration" % host)

    def get_next_task_for_host(self, host, peek=False):

        s = self.get_host_state(host)

        task = None
        if s.run_state == self.ITERATING_COMPLETE:
            return None
        elif s.run_state == self.ITERATING_SETUP:
            s.run_state = self.ITERATING_TASKS
            s.pending_setup = True
            if self._play.gather_facts == 'smart' and not host._gathered_facts or boolean(self._play.gather_facts):
                if not peek:
                    # mark the host as having gathered facts
                    host.set_gathered_facts(True)

                task = Task()
                task.action = 'setup'
                task.args = {}
                task.set_loader(self._play._loader)
            else:
                s.pending_setup = False

        if not task:
            (s, task) = self._get_next_task_from_state(s, peek=peek)

        if task and task._role:
            # if we had a current role, mark that role as completed
            if s.cur_role and task._role != s.cur_role and s.cur_role._had_task_run and not peek:
                s.cur_role._completed = True
            s.cur_role = task._role

        if not peek:
            self._host_states[host.name] = s

        return (s, task)


    def _get_next_task_from_state(self, state, peek):

        task = None

        # if we previously encountered a child block and we have a
        # saved child state, try and get the next task from there
        if state.child_state:
            (state.child_state, task) = self._get_next_task_from_state(state.child_state, peek=peek)
            if task:
                return (state.child_state, task)
            else:
                state.child_state = None

        # try and find the next task, given the current state.
        while True:
            # try to get the current block from the list of blocks, and
            # if we run past the end of the list we know we're done with
            # this block
            try:
                block = state._blocks[state.cur_block]
            except IndexError:
                state.run_state = self.ITERATING_COMPLETE
                return (state, None)

            if state.run_state == self.ITERATING_TASKS:
                # clear the pending setup flag, since we're past that and it didn't fail
                if state.pending_setup:
                    state.pending_setup = False

                if state.fail_state & self.FAILED_TASKS == self.FAILED_TASKS:
                    state.run_state = self.ITERATING_RESCUE
                elif state.cur_regular_task >= len(block.block):
                    state.run_state = self.ITERATING_ALWAYS
                else:
                    task = block.block[state.cur_regular_task]
                    state.cur_regular_task += 1

            elif state.run_state == self.ITERATING_RESCUE:
                if state.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
                    state.run_state = self.ITERATING_ALWAYS
                elif state.cur_rescue_task >= len(block.rescue):
                    if len(block.rescue) > 0:
                        state.fail_state = self.FAILED_NONE
                    state.run_state = self.ITERATING_ALWAYS
                else:
                    task = block.rescue[state.cur_rescue_task]
                    state.cur_rescue_task += 1

            elif state.run_state == self.ITERATING_ALWAYS:
                if state.cur_always_task >= len(block.always):
                    if state.fail_state != self.FAILED_NONE:
                        state.run_state = self.ITERATING_COMPLETE
                    else:
                        state.cur_block += 1
                        state.cur_regular_task = 0
                        state.cur_rescue_task = 0
                        state.cur_always_task = 0
                        state.run_state = self.ITERATING_TASKS
                        state.child_state = None
                else:
                    task = block.always[state.cur_always_task]
                    state.cur_always_task += 1

            elif state.run_state == self.ITERATING_COMPLETE:
                return (state, None)

            # if the current task is actually a child block, we dive into it
            if isinstance(task, Block):
                state.child_state = HostState(blocks=[task])
                state.child_state.run_state = self.ITERATING_TASKS
                state.child_state.cur_role = state.cur_role
                (state.child_state, task) = self._get_next_task_from_state(state.child_state, peek=peek)

            # if something above set the task, break out of the loop now
            if task:
                break

        return (state, task)

    def mark_host_failed(self, host):
        s = self.get_host_state(host)
        if s.pending_setup:
            s.fail_state |= self.FAILED_SETUP
            s.run_state = self.ITERATING_COMPLETE
        elif s.run_state == self.ITERATING_TASKS:
            s.fail_state |= self.FAILED_TASKS
            s.run_state = self.ITERATING_RESCUE
        elif s.run_state == self.ITERATING_RESCUE:
            s.fail_state |= self.FAILED_RESCUE
            s.run_state = self.ITERATING_ALWAYS
        elif s.run_state == self.ITERATING_ALWAYS:
            s.fail_state |= self.FAILED_ALWAYS
            s.run_state = self.ITERATING_COMPLETE
        self._host_states[host.name] = s

    def get_failed_hosts(self):
        return dict((host, True) for (host, state) in self._host_states.iteritems() if state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE)

    def get_original_task(self, host, task):
        '''
        Finds the task in the task list which matches the UUID of the given task.
        The executor engine serializes/deserializes objects as they are passed through
        the different processes, and not all data structures are preserved. This method
        allows us to find the original task passed into the executor engine.
        '''
        def _search_block(block, task):
            for t in block.block:
                if isinstance(t, Block):
                    res = _search_block(t, task)
                    if res:
                        return res
                elif t._uuid == task._uuid:
                    return t
            for t in block.rescue:
                if isinstance(t, Block):
                    res = _search_block(t, task)
                    if res:
                        return res
                elif t._uuid == task._uuid:
                    return t
            for t in block.always:
                if isinstance(t, Block):
                    res = _search_block(t, task)
                    if res:
                        return res
                elif t._uuid == task._uuid:
                    return t
            return None

        s = self.get_host_state(host)
        for block in s._blocks:
            res = _search_block(block, task)
            if res:
                return res

        return None

    def add_tasks(self, host, task_list):
        s = self.get_host_state(host)
        target_block = s._blocks[s.cur_block].copy(exclude_parent=True)

        if s.run_state == self.ITERATING_TASKS:
            before = target_block.block[:s.cur_regular_task]
            after = target_block.block[s.cur_regular_task:]
            target_block.block = before + task_list + after
        elif s.run_state == self.ITERATING_RESCUE:
            before = target_block.rescue[:s.cur_rescue_task]
            after = target_block.rescue[s.cur_rescue_task:]
            target_block.rescue = before + task_list + after
        elif s.run_state == self.ITERATING_ALWAYS:
            before = target_block.always[:s.cur_always_task]
            after = target_block.always[s.cur_always_task:]
            target_block.always = before + task_list + after

        s._blocks[s.cur_block] = target_block
        self._host_states[host.name] = s

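The FAILED_* constants are powers of two precisely so that mark_host_failed() can OR several failure states into one fail_state and the iterator can test each independently. A quick standalone check of that bitmask behaviour:

# Bitmask behaviour of the failure states, checked in isolation.
FAILED_NONE, FAILED_SETUP, FAILED_TASKS, FAILED_RESCUE, FAILED_ALWAYS = 0, 1, 2, 4, 8

fail_state = FAILED_NONE
fail_state |= FAILED_TASKS   # a regular task failed...
fail_state |= FAILED_RESCUE  # ...and then its rescue section failed too

print(fail_state)                                   # 6
print(fail_state & FAILED_TASKS == FAILED_TASKS)    # True
print(fail_state & FAILED_ALWAYS == FAILED_ALWAYS)  # False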
211 lib/ansible/executor/playbook_executor.py Normal file
@@ -0,0 +1,211 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import signal

from ansible import constants as C
from ansible.errors import *
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.playbook import Playbook
from ansible.template import Templar

from ansible.utils.color import colorize, hostcolor
from ansible.utils.debug import debug

class PlaybookExecutor:

    '''
    This is the primary class for executing playbooks, and thus the
    basis for bin/ansible-playbook operation.
    '''

    def __init__(self, playbooks, inventory, variable_manager, loader, display, options, passwords):
        self._playbooks = playbooks
        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self._display = display
        self._options = options
        self.passwords = passwords

        if options.listhosts or options.listtasks or options.listtags:
            self._tqm = None
        else:
            self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=self.passwords)

    def run(self):

        '''
        Run the given playbook, based on the settings in the play which
        may limit the runs to serialized groups, etc.
        '''

        signal.signal(signal.SIGINT, self._cleanup)

        result = 0
        entrylist = []
        entry = {}
        try:
            for playbook_path in self._playbooks:
                pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)

                if self._tqm is None: # we are doing a listing
                    entry = {'playbook': playbook_path}
                    entry['plays'] = []

                i = 1
                plays = pb.get_plays()
                self._display.vv('%d plays in %s' % (len(plays), playbook_path))

                for play in plays:
                    self._inventory.remove_restriction()

                    # Create a temporary copy of the play here, so we can run post_validate
                    # on it without the templating changes affecting the original object.
                    all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
                    templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=False)
                    new_play = play.copy()
                    new_play.post_validate(templar)

                    if self._tqm is None:
                        # we are just doing a listing

                        pname = new_play.get_name().strip()
                        if pname == 'PLAY: <no name specified>':
                            pname = 'PLAY: #%d' % i
                        p = { 'name': pname }

                        if self._options.listhosts:
                            p['pattern'] = play.hosts
                            p['hosts'] = set(self._inventory.get_hosts(new_play.hosts))

                        #TODO: play tasks are really blocks, need to figure out how to get task objects from them
                        elif self._options.listtasks:
                            p['tasks'] = []
                            for task in play.get_tasks():
                                p['tasks'].append(task)
                                #p['tasks'].append({'name': task.get_name().strip(), 'tags': task.tags})

                        elif self._options.listtags:
                            p['tags'] = set(new_play.tags)
                            for task in play.get_tasks():
                                p['tags'].update(task)
                                #p['tags'].update(task.tags)
                        entry['plays'].append(p)

                    else:
                        # we are actually running plays
                        for batch in self._get_serialized_batches(new_play):
                            if len(batch) == 0:
                                self._tqm.send_callback('v2_playbook_on_play_start', new_play)
                                self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
                                result = 0
                                break
                            # restrict the inventory to the hosts in the serialized batch
                            self._inventory.restrict_to_hosts(batch)
                            # and run it...
                            result = self._tqm.run(play=play)
                            if result != 0:
                                break

                        if result != 0:
                            break

                    i = i + 1 # per play

                if entry:
                    entrylist.append(entry) # per playbook

            if entrylist:
                return entrylist

        finally:
            if self._tqm is not None:
                self._cleanup()

        # FIXME: this stat summary stuff should be cleaned up and moved
        #        to a new method, if it even belongs here...
        self._display.banner("PLAY RECAP")

        hosts = sorted(self._tqm._stats.processed.keys())
        for h in hosts:
            t = self._tqm._stats.summarize(h)

            self._display.display("%s : %s %s %s %s" % (
                hostcolor(h, t),
                colorize('ok', t['ok'], 'green'),
                colorize('changed', t['changed'], 'yellow'),
                colorize('unreachable', t['unreachable'], 'red'),
                colorize('failed', t['failures'], 'red')),
                screen_only=True
            )

            self._display.display("%s : %s %s %s %s" % (
                hostcolor(h, t, False),
                colorize('ok', t['ok'], None),
                colorize('changed', t['changed'], None),
                colorize('unreachable', t['unreachable'], None),
                colorize('failed', t['failures'], None)),
                log_only=True
            )

        self._display.display("", screen_only=True)
        # END STATS STUFF

        return result

    def _cleanup(self, signum=None, framenum=None):
        return self._tqm.cleanup()

    def _get_serialized_batches(self, play):
        '''
        Returns a list of hosts, subdivided into batches based on
        the serial size specified in the play.
        '''

        # make sure we have a unique list of hosts
        all_hosts = self._inventory.get_hosts(play.hosts)

        # check to see if the serial number was specified as a percentage,
        # and convert it to an integer value based on the number of hosts
        if isinstance(play.serial, basestring) and play.serial.endswith('%'):
            serial_pct = int(play.serial.replace("%",""))
            serial = int((serial_pct/100.0) * len(all_hosts))
        else:
            serial = int(play.serial)

        # if the serial count was not specified or is invalid, default to
        # a list of all hosts, otherwise split the list of hosts into chunks
        # which are based on the serial size
        if serial <= 0:
            return [all_hosts]
        else:
            serialized_batches = []

            while len(all_hosts) > 0:
                play_hosts = []
                for x in range(serial):
                    if len(all_hosts) > 0:
                        play_hosts.append(all_hosts.pop(0))

                serialized_batches.append(play_hosts)

            return serialized_batches
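The batching rule in _get_serialized_batches() can be checked in isolation. The sketch below reimplements the same logic with plain strings standing in for inventory hosts (and Python 3's str in place of basestring):

# Standalone reimplementation of the serial-batching rule: 'serial' may be
# an int or a percentage string; percentages are taken of the host count.
def serialized_batches(all_hosts, serial):
    if isinstance(serial, str) and serial.endswith('%'):
        serial = int((int(serial.replace('%', '')) / 100.0) * len(all_hosts))
    else:
        serial = int(serial)
    if serial <= 0:
        return [all_hosts]
    return [all_hosts[i:i + serial] for i in range(0, len(all_hosts), serial)]

hosts = ['web1', 'web2', 'web3', 'web4', 'web5']
print(serialized_batches(hosts, 2))      # [['web1', 'web2'], ['web3', 'web4'], ['web5']]
print(serialized_batches(hosts, '40%'))  # same batches: 40% of 5 hosts is 2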
21 lib/ansible/executor/process/__init__.py Normal file
@@ -0,0 +1,21 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
176 lib/ansible/executor/process/result.py Normal file
@@ -0,0 +1,176 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from six.moves import queue
import multiprocessing
import os
import signal
import sys
import time
import traceback

HAS_ATFORK=True
try:
    from Crypto.Random import atfork
except ImportError:
    HAS_ATFORK=False

from ansible.playbook.handler import Handler
from ansible.playbook.task import Task

from ansible.utils.debug import debug

__all__ = ['ResultProcess']


class ResultProcess(multiprocessing.Process):
    '''
    The result worker thread, which reads results from the results
    queue and fires off callbacks/etc. as necessary.
    '''

    def __init__(self, final_q, workers):

        # takes a task queue manager as the sole param:
        self._final_q = final_q
        self._workers = workers
        self._cur_worker = 0
        self._terminated = False

        super(ResultProcess, self).__init__()

    def _send_result(self, result):
        debug("sending result: %s" % (result,))
        self._final_q.put(result, block=False)
        debug("done sending result")

    def _read_worker_result(self):
        result = None
        starting_point = self._cur_worker
        while True:
            (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
            self._cur_worker += 1
            if self._cur_worker >= len(self._workers):
                self._cur_worker = 0

            try:
                if not rslt_q.empty():
                    debug("worker %d has data to read" % self._cur_worker)
                    result = rslt_q.get(block=False)
                    debug("got a result from worker %d: %s" % (self._cur_worker, result))
                    break
            except queue.Empty:
                pass

            if self._cur_worker == starting_point:
                break

        return result

    def terminate(self):
        self._terminated = True
        super(ResultProcess, self).terminate()

    def run(self):
        '''
        The main thread execution, which reads from the results queue
        indefinitely and sends callbacks/etc. when results are received.
        '''

        if HAS_ATFORK:
            atfork()

        while True:
            try:
                result = self._read_worker_result()
                if result is None:
                    time.sleep(0.1)
                    continue

                host_name = result._host.get_name()

                # send callbacks, execute other options based on the result status
                # FIXME: this should all be cleaned up and probably moved to a sub-function.
                #        the fact that this sometimes sends a TaskResult and other times
                #        sends a raw dictionary back may be confusing, but the result vs.
                #        results implementation for tasks with loops should be cleaned up
                #        better than this
                if result.is_unreachable():
                    self._send_result(('host_unreachable', result))
                elif result.is_failed():
                    self._send_result(('host_task_failed', result))
                elif result.is_skipped():
                    self._send_result(('host_task_skipped', result))
                else:
                    # if this task is notifying a handler, do it now
                    if result._task.notify:
                        # The shared dictionary for notified handlers is a proxy, which
                        # does not detect when sub-objects within the proxy are modified.
                        # So, per the docs, we reassign the list so the proxy picks up and
                        # notifies all other threads
                        for notify in result._task.notify:
                            self._send_result(('notify_handler', result._host, notify))

                    if result._task.loop:
                        # this task had a loop, and has more than one result, so
                        # loop over all of them instead of a single result
                        result_items = result._result['results']
                    else:
                        result_items = [ result._result ]

                    for result_item in result_items:
                        #if 'include' in result_item:
                        #    include_variables = result_item.get('include_variables', dict())
                        #    if 'item' in result_item:
                        #        include_variables['item'] = result_item['item']
                        #    self._send_result(('include', result._host, result._task, result_item['include'], include_variables))
                        #elif 'add_host' in result_item:
                        if 'add_host' in result_item:
                            # this task added a new host (add_host module)
                            self._send_result(('add_host', result_item))
                        elif 'add_group' in result_item:
                            # this task added a new group (group_by module)
                            self._send_result(('add_group', result._host, result_item))
                        elif 'ansible_facts' in result_item:
                            # if this task is registering facts, do that now
                            if result._task.action in ('set_fact', 'include_vars'):
                                for (key, value) in result_item['ansible_facts'].iteritems():
                                    self._send_result(('set_host_var', result._host, key, value))
                            else:
                                self._send_result(('set_host_facts', result._host, result_item['ansible_facts']))

                    # finally, send the ok for this task
                    self._send_result(('host_task_ok', result))

                    # if this task is registering a result, do it now
                    if result._task.register:
                        self._send_result(('set_host_var', result._host, result._task.register, result._result))

            except queue.Empty:
                pass
            except (KeyboardInterrupt, IOError, EOFError):
                break
            except:
                # FIXME: we should probably send a proper callback here instead of
                #        simply dumping a stack trace on the screen
                traceback.print_exc()
                break
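The round-robin scan in _read_worker_result() is easy to exercise on its own: starting from the current worker, each result queue is polled once without blocking, and the scan stops at the first available result or after one full cycle. In this sketch, plain queue.Queue objects stand in for the per-worker multiprocessing queues:

# Standalone sketch of the round-robin result scan.
import queue

workers = [queue.Queue() for _ in range(3)]
workers[1].put('result-from-worker-1')

def read_one(workers, cur):
    start = cur
    while True:
        q = workers[cur]
        cur = (cur + 1) % len(workers)  # advance before checking, as above
        try:
            return q.get(block=False), cur
        except queue.Empty:
            pass
        if cur == start:
            return None, cur            # one full cycle with nothing to read

result, cur = read_one(workers, 0)
print(result)  # result-from-worker-1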
155 lib/ansible/executor/process/worker.py Normal file
@@ -0,0 +1,155 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from six.moves import queue
import multiprocessing
import os
import signal
import sys
import time
import traceback

HAS_ATFORK=True
try:
    from Crypto.Random import atfork
except ImportError:
    HAS_ATFORK=False

from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.executor.task_executor import TaskExecutor
from ansible.executor.task_result import TaskResult
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task

from ansible.utils.debug import debug

__all__ = ['WorkerProcess']


class WorkerProcess(multiprocessing.Process):
    '''
    The worker thread class, which uses TaskExecutor to run tasks
    read from a job queue and pushes results into a results queue
    for reading later.
    '''

    def __init__(self, tqm, main_q, rslt_q, loader):

        # takes a task queue manager as the sole param:
        self._main_q = main_q
        self._rslt_q = rslt_q
        self._loader = loader

        # dupe stdin, if we have one
        self._new_stdin = sys.stdin
        try:
            fileno = sys.stdin.fileno()
            if fileno is not None:
                try:
                    self._new_stdin = os.fdopen(os.dup(fileno))
                except OSError, e:
                    # couldn't dupe stdin, most likely because it's
                    # not a valid file descriptor, so we just rely on
                    # using the one that was passed in
                    pass
        except ValueError:
            # couldn't get stdin's fileno, so we just carry on
            pass

        super(WorkerProcess, self).__init__()

    def run(self):
        '''
        Called when the process is started, and loops indefinitely
        until an error is encountered (typically an IOError from the
        queue pipe being disconnected). During the loop, we attempt
        to pull tasks off the job queue and run them, pushing the result
        onto the results queue. We also remove the host from the blocked
        hosts list, to signify that they are ready for their next task.
        '''

        if HAS_ATFORK:
            atfork()

        while True:
            task = None
            try:
                if not self._main_q.empty():
                    debug("there's work to be done!")
                    (host, task, basedir, job_vars, connection_info, shared_loader_obj) = self._main_q.get(block=False)
                    debug("got a task/handler to work on: %s" % task)

                    # because the task queue manager starts workers (forks) before the
                    # playbook is loaded, set the basedir of the loader inherited by
                    # this fork now so that we can find files correctly
                    self._loader.set_basedir(basedir)

                    # Serializing/deserializing tasks does not preserve the loader attribute,
                    # since it is passed to the worker during the forking of the process and
                    # would be wasteful to serialize. So we set it here on the task now, and
                    # the task handles updating parent/child objects as needed.
                    task.set_loader(self._loader)

                    # apply the given task's information to the connection info,
                    # which may override some fields already set by the play or
                    # the options specified on the command line
                    new_connection_info = connection_info.set_task_override(task)

                    # execute the task and build a TaskResult from the result
                    debug("running TaskExecutor() for %s/%s" % (host, task))
                    executor_result = TaskExecutor(host, task, job_vars, new_connection_info, self._new_stdin, self._loader, shared_loader_obj).run()
                    debug("done running TaskExecutor() for %s/%s" % (host, task))
                    task_result = TaskResult(host, task, executor_result)

                    # put the result on the result queue
                    debug("sending task result")
                    self._rslt_q.put(task_result, block=False)
                    debug("done sending task result")

                else:
                    time.sleep(0.1)

            except queue.Empty:
                pass
            except (IOError, EOFError, KeyboardInterrupt):
                break
            except AnsibleConnectionFailure:
                try:
                    if task:
                        task_result = TaskResult(host, task, dict(unreachable=True))
                        self._rslt_q.put(task_result, block=False)
                except:
                    # FIXME: most likely an abort, catch those kinds of errors specifically
                    break
            except Exception, e:
                debug("WORKER EXCEPTION: %s" % e)
                debug("WORKER EXCEPTION: %s" % traceback.format_exc())
                try:
                    if task:
                        task_result = TaskResult(host, task, dict(failed=True, exception=traceback.format_exc(), stdout=''))
                        self._rslt_q.put(task_result, block=False)
                except:
                    # FIXME: most likely an abort, catch those kinds of errors specifically
                    break

        debug("WORKER PROCESS EXITING")

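The stdin-duplication guard in WorkerProcess.__init__() can likewise be tried standalone; this sketch keeps the same fallback structure, rewritten in Python 3 syntax (except OSError: rather than except OSError, e:):

# Standalone sketch of the stdin duplication guard: try to dup stdin and
# silently fall back to the inherited handle when there is no usable fd.
import os
import sys

new_stdin = sys.stdin
try:
    fileno = sys.stdin.fileno()
    if fileno is not None:
        try:
            new_stdin = os.fdopen(os.dup(fileno))
        except OSError:
            pass  # not a valid file descriptor; keep the inherited stdin
except ValueError:
    pass  # stdin has no fileno (e.g. it was replaced by a StringIO)

print(new_stdin is sys.stdin)  # False when the dup succeeded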
51 lib/ansible/executor/stats.py Normal file
@@ -0,0 +1,51 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

class AggregateStats:
    ''' holds stats about per-host activity during playbook runs '''

    def __init__(self):

        self.processed = {}
        self.failures = {}
        self.ok = {}
        self.dark = {}
        self.changed = {}
        self.skipped = {}

    def increment(self, what, host):
        ''' helper function to bump a statistic '''

        self.processed[host] = 1
        prev = (getattr(self, what)).get(host, 0)
        getattr(self, what)[host] = prev+1

    def summarize(self, host):
        ''' return information about a particular host '''

        return dict(
            ok = self.ok.get(host, 0),
            failures = self.failures.get(host, 0),
            unreachable = self.dark.get(host,0),
            changed = self.changed.get(host, 0),
            skipped = self.skipped.get(host, 0)
        )
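A brief usage sketch for AggregateStats, assuming the class above is importable ('web1' is an arbitrary host name): increment() bumps a per-host counter by attribute name and marks the host as processed, and summarize() rolls the counters up into the dict the PLAY RECAP output is built from.

stats = AggregateStats()
stats.increment('ok', 'web1')
stats.increment('ok', 'web1')
stats.increment('failures', 'web1')

print(stats.summarize('web1'))
# {'ok': 2, 'failures': 1, 'unreachable': 0, 'changed': 0, 'skipped': 0}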
454 lib/ansible/executor/task_executor.py Normal file
@@ -0,0 +1,454 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json
import pipes
import subprocess
import sys
import time

from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.executor.connection_info import ConnectionInformation
from ansible.playbook.conditional import Conditional
from ansible.playbook.task import Task
from ansible.plugins import lookup_loader, connection_loader, action_loader
from ansible.template import Templar
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.unicode import to_unicode

from ansible.utils.debug import debug

__all__ = ['TaskExecutor']

class TaskExecutor:

    '''
    This is the main worker class for the executor pipeline, which
    handles loading an action plugin to actually dispatch the task to
    a given host. This class roughly corresponds to the old Runner()
    class.
    '''

    def __init__(self, host, task, job_vars, connection_info, new_stdin, loader, shared_loader_obj):
        self._host = host
        self._task = task
        self._job_vars = job_vars
        self._connection_info = connection_info
        self._new_stdin = new_stdin
        self._loader = loader
        self._shared_loader_obj = shared_loader_obj

    def run(self):
        '''
        The main executor entrypoint, where we determine if the specified
        task requires looping and either runs the task with the loop items
        (via _run_loop) or executes it directly (via _execute).
        '''

        debug("in run()")

        try:
            # lookup plugins need to know if this task is executing from
            # a role, so that it can properly find files/templates/etc.
            roledir = None
            if self._task._role:
                roledir = self._task._role._role_path
            self._job_vars['roledir'] = roledir

            items = self._get_loop_items()
            if items is not None:
                if len(items) > 0:
                    item_results = self._run_loop(items)

                    # loop through the item results, and remember the changed/failed
                    # result flags based on any item there.
                    changed = False
                    failed  = False
                    for item in item_results:
                        if 'changed' in item:
                            changed = True
                        if 'failed' in item:
                            failed = True

                    # create the overall result item, and set the changed/failed
                    # flags there to reflect the overall result of the loop
                    res = dict(results=item_results)

                    if changed:
                        res['changed'] = True

                    if failed:
                        res['failed'] = True
                        res['msg'] = 'One or more items failed'
                    else:
                        res['msg'] = 'All items completed'
                else:
                    res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
            else:
                debug("calling self._execute()")
                res = self._execute()
                debug("_execute() done")

            # make sure changed is set in the result, if it's not present
            if 'changed' not in res:
                res['changed'] = False

            debug("dumping result to json")
            result = json.dumps(res)
            debug("done dumping result, returning")
            return result
        except AnsibleError as e:
            return dict(failed=True, msg=to_unicode(e, nonstring='simplerepr'))

    def _get_loop_items(self):
        '''
        Loads a lookup plugin to handle the with_* portion of a task (if specified),
        and returns the items result.
        '''

        items = None
        if self._task.loop and self._task.loop in lookup_loader:
            loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, variables=self._job_vars, loader=self._loader)
            items = lookup_loader.get(self._task.loop, loader=self._loader).run(terms=loop_terms, variables=self._job_vars)

        return items

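    # Example (hypothetical task): a task written with "with_items: [a, b, c]"
    # arrives here with self._task.loop == 'items' and loop_args == ['a', 'b', 'c'],
    # so the 'items' lookup simply hands back the flattened list:
    #
    #   items = self._get_loop_items()
    #   # -> ['a', 'b', 'c']
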
    def _run_loop(self, items):
        '''
        Runs the task with the loop items specified and collates the result
        into an array named 'results' which is inserted into the final result
        along with the item for which the loop ran.
        '''

        results = []

        # make copies of the job vars and task so we can add the item to
        # the variables and re-validate the task with the item variable
        task_vars = self._job_vars.copy()

        items = self._squash_items(items, task_vars)
        for item in items:
            task_vars['item'] = item

            try:
                tmp_task = self._task.copy()
            except AnsibleParserError as e:
                results.append(dict(failed=True, msg=str(e)))
                continue

            # now we swap the internal task with the copy, execute,
            # and swap them back so we can do the next iteration cleanly
            (self._task, tmp_task) = (tmp_task, self._task)
            res = self._execute(variables=task_vars)
            (self._task, tmp_task) = (tmp_task, self._task)

            # now update the result with the item info, and append the result
            # to the list of results
            res['item'] = item
            results.append(res)

            # FIXME: we should be sending back a callback result for each item in the loop here
            print(res)

        return results

    def _squash_items(self, items, variables):
        '''
        Squash items down to a comma-separated list for certain modules which support it
        (typically package management modules).
        '''

        if len(items) > 0 and self._task.action in ('apt', 'yum', 'pkgng', 'zypper'):
            final_items = []
            for item in items:
                variables['item'] = item
                if self._task.evaluate_conditional(variables):
                    final_items.append(item)
            return [",".join(final_items)]
        else:
            return items

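    # Example (hypothetical items): for a package module, a loop over
    # ['httpd', 'mariadb-server', 'php'] is squashed to a single invocation:
    #
    #   self._squash_items(['httpd', 'mariadb-server', 'php'], task_vars)
    #   # -> ['httpd,mariadb-server,php']
    #
    # so the module runs once with a comma-separated name list instead of
    # once per item.
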
    def _execute(self, variables=None):
        '''
        The primary workhorse of the executor system, this runs the task
        on the specified host (which may be the delegated_to host) and handles
        the retry/until and block rescue/always execution.
        '''

        if variables is None:
            variables = self._job_vars

        templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)

        # fields set from the play/task may be based on variables, so we have to
        # do the same kind of post validation step on it here before we use it.
        self._connection_info.post_validate(templar=templar)

        # now that the connection information is finalized, we can add 'magic'
        # variables to the variable dictionary
        self._connection_info.update_vars(variables)

        # get the connection and the handler for this execution
        self._connection = self._get_connection(variables)
        self._handler    = self._get_action_handler(connection=self._connection)

        # Evaluate the conditional (if any) for this task, which we do before running
        # the final task post-validation. We do this before the post validation due to
        # the fact that the conditional may specify that the task be skipped due to a
        # variable not being present which would otherwise cause validation to fail
        if not self._task.evaluate_conditional(variables):
            debug("when evaluation failed, skipping this task")
            return dict(changed=False, skipped=True, skip_reason='Conditional check failed')

        # Now we do final validation on the task, which sets all fields to their final values
        self._task.post_validate(templar=templar)

        # if this task is a TaskInclude, we just return now with a success code so the
        # main thread can expand the task list for the given host
        if self._task.action == 'include':
            include_variables = self._task.args.copy()
            include_file = include_variables.get('_raw_params')
            del include_variables['_raw_params']
            return dict(changed=True, include=include_file, include_variables=include_variables)

        # And filter out any fields which were set to default(omit), and got the omit token value
        omit_token = variables.get('omit')
        if omit_token is not None:
            self._task.args = dict(filter(lambda x: x[1] != omit_token, self._task.args.items()))
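
        # Example (hypothetical token and args): with omit_token '__omit_x' and
        # args {'path': '/tmp/x', 'mode': '__omit_x'}, the filter above leaves
        # {'path': '/tmp/x'}, so omitted parameters never reach the module.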

        # Read some values from the task, so that we can modify them if need be
        retries = self._task.retries
        if retries <= 0:
            retries = 1

        delay = self._task.delay
        if delay < 0:
            delay = 1

        # make a copy of the job vars here, in case we need to update them
        # with the registered variable value later on when testing conditions
        vars_copy = variables.copy()

        debug("starting attempt loop")
        result = None
        for attempt in range(retries):
            if attempt > 0:
                # FIXME: this should use the callback/message passing mechanism
                print("FAILED - RETRYING: %s (%d retries left)" % (self._task, retries - attempt))
                result['attempts'] = attempt + 1

            debug("running the handler")
            result = self._handler.run(task_vars=variables)
            debug("handler run complete")

            if self._task.async > 0:
                # the async_wrapper module returns dumped JSON via its stdout
                # response, so we parse it here and replace the result
                try:
                    result = json.loads(result.get('stdout'))
                except ValueError as e:
                    return dict(failed=True, msg="The async task did not return valid JSON: %s" % str(e))

                if self._task.poll > 0:
                    result = self._poll_async_result(result=result)

            # update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            if self._task.register:
                vars_copy[self._task.register] = result

            if 'ansible_facts' in result:
                vars_copy.update(result['ansible_facts'])

            # create a conditional object to evaluate task conditions
            cond = Conditional(loader=self._loader)

            # FIXME: make sure until is mutually exclusive with changed_when/failed_when
            if self._task.until:
                cond.when = self._task.until
                if cond.evaluate_conditional(vars_copy):
                    break
            elif (self._task.changed_when or self._task.failed_when) and 'skipped' not in result:
                if self._task.changed_when:
                    cond.when = [ self._task.changed_when ]
                    result['changed'] = cond.evaluate_conditional(vars_copy)
                if self._task.failed_when:
                    cond.when = [ self._task.failed_when ]
                    failed_when_result = cond.evaluate_conditional(vars_copy)
                    result['failed_when_result'] = result['failed'] = failed_when_result
                    if failed_when_result:
                        break
            elif 'failed' not in result and result.get('rc', 0) == 0:
                # if the result is not failed, stop trying
                break

            if attempt < retries - 1:
                time.sleep(delay)

        # do the final update of the local variables here, for both registered
        # values and any facts which may have been created
        if self._task.register:
            variables[self._task.register] = result

        if 'ansible_facts' in result:
            variables.update(result['ansible_facts'])

        # and return
        debug("attempt loop complete, returning result")
        return result

    def _poll_async_result(self, result):
        '''
        Polls for the specified JID to be complete
        '''

        async_jid = result.get('ansible_job_id')
        if async_jid is None:
            return dict(failed=True, msg="No job id was returned by the async task")

        # Create a new pseudo-task to run the async_status module, and run
        # that (with a sleep for "poll" seconds between each retry) until the
        # async time limit is exceeded.

        async_task = Task().load(dict(action='async_status jid=%s' % async_jid))

        # Because this is an async task, the action handler is async. However,
        # we need the 'normal' action handler for the status check, so get it
        # now via the action_loader
        normal_handler = action_loader.get(
            'normal',
            task=async_task,
            connection=self._connection,
            connection_info=self._connection_info,
            loader=self._loader,
            shared_loader_obj=self._shared_loader_obj,
        )

        time_left = self._task.async
        while time_left > 0:
            time.sleep(self._task.poll)

            async_result = normal_handler.run()
            if int(async_result.get('finished', 0)) == 1 or 'failed' in async_result or 'skipped' in async_result:
                break

            time_left -= self._task.poll

        if int(async_result.get('finished', 0)) != 1:
            return dict(failed=True, msg="async task did not complete within the requested time")
        else:
            return async_result

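    # Example (hypothetical values): with async: 45 and poll: 5, the loop above
    # sleeps five seconds per pass and runs async_status at most nine times
    # before giving up:
    #
    #   time_left = 45   # 45, 40, 35, ... 5 -> nine status checks in total
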
    def _get_connection(self, variables):
        '''
        Reads the connection property for the host, and returns the
        correct connection object from the list of connection plugins
        '''

        # FIXME: delegate_to calculation should be done here
        # FIXME: calculation of connection params/auth stuff should be done here

        self._connection_info.remote_addr = self._host.ipv4_address
        if self._task.delegate_to is not None:
            self._compute_delegate(variables)

        conn_type = self._connection_info.connection
        if conn_type == 'smart':
            conn_type = 'ssh'
            if sys.platform.startswith('darwin') and self._connection_info.remote_pass:
                # due to a current bug in sshpass on OSX, which can trigger
                # a kernel panic even for non-privileged users, we revert to
                # paramiko on that OS when a SSH password is specified
                conn_type = "paramiko"
            else:
                # see if SSH can support ControlPersist; if not, use paramiko
                cmd = subprocess.Popen(['ssh', '-o', 'ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                (out, err) = cmd.communicate()
                if "Bad configuration option" in err:
                    conn_type = "paramiko"

        connection = connection_loader.get(conn_type, self._connection_info, self._new_stdin)
        if not connection:
            raise AnsibleError("the connection plugin '%s' was not found" % conn_type)

        return connection

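    # In effect, 'smart' resolves as follows (a condensed restatement of the
    # branch above, not a separate code path):
    #
    #   OS X and an SSH password in use   -> 'paramiko'
    #   ssh lacks ControlPersist support  -> 'paramiko'
    #   otherwise                         -> 'ssh'
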
    def _get_action_handler(self, connection):
        '''
        Returns the correct action plugin to handle the requested task action
        '''

        if self._task.action in action_loader:
            if self._task.async != 0:
                raise AnsibleError("async mode is not supported with the %s module" % self._task.action)
            handler_name = self._task.action
        elif self._task.async == 0:
            handler_name = 'normal'
        else:
            handler_name = 'async'

        handler = action_loader.get(
            handler_name,
            task=self._task,
            connection=connection,
            connection_info=self._connection_info,
            loader=self._loader,
            shared_loader_obj=self._shared_loader_obj,
        )

        if not handler:
            raise AnsibleError("the handler '%s' was not found" % handler_name)

        return handler

    def _compute_delegate(self, variables):

        # get the vars for the delegate by its name
        try:
            this_info = variables['hostvars'][self._task.delegate_to]
        except:
            # make sure the inject is empty for non-inventory hosts
            this_info = {}

        # get the real ssh_address for the delegate and allow ansible_ssh_host to be templated
        #self._connection_info.remote_user = self._compute_delegate_user(self.delegate_to, delegate['inject'])
        self._connection_info.remote_addr      = this_info.get('ansible_ssh_host', self._task.delegate_to)
        self._connection_info.port             = this_info.get('ansible_ssh_port', self._connection_info.port)
        self._connection_info.password         = this_info.get('ansible_ssh_pass', self._connection_info.password)
        self._connection_info.private_key_file = this_info.get('ansible_ssh_private_key_file', self._connection_info.private_key_file)
        self._connection_info.connection       = this_info.get('ansible_connection', self._connection_info.connection)
        self._connection_info.become_pass      = this_info.get('ansible_sudo_pass', self._connection_info.become_pass)

        if self._connection_info.remote_addr in ('127.0.0.1', 'localhost'):
            self._connection_info.connection = 'local'

        # Last chance to get private_key_file from global variables.
        # this is useful if delegated host is not defined in the inventory
        #if delegate['private_key_file'] is None:
        #    delegate['private_key_file'] = remote_inject.get('ansible_ssh_private_key_file', None)

        #if delegate['private_key_file'] is not None:
        #    delegate['private_key_file'] = os.path.expanduser(delegate['private_key_file'])

        for i in this_info:
            if i.startswith("ansible_") and i.endswith("_interpreter"):
                variables[i] = this_info[i]

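    # Example (hypothetical hostvars entry): delegating to a host defined with
    #
    #   this_info = {'ansible_ssh_host': '10.0.0.5', 'ansible_ssh_port': 2222}
    #
    # rewrites remote_addr to '10.0.0.5' and port to 2222, while fields absent
    # from hostvars keep their existing connection values.
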
233
lib/ansible/executor/task_queue_manager.py
Normal file
@@ -0,0 +1,233 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import multiprocessing
import os
import socket
import sys

from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.connection_info import ConnectionInformation
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.process.result import ResultProcess
from ansible.executor.stats import AggregateStats
from ansible.plugins import callback_loader, strategy_loader
from ansible.template import Templar

from ansible.utils.debug import debug

__all__ = ['TaskQueueManager']

class TaskQueueManager:

    '''
    This class handles the multiprocessing requirements of Ansible by
    creating a pool of worker forks, a result handler fork, and a
    manager object with shared data structures/queues for coordinating
    work between all processes.

    The queue manager is responsible for loading the play strategy plugin,
    which dispatches the Play's tasks to hosts.
    '''

    def __init__(self, inventory, variable_manager, loader, display, options, passwords, stdout_callback=None):

        self._inventory        = inventory
        self._variable_manager = variable_manager
        self._loader           = loader
        self._display          = display
        self._options          = options
        self._stats            = AggregateStats()
        self.passwords         = passwords

        # a special flag to help us exit cleanly
        self._terminated = False

        # this dictionary is used to keep track of notified handlers
        self._notified_handlers = dict()

        # dictionaries to keep track of failed/unreachable hosts
        self._failed_hosts      = dict()
        self._unreachable_hosts = dict()

        self._final_q = multiprocessing.Queue()

        # load callback plugins
        self._callback_plugins = self._load_callbacks(stdout_callback)

        # create the pool of worker processes, based on the number of forks specified
        try:
            fileno = sys.stdin.fileno()
        except ValueError:
            fileno = None

        self._workers = []
        for i in range(self._options.forks):
            main_q = multiprocessing.Queue()
            rslt_q = multiprocessing.Queue()

            prc = WorkerProcess(self, main_q, rslt_q, loader)
            prc.start()

            self._workers.append((prc, main_q, rslt_q))

        self._result_prc = ResultProcess(self._final_q, self._workers)
        self._result_prc.start()

    def _initialize_notified_handlers(self, handlers):
        '''
        Clears and initializes the shared notified handlers dict with entries
        for each handler in the play, which is an empty array that will contain
        inventory hostnames for those hosts triggering the handler.
        '''

        # Zero the dictionary first by removing any entries there.
        # Proxied dicts don't support iteritems, so we have to use keys()
        for key in self._notified_handlers.keys():
            del self._notified_handlers[key]

        # FIXME: there is a block compile helper for this...
        handler_list = []
        for handler_block in handlers:
            for handler in handler_block.block:
                handler_list.append(handler)

        # then initialize it with the handler names from the handler list
        for handler in handler_list:
            self._notified_handlers[handler.get_name()] = []

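    # Example (hypothetical handler names): after initialization for a play with
    # two handlers, the shared dict looks like
    #
    #   {'restart apache': [], 'reload firewall': []}
    #
    # and hosts that notify a handler are later appended to the matching list.
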
    def _load_callbacks(self, stdout_callback):
        '''
        Loads all available callbacks, with the exception of those which
        utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout',
        only one such callback plugin will be loaded.
        '''

        loaded_plugins = []

        stdout_callback_loaded = False
        if stdout_callback is None:
            stdout_callback = C.DEFAULT_STDOUT_CALLBACK

        if stdout_callback not in callback_loader:
            raise AnsibleError("Invalid callback for stdout specified: %s" % stdout_callback)

        for callback_plugin in callback_loader.all(class_only=True):
            if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
                # we only allow one callback of type 'stdout' to be loaded, so check
                # the name of the current plugin and type to see if we need to skip
                # loading this callback plugin
                callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', None)
                (callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
                if callback_type == 'stdout':
                    if callback_name != stdout_callback or stdout_callback_loaded:
                        continue
                    stdout_callback_loaded = True

                loaded_plugins.append(callback_plugin(self._display))
            else:
                loaded_plugins.append(callback_plugin())

        return loaded_plugins

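    # Rough sketch (hypothetical plugin; attribute names mirror what the loader
    # checks for above) of a v2 stdout callback this method would load:
    #
    #   class CallbackModule:
    #       CALLBACK_VERSION = 2.0
    #       CALLBACK_TYPE    = 'stdout'   # only one 'stdout' plugin is loaded
    #
    #       def __init__(self, display):
    #           self._display = display
    #           self.disabled = False     # set True to opt out of callbacks
    #
    #       def v2_playbook_on_play_start(self, play):
    #           self._display.display("PLAY [%s]" % play.get_name())
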
    def run(self, play):
        '''
        Iterates over the roles/tasks in a play, using the given (or default)
        strategy for queueing tasks. The default is the linear strategy, which
        operates like classic Ansible by keeping all hosts in lock-step with
        a given task (meaning no hosts move on to the next task until all hosts
        are done with the current task).
        '''

        all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
        templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=False)

        new_play = play.copy()
        new_play.post_validate(templar)

        connection_info = ConnectionInformation(new_play, self._options, self.passwords)
        for callback_plugin in self._callback_plugins:
            if hasattr(callback_plugin, 'set_connection_info'):
                callback_plugin.set_connection_info(connection_info)

        self.send_callback('v2_playbook_on_play_start', new_play)

        # initialize the shared dictionary containing the notified handlers
        self._initialize_notified_handlers(new_play.handlers)

        # load the specified strategy (or the default linear one)
        strategy = strategy_loader.get(new_play.strategy, self)
        if strategy is None:
            raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)

        # build the iterator
        iterator = PlayIterator(inventory=self._inventory, play=new_play, connection_info=connection_info, all_vars=all_vars)

        # and run the play using the strategy
        return strategy.run(iterator, connection_info)

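    # Example (hypothetical driver; the inventory, variable_manager, loader,
    # display, options and passwords objects are assumed to be built elsewhere):
    #
    #   tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager,
    #                          loader=loader, display=display, options=options,
    #                          passwords=dict(conn_pass=None, become_pass=None))
    #   try:
    #       result = tqm.run(play)   # returns the strategy's result code
    #   finally:
    #       tqm.cleanup()
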
    def cleanup(self):
        debug("RUNNING CLEANUP")

        self.terminate()

        self._final_q.close()
        self._result_prc.terminate()

        for (worker_prc, main_q, rslt_q) in self._workers:
            rslt_q.close()
            main_q.close()
            worker_prc.terminate()

    def get_inventory(self):
        return self._inventory

    def get_variable_manager(self):
        return self._variable_manager

    def get_loader(self):
        return self._loader

    def get_notified_handlers(self):
        return self._notified_handlers

    def get_workers(self):
        return self._workers[:]

    def terminate(self):
        self._terminated = True

    def send_callback(self, method_name, *args, **kwargs):
        for callback_plugin in self._callback_plugins:
            # a plugin that sets self.disabled to True will not be called
            # (see the osx_say.py example for such a plugin)
            if getattr(callback_plugin, 'disabled', False):
                continue
            methods = [
                getattr(callback_plugin, method_name, None),
                getattr(callback_plugin, 'on_any', None)
            ]
            for method in methods:
                if method is not None:
                    method(*args, **kwargs)

61
lib/ansible/executor/task_result.py
Normal file
@@ -0,0 +1,61 @@
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.parsing import DataLoader


class TaskResult:
    '''
    This class is responsible for interpreting the resulting data
    from an executed task, and provides helper methods for determining
    the result of a given task.
    '''

    def __init__(self, host, task, return_data):
        self._host = host
        self._task = task
        if isinstance(return_data, dict):
            self._result = return_data.copy()
        else:
            self._result = DataLoader().load(return_data)

    def is_changed(self):
        return self._check_key('changed')

    def is_skipped(self):
        return self._check_key('skipped')

    def is_failed(self):
        if 'failed_when_result' in self._result:
            return self._check_key('failed_when_result')
        else:
            return self._check_key('failed') or self._result.get('rc', 0) != 0

    def is_unreachable(self):
        return self._check_key('unreachable')

    def _check_key(self, key):
        if 'results' in self._result:
            flag = False
            for res in self._result.get('results', []):
                if isinstance(res, dict):
                    flag |= res.get(key, False)
            return flag
        else:
            return self._result.get(key, False)
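
# Example (hypothetical loop result): per-item flags are folded together, so
# any changed item marks the whole task as changed:
#
#   data = dict(results=[dict(changed=False), dict(changed=True, rc=0)])
#   TaskResult(host, task, data).is_changed()
#   # -> True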